1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1982, 1986, 1989, 1991, 1993 5 * The Regents of the University of California. All Rights Reserved. 6 * Copyright (c) 2004-2009 Robert N. M. Watson All Rights Reserved. 7 * Copyright (c) 2018 Matthew Macy 8 * Copyright (c) 2022-2025 Gleb Smirnoff <glebius@FreeBSD.org> 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 */ 34 35 /* 36 * UNIX Domain (Local) Sockets 37 * 38 * This is an implementation of UNIX (local) domain sockets. Each socket has 39 * an associated struct unpcb (UNIX protocol control block). Stream sockets 40 * may be connected to 0 or 1 other socket. Datagram sockets may be 41 * connected to 0, 1, or many other sockets. Sockets may be created and 42 * connected in pairs (socketpair(2)), or bound/connected to using the file 43 * system name space. For most purposes, only the receive socket buffer is 44 * used, as sending on one socket delivers directly to the receive socket 45 * buffer of a second socket. 46 * 47 * The implementation is substantially complicated by the fact that 48 * "ancillary data", such as file descriptors or credentials, may be passed 49 * across UNIX domain sockets. The potential for passing UNIX domain sockets 50 * over other UNIX domain sockets requires the implementation of a simple 51 * garbage collector to find and tear down cycles of disconnected sockets. 
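 *
 * For illustration, a minimal userland sketch of the two creation paths
 * described above (the path name below is just an example):
 *
 *	int sv[2], s;
 *	struct sockaddr_un sun = { .sun_family = AF_LOCAL };
 *
 *	socketpair(PF_LOCAL, SOCK_STREAM, 0, sv);
 *
 *	s = socket(PF_LOCAL, SOCK_STREAM, 0);
 *	strlcpy(sun.sun_path, "/tmp/example.sock", sizeof(sun.sun_path));
 *	sun.sun_len = SUN_LEN(&sun);
 *	bind(s, (struct sockaddr *)&sun, SUN_LEN(&sun));
 *
 * The first call yields an already connected pair; the second path binds a
 * socket into the file system name space, where a peer may later reach it
 * with connect(2).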
 *
 * TODO:
 *	RDM
 *	rethink name space problems
 *	need a proper out-of-band
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
#include <sys/un.h>
#include <sys/unpcb.h>
#include <sys/vnode.h>

#include <net/vnet.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <security/mac/mac_framework.h>

#include <vm/uma.h>

MALLOC_DECLARE(M_FILECAPS);

static struct domain localdomain;

static uma_zone_t	unp_zone;
static unp_gen_t	unp_gencnt;	/* (l) */
static u_int		unp_count;	/* (l) Count of local sockets. */
static ino_t		unp_ino;	/* Prototype for fake inode numbers. */
static int		unp_rights;	/* (g) File descriptors in flight. */
static struct unp_head	unp_shead;	/* (l) List of stream sockets. */
static struct unp_head	unp_dhead;	/* (l) List of datagram sockets. */
static struct unp_head	unp_sphead;	/* (l) List of seqpacket sockets. */
static struct mtx_pool	*unp_vp_mtxpool;

struct unp_defer {
	SLIST_ENTRY(unp_defer) ud_link;
	struct file *ud_fp;
};
static SLIST_HEAD(, unp_defer) unp_defers;
static int unp_defers_count;

static const struct sockaddr	sun_noname = {
	.sa_len = sizeof(sun_noname),
	.sa_family = AF_LOCAL,
};

/*
 * Garbage collection of cyclic file descriptor/socket references occurs
 * asynchronously in a taskqueue context in order to avoid recursion and
 * reentrance in the UNIX domain socket, file descriptor, and socket layer
 * code.  See unp_gc() for a full description.
 */
static struct timeout_task unp_gc_task;

/*
 * The close of UNIX domain sockets attached as SCM_RIGHTS is postponed to
 * the taskqueue, to avoid arbitrary recursion depth.  The attached sockets
 * might themselves have other sockets attached.
 */
static struct task	unp_defer_task;

/*
 * SOCK_STREAM and SOCK_SEQPACKET unix(4) sockets fully bypass the send
 * buffer, however the notion of a send buffer still makes sense for them.
 * Its size is the amount of space that a send(2) syscall may copyin(9)
 * before checking with the receive buffer of the peer.  Although not linked
 * anywhere and pointed to only by a stack variable, it is effectively a
 * buffer that needs to be sized.
 *
 * SOCK_DGRAM sockets really use the sendspace as the maximum datagram size,
 * and don't really want to reserve the sendspace.  Their recvspace should be
 * large enough for at least one max-size datagram plus address.
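 *
 * Both groups of defaults are exported as sysctls below; as an illustrative
 * invocation (values are examples only), they can be inspected or tuned at
 * runtime:
 *
 *	sysctl net.local.stream.sendspace
 *	sysctl net.local.dgram.maxdgram=16384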
156 */ 157 #ifndef PIPSIZ 158 #define PIPSIZ 8192 159 #endif 160 static u_long unpst_sendspace = PIPSIZ; 161 static u_long unpst_recvspace = PIPSIZ; 162 static u_long unpdg_maxdgram = 8*1024; /* support 8KB syslog msgs */ 163 static u_long unpdg_recvspace = 16*1024; 164 static u_long unpsp_sendspace = PIPSIZ; 165 static u_long unpsp_recvspace = PIPSIZ; 166 167 static SYSCTL_NODE(_net, PF_LOCAL, local, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 168 "Local domain"); 169 static SYSCTL_NODE(_net_local, SOCK_STREAM, stream, 170 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 171 "SOCK_STREAM"); 172 static SYSCTL_NODE(_net_local, SOCK_DGRAM, dgram, 173 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 174 "SOCK_DGRAM"); 175 static SYSCTL_NODE(_net_local, SOCK_SEQPACKET, seqpacket, 176 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 177 "SOCK_SEQPACKET"); 178 179 SYSCTL_ULONG(_net_local_stream, OID_AUTO, sendspace, CTLFLAG_RW, 180 &unpst_sendspace, 0, "Default stream send space."); 181 SYSCTL_ULONG(_net_local_stream, OID_AUTO, recvspace, CTLFLAG_RW, 182 &unpst_recvspace, 0, "Default stream receive space."); 183 SYSCTL_ULONG(_net_local_dgram, OID_AUTO, maxdgram, CTLFLAG_RW, 184 &unpdg_maxdgram, 0, "Maximum datagram size."); 185 SYSCTL_ULONG(_net_local_dgram, OID_AUTO, recvspace, CTLFLAG_RW, 186 &unpdg_recvspace, 0, "Default datagram receive space."); 187 SYSCTL_ULONG(_net_local_seqpacket, OID_AUTO, maxseqpacket, CTLFLAG_RW, 188 &unpsp_sendspace, 0, "Default seqpacket send space."); 189 SYSCTL_ULONG(_net_local_seqpacket, OID_AUTO, recvspace, CTLFLAG_RW, 190 &unpsp_recvspace, 0, "Default seqpacket receive space."); 191 SYSCTL_INT(_net_local, OID_AUTO, inflight, CTLFLAG_RD, &unp_rights, 0, 192 "File descriptors in flight."); 193 SYSCTL_INT(_net_local, OID_AUTO, deferred, CTLFLAG_RD, 194 &unp_defers_count, 0, 195 "File descriptors deferred to taskqueue for close."); 196 197 /* 198 * Locking and synchronization: 199 * 200 * Several types of locks exist in the local domain socket implementation: 201 * - a global linkage lock 202 * - a global connection list lock 203 * - the mtxpool lock 204 * - per-unpcb mutexes 205 * 206 * The linkage lock protects the global socket lists, the generation number 207 * counter and garbage collector state. 208 * 209 * The connection list lock protects the list of referring sockets in a datagram 210 * socket PCB. This lock is also overloaded to protect a global list of 211 * sockets whose buffers contain socket references in the form of SCM_RIGHTS 212 * messages. To avoid recursion, such references are released by a dedicated 213 * thread. 214 * 215 * The mtxpool lock protects the vnode from being modified while referenced. 216 * Lock ordering rules require that it be acquired before any PCB locks. 217 * 218 * The unpcb lock (unp_mtx) protects the most commonly referenced fields in the 219 * unpcb. This includes the unp_conn field, which either links two connected 220 * PCBs together (for connected socket types) or points at the destination 221 * socket (for connectionless socket types). The operations of creating or 222 * destroying a connection therefore involve locking multiple PCBs. To avoid 223 * lock order reversals, in some cases this involves dropping a PCB lock and 224 * using a reference counter to maintain liveness. 225 * 226 * UNIX domain sockets each have an unpcb hung off of their so_pcb pointer, 227 * allocated in pr_attach() and freed in pr_detach(). The validity of that 228 * pointer is an invariant, so no lock is required to dereference the so_pcb 229 * pointer if a valid socket reference is held by the caller. 
In practice, 230 * this is always true during operations performed on a socket. Each unpcb 231 * has a back-pointer to its socket, unp_socket, which will be stable under 232 * the same circumstances. 233 * 234 * This pointer may only be safely dereferenced as long as a valid reference 235 * to the unpcb is held. Typically, this reference will be from the socket, 236 * or from another unpcb when the referring unpcb's lock is held (in order 237 * that the reference not be invalidated during use). For example, to follow 238 * unp->unp_conn->unp_socket, you need to hold a lock on unp_conn to guarantee 239 * that detach is not run clearing unp_socket. 240 * 241 * Blocking with UNIX domain sockets is a tricky issue: unlike most network 242 * protocols, bind() is a non-atomic operation, and connect() requires 243 * potential sleeping in the protocol, due to potentially waiting on local or 244 * distributed file systems. We try to separate "lookup" operations, which 245 * may sleep, and the IPC operations themselves, which typically can occur 246 * with relative atomicity as locks can be held over the entire operation. 247 * 248 * Another tricky issue is simultaneous multi-threaded or multi-process 249 * access to a single UNIX domain socket. These are handled by the flags 250 * UNP_CONNECTING and UNP_BINDING, which prevent concurrent connecting or 251 * binding, both of which involve dropping UNIX domain socket locks in order 252 * to perform namei() and other file system operations. 253 */ 254 static struct rwlock unp_link_rwlock; 255 static struct mtx unp_defers_lock; 256 257 #define UNP_LINK_LOCK_INIT() rw_init(&unp_link_rwlock, \ 258 "unp_link_rwlock") 259 260 #define UNP_LINK_LOCK_ASSERT() rw_assert(&unp_link_rwlock, \ 261 RA_LOCKED) 262 #define UNP_LINK_UNLOCK_ASSERT() rw_assert(&unp_link_rwlock, \ 263 RA_UNLOCKED) 264 265 #define UNP_LINK_RLOCK() rw_rlock(&unp_link_rwlock) 266 #define UNP_LINK_RUNLOCK() rw_runlock(&unp_link_rwlock) 267 #define UNP_LINK_WLOCK() rw_wlock(&unp_link_rwlock) 268 #define UNP_LINK_WUNLOCK() rw_wunlock(&unp_link_rwlock) 269 #define UNP_LINK_WLOCK_ASSERT() rw_assert(&unp_link_rwlock, \ 270 RA_WLOCKED) 271 #define UNP_LINK_WOWNED() rw_wowned(&unp_link_rwlock) 272 273 #define UNP_DEFERRED_LOCK_INIT() mtx_init(&unp_defers_lock, \ 274 "unp_defer", NULL, MTX_DEF) 275 #define UNP_DEFERRED_LOCK() mtx_lock(&unp_defers_lock) 276 #define UNP_DEFERRED_UNLOCK() mtx_unlock(&unp_defers_lock) 277 278 #define UNP_REF_LIST_LOCK() UNP_DEFERRED_LOCK(); 279 #define UNP_REF_LIST_UNLOCK() UNP_DEFERRED_UNLOCK(); 280 281 #define UNP_PCB_LOCK_INIT(unp) mtx_init(&(unp)->unp_mtx, \ 282 "unp", "unp", \ 283 MTX_DUPOK|MTX_DEF) 284 #define UNP_PCB_LOCK_DESTROY(unp) mtx_destroy(&(unp)->unp_mtx) 285 #define UNP_PCB_LOCKPTR(unp) (&(unp)->unp_mtx) 286 #define UNP_PCB_LOCK(unp) mtx_lock(&(unp)->unp_mtx) 287 #define UNP_PCB_TRYLOCK(unp) mtx_trylock(&(unp)->unp_mtx) 288 #define UNP_PCB_UNLOCK(unp) mtx_unlock(&(unp)->unp_mtx) 289 #define UNP_PCB_OWNED(unp) mtx_owned(&(unp)->unp_mtx) 290 #define UNP_PCB_LOCK_ASSERT(unp) mtx_assert(&(unp)->unp_mtx, MA_OWNED) 291 #define UNP_PCB_UNLOCK_ASSERT(unp) mtx_assert(&(unp)->unp_mtx, MA_NOTOWNED) 292 293 static int uipc_connect2(struct socket *, struct socket *); 294 static int uipc_ctloutput(struct socket *, struct sockopt *); 295 static int unp_connect(struct socket *, struct sockaddr *, 296 struct thread *); 297 static int unp_connectat(int, struct socket *, struct sockaddr *, 298 struct thread *, bool); 299 static void unp_connect2(struct socket *, struct 
socket *, bool); 300 static void unp_disconnect(struct unpcb *unp, struct unpcb *unp2); 301 static void unp_dispose(struct socket *so); 302 static void unp_drop(struct unpcb *); 303 static void unp_gc(__unused void *, int); 304 static void unp_scan(struct mbuf *, void (*)(struct filedescent **, int)); 305 static void unp_discard(struct file *); 306 static void unp_freerights(struct filedescent **, int); 307 static int unp_internalize(struct mbuf *, struct mchain *, 308 struct thread *); 309 static void unp_internalize_fp(struct file *); 310 static int unp_externalize(struct mbuf *, struct mbuf **, int); 311 static int unp_externalize_fp(struct file *); 312 static void unp_addsockcred(struct thread *, struct mchain *, int); 313 static void unp_process_defers(void * __unused, int); 314 315 static void uipc_wrknl_lock(void *); 316 static void uipc_wrknl_unlock(void *); 317 static void uipc_wrknl_assert_lock(void *, int); 318 319 static void 320 unp_pcb_hold(struct unpcb *unp) 321 { 322 u_int old __unused; 323 324 old = refcount_acquire(&unp->unp_refcount); 325 KASSERT(old > 0, ("%s: unpcb %p has no references", __func__, unp)); 326 } 327 328 static __result_use_check bool 329 unp_pcb_rele(struct unpcb *unp) 330 { 331 bool ret; 332 333 UNP_PCB_LOCK_ASSERT(unp); 334 335 if ((ret = refcount_release(&unp->unp_refcount))) { 336 UNP_PCB_UNLOCK(unp); 337 UNP_PCB_LOCK_DESTROY(unp); 338 uma_zfree(unp_zone, unp); 339 } 340 return (ret); 341 } 342 343 static void 344 unp_pcb_rele_notlast(struct unpcb *unp) 345 { 346 bool ret __unused; 347 348 ret = refcount_release(&unp->unp_refcount); 349 KASSERT(!ret, ("%s: unpcb %p has no references", __func__, unp)); 350 } 351 352 static void 353 unp_pcb_lock_pair(struct unpcb *unp, struct unpcb *unp2) 354 { 355 UNP_PCB_UNLOCK_ASSERT(unp); 356 UNP_PCB_UNLOCK_ASSERT(unp2); 357 358 if (unp == unp2) { 359 UNP_PCB_LOCK(unp); 360 } else if ((uintptr_t)unp2 > (uintptr_t)unp) { 361 UNP_PCB_LOCK(unp); 362 UNP_PCB_LOCK(unp2); 363 } else { 364 UNP_PCB_LOCK(unp2); 365 UNP_PCB_LOCK(unp); 366 } 367 } 368 369 static void 370 unp_pcb_unlock_pair(struct unpcb *unp, struct unpcb *unp2) 371 { 372 UNP_PCB_UNLOCK(unp); 373 if (unp != unp2) 374 UNP_PCB_UNLOCK(unp2); 375 } 376 377 /* 378 * Try to lock the connected peer of an already locked socket. In some cases 379 * this requires that we unlock the current socket. The pairbusy counter is 380 * used to block concurrent connection attempts while the lock is dropped. The 381 * caller must be careful to revalidate PCB state. 382 */ 383 static struct unpcb * 384 unp_pcb_lock_peer(struct unpcb *unp) 385 { 386 struct unpcb *unp2; 387 388 UNP_PCB_LOCK_ASSERT(unp); 389 unp2 = unp->unp_conn; 390 if (unp2 == NULL) 391 return (NULL); 392 if (__predict_false(unp == unp2)) 393 return (unp); 394 395 UNP_PCB_UNLOCK_ASSERT(unp2); 396 397 if (__predict_true(UNP_PCB_TRYLOCK(unp2))) 398 return (unp2); 399 if ((uintptr_t)unp2 > (uintptr_t)unp) { 400 UNP_PCB_LOCK(unp2); 401 return (unp2); 402 } 403 unp->unp_pairbusy++; 404 unp_pcb_hold(unp2); 405 UNP_PCB_UNLOCK(unp); 406 407 UNP_PCB_LOCK(unp2); 408 UNP_PCB_LOCK(unp); 409 KASSERT(unp->unp_conn == unp2 || unp->unp_conn == NULL, 410 ("%s: socket %p was reconnected", __func__, unp)); 411 if (--unp->unp_pairbusy == 0 && (unp->unp_flags & UNP_WAITING) != 0) { 412 unp->unp_flags &= ~UNP_WAITING; 413 wakeup(unp); 414 } 415 if (unp_pcb_rele(unp2)) { 416 /* unp2 is unlocked. 
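 * That was also the last reference to unp2: the peer has been freed by
 * unp_pcb_rele() while we were blocked on its lock, so report to the
 * caller that there is no peer.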
*/ 417 return (NULL); 418 } 419 if (unp->unp_conn == NULL) { 420 UNP_PCB_UNLOCK(unp2); 421 return (NULL); 422 } 423 return (unp2); 424 } 425 426 /* 427 * Try to lock peer of our socket for purposes of sending data to it. 428 */ 429 static int 430 uipc_lock_peer(struct socket *so, struct unpcb **unp2) 431 { 432 struct unpcb *unp; 433 int error; 434 435 unp = sotounpcb(so); 436 UNP_PCB_LOCK(unp); 437 *unp2 = unp_pcb_lock_peer(unp); 438 if (__predict_false(so->so_error != 0)) { 439 error = so->so_error; 440 so->so_error = 0; 441 UNP_PCB_UNLOCK(unp); 442 if (*unp2 != NULL) 443 UNP_PCB_UNLOCK(*unp2); 444 return (error); 445 } 446 if (__predict_false(*unp2 == NULL)) { 447 /* 448 * Different error code for a previously connected socket and 449 * a never connected one. The SS_ISDISCONNECTED is set in the 450 * unp_soisdisconnected() and is synchronized by the pcb lock. 451 */ 452 error = so->so_state & SS_ISDISCONNECTED ? EPIPE : ENOTCONN; 453 UNP_PCB_UNLOCK(unp); 454 return (error); 455 } 456 UNP_PCB_UNLOCK(unp); 457 458 return (0); 459 } 460 461 static void 462 uipc_abort(struct socket *so) 463 { 464 struct unpcb *unp, *unp2; 465 466 unp = sotounpcb(so); 467 KASSERT(unp != NULL, ("uipc_abort: unp == NULL")); 468 UNP_PCB_UNLOCK_ASSERT(unp); 469 470 UNP_PCB_LOCK(unp); 471 unp2 = unp->unp_conn; 472 if (unp2 != NULL) { 473 unp_pcb_hold(unp2); 474 UNP_PCB_UNLOCK(unp); 475 unp_drop(unp2); 476 } else 477 UNP_PCB_UNLOCK(unp); 478 } 479 480 static int 481 uipc_attach(struct socket *so, int proto, struct thread *td) 482 { 483 u_long sendspace, recvspace; 484 struct unpcb *unp; 485 int error; 486 bool locked; 487 488 KASSERT(so->so_pcb == NULL, ("uipc_attach: so_pcb != NULL")); 489 switch (so->so_type) { 490 case SOCK_DGRAM: 491 STAILQ_INIT(&so->so_rcv.uxdg_mb); 492 STAILQ_INIT(&so->so_snd.uxdg_mb); 493 TAILQ_INIT(&so->so_rcv.uxdg_conns); 494 /* 495 * Since send buffer is either bypassed or is a part 496 * of one-to-many receive buffer, we assign both space 497 * limits to unpdg_recvspace. 498 */ 499 sendspace = recvspace = unpdg_recvspace; 500 break; 501 502 case SOCK_STREAM: 503 sendspace = unpst_sendspace; 504 recvspace = unpst_recvspace; 505 goto common; 506 507 case SOCK_SEQPACKET: 508 sendspace = unpsp_sendspace; 509 recvspace = unpsp_recvspace; 510 common: 511 /* 512 * XXXGL: we need to initialize the mutex with MTX_DUPOK. 513 * Ideally, protocols that have PR_SOCKBUF should be 514 * responsible for mutex initialization officially, and then 515 * this uglyness with mtx_destroy(); mtx_init(); would go away. 
516 */ 517 mtx_destroy(&so->so_rcv_mtx); 518 mtx_init(&so->so_rcv_mtx, "so_rcv", NULL, MTX_DEF | MTX_DUPOK); 519 knlist_init(&so->so_wrsel.si_note, so, uipc_wrknl_lock, 520 uipc_wrknl_unlock, uipc_wrknl_assert_lock); 521 STAILQ_INIT(&so->so_rcv.uxst_mbq); 522 break; 523 default: 524 panic("uipc_attach"); 525 } 526 error = soreserve(so, sendspace, recvspace); 527 if (error) 528 return (error); 529 unp = uma_zalloc(unp_zone, M_NOWAIT | M_ZERO); 530 if (unp == NULL) 531 return (ENOBUFS); 532 LIST_INIT(&unp->unp_refs); 533 UNP_PCB_LOCK_INIT(unp); 534 unp->unp_socket = so; 535 so->so_pcb = unp; 536 refcount_init(&unp->unp_refcount, 1); 537 unp->unp_mode = ACCESSPERMS; 538 539 if ((locked = UNP_LINK_WOWNED()) == false) 540 UNP_LINK_WLOCK(); 541 542 unp->unp_gencnt = ++unp_gencnt; 543 unp->unp_ino = ++unp_ino; 544 unp_count++; 545 switch (so->so_type) { 546 case SOCK_STREAM: 547 LIST_INSERT_HEAD(&unp_shead, unp, unp_link); 548 break; 549 550 case SOCK_DGRAM: 551 LIST_INSERT_HEAD(&unp_dhead, unp, unp_link); 552 break; 553 554 case SOCK_SEQPACKET: 555 LIST_INSERT_HEAD(&unp_sphead, unp, unp_link); 556 break; 557 558 default: 559 panic("uipc_attach"); 560 } 561 562 if (locked == false) 563 UNP_LINK_WUNLOCK(); 564 565 return (0); 566 } 567 568 static int 569 uipc_bindat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td) 570 { 571 struct sockaddr_un *soun = (struct sockaddr_un *)nam; 572 struct vattr vattr; 573 int error, namelen; 574 struct nameidata nd; 575 struct unpcb *unp; 576 struct vnode *vp; 577 struct mount *mp; 578 cap_rights_t rights; 579 char *buf; 580 mode_t mode; 581 582 if (nam->sa_family != AF_UNIX) 583 return (EAFNOSUPPORT); 584 585 unp = sotounpcb(so); 586 KASSERT(unp != NULL, ("uipc_bind: unp == NULL")); 587 588 if (soun->sun_len > sizeof(struct sockaddr_un)) 589 return (EINVAL); 590 namelen = soun->sun_len - offsetof(struct sockaddr_un, sun_path); 591 if (namelen <= 0) 592 return (EINVAL); 593 594 /* 595 * We don't allow simultaneous bind() calls on a single UNIX domain 596 * socket, so flag in-progress operations, and return an error if an 597 * operation is already in progress. 598 * 599 * Historically, we have not allowed a socket to be rebound, so this 600 * also returns an error. Not allowing re-binding simplifies the 601 * implementation and avoids a great many possible failure modes. 
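 *
 * As an illustrative consequence of this policy from userland (error
 * values taken from the checks below), a second bind(2) on an already
 * bound socket fails with EINVAL, and a bind(2) racing with another
 * in-progress bind(2) on the same socket fails with EALREADY.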
602 */ 603 UNP_PCB_LOCK(unp); 604 if (unp->unp_vnode != NULL) { 605 UNP_PCB_UNLOCK(unp); 606 return (EINVAL); 607 } 608 if (unp->unp_flags & UNP_BINDING) { 609 UNP_PCB_UNLOCK(unp); 610 return (EALREADY); 611 } 612 unp->unp_flags |= UNP_BINDING; 613 mode = unp->unp_mode & ~td->td_proc->p_pd->pd_cmask; 614 UNP_PCB_UNLOCK(unp); 615 616 buf = malloc(namelen + 1, M_TEMP, M_WAITOK); 617 bcopy(soun->sun_path, buf, namelen); 618 buf[namelen] = 0; 619 620 restart: 621 NDINIT_ATRIGHTS(&nd, CREATE, NOFOLLOW | LOCKPARENT | NOCACHE, 622 UIO_SYSSPACE, buf, fd, cap_rights_init_one(&rights, CAP_BINDAT)); 623 /* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */ 624 error = namei(&nd); 625 if (error) 626 goto error; 627 vp = nd.ni_vp; 628 if (vp != NULL || vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) { 629 NDFREE_PNBUF(&nd); 630 if (nd.ni_dvp == vp) 631 vrele(nd.ni_dvp); 632 else 633 vput(nd.ni_dvp); 634 if (vp != NULL) { 635 vrele(vp); 636 error = EADDRINUSE; 637 goto error; 638 } 639 error = vn_start_write(NULL, &mp, V_XSLEEP | V_PCATCH); 640 if (error) 641 goto error; 642 goto restart; 643 } 644 VATTR_NULL(&vattr); 645 vattr.va_type = VSOCK; 646 vattr.va_mode = mode; 647 #ifdef MAC 648 error = mac_vnode_check_create(td->td_ucred, nd.ni_dvp, &nd.ni_cnd, 649 &vattr); 650 #endif 651 if (error == 0) { 652 /* 653 * The prior lookup may have left LK_SHARED in cn_lkflags, 654 * and VOP_CREATE technically only requires the new vnode to 655 * be locked shared. Most filesystems will return the new vnode 656 * locked exclusive regardless, but we should explicitly 657 * specify that here since we require it and assert to that 658 * effect below. 659 */ 660 nd.ni_cnd.cn_lkflags = (nd.ni_cnd.cn_lkflags & ~LK_SHARED) | 661 LK_EXCLUSIVE; 662 error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr); 663 } 664 NDFREE_PNBUF(&nd); 665 if (error) { 666 VOP_VPUT_PAIR(nd.ni_dvp, NULL, true); 667 vn_finished_write(mp); 668 if (error == ERELOOKUP) 669 goto restart; 670 goto error; 671 } 672 vp = nd.ni_vp; 673 ASSERT_VOP_ELOCKED(vp, "uipc_bind"); 674 soun = (struct sockaddr_un *)sodupsockaddr(nam, M_WAITOK); 675 676 UNP_PCB_LOCK(unp); 677 VOP_UNP_BIND(vp, unp); 678 unp->unp_vnode = vp; 679 unp->unp_addr = soun; 680 unp->unp_flags &= ~UNP_BINDING; 681 UNP_PCB_UNLOCK(unp); 682 vref(vp); 683 VOP_VPUT_PAIR(nd.ni_dvp, &vp, true); 684 vn_finished_write(mp); 685 free(buf, M_TEMP); 686 return (0); 687 688 error: 689 UNP_PCB_LOCK(unp); 690 unp->unp_flags &= ~UNP_BINDING; 691 UNP_PCB_UNLOCK(unp); 692 free(buf, M_TEMP); 693 return (error); 694 } 695 696 static int 697 uipc_bind(struct socket *so, struct sockaddr *nam, struct thread *td) 698 { 699 700 return (uipc_bindat(AT_FDCWD, so, nam, td)); 701 } 702 703 static int 704 uipc_connect(struct socket *so, struct sockaddr *nam, struct thread *td) 705 { 706 int error; 707 708 KASSERT(td == curthread, ("uipc_connect: td != curthread")); 709 error = unp_connect(so, nam, td); 710 return (error); 711 } 712 713 static int 714 uipc_connectat(int fd, struct socket *so, struct sockaddr *nam, 715 struct thread *td) 716 { 717 int error; 718 719 KASSERT(td == curthread, ("uipc_connectat: td != curthread")); 720 error = unp_connectat(fd, so, nam, td, false); 721 return (error); 722 } 723 724 static void 725 uipc_close(struct socket *so) 726 { 727 struct unpcb *unp, *unp2; 728 struct vnode *vp = NULL; 729 struct mtx *vplock; 730 731 unp = sotounpcb(so); 732 KASSERT(unp != NULL, ("uipc_close: unp == NULL")); 733 734 vplock = NULL; 735 if ((vp = unp->unp_vnode) != NULL) { 736 vplock = 
mtx_pool_find(unp_vp_mtxpool, vp); 737 mtx_lock(vplock); 738 } 739 UNP_PCB_LOCK(unp); 740 if (vp && unp->unp_vnode == NULL) { 741 mtx_unlock(vplock); 742 vp = NULL; 743 } 744 if (vp != NULL) { 745 VOP_UNP_DETACH(vp); 746 unp->unp_vnode = NULL; 747 } 748 if ((unp2 = unp_pcb_lock_peer(unp)) != NULL) 749 unp_disconnect(unp, unp2); 750 else 751 UNP_PCB_UNLOCK(unp); 752 if (vp) { 753 mtx_unlock(vplock); 754 vrele(vp); 755 } 756 } 757 758 static int 759 uipc_chmod(struct socket *so, mode_t mode, struct ucred *cred __unused, 760 struct thread *td __unused) 761 { 762 struct unpcb *unp; 763 int error; 764 765 if ((mode & ~ACCESSPERMS) != 0) 766 return (EINVAL); 767 768 error = 0; 769 unp = sotounpcb(so); 770 UNP_PCB_LOCK(unp); 771 if (unp->unp_vnode != NULL || (unp->unp_flags & UNP_BINDING) != 0) 772 error = EINVAL; 773 else 774 unp->unp_mode = mode; 775 UNP_PCB_UNLOCK(unp); 776 return (error); 777 } 778 779 static int 780 uipc_connect2(struct socket *so1, struct socket *so2) 781 { 782 struct unpcb *unp, *unp2; 783 784 if (so1->so_type != so2->so_type) 785 return (EPROTOTYPE); 786 787 unp = so1->so_pcb; 788 KASSERT(unp != NULL, ("uipc_connect2: unp == NULL")); 789 unp2 = so2->so_pcb; 790 KASSERT(unp2 != NULL, ("uipc_connect2: unp2 == NULL")); 791 unp_pcb_lock_pair(unp, unp2); 792 unp_connect2(so1, so2, false); 793 unp_pcb_unlock_pair(unp, unp2); 794 795 return (0); 796 } 797 798 static void 799 uipc_detach(struct socket *so) 800 { 801 struct unpcb *unp, *unp2; 802 struct mtx *vplock; 803 struct vnode *vp; 804 int local_unp_rights; 805 806 unp = sotounpcb(so); 807 KASSERT(unp != NULL, ("uipc_detach: unp == NULL")); 808 809 vp = NULL; 810 vplock = NULL; 811 812 if (!SOLISTENING(so)) 813 unp_dispose(so); 814 815 UNP_LINK_WLOCK(); 816 LIST_REMOVE(unp, unp_link); 817 if (unp->unp_gcflag & UNPGC_DEAD) 818 LIST_REMOVE(unp, unp_dead); 819 unp->unp_gencnt = ++unp_gencnt; 820 --unp_count; 821 UNP_LINK_WUNLOCK(); 822 823 UNP_PCB_UNLOCK_ASSERT(unp); 824 restart: 825 if ((vp = unp->unp_vnode) != NULL) { 826 vplock = mtx_pool_find(unp_vp_mtxpool, vp); 827 mtx_lock(vplock); 828 } 829 UNP_PCB_LOCK(unp); 830 if (unp->unp_vnode != vp && unp->unp_vnode != NULL) { 831 if (vplock) 832 mtx_unlock(vplock); 833 UNP_PCB_UNLOCK(unp); 834 goto restart; 835 } 836 if ((vp = unp->unp_vnode) != NULL) { 837 VOP_UNP_DETACH(vp); 838 unp->unp_vnode = NULL; 839 } 840 if ((unp2 = unp_pcb_lock_peer(unp)) != NULL) 841 unp_disconnect(unp, unp2); 842 else 843 UNP_PCB_UNLOCK(unp); 844 845 UNP_REF_LIST_LOCK(); 846 while (!LIST_EMPTY(&unp->unp_refs)) { 847 struct unpcb *ref = LIST_FIRST(&unp->unp_refs); 848 849 unp_pcb_hold(ref); 850 UNP_REF_LIST_UNLOCK(); 851 852 MPASS(ref != unp); 853 UNP_PCB_UNLOCK_ASSERT(ref); 854 unp_drop(ref); 855 UNP_REF_LIST_LOCK(); 856 } 857 UNP_REF_LIST_UNLOCK(); 858 859 UNP_PCB_LOCK(unp); 860 local_unp_rights = unp_rights; 861 unp->unp_socket->so_pcb = NULL; 862 unp->unp_socket = NULL; 863 free(unp->unp_addr, M_SONAME); 864 unp->unp_addr = NULL; 865 if (!unp_pcb_rele(unp)) 866 UNP_PCB_UNLOCK(unp); 867 if (vp) { 868 mtx_unlock(vplock); 869 vrele(vp); 870 } 871 if (local_unp_rights) 872 taskqueue_enqueue_timeout(taskqueue_thread, &unp_gc_task, -1); 873 874 switch (so->so_type) { 875 case SOCK_STREAM: 876 case SOCK_SEQPACKET: 877 MPASS(SOLISTENING(so) || (STAILQ_EMPTY(&so->so_rcv.uxst_mbq) && 878 so->so_rcv.uxst_peer == NULL)); 879 break; 880 case SOCK_DGRAM: 881 /* 882 * Everything should have been unlinked/freed by unp_dispose() 883 * and/or unp_disconnect(). 
884 */ 885 MPASS(so->so_rcv.uxdg_peeked == NULL); 886 MPASS(STAILQ_EMPTY(&so->so_rcv.uxdg_mb)); 887 MPASS(TAILQ_EMPTY(&so->so_rcv.uxdg_conns)); 888 MPASS(STAILQ_EMPTY(&so->so_snd.uxdg_mb)); 889 } 890 } 891 892 static int 893 uipc_disconnect(struct socket *so) 894 { 895 struct unpcb *unp, *unp2; 896 897 unp = sotounpcb(so); 898 KASSERT(unp != NULL, ("uipc_disconnect: unp == NULL")); 899 900 UNP_PCB_LOCK(unp); 901 if ((unp2 = unp_pcb_lock_peer(unp)) != NULL) 902 unp_disconnect(unp, unp2); 903 else 904 UNP_PCB_UNLOCK(unp); 905 return (0); 906 } 907 908 static int 909 uipc_listen(struct socket *so, int backlog, struct thread *td) 910 { 911 struct unpcb *unp; 912 int error; 913 914 MPASS(so->so_type != SOCK_DGRAM); 915 916 /* 917 * Synchronize with concurrent connection attempts. 918 */ 919 error = 0; 920 unp = sotounpcb(so); 921 UNP_PCB_LOCK(unp); 922 if (unp->unp_conn != NULL || (unp->unp_flags & UNP_CONNECTING) != 0) 923 error = EINVAL; 924 else if (unp->unp_vnode == NULL) 925 error = EDESTADDRREQ; 926 if (error != 0) { 927 UNP_PCB_UNLOCK(unp); 928 return (error); 929 } 930 931 SOCK_LOCK(so); 932 error = solisten_proto_check(so); 933 if (error == 0) { 934 cru2xt(td, &unp->unp_peercred); 935 if (!SOLISTENING(so)) { 936 (void)chgsbsize(so->so_cred->cr_uidinfo, 937 &so->so_snd.sb_hiwat, 0, RLIM_INFINITY); 938 (void)chgsbsize(so->so_cred->cr_uidinfo, 939 &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY); 940 } 941 solisten_proto(so, backlog); 942 } 943 SOCK_UNLOCK(so); 944 UNP_PCB_UNLOCK(unp); 945 return (error); 946 } 947 948 static int 949 uipc_peeraddr(struct socket *so, struct sockaddr *ret) 950 { 951 struct unpcb *unp, *unp2; 952 const struct sockaddr *sa; 953 954 unp = sotounpcb(so); 955 KASSERT(unp != NULL, ("uipc_peeraddr: unp == NULL")); 956 957 UNP_PCB_LOCK(unp); 958 unp2 = unp_pcb_lock_peer(unp); 959 if (unp2 != NULL) { 960 if (unp2->unp_addr != NULL) 961 sa = (struct sockaddr *)unp2->unp_addr; 962 else 963 sa = &sun_noname; 964 bcopy(sa, ret, sa->sa_len); 965 unp_pcb_unlock_pair(unp, unp2); 966 } else { 967 UNP_PCB_UNLOCK(unp); 968 sa = &sun_noname; 969 bcopy(sa, ret, sa->sa_len); 970 } 971 return (0); 972 } 973 974 /* 975 * pr_sosend() called with mbuf instead of uio is a kernel thread. NFS, 976 * netgraph(4) and other subsystems can call into socket code. The 977 * function will condition the mbuf so that it can be safely put onto socket 978 * buffer and calculate its char count and mbuf count. 979 * 980 * Note: we don't support receiving control data from a kernel thread. Our 981 * pr_sosend methods have MPASS() to check that. This may change. 
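 *
 * A minimal sketch of such a kernel-side call, assuming the caller owns a
 * connected socket 'so' and an M_PKTHDR mbuf chain 'm' (error handling
 * omitted):
 *
 *	error = sosend(so, NULL, NULL, m, NULL, 0, curthread);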
 */
static void
uipc_reset_kernel_mbuf(struct mbuf *m, struct mchain *mc)
{

	M_ASSERTPKTHDR(m);

	m_clrprotoflags(m);
	m_tag_delete_chain(m, NULL);
	m->m_pkthdr.rcvif = NULL;
	m->m_pkthdr.flowid = 0;
	m->m_pkthdr.csum_flags = 0;
	m->m_pkthdr.fibnum = 0;
	m->m_pkthdr.rsstype = 0;

	mc_init_m(mc, m);
	MPASS(m->m_pkthdr.len == mc->mc_len);
}

#ifdef SOCKBUF_DEBUG
static inline void
uipc_stream_sbcheck(struct sockbuf *sb)
{
	struct mbuf *d;
	u_int dacc, dccc, dctl, dmbcnt;
	bool notready = false;

	dacc = dccc = dctl = dmbcnt = 0;
	STAILQ_FOREACH(d, &sb->uxst_mbq, m_stailq) {
		if (d == sb->uxst_fnrdy) {
			MPASS(d->m_flags & M_NOTREADY);
			notready = true;
		}
		if (d->m_type == MT_CONTROL)
			dctl += d->m_len;
		else if (d->m_type == MT_DATA) {
			dccc += d->m_len;
			if (!notready)
				dacc += d->m_len;
		} else
			MPASS(0);
		dmbcnt += MSIZE;
		if (d->m_flags & M_EXT)
			dmbcnt += d->m_ext.ext_size;
		if (d->m_stailq.stqe_next == NULL)
			MPASS(sb->uxst_mbq.stqh_last == &d->m_stailq.stqe_next);
	}
	MPASS(sb->uxst_fnrdy == NULL || notready);
	MPASS(dacc == sb->sb_acc);
	MPASS(dccc == sb->sb_ccc);
	MPASS(dctl == sb->sb_ctl);
	MPASS(dmbcnt == sb->sb_mbcnt);
	(void)STAILQ_EMPTY(&sb->uxst_mbq);
}
#define	UIPC_STREAM_SBCHECK(sb)	uipc_stream_sbcheck(sb)
#else
#define	UIPC_STREAM_SBCHECK(sb)	do {} while (0)
#endif

/*
 * uipc_stream_sbspace() returns how much a writer can send, limited by char
 * count or mbuf memory use, whichever limit is reached first.
 *
 * An obvious and legitimate reason for a socket to have more data than
 * allowed is lowering the limit with setsockopt(SO_RCVBUF) on an already
 * full buffer.  Also, sb_mbcnt may overcommit sb_mbmax if a previous write
 * observed 'space < mbspace', but the mchain allocated to hold 'space' bytes
 * of data ended up with 'mc_mlen > mbspace'.  A typical scenario would be a
 * full buffer with a writer trying to push in a large write, and a slow
 * reader that reads just a few bytes at a time.  In that case the writer
 * will keep creating new mbufs with mc_split().  These mbufs will carry few
 * characters each, but will all point at the same cluster, thus each adding
 * the cluster size to sb_mbcnt.  This means we may count the same cluster
 * many times, potentially underutilizing the socket buffer.  We aren't
 * optimizing for inefficient readers.  The classic socket buffer had the
 * same "feature."
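 *
 * A worked example with illustrative numbers: with sb_hiwat = 8192,
 * sb_ccc = 6000, sb_ctl = 0, sb_mbmax = 65536 and sb_mbcnt = 64512, a
 * writer sees min(8192 - 6000, 65536 - 64512) = min(2192, 1024) = 1024
 * bytes of space, i.e. the mbuf accounting and not the char count is what
 * limits the next write.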
1057 */ 1058 static inline u_int 1059 uipc_stream_sbspace(struct sockbuf *sb) 1060 { 1061 u_int space, mbspace; 1062 1063 if (__predict_true(sb->sb_hiwat >= sb->sb_ccc + sb->sb_ctl)) 1064 space = sb->sb_hiwat - sb->sb_ccc - sb->sb_ctl; 1065 else 1066 return (0); 1067 if (__predict_true(sb->sb_mbmax >= sb->sb_mbcnt)) 1068 mbspace = sb->sb_mbmax - sb->sb_mbcnt; 1069 else 1070 return (0); 1071 1072 return (min(space, mbspace)); 1073 } 1074 1075 static int 1076 uipc_sosend_stream_or_seqpacket(struct socket *so, struct sockaddr *addr, 1077 struct uio *uio0, struct mbuf *m, struct mbuf *c, int flags, 1078 struct thread *td) 1079 { 1080 struct unpcb *unp2; 1081 struct socket *so2; 1082 struct sockbuf *sb; 1083 struct uio *uio; 1084 struct mchain mc, cmc; 1085 size_t resid, sent; 1086 bool nonblock, eor, aio; 1087 int error; 1088 1089 MPASS((uio0 != NULL && m == NULL) || (m != NULL && uio0 == NULL)); 1090 MPASS(m == NULL || c == NULL); 1091 1092 if (__predict_false(flags & MSG_OOB)) 1093 return (EOPNOTSUPP); 1094 1095 nonblock = (so->so_state & SS_NBIO) || 1096 (flags & (MSG_DONTWAIT | MSG_NBIO)); 1097 eor = flags & MSG_EOR; 1098 1099 mc = MCHAIN_INITIALIZER(&mc); 1100 cmc = MCHAIN_INITIALIZER(&cmc); 1101 sent = 0; 1102 aio = false; 1103 1104 if (m == NULL) { 1105 if (c != NULL && (error = unp_internalize(c, &cmc, td))) 1106 goto out; 1107 /* 1108 * This function may read more data from the uio than it would 1109 * then place on socket. That would leave uio inconsistent 1110 * upon return. Normally uio is allocated on the stack of the 1111 * syscall thread and we don't care about leaving it consistent. 1112 * However, aio(9) will allocate a uio as part of job and will 1113 * use it to track progress. We detect aio(9) checking the 1114 * SB_AIO_RUNNING flag. It is safe to check it without lock 1115 * cause it is set and cleared in the same taskqueue thread. 1116 * 1117 * This check can also produce a false positive: there is 1118 * aio(9) job and also there is a syscall we are serving now. 1119 * No sane software does that, it would leave to a mess in 1120 * the socket buffer, as aio(9) doesn't grab the I/O sx(9). 1121 * But syzkaller can create this mess. For such false positive 1122 * our goal is just don't panic or leak memory. 1123 */ 1124 if (__predict_false(so->so_snd.sb_flags & SB_AIO_RUNNING)) { 1125 uio = cloneuio(uio0); 1126 aio = true; 1127 } else { 1128 uio = uio0; 1129 resid = uio->uio_resid; 1130 } 1131 /* 1132 * Optimization for a case when our send fits into the receive 1133 * buffer - do the copyin before taking any locks, sized to our 1134 * send buffer. Later copyins will also take into account 1135 * space in the peer's receive buffer. 1136 */ 1137 error = mc_uiotomc(&mc, uio, so->so_snd.sb_hiwat, 0, M_WAITOK, 1138 eor ? M_EOR : 0); 1139 if (__predict_false(error)) 1140 goto out2; 1141 } else 1142 uipc_reset_kernel_mbuf(m, &mc); 1143 1144 error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags)); 1145 if (error) 1146 goto out2; 1147 1148 if (__predict_false((error = uipc_lock_peer(so, &unp2)) != 0)) 1149 goto out3; 1150 1151 if (unp2->unp_flags & UNP_WANTCRED_MASK) { 1152 /* 1153 * Credentials are passed only once on SOCK_STREAM and 1154 * SOCK_SEQPACKET (LOCAL_CREDS => WANTCRED_ONESHOT), or 1155 * forever (LOCAL_CREDS_PERSISTENT => WANTCRED_ALWAYS). 1156 */ 1157 unp_addsockcred(td, &cmc, unp2->unp_flags); 1158 unp2->unp_flags &= ~UNP_WANTCRED_ONESHOT; 1159 } 1160 1161 /* 1162 * Cycle through the data to send and available space in the peer's 1163 * receive buffer. 
Put a reference on the peer socket, so that it 1164 * doesn't get freed while we sbwait(). If peer goes away, we will 1165 * observe the SBS_CANTRCVMORE and our sorele() will finalize peer's 1166 * socket destruction. 1167 */ 1168 so2 = unp2->unp_socket; 1169 soref(so2); 1170 UNP_PCB_UNLOCK(unp2); 1171 sb = &so2->so_rcv; 1172 while (mc.mc_len + cmc.mc_len > 0) { 1173 struct mchain mcnext = MCHAIN_INITIALIZER(&mcnext); 1174 u_int space; 1175 1176 SOCK_RECVBUF_LOCK(so2); 1177 restart: 1178 UIPC_STREAM_SBCHECK(sb); 1179 if (__predict_false(cmc.mc_len > sb->sb_hiwat)) { 1180 SOCK_RECVBUF_UNLOCK(so2); 1181 error = EMSGSIZE; 1182 goto out4; 1183 } 1184 if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) { 1185 SOCK_RECVBUF_UNLOCK(so2); 1186 error = EPIPE; 1187 goto out4; 1188 } 1189 /* 1190 * Wait on the peer socket receive buffer until we have enough 1191 * space to put at least control. The data is a stream and can 1192 * be put partially, but control is really a datagram. 1193 */ 1194 space = uipc_stream_sbspace(sb); 1195 if (space < sb->sb_lowat || space < cmc.mc_len) { 1196 if (nonblock) { 1197 if (aio) 1198 sb->uxst_flags |= UXST_PEER_AIO; 1199 SOCK_RECVBUF_UNLOCK(so2); 1200 if (aio) { 1201 SOCK_SENDBUF_LOCK(so); 1202 so->so_snd.sb_ccc = 1203 so->so_snd.sb_hiwat - space; 1204 SOCK_SENDBUF_UNLOCK(so); 1205 } 1206 error = EWOULDBLOCK; 1207 goto out4; 1208 } 1209 if ((error = sbwait(so2, SO_RCV)) != 0) { 1210 SOCK_RECVBUF_UNLOCK(so2); 1211 goto out4; 1212 } else 1213 goto restart; 1214 } 1215 MPASS(space >= cmc.mc_len); 1216 space -= cmc.mc_len; 1217 if (space == 0) { 1218 /* There is space only to send control. */ 1219 MPASS(!STAILQ_EMPTY(&cmc.mc_q)); 1220 mcnext = mc; 1221 mc = MCHAIN_INITIALIZER(&mc); 1222 } else if (space < mc.mc_len) { 1223 /* Not enough space. */ 1224 if (__predict_false(mc_split(&mc, &mcnext, space, 1225 M_NOWAIT) == ENOMEM)) { 1226 /* 1227 * If allocation failed use M_WAITOK and merge 1228 * the chain back. Next time mc_split() will 1229 * easily split at the same place. Only if we 1230 * race with setsockopt(SO_RCVBUF) shrinking 1231 * sb_hiwat can this happen more than once. 1232 */ 1233 SOCK_RECVBUF_UNLOCK(so2); 1234 (void)mc_split(&mc, &mcnext, space, M_WAITOK); 1235 mc_concat(&mc, &mcnext); 1236 SOCK_RECVBUF_LOCK(so2); 1237 goto restart; 1238 } 1239 MPASS(mc.mc_len == space); 1240 } 1241 if (!STAILQ_EMPTY(&cmc.mc_q)) { 1242 STAILQ_CONCAT(&sb->uxst_mbq, &cmc.mc_q); 1243 sb->sb_ctl += cmc.mc_len; 1244 sb->sb_mbcnt += cmc.mc_mlen; 1245 cmc.mc_len = 0; 1246 } 1247 sent += mc.mc_len; 1248 if (sb->uxst_fnrdy == NULL) 1249 sb->sb_acc += mc.mc_len; 1250 sb->sb_ccc += mc.mc_len; 1251 sb->sb_mbcnt += mc.mc_mlen; 1252 STAILQ_CONCAT(&sb->uxst_mbq, &mc.mc_q); 1253 UIPC_STREAM_SBCHECK(sb); 1254 space = uipc_stream_sbspace(sb); 1255 sorwakeup_locked(so2); 1256 if (!STAILQ_EMPTY(&mcnext.mc_q)) { 1257 /* 1258 * Such assignment is unsafe in general, but it is 1259 * safe with !STAILQ_EMPTY(&mcnext.mc_q). In C++ we 1260 * could reload = for STAILQs :) 1261 */ 1262 mc = mcnext; 1263 } else if (uio != NULL && uio->uio_resid > 0) { 1264 /* 1265 * Copyin sum of peer's receive buffer space and our 1266 * sb_hiwat, which is our virtual send buffer size. 1267 * See comment above unpst_sendspace declaration. 
1268 * We are reading sb_hiwat locklessly, cause a) we 1269 * don't care about an application that does send(2) 1270 * and setsockopt(2) racing internally, and for an 1271 * application that does this in sequence we will see 1272 * the correct value cause sbsetopt() uses buffer lock 1273 * and we also have already acquired it at least once. 1274 */ 1275 error = mc_uiotomc(&mc, uio, space + 1276 atomic_load_int(&so->so_snd.sb_hiwat), 0, M_WAITOK, 1277 eor ? M_EOR : 0); 1278 if (__predict_false(error)) 1279 goto out4; 1280 } else 1281 mc = MCHAIN_INITIALIZER(&mc); 1282 } 1283 1284 MPASS(STAILQ_EMPTY(&mc.mc_q)); 1285 1286 td->td_ru.ru_msgsnd++; 1287 out4: 1288 sorele(so2); 1289 out3: 1290 SOCK_IO_SEND_UNLOCK(so); 1291 out2: 1292 if (aio) { 1293 freeuio(uio); 1294 uioadvance(uio0, sent); 1295 } else if (uio != NULL) 1296 uio->uio_resid = resid - sent; 1297 if (!mc_empty(&cmc)) 1298 unp_scan(mc_first(&cmc), unp_freerights); 1299 out: 1300 mc_freem(&mc); 1301 mc_freem(&cmc); 1302 1303 return (error); 1304 } 1305 1306 /* 1307 * Our version of sowakeup(), used by recv(2) and shutdown(2). 1308 * 1309 * @param so Points to a connected stream socket with receive buffer locked 1310 * 1311 * In a blocking mode peer is sleeping on our receive buffer, and we need just 1312 * wakeup(9) on it. But to wake up various event engines, we need to reach 1313 * over to peer's selinfo. This can be safely done as the socket buffer 1314 * receive lock is protecting us from the peer going away. 1315 */ 1316 static void 1317 uipc_wakeup(struct socket *so) 1318 { 1319 struct sockbuf *sb = &so->so_rcv; 1320 struct selinfo *sel; 1321 1322 SOCK_RECVBUF_LOCK_ASSERT(so); 1323 MPASS(sb->uxst_peer != NULL); 1324 1325 sel = &sb->uxst_peer->so_wrsel; 1326 1327 if (sb->uxst_flags & UXST_PEER_SEL) { 1328 selwakeuppri(sel, PSOCK); 1329 /* 1330 * XXXGL: sowakeup() does SEL_WAITING() without locks. 1331 */ 1332 if (!SEL_WAITING(sel)) 1333 sb->uxst_flags &= ~UXST_PEER_SEL; 1334 } 1335 if (sb->sb_flags & SB_WAIT) { 1336 sb->sb_flags &= ~SB_WAIT; 1337 wakeup(&sb->sb_acc); 1338 } 1339 KNOTE_LOCKED(&sel->si_note, 0); 1340 SOCK_RECVBUF_UNLOCK(so); 1341 } 1342 1343 static void 1344 uipc_cantrcvmore(struct socket *so) 1345 { 1346 1347 SOCK_RECVBUF_LOCK(so); 1348 so->so_rcv.sb_state |= SBS_CANTRCVMORE; 1349 if (so->so_rcv.uxst_peer != NULL) 1350 uipc_wakeup(so); 1351 else 1352 SOCK_RECVBUF_UNLOCK(so); 1353 } 1354 1355 static int 1356 uipc_soreceive_stream_or_seqpacket(struct socket *so, struct sockaddr **psa, 1357 struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp) 1358 { 1359 struct sockbuf *sb = &so->so_rcv; 1360 struct mbuf *control, *m, *first, *last, *next; 1361 u_int ctl, space, datalen, mbcnt, lastlen; 1362 int error, flags; 1363 bool nonblock, waitall, peek; 1364 1365 MPASS(mp0 == NULL); 1366 1367 if (psa != NULL) 1368 *psa = NULL; 1369 if (controlp != NULL) 1370 *controlp = NULL; 1371 1372 flags = flagsp != NULL ? *flagsp : 0; 1373 nonblock = (so->so_state & SS_NBIO) || 1374 (flags & (MSG_DONTWAIT | MSG_NBIO)); 1375 peek = flags & MSG_PEEK; 1376 waitall = (flags & MSG_WAITALL) && !peek; 1377 1378 /* 1379 * This check may fail only on a socket that never went through 1380 * connect(2). We can check this locklessly, cause: a) for a new born 1381 * socket we don't care about applications that may race internally 1382 * between connect(2) and recv(2), and b) for a dying socket if we 1383 * miss update by unp_sosidisconnected(), we would still get the check 1384 * correct. 
For dying socket we would observe SBS_CANTRCVMORE later. 1385 */ 1386 if (__predict_false((atomic_load_short(&so->so_state) & 1387 (SS_ISCONNECTED|SS_ISDISCONNECTED)) == 0)) 1388 return (ENOTCONN); 1389 1390 error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags)); 1391 if (__predict_false(error)) 1392 return (error); 1393 1394 restart: 1395 SOCK_RECVBUF_LOCK(so); 1396 UIPC_STREAM_SBCHECK(sb); 1397 while (sb->sb_acc < sb->sb_lowat && 1398 (sb->sb_ctl == 0 || controlp == NULL)) { 1399 if (so->so_error) { 1400 error = so->so_error; 1401 if (!peek) 1402 so->so_error = 0; 1403 SOCK_RECVBUF_UNLOCK(so); 1404 SOCK_IO_RECV_UNLOCK(so); 1405 return (error); 1406 } 1407 if (sb->sb_state & SBS_CANTRCVMORE) { 1408 SOCK_RECVBUF_UNLOCK(so); 1409 SOCK_IO_RECV_UNLOCK(so); 1410 return (0); 1411 } 1412 if (nonblock) { 1413 SOCK_RECVBUF_UNLOCK(so); 1414 SOCK_IO_RECV_UNLOCK(so); 1415 return (EWOULDBLOCK); 1416 } 1417 error = sbwait(so, SO_RCV); 1418 if (error) { 1419 SOCK_RECVBUF_UNLOCK(so); 1420 SOCK_IO_RECV_UNLOCK(so); 1421 return (error); 1422 } 1423 } 1424 1425 MPASS(STAILQ_FIRST(&sb->uxst_mbq)); 1426 MPASS(sb->sb_acc > 0 || sb->sb_ctl > 0); 1427 1428 mbcnt = 0; 1429 ctl = 0; 1430 first = STAILQ_FIRST(&sb->uxst_mbq); 1431 if (first->m_type == MT_CONTROL) { 1432 control = first; 1433 STAILQ_FOREACH_FROM(first, &sb->uxst_mbq, m_stailq) { 1434 if (first->m_type != MT_CONTROL) 1435 break; 1436 ctl += first->m_len; 1437 mbcnt += MSIZE; 1438 if (first->m_flags & M_EXT) 1439 mbcnt += first->m_ext.ext_size; 1440 } 1441 } else 1442 control = NULL; 1443 1444 /* 1445 * Find split point for the next copyout. On exit from the loop: 1446 * last == NULL - socket to be flushed 1447 * last != NULL 1448 * lastlen > last->m_len - uio to be filled, last to be adjusted 1449 * lastlen == 0 - MT_CONTROL, M_EOR or M_NOTREADY encountered 1450 */ 1451 space = uio->uio_resid; 1452 datalen = 0; 1453 for (m = first, last = sb->uxst_fnrdy, lastlen = 0; 1454 m != sb->uxst_fnrdy; 1455 m = STAILQ_NEXT(m, m_stailq)) { 1456 if (m->m_type != MT_DATA) { 1457 last = m; 1458 lastlen = 0; 1459 break; 1460 } 1461 if (space >= m->m_len) { 1462 space -= m->m_len; 1463 datalen += m->m_len; 1464 mbcnt += MSIZE; 1465 if (m->m_flags & M_EXT) 1466 mbcnt += m->m_ext.ext_size; 1467 if (m->m_flags & M_EOR) { 1468 last = STAILQ_NEXT(m, m_stailq); 1469 lastlen = 0; 1470 flags |= MSG_EOR; 1471 break; 1472 } 1473 } else { 1474 datalen += space; 1475 last = m; 1476 lastlen = space; 1477 break; 1478 } 1479 } 1480 1481 UIPC_STREAM_SBCHECK(sb); 1482 if (!peek) { 1483 if (last == NULL) 1484 STAILQ_INIT(&sb->uxst_mbq); 1485 else { 1486 STAILQ_FIRST(&sb->uxst_mbq) = last; 1487 MPASS(last->m_len > lastlen); 1488 last->m_len -= lastlen; 1489 last->m_data += lastlen; 1490 } 1491 MPASS(sb->sb_acc >= datalen); 1492 sb->sb_acc -= datalen; 1493 sb->sb_ccc -= datalen; 1494 MPASS(sb->sb_ctl >= ctl); 1495 sb->sb_ctl -= ctl; 1496 MPASS(sb->sb_mbcnt >= mbcnt); 1497 sb->sb_mbcnt -= mbcnt; 1498 UIPC_STREAM_SBCHECK(sb); 1499 if (__predict_true(sb->uxst_peer != NULL)) { 1500 struct unpcb *unp2; 1501 bool aio; 1502 1503 if ((aio = sb->uxst_flags & UXST_PEER_AIO)) 1504 sb->uxst_flags &= ~UXST_PEER_AIO; 1505 1506 uipc_wakeup(so); 1507 /* 1508 * XXXGL: need to go through uipc_lock_peer() after 1509 * the receive buffer lock dropped, it was protecting 1510 * us from unp_soisdisconnected(). The aio workarounds 1511 * should be refactored to the aio(4) side. 
1512 */ 1513 if (aio && uipc_lock_peer(so, &unp2) == 0) { 1514 struct socket *so2 = unp2->unp_socket; 1515 1516 SOCK_SENDBUF_LOCK(so2); 1517 so2->so_snd.sb_ccc -= datalen; 1518 sowakeup_aio(so2, SO_SND); 1519 SOCK_SENDBUF_UNLOCK(so2); 1520 UNP_PCB_UNLOCK(unp2); 1521 } 1522 } else 1523 SOCK_RECVBUF_UNLOCK(so); 1524 } else 1525 SOCK_RECVBUF_UNLOCK(so); 1526 1527 while (control != NULL && control->m_type == MT_CONTROL) { 1528 if (!peek) { 1529 struct mbuf *c; 1530 1531 /* 1532 * unp_externalize() failure must abort entire read(2). 1533 * Such failure should also free the problematic 1534 * control, but link back the remaining data to the head 1535 * of the buffer, so that socket is not left in a state 1536 * where it can't progress forward with reading. 1537 * Probability of such a failure is really low, so it 1538 * is fine that we need to perform pretty complex 1539 * operation here to reconstruct the buffer. 1540 * XXXGL: unp_externalize() used to be 1541 * dom_externalize() KBI and it frees whole chain, so 1542 * we need to feed it with mbufs one by one. 1543 */ 1544 c = control; 1545 control = STAILQ_NEXT(c, m_stailq); 1546 STAILQ_NEXT(c, m_stailq) = NULL; 1547 error = unp_externalize(c, controlp, flags); 1548 if (__predict_false(error && control != NULL)) { 1549 struct mchain cmc; 1550 1551 mc_init_m(&cmc, control); 1552 1553 SOCK_RECVBUF_LOCK(so); 1554 MPASS(!(sb->sb_state & SBS_CANTRCVMORE)); 1555 1556 if (__predict_false(cmc.mc_len + sb->sb_ccc + 1557 sb->sb_ctl > sb->sb_hiwat)) { 1558 /* 1559 * Too bad, while unp_externalize() was 1560 * failing, the other side had filled 1561 * the buffer and we can't prepend data 1562 * back. Losing data! 1563 */ 1564 SOCK_RECVBUF_UNLOCK(so); 1565 SOCK_IO_RECV_UNLOCK(so); 1566 unp_scan(mc_first(&cmc), 1567 unp_freerights); 1568 mc_freem(&cmc); 1569 return (error); 1570 } 1571 1572 UIPC_STREAM_SBCHECK(sb); 1573 /* XXXGL: STAILQ_PREPEND */ 1574 STAILQ_CONCAT(&cmc.mc_q, &sb->uxst_mbq); 1575 STAILQ_SWAP(&cmc.mc_q, &sb->uxst_mbq, mbuf); 1576 1577 sb->sb_ctl = sb->sb_acc = sb->sb_ccc = 1578 sb->sb_mbcnt = 0; 1579 STAILQ_FOREACH(m, &sb->uxst_mbq, m_stailq) { 1580 if (m->m_type == MT_DATA) { 1581 sb->sb_acc += m->m_len; 1582 sb->sb_ccc += m->m_len; 1583 } else { 1584 sb->sb_ctl += m->m_len; 1585 } 1586 sb->sb_mbcnt += MSIZE; 1587 if (m->m_flags & M_EXT) 1588 sb->sb_mbcnt += 1589 m->m_ext.ext_size; 1590 } 1591 UIPC_STREAM_SBCHECK(sb); 1592 SOCK_RECVBUF_UNLOCK(so); 1593 SOCK_IO_RECV_UNLOCK(so); 1594 return (error); 1595 } 1596 if (controlp != NULL) { 1597 while (*controlp != NULL) 1598 controlp = &(*controlp)->m_next; 1599 } 1600 } else { 1601 /* 1602 * XXXGL 1603 * 1604 * In MSG_PEEK case control is not externalized. This 1605 * means we are leaking some kernel pointers to the 1606 * userland. They are useless to a law-abiding 1607 * application, but may be useful to a malware. This 1608 * is what the historical implementation in the 1609 * soreceive_generic() did. To be improved? 
1610 */ 1611 if (controlp != NULL) { 1612 *controlp = m_copym(control, 0, control->m_len, 1613 M_WAITOK); 1614 controlp = &(*controlp)->m_next; 1615 } 1616 control = STAILQ_NEXT(control, m_stailq); 1617 } 1618 } 1619 1620 for (m = first; m != last; m = next) { 1621 next = STAILQ_NEXT(m, m_stailq); 1622 error = uiomove(mtod(m, char *), m->m_len, uio); 1623 if (__predict_false(error)) { 1624 SOCK_IO_RECV_UNLOCK(so); 1625 if (!peek) 1626 for (; m != last; m = next) { 1627 next = STAILQ_NEXT(m, m_stailq); 1628 m_free(m); 1629 } 1630 return (error); 1631 } 1632 if (!peek) 1633 m_free(m); 1634 } 1635 if (last != NULL && lastlen > 0) { 1636 if (!peek) { 1637 MPASS(!(m->m_flags & M_PKTHDR)); 1638 MPASS(last->m_data - M_START(last) >= lastlen); 1639 error = uiomove(mtod(last, char *) - lastlen, 1640 lastlen, uio); 1641 } else 1642 error = uiomove(mtod(last, char *), lastlen, uio); 1643 if (__predict_false(error)) { 1644 SOCK_IO_RECV_UNLOCK(so); 1645 return (error); 1646 } 1647 } 1648 if (waitall && !(flags & MSG_EOR) && uio->uio_resid > 0) 1649 goto restart; 1650 SOCK_IO_RECV_UNLOCK(so); 1651 1652 if (flagsp != NULL) 1653 *flagsp |= flags; 1654 1655 uio->uio_td->td_ru.ru_msgrcv++; 1656 1657 return (0); 1658 } 1659 1660 static int 1661 uipc_sopoll_stream_or_seqpacket(struct socket *so, int events, 1662 struct thread *td) 1663 { 1664 struct unpcb *unp = sotounpcb(so); 1665 int revents; 1666 1667 UNP_PCB_LOCK(unp); 1668 if (SOLISTENING(so)) { 1669 /* The above check is safe, since conversion to listening uses 1670 * both protocol and socket lock. 1671 */ 1672 SOCK_LOCK(so); 1673 if (!(events & (POLLIN | POLLRDNORM))) 1674 revents = 0; 1675 else if (!TAILQ_EMPTY(&so->sol_comp)) 1676 revents = events & (POLLIN | POLLRDNORM); 1677 else if (so->so_error) 1678 revents = (events & (POLLIN | POLLRDNORM)) | POLLHUP; 1679 else { 1680 selrecord(td, &so->so_rdsel); 1681 revents = 0; 1682 } 1683 SOCK_UNLOCK(so); 1684 } else { 1685 if (so->so_state & SS_ISDISCONNECTED) 1686 revents = POLLHUP; 1687 else 1688 revents = 0; 1689 if (events & (POLLIN | POLLRDNORM | POLLRDHUP)) { 1690 SOCK_RECVBUF_LOCK(so); 1691 if (sbavail(&so->so_rcv) >= so->so_rcv.sb_lowat || 1692 so->so_error || so->so_rerror) 1693 revents |= events & (POLLIN | POLLRDNORM); 1694 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 1695 revents |= events & POLLRDHUP; 1696 if (!(revents & (POLLIN | POLLRDNORM | POLLRDHUP))) { 1697 selrecord(td, &so->so_rdsel); 1698 so->so_rcv.sb_flags |= SB_SEL; 1699 } 1700 SOCK_RECVBUF_UNLOCK(so); 1701 } 1702 if (events & (POLLOUT | POLLWRNORM)) { 1703 struct socket *so2 = so->so_rcv.uxst_peer; 1704 1705 if (so2 != NULL) { 1706 struct sockbuf *sb = &so2->so_rcv; 1707 1708 SOCK_RECVBUF_LOCK(so2); 1709 if (uipc_stream_sbspace(sb) >= sb->sb_lowat) 1710 revents |= events & 1711 (POLLOUT | POLLWRNORM); 1712 if (sb->sb_state & SBS_CANTRCVMORE) 1713 revents |= POLLHUP; 1714 if (!(revents & (POLLOUT | POLLWRNORM))) { 1715 so2->so_rcv.uxst_flags |= UXST_PEER_SEL; 1716 selrecord(td, &so->so_wrsel); 1717 } 1718 SOCK_RECVBUF_UNLOCK(so2); 1719 } else 1720 selrecord(td, &so->so_wrsel); 1721 } 1722 } 1723 UNP_PCB_UNLOCK(unp); 1724 return (revents); 1725 } 1726 1727 static void 1728 uipc_wrknl_lock(void *arg) 1729 { 1730 struct socket *so = arg; 1731 struct unpcb *unp = sotounpcb(so); 1732 1733 retry: 1734 if (SOLISTENING(so)) { 1735 SOLISTEN_LOCK(so); 1736 } else { 1737 UNP_PCB_LOCK(unp); 1738 if (__predict_false(SOLISTENING(so))) { 1739 UNP_PCB_UNLOCK(unp); 1740 goto retry; 1741 } 1742 if (so->so_rcv.uxst_peer != NULL) 1743 
SOCK_RECVBUF_LOCK(so->so_rcv.uxst_peer); 1744 } 1745 } 1746 1747 static void 1748 uipc_wrknl_unlock(void *arg) 1749 { 1750 struct socket *so = arg; 1751 struct unpcb *unp = sotounpcb(so); 1752 1753 if (SOLISTENING(so)) 1754 SOLISTEN_UNLOCK(so); 1755 else { 1756 if (so->so_rcv.uxst_peer != NULL) 1757 SOCK_RECVBUF_UNLOCK(so->so_rcv.uxst_peer); 1758 UNP_PCB_UNLOCK(unp); 1759 } 1760 } 1761 1762 static void 1763 uipc_wrknl_assert_lock(void *arg, int what) 1764 { 1765 struct socket *so = arg; 1766 1767 if (SOLISTENING(so)) { 1768 if (what == LA_LOCKED) 1769 SOLISTEN_LOCK_ASSERT(so); 1770 else 1771 SOLISTEN_UNLOCK_ASSERT(so); 1772 } else { 1773 /* 1774 * The pr_soreceive method will put a note without owning the 1775 * unp lock, so we can't assert it here. But we can safely 1776 * dereference uxst_peer pointer, since receive buffer lock 1777 * is assumed to be held here. 1778 */ 1779 if (what == LA_LOCKED && so->so_rcv.uxst_peer != NULL) 1780 SOCK_RECVBUF_LOCK_ASSERT(so->so_rcv.uxst_peer); 1781 } 1782 } 1783 1784 static void 1785 uipc_filt_sowdetach(struct knote *kn) 1786 { 1787 struct socket *so = kn->kn_fp->f_data; 1788 1789 uipc_wrknl_lock(so); 1790 knlist_remove(&so->so_wrsel.si_note, kn, 1); 1791 uipc_wrknl_unlock(so); 1792 } 1793 1794 static int 1795 uipc_filt_sowrite(struct knote *kn, long hint) 1796 { 1797 struct socket *so = kn->kn_fp->f_data, *so2; 1798 struct unpcb *unp = sotounpcb(so), *unp2 = unp->unp_conn; 1799 1800 if (SOLISTENING(so)) 1801 return (0); 1802 1803 if (unp2 == NULL) { 1804 if (so->so_state & SS_ISDISCONNECTED) { 1805 kn->kn_flags |= EV_EOF; 1806 kn->kn_fflags = so->so_error; 1807 return (1); 1808 } else 1809 return (0); 1810 } 1811 1812 so2 = unp2->unp_socket; 1813 SOCK_RECVBUF_LOCK_ASSERT(so2); 1814 kn->kn_data = uipc_stream_sbspace(&so2->so_rcv); 1815 1816 if (so2->so_rcv.sb_state & SBS_CANTRCVMORE) { 1817 /* 1818 * XXXGL: maybe kn->kn_flags |= EV_EOF ? 1819 */ 1820 return (1); 1821 } else if (kn->kn_sfflags & NOTE_LOWAT) 1822 return (kn->kn_data >= kn->kn_sdata); 1823 else 1824 return (kn->kn_data >= so2->so_rcv.sb_lowat); 1825 } 1826 1827 static int 1828 uipc_filt_soempty(struct knote *kn, long hint) 1829 { 1830 struct socket *so = kn->kn_fp->f_data, *so2; 1831 struct unpcb *unp = sotounpcb(so), *unp2 = unp->unp_conn; 1832 1833 if (SOLISTENING(so) || unp2 == NULL) 1834 return (1); 1835 1836 so2 = unp2->unp_socket; 1837 SOCK_RECVBUF_LOCK_ASSERT(so2); 1838 kn->kn_data = uipc_stream_sbspace(&so2->so_rcv); 1839 1840 return (kn->kn_data == 0 ? 
1 : 0); 1841 } 1842 1843 static const struct filterops uipc_write_filtops = { 1844 .f_isfd = 1, 1845 .f_detach = uipc_filt_sowdetach, 1846 .f_event = uipc_filt_sowrite, 1847 }; 1848 static const struct filterops uipc_empty_filtops = { 1849 .f_isfd = 1, 1850 .f_detach = uipc_filt_sowdetach, 1851 .f_event = uipc_filt_soempty, 1852 }; 1853 1854 static int 1855 uipc_kqfilter_stream_or_seqpacket(struct socket *so, struct knote *kn) 1856 { 1857 struct unpcb *unp = sotounpcb(so); 1858 struct knlist *knl; 1859 1860 switch (kn->kn_filter) { 1861 case EVFILT_READ: 1862 return (sokqfilter_generic(so, kn)); 1863 case EVFILT_WRITE: 1864 kn->kn_fop = &uipc_write_filtops; 1865 break; 1866 case EVFILT_EMPTY: 1867 kn->kn_fop = &uipc_empty_filtops; 1868 break; 1869 default: 1870 return (EINVAL); 1871 } 1872 1873 knl = &so->so_wrsel.si_note; 1874 UNP_PCB_LOCK(unp); 1875 if (SOLISTENING(so)) { 1876 SOLISTEN_LOCK(so); 1877 knlist_add(knl, kn, 1); 1878 SOLISTEN_UNLOCK(so); 1879 } else { 1880 struct socket *so2 = so->so_rcv.uxst_peer; 1881 1882 if (so2 != NULL) 1883 SOCK_RECVBUF_LOCK(so2); 1884 knlist_add(knl, kn, 1); 1885 if (so2 != NULL) 1886 SOCK_RECVBUF_UNLOCK(so2); 1887 } 1888 UNP_PCB_UNLOCK(unp); 1889 return (0); 1890 } 1891 1892 /* PF_UNIX/SOCK_DGRAM version of sbspace() */ 1893 static inline bool 1894 uipc_dgram_sbspace(struct sockbuf *sb, u_int cc, u_int mbcnt) 1895 { 1896 u_int bleft, mleft; 1897 1898 /* 1899 * Negative space may happen if send(2) is followed by 1900 * setsockopt(SO_SNDBUF/SO_RCVBUF) that shrinks maximum. 1901 */ 1902 if (__predict_false(sb->sb_hiwat < sb->uxdg_cc || 1903 sb->sb_mbmax < sb->uxdg_mbcnt)) 1904 return (false); 1905 1906 if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) 1907 return (false); 1908 1909 bleft = sb->sb_hiwat - sb->uxdg_cc; 1910 mleft = sb->sb_mbmax - sb->uxdg_mbcnt; 1911 1912 return (bleft >= cc && mleft >= mbcnt); 1913 } 1914 1915 /* 1916 * PF_UNIX/SOCK_DGRAM send 1917 * 1918 * Allocate a record consisting of 3 mbufs in the sequence of 1919 * from -> control -> data and append it to the socket buffer. 1920 * 1921 * The first mbuf carries sender's name and is a pkthdr that stores 1922 * overall length of datagram, its memory consumption and control length. 
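 *
 * Schematically, one queued record looks like this (control is optional,
 * data may span several mbufs):
 *
 *	MT_SONAME ------------> MT_CONTROL ----------> MT_DATA [-> MT_DATA ...]
 *	sender's sockaddr_un    internalized           datagram payload
 *	pkthdr: len, memlen,    SCM_RIGHTS and/or
 *	ctllen                  SCM_CREDS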
1923 */ 1924 #define ctllen PH_loc.thirtytwo[1] 1925 _Static_assert(offsetof(struct pkthdr, memlen) + sizeof(u_int) <= 1926 offsetof(struct pkthdr, ctllen), "unix/dgram can not store ctllen"); 1927 static int 1928 uipc_sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio, 1929 struct mbuf *m, struct mbuf *c, int flags, struct thread *td) 1930 { 1931 struct unpcb *unp, *unp2; 1932 const struct sockaddr *from; 1933 struct socket *so2; 1934 struct sockbuf *sb; 1935 struct mchain cmc = MCHAIN_INITIALIZER(&cmc); 1936 struct mbuf *f; 1937 u_int cc, ctl, mbcnt; 1938 u_int dcc __diagused, dctl __diagused, dmbcnt __diagused; 1939 int error; 1940 1941 MPASS((uio != NULL && m == NULL) || (m != NULL && uio == NULL)); 1942 1943 error = 0; 1944 f = NULL; 1945 1946 if (__predict_false(flags & MSG_OOB)) { 1947 error = EOPNOTSUPP; 1948 goto out; 1949 } 1950 if (m == NULL) { 1951 if (__predict_false(uio->uio_resid > unpdg_maxdgram)) { 1952 error = EMSGSIZE; 1953 goto out; 1954 } 1955 m = m_uiotombuf(uio, M_WAITOK, 0, max_hdr, M_PKTHDR); 1956 if (__predict_false(m == NULL)) { 1957 error = EFAULT; 1958 goto out; 1959 } 1960 f = m_gethdr(M_WAITOK, MT_SONAME); 1961 cc = m->m_pkthdr.len; 1962 mbcnt = MSIZE + m->m_pkthdr.memlen; 1963 if (c != NULL && (error = unp_internalize(c, &cmc, td))) 1964 goto out; 1965 } else { 1966 struct mchain mc; 1967 1968 uipc_reset_kernel_mbuf(m, &mc); 1969 cc = mc.mc_len; 1970 mbcnt = mc.mc_mlen; 1971 if (__predict_false(m->m_pkthdr.len > unpdg_maxdgram)) { 1972 error = EMSGSIZE; 1973 goto out; 1974 } 1975 if ((f = m_gethdr(M_NOWAIT, MT_SONAME)) == NULL) { 1976 error = ENOBUFS; 1977 goto out; 1978 } 1979 } 1980 1981 unp = sotounpcb(so); 1982 MPASS(unp); 1983 1984 /* 1985 * XXXGL: would be cool to fully remove so_snd out of the equation 1986 * and avoid this lock, which is not only extraneous, but also being 1987 * released, thus still leaving possibility for a race. We can easily 1988 * handle SBS_CANTSENDMORE/SS_ISCONNECTED complement in unpcb, but it 1989 * is more difficult to invent something to handle so_error. 1990 */ 1991 error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags)); 1992 if (error) 1993 goto out2; 1994 SOCK_SENDBUF_LOCK(so); 1995 if (so->so_snd.sb_state & SBS_CANTSENDMORE) { 1996 SOCK_SENDBUF_UNLOCK(so); 1997 error = EPIPE; 1998 goto out3; 1999 } 2000 if (so->so_error != 0) { 2001 error = so->so_error; 2002 so->so_error = 0; 2003 SOCK_SENDBUF_UNLOCK(so); 2004 goto out3; 2005 } 2006 if (((so->so_state & SS_ISCONNECTED) == 0) && addr == NULL) { 2007 SOCK_SENDBUF_UNLOCK(so); 2008 error = EDESTADDRREQ; 2009 goto out3; 2010 } 2011 SOCK_SENDBUF_UNLOCK(so); 2012 2013 if (addr != NULL) { 2014 if ((error = unp_connectat(AT_FDCWD, so, addr, td, true))) 2015 goto out3; 2016 UNP_PCB_LOCK_ASSERT(unp); 2017 unp2 = unp->unp_conn; 2018 UNP_PCB_LOCK_ASSERT(unp2); 2019 } else { 2020 UNP_PCB_LOCK(unp); 2021 unp2 = unp_pcb_lock_peer(unp); 2022 if (unp2 == NULL) { 2023 UNP_PCB_UNLOCK(unp); 2024 error = ENOTCONN; 2025 goto out3; 2026 } 2027 } 2028 2029 if (unp2->unp_flags & UNP_WANTCRED_MASK) 2030 unp_addsockcred(td, &cmc, unp2->unp_flags); 2031 if (unp->unp_addr != NULL) 2032 from = (struct sockaddr *)unp->unp_addr; 2033 else 2034 from = &sun_noname; 2035 f->m_len = from->sa_len; 2036 MPASS(from->sa_len <= MLEN); 2037 bcopy(from, mtod(f, void *), from->sa_len); 2038 2039 /* 2040 * Concatenate mbufs: from -> control -> data. 2041 * Save overall cc and mbcnt in "from" mbuf. 
2042 */ 2043 if (!STAILQ_EMPTY(&cmc.mc_q)) { 2044 f->m_next = mc_first(&cmc); 2045 mc_last(&cmc)->m_next = m; 2046 /* XXXGL: This is dirty as well as rollback after ENOBUFS. */ 2047 STAILQ_INIT(&cmc.mc_q); 2048 } else 2049 f->m_next = m; 2050 m = NULL; 2051 ctl = f->m_len + cmc.mc_len; 2052 mbcnt += cmc.mc_mlen; 2053 #ifdef INVARIANTS 2054 dcc = dctl = dmbcnt = 0; 2055 for (struct mbuf *mb = f; mb != NULL; mb = mb->m_next) { 2056 if (mb->m_type == MT_DATA) 2057 dcc += mb->m_len; 2058 else 2059 dctl += mb->m_len; 2060 dmbcnt += MSIZE; 2061 if (mb->m_flags & M_EXT) 2062 dmbcnt += mb->m_ext.ext_size; 2063 } 2064 MPASS(dcc == cc); 2065 MPASS(dctl == ctl); 2066 MPASS(dmbcnt == mbcnt); 2067 #endif 2068 f->m_pkthdr.len = cc + ctl; 2069 f->m_pkthdr.memlen = mbcnt; 2070 f->m_pkthdr.ctllen = ctl; 2071 2072 /* 2073 * Destination socket buffer selection. 2074 * 2075 * Unconnected sends, when !(so->so_state & SS_ISCONNECTED) and the 2076 * destination address is supplied, create a temporary connection for 2077 * the run time of the function (see call to unp_connectat() above and 2078 * to unp_disconnect() below). We distinguish them by condition of 2079 * (addr != NULL). We intentionally avoid adding 'bool connected' for 2080 * that condition, since, again, through the run time of this code we 2081 * are always connected. For such "unconnected" sends, the destination 2082 * buffer would be the receive buffer of destination socket so2. 2083 * 2084 * For connected sends, data lands on the send buffer of the sender's 2085 * socket "so". Then, if we just added the very first datagram 2086 * on this send buffer, we need to add the send buffer on to the 2087 * receiving socket's buffer list. We put ourselves on top of the 2088 * list. Such logic gives infrequent senders priority over frequent 2089 * senders. 2090 * 2091 * Note on byte count management. As long as event methods kevent(2), 2092 * select(2) are not protocol specific (yet), we need to maintain 2093 * meaningful values on the receive buffer. So, the receive buffer 2094 * would accumulate counters from all connected buffers potentially 2095 * having sb_ccc > sb_hiwat or sb_mbcnt > sb_mbmax. 2096 */ 2097 so2 = unp2->unp_socket; 2098 sb = (addr == NULL) ? &so->so_snd : &so2->so_rcv; 2099 SOCK_RECVBUF_LOCK(so2); 2100 if (uipc_dgram_sbspace(sb, cc + ctl, mbcnt)) { 2101 if (addr == NULL && STAILQ_EMPTY(&sb->uxdg_mb)) 2102 TAILQ_INSERT_HEAD(&so2->so_rcv.uxdg_conns, &so->so_snd, 2103 uxdg_clist); 2104 STAILQ_INSERT_TAIL(&sb->uxdg_mb, f, m_stailqpkt); 2105 sb->uxdg_cc += cc + ctl; 2106 sb->uxdg_ctl += ctl; 2107 sb->uxdg_mbcnt += mbcnt; 2108 so2->so_rcv.sb_acc += cc + ctl; 2109 so2->so_rcv.sb_ccc += cc + ctl; 2110 so2->so_rcv.sb_ctl += ctl; 2111 so2->so_rcv.sb_mbcnt += mbcnt; 2112 sorwakeup_locked(so2); 2113 f = NULL; 2114 } else { 2115 soroverflow_locked(so2); 2116 error = ENOBUFS; 2117 if (f->m_next->m_type == MT_CONTROL) { 2118 STAILQ_FIRST(&cmc.mc_q) = f->m_next; 2119 f->m_next = NULL; 2120 } 2121 } 2122 2123 if (addr != NULL) 2124 unp_disconnect(unp, unp2); 2125 else 2126 unp_pcb_unlock_pair(unp, unp2); 2127 2128 td->td_ru.ru_msgsnd++; 2129 2130 out3: 2131 SOCK_IO_SEND_UNLOCK(so); 2132 out2: 2133 if (!mc_empty(&cmc)) 2134 unp_scan(mc_first(&cmc), unp_freerights); 2135 out: 2136 if (f) 2137 m_freem(f); 2138 mc_freem(&cmc); 2139 if (m) 2140 m_freem(m); 2141 2142 return (error); 2143 } 2144 2145 /* 2146 * PF_UNIX/SOCK_DGRAM receive with MSG_PEEK. 
2147 * The mbuf has already been unlinked from the uxdg_mb of socket buffer 2148 * and needs to be linked onto uxdg_peeked of receive socket buffer. 2149 */ 2150 static int 2151 uipc_peek_dgram(struct socket *so, struct mbuf *m, struct sockaddr **psa, 2152 struct uio *uio, struct mbuf **controlp, int *flagsp) 2153 { 2154 ssize_t len = 0; 2155 int error; 2156 2157 so->so_rcv.uxdg_peeked = m; 2158 so->so_rcv.uxdg_cc += m->m_pkthdr.len; 2159 so->so_rcv.uxdg_ctl += m->m_pkthdr.ctllen; 2160 so->so_rcv.uxdg_mbcnt += m->m_pkthdr.memlen; 2161 SOCK_RECVBUF_UNLOCK(so); 2162 2163 KASSERT(m->m_type == MT_SONAME, ("m->m_type == %d", m->m_type)); 2164 if (psa != NULL) 2165 *psa = sodupsockaddr(mtod(m, struct sockaddr *), M_WAITOK); 2166 2167 m = m->m_next; 2168 KASSERT(m, ("%s: no data or control after soname", __func__)); 2169 2170 /* 2171 * With MSG_PEEK the control isn't executed, just copied. 2172 */ 2173 while (m != NULL && m->m_type == MT_CONTROL) { 2174 if (controlp != NULL) { 2175 *controlp = m_copym(m, 0, m->m_len, M_WAITOK); 2176 controlp = &(*controlp)->m_next; 2177 } 2178 m = m->m_next; 2179 } 2180 KASSERT(m == NULL || m->m_type == MT_DATA, 2181 ("%s: not MT_DATA mbuf %p", __func__, m)); 2182 while (m != NULL && uio->uio_resid > 0) { 2183 len = uio->uio_resid; 2184 if (len > m->m_len) 2185 len = m->m_len; 2186 error = uiomove(mtod(m, char *), (int)len, uio); 2187 if (error) { 2188 SOCK_IO_RECV_UNLOCK(so); 2189 return (error); 2190 } 2191 if (len == m->m_len) 2192 m = m->m_next; 2193 } 2194 SOCK_IO_RECV_UNLOCK(so); 2195 2196 if (flagsp != NULL) { 2197 if (m != NULL) { 2198 if (*flagsp & MSG_TRUNC) { 2199 /* Report real length of the packet */ 2200 uio->uio_resid -= m_length(m, NULL) - len; 2201 } 2202 *flagsp |= MSG_TRUNC; 2203 } else 2204 *flagsp &= ~MSG_TRUNC; 2205 } 2206 2207 return (0); 2208 } 2209 2210 /* 2211 * PF_UNIX/SOCK_DGRAM receive 2212 */ 2213 static int 2214 uipc_soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio, 2215 struct mbuf **mp0, struct mbuf **controlp, int *flagsp) 2216 { 2217 struct sockbuf *sb = NULL; 2218 struct mbuf *m; 2219 int flags, error; 2220 ssize_t len = 0; 2221 bool nonblock; 2222 2223 MPASS(mp0 == NULL); 2224 2225 if (psa != NULL) 2226 *psa = NULL; 2227 if (controlp != NULL) 2228 *controlp = NULL; 2229 2230 flags = flagsp != NULL ? *flagsp : 0; 2231 nonblock = (so->so_state & SS_NBIO) || 2232 (flags & (MSG_DONTWAIT | MSG_NBIO)); 2233 2234 error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags)); 2235 if (__predict_false(error)) 2236 return (error); 2237 2238 /* 2239 * Loop blocking while waiting for a datagram. Prioritize connected 2240 * peers over unconnected sends. Set sb to selected socket buffer 2241 * containing an mbuf on exit from the wait loop. A datagram that 2242 * had already been peeked at has top priority. 
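 *
 * In terms of the fields checked below: a datagram stashed by a previous
 * MSG_PEEK (uxdg_peeked) wins, then the per-peer send buffers linked on
 * uxdg_conns, and only then datagrams queued by unconnected senders
 * directly on our own uxdg_mb.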
2243 */ 2244 SOCK_RECVBUF_LOCK(so); 2245 while ((m = so->so_rcv.uxdg_peeked) == NULL && 2246 (sb = TAILQ_FIRST(&so->so_rcv.uxdg_conns)) == NULL && 2247 (m = STAILQ_FIRST(&so->so_rcv.uxdg_mb)) == NULL) { 2248 if (so->so_error) { 2249 error = so->so_error; 2250 if (!(flags & MSG_PEEK)) 2251 so->so_error = 0; 2252 SOCK_RECVBUF_UNLOCK(so); 2253 SOCK_IO_RECV_UNLOCK(so); 2254 return (error); 2255 } 2256 if (so->so_rcv.sb_state & SBS_CANTRCVMORE || 2257 uio->uio_resid == 0) { 2258 SOCK_RECVBUF_UNLOCK(so); 2259 SOCK_IO_RECV_UNLOCK(so); 2260 return (0); 2261 } 2262 if (nonblock) { 2263 SOCK_RECVBUF_UNLOCK(so); 2264 SOCK_IO_RECV_UNLOCK(so); 2265 return (EWOULDBLOCK); 2266 } 2267 error = sbwait(so, SO_RCV); 2268 if (error) { 2269 SOCK_RECVBUF_UNLOCK(so); 2270 SOCK_IO_RECV_UNLOCK(so); 2271 return (error); 2272 } 2273 } 2274 2275 if (sb == NULL) 2276 sb = &so->so_rcv; 2277 else if (m == NULL) 2278 m = STAILQ_FIRST(&sb->uxdg_mb); 2279 else 2280 MPASS(m == so->so_rcv.uxdg_peeked); 2281 2282 MPASS(sb->uxdg_cc > 0); 2283 M_ASSERTPKTHDR(m); 2284 KASSERT(m->m_type == MT_SONAME, ("m->m_type == %d", m->m_type)); 2285 2286 if (uio->uio_td) 2287 uio->uio_td->td_ru.ru_msgrcv++; 2288 2289 if (__predict_true(m != so->so_rcv.uxdg_peeked)) { 2290 STAILQ_REMOVE_HEAD(&sb->uxdg_mb, m_stailqpkt); 2291 if (STAILQ_EMPTY(&sb->uxdg_mb) && sb != &so->so_rcv) 2292 TAILQ_REMOVE(&so->so_rcv.uxdg_conns, sb, uxdg_clist); 2293 } else 2294 so->so_rcv.uxdg_peeked = NULL; 2295 2296 sb->uxdg_cc -= m->m_pkthdr.len; 2297 sb->uxdg_ctl -= m->m_pkthdr.ctllen; 2298 sb->uxdg_mbcnt -= m->m_pkthdr.memlen; 2299 2300 if (__predict_false(flags & MSG_PEEK)) 2301 return (uipc_peek_dgram(so, m, psa, uio, controlp, flagsp)); 2302 2303 so->so_rcv.sb_acc -= m->m_pkthdr.len; 2304 so->so_rcv.sb_ccc -= m->m_pkthdr.len; 2305 so->so_rcv.sb_ctl -= m->m_pkthdr.ctllen; 2306 so->so_rcv.sb_mbcnt -= m->m_pkthdr.memlen; 2307 SOCK_RECVBUF_UNLOCK(so); 2308 2309 if (psa != NULL) 2310 *psa = sodupsockaddr(mtod(m, struct sockaddr *), M_WAITOK); 2311 m = m_free(m); 2312 KASSERT(m, ("%s: no data or control after soname", __func__)); 2313 2314 /* 2315 * Packet to copyout() is now in 'm' and it is disconnected from the 2316 * queue. 2317 * 2318 * Process one or more MT_CONTROL mbufs present before any data mbufs 2319 * in the first mbuf chain on the socket buffer. We call into the 2320 * unp_externalize() to perform externalization (or freeing if 2321 * controlp == NULL). In some cases there can be only MT_CONTROL mbufs 2322 * without MT_DATA mbufs. 2323 */ 2324 while (m != NULL && m->m_type == MT_CONTROL) { 2325 struct mbuf *cm; 2326 2327 /* XXXGL: unp_externalize() is also dom_externalize() KBI and 2328 * it frees whole chain, so we must disconnect the mbuf. 
2329 */ 2330 cm = m; m = m->m_next; cm->m_next = NULL; 2331 error = unp_externalize(cm, controlp, flags); 2332 if (error != 0) { 2333 SOCK_IO_RECV_UNLOCK(so); 2334 unp_scan(m, unp_freerights); 2335 m_freem(m); 2336 return (error); 2337 } 2338 if (controlp != NULL) { 2339 while (*controlp != NULL) 2340 controlp = &(*controlp)->m_next; 2341 } 2342 } 2343 KASSERT(m == NULL || m->m_type == MT_DATA, 2344 ("%s: not MT_DATA mbuf %p", __func__, m)); 2345 while (m != NULL && uio->uio_resid > 0) { 2346 len = uio->uio_resid; 2347 if (len > m->m_len) 2348 len = m->m_len; 2349 error = uiomove(mtod(m, char *), (int)len, uio); 2350 if (error) { 2351 SOCK_IO_RECV_UNLOCK(so); 2352 m_freem(m); 2353 return (error); 2354 } 2355 if (len == m->m_len) 2356 m = m_free(m); 2357 else { 2358 m->m_data += len; 2359 m->m_len -= len; 2360 } 2361 } 2362 SOCK_IO_RECV_UNLOCK(so); 2363 2364 if (m != NULL) { 2365 if (flagsp != NULL) { 2366 if (flags & MSG_TRUNC) { 2367 /* Report real length of the packet */ 2368 uio->uio_resid -= m_length(m, NULL); 2369 } 2370 *flagsp |= MSG_TRUNC; 2371 } 2372 m_freem(m); 2373 } else if (flagsp != NULL) 2374 *flagsp &= ~MSG_TRUNC; 2375 2376 return (0); 2377 } 2378 2379 static int 2380 uipc_sendfile_wait(struct socket *so, off_t need, int *space) 2381 { 2382 struct unpcb *unp2; 2383 struct socket *so2; 2384 struct sockbuf *sb; 2385 bool nonblock, sockref; 2386 int error; 2387 2388 MPASS(so->so_type == SOCK_STREAM); 2389 MPASS(need > 0); 2390 MPASS(space != NULL); 2391 2392 nonblock = so->so_state & SS_NBIO; 2393 sockref = false; 2394 2395 if (__predict_false((so->so_state & SS_ISCONNECTED) == 0)) 2396 return (ENOTCONN); 2397 2398 if (__predict_false((error = uipc_lock_peer(so, &unp2)) != 0)) 2399 return (error); 2400 2401 so2 = unp2->unp_socket; 2402 sb = &so2->so_rcv; 2403 SOCK_RECVBUF_LOCK(so2); 2404 UNP_PCB_UNLOCK(unp2); 2405 while ((*space = uipc_stream_sbspace(sb)) < need && 2406 (*space < so->so_snd.sb_hiwat / 2)) { 2407 UIPC_STREAM_SBCHECK(sb); 2408 if (nonblock) { 2409 SOCK_RECVBUF_UNLOCK(so2); 2410 return (EAGAIN); 2411 } 2412 if (!sockref) { 2413 soref(so2); sockref = true; /* remember to drop the extra ref */ } 2414 error = sbwait(so2, SO_RCV); 2415 if (error == 0 && 2416 __predict_false(sb->sb_state & SBS_CANTRCVMORE)) 2417 error = EPIPE; 2418 if (error) { 2419 SOCK_RECVBUF_UNLOCK(so2); 2420 sorele(so2); 2421 return (error); 2422 } 2423 } 2424 UIPC_STREAM_SBCHECK(sb); 2425 SOCK_RECVBUF_UNLOCK(so2); 2426 if (sockref) 2427 sorele(so2); 2428 2429 return (0); 2430 } 2431 2432 /* 2433 * Although this is a pr_send method, for unix(4) it is called only via 2434 * the sendfile(2) path. This means we can be sure that mbufs are clear of 2435 * any extra flags and don't require any conditioning.
2436 */ 2437 static int 2438 uipc_sendfile(struct socket *so, int flags, struct mbuf *m, 2439 struct sockaddr *from, struct mbuf *control, struct thread *td) 2440 { 2441 struct mchain mc; 2442 struct unpcb *unp2; 2443 struct socket *so2; 2444 struct sockbuf *sb; 2445 bool notready, wakeup; 2446 int error; 2447 2448 MPASS(so->so_type == SOCK_STREAM); 2449 MPASS(from == NULL && control == NULL); 2450 KASSERT(!(m->m_flags & M_EXTPG), 2451 ("unix(4): TLS sendfile(2) not supported")); 2452 2453 notready = flags & PRUS_NOTREADY; 2454 2455 if (__predict_false((so->so_state & SS_ISCONNECTED) == 0)) { 2456 error = ENOTCONN; 2457 goto out; 2458 } 2459 2460 if (__predict_false((error = uipc_lock_peer(so, &unp2)) != 0)) 2461 goto out; 2462 2463 mc_init_m(&mc, m); 2464 2465 so2 = unp2->unp_socket; 2466 sb = &so2->so_rcv; 2467 SOCK_RECVBUF_LOCK(so2); 2468 UNP_PCB_UNLOCK(unp2); 2469 UIPC_STREAM_SBCHECK(sb); 2470 sb->sb_ccc += mc.mc_len; 2471 sb->sb_mbcnt += mc.mc_mlen; 2472 if (sb->uxst_fnrdy == NULL) { 2473 if (notready) { 2474 wakeup = false; 2475 STAILQ_FOREACH(m, &mc.mc_q, m_stailq) { 2476 if (m->m_flags & M_NOTREADY) { 2477 sb->uxst_fnrdy = m; 2478 break; 2479 } else { 2480 sb->sb_acc += m->m_len; 2481 wakeup = true; 2482 } 2483 } 2484 } else { 2485 wakeup = true; 2486 sb->sb_acc += mc.mc_len; 2487 } 2488 } else { 2489 wakeup = false; 2490 } 2491 STAILQ_CONCAT(&sb->uxst_mbq, &mc.mc_q); 2492 UIPC_STREAM_SBCHECK(sb); 2493 if (wakeup) 2494 sorwakeup_locked(so2); 2495 else 2496 SOCK_RECVBUF_UNLOCK(so2); 2497 2498 return (0); 2499 out: 2500 /* 2501 * In case of not ready data, uipc_ready() is responsible 2502 * for freeing memory. 2503 */ 2504 if (m != NULL && !notready) 2505 m_freem(m); 2506 2507 return (error); 2508 } 2509 2510 static int 2511 uipc_sbready(struct sockbuf *sb, struct mbuf *m, int count) 2512 { 2513 bool blocker; 2514 2515 /* assert locked */ 2516 2517 blocker = (sb->uxst_fnrdy == m); 2518 STAILQ_FOREACH_FROM(m, &sb->uxst_mbq, m_stailq) { 2519 if (count > 0) { 2520 MPASS(m->m_flags & M_NOTREADY); 2521 m->m_flags &= ~M_NOTREADY; 2522 if (blocker) 2523 sb->sb_acc += m->m_len; 2524 count--; 2525 } else if (m->m_flags & M_NOTREADY) 2526 break; 2527 else if (blocker) 2528 sb->sb_acc += m->m_len; 2529 } 2530 if (blocker) { 2531 sb->uxst_fnrdy = m; 2532 return (0); 2533 } else 2534 return (EINPROGRESS); 2535 } 2536 2537 static bool 2538 uipc_ready_scan(struct socket *so, struct mbuf *m, int count, int *errorp) 2539 { 2540 struct mbuf *mb; 2541 struct sockbuf *sb; 2542 2543 SOCK_LOCK(so); 2544 if (SOLISTENING(so)) { 2545 SOCK_UNLOCK(so); 2546 return (false); 2547 } 2548 mb = NULL; 2549 sb = &so->so_rcv; 2550 SOCK_RECVBUF_LOCK(so); 2551 if (sb->uxst_fnrdy != NULL) { 2552 STAILQ_FOREACH(mb, &sb->uxst_mbq, m_stailq) { 2553 if (mb == m) { 2554 *errorp = uipc_sbready(sb, m, count); 2555 break; 2556 } 2557 } 2558 } 2559 SOCK_RECVBUF_UNLOCK(so); 2560 SOCK_UNLOCK(so); 2561 return (mb != NULL); 2562 } 2563 2564 static int 2565 uipc_ready(struct socket *so, struct mbuf *m, int count) 2566 { 2567 struct unpcb *unp, *unp2; 2568 int error; 2569 2570 MPASS(so->so_type == SOCK_STREAM); 2571 2572 if (__predict_true(uipc_lock_peer(so, &unp2) == 0)) { 2573 struct socket *so2; 2574 struct sockbuf *sb; 2575 2576 so2 = unp2->unp_socket; 2577 sb = &so2->so_rcv; 2578 SOCK_RECVBUF_LOCK(so2); 2579 UNP_PCB_UNLOCK(unp2); 2580 UIPC_STREAM_SBCHECK(sb); 2581 error = uipc_sbready(sb, m, count); 2582 UIPC_STREAM_SBCHECK(sb); 2583 if (error == 0) 2584 sorwakeup_locked(so2); 2585 else 2586 SOCK_RECVBUF_UNLOCK(so2); 2587 } else { 
2588 /* 2589 * The receiving socket has been disconnected, but may still 2590 * be valid. In this case, the not-ready mbufs are still 2591 * present in its socket buffer, so perform an exhaustive 2592 * search before giving up and freeing the mbufs. 2593 */ 2594 UNP_LINK_RLOCK(); 2595 LIST_FOREACH(unp, &unp_shead, unp_link) { 2596 if (uipc_ready_scan(unp->unp_socket, m, count, &error)) 2597 break; 2598 } 2599 UNP_LINK_RUNLOCK(); 2600 2601 if (unp == NULL) { 2602 for (int i = 0; i < count; i++) 2603 m = m_free(m); 2604 return (ECONNRESET); 2605 } 2606 } 2607 return (error); 2608 } 2609 2610 static int 2611 uipc_sense(struct socket *so, struct stat *sb) 2612 { 2613 struct unpcb *unp; 2614 2615 unp = sotounpcb(so); 2616 KASSERT(unp != NULL, ("uipc_sense: unp == NULL")); 2617 2618 sb->st_blksize = so->so_snd.sb_hiwat; 2619 sb->st_dev = NODEV; 2620 sb->st_ino = unp->unp_ino; 2621 return (0); 2622 } 2623 2624 static int 2625 uipc_shutdown(struct socket *so, enum shutdown_how how) 2626 { 2627 struct unpcb *unp = sotounpcb(so); 2628 int error; 2629 2630 SOCK_LOCK(so); 2631 if (SOLISTENING(so)) { 2632 if (how != SHUT_WR) { 2633 so->so_error = ECONNABORTED; 2634 solisten_wakeup(so); /* unlocks so */ 2635 } else 2636 SOCK_UNLOCK(so); 2637 return (ENOTCONN); 2638 } else if ((so->so_state & 2639 (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) { 2640 /* 2641 * POSIX mandates us to just return ENOTCONN when shutdown(2) is 2642 * invoked on a datagram socket; however, historically we would 2643 * actually tear the socket down. This is known to be leveraged by 2644 * some applications to unblock a process waiting in recv(2) by 2645 * another process that shares that socket with it. Try to meet 2646 * both backward-compatibility and POSIX requirements by forcing 2647 * ENOTCONN but still flushing buffers and performing wakeup(9). 2648 * 2649 * XXXGL: it remains unknown which applications expect this 2650 * behavior and whether it is isolated to unix/dgram or inet/dgram 2651 * or both. See: D10351, D3039.
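 *
 * A sketch of the pattern some applications are believed to rely on
 * ('s' is a shared, unconnected datagram socket; illustrative only):
 *
 *	// thread A blocks waiting for a datagram
 *	n = recv(s, buf, sizeof(buf), 0);
 *	// thread B wakes it up; shutdown(2) itself reports ENOTCONN,
 *	// but the blocked recv(2) returns 0 thanks to the flush/wakeup
 *	(void)shutdown(s, SHUT_RD);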
2652 */ 2653 error = ENOTCONN; 2654 if (so->so_type != SOCK_DGRAM) { 2655 SOCK_UNLOCK(so); 2656 return (error); 2657 } 2658 } else 2659 error = 0; 2660 SOCK_UNLOCK(so); 2661 2662 switch (how) { 2663 case SHUT_RD: 2664 if (so->so_type == SOCK_DGRAM) 2665 socantrcvmore(so); 2666 else 2667 uipc_cantrcvmore(so); 2668 unp_dispose(so); 2669 break; 2670 case SHUT_RDWR: 2671 if (so->so_type == SOCK_DGRAM) 2672 socantrcvmore(so); 2673 else 2674 uipc_cantrcvmore(so); 2675 unp_dispose(so); 2676 /* FALLTHROUGH */ 2677 case SHUT_WR: 2678 if (so->so_type == SOCK_DGRAM) { 2679 socantsendmore(so); 2680 } else { 2681 UNP_PCB_LOCK(unp); 2682 if (unp->unp_conn != NULL) 2683 uipc_cantrcvmore(unp->unp_conn->unp_socket); 2684 UNP_PCB_UNLOCK(unp); 2685 } 2686 } 2687 wakeup(&so->so_timeo); 2688 2689 return (error); 2690 } 2691 2692 static int 2693 uipc_sockaddr(struct socket *so, struct sockaddr *ret) 2694 { 2695 struct unpcb *unp; 2696 const struct sockaddr *sa; 2697 2698 unp = sotounpcb(so); 2699 KASSERT(unp != NULL, ("uipc_sockaddr: unp == NULL")); 2700 2701 UNP_PCB_LOCK(unp); 2702 if (unp->unp_addr != NULL) 2703 sa = (struct sockaddr *) unp->unp_addr; 2704 else 2705 sa = &sun_noname; 2706 bcopy(sa, ret, sa->sa_len); 2707 UNP_PCB_UNLOCK(unp); 2708 return (0); 2709 } 2710 2711 static int 2712 uipc_ctloutput(struct socket *so, struct sockopt *sopt) 2713 { 2714 struct unpcb *unp; 2715 struct xucred xu; 2716 int error, optval; 2717 2718 if (sopt->sopt_level != SOL_LOCAL) 2719 return (EINVAL); 2720 2721 unp = sotounpcb(so); 2722 KASSERT(unp != NULL, ("uipc_ctloutput: unp == NULL")); 2723 error = 0; 2724 switch (sopt->sopt_dir) { 2725 case SOPT_GET: 2726 switch (sopt->sopt_name) { 2727 case LOCAL_PEERCRED: 2728 UNP_PCB_LOCK(unp); 2729 if (unp->unp_flags & UNP_HAVEPC) 2730 xu = unp->unp_peercred; 2731 else { 2732 if (so->so_proto->pr_flags & PR_CONNREQUIRED) 2733 error = ENOTCONN; 2734 else 2735 error = EINVAL; 2736 } 2737 UNP_PCB_UNLOCK(unp); 2738 if (error == 0) 2739 error = sooptcopyout(sopt, &xu, sizeof(xu)); 2740 break; 2741 2742 case LOCAL_CREDS: 2743 /* Unlocked read. */ 2744 optval = unp->unp_flags & UNP_WANTCRED_ONESHOT ? 1 : 0; 2745 error = sooptcopyout(sopt, &optval, sizeof(optval)); 2746 break; 2747 2748 case LOCAL_CREDS_PERSISTENT: 2749 /* Unlocked read. */ 2750 optval = unp->unp_flags & UNP_WANTCRED_ALWAYS ? 
1 : 0; 2751 error = sooptcopyout(sopt, &optval, sizeof(optval)); 2752 break; 2753 2754 default: 2755 error = EOPNOTSUPP; 2756 break; 2757 } 2758 break; 2759 2760 case SOPT_SET: 2761 switch (sopt->sopt_name) { 2762 case LOCAL_CREDS: 2763 case LOCAL_CREDS_PERSISTENT: 2764 error = sooptcopyin(sopt, &optval, sizeof(optval), 2765 sizeof(optval)); 2766 if (error) 2767 break; 2768 2769 #define OPTSET(bit, exclusive) do { \ 2770 UNP_PCB_LOCK(unp); \ 2771 if (optval) { \ 2772 if ((unp->unp_flags & (exclusive)) != 0) { \ 2773 UNP_PCB_UNLOCK(unp); \ 2774 error = EINVAL; \ 2775 break; \ 2776 } \ 2777 unp->unp_flags |= (bit); \ 2778 } else \ 2779 unp->unp_flags &= ~(bit); \ 2780 UNP_PCB_UNLOCK(unp); \ 2781 } while (0) 2782 2783 switch (sopt->sopt_name) { 2784 case LOCAL_CREDS: 2785 OPTSET(UNP_WANTCRED_ONESHOT, UNP_WANTCRED_ALWAYS); 2786 break; 2787 2788 case LOCAL_CREDS_PERSISTENT: 2789 OPTSET(UNP_WANTCRED_ALWAYS, UNP_WANTCRED_ONESHOT); 2790 break; 2791 2792 default: 2793 break; 2794 } 2795 break; 2796 #undef OPTSET 2797 default: 2798 error = ENOPROTOOPT; 2799 break; 2800 } 2801 break; 2802 2803 default: 2804 error = EOPNOTSUPP; 2805 break; 2806 } 2807 return (error); 2808 } 2809 2810 static int 2811 unp_connect(struct socket *so, struct sockaddr *nam, struct thread *td) 2812 { 2813 2814 return (unp_connectat(AT_FDCWD, so, nam, td, false)); 2815 } 2816 2817 static int 2818 unp_connectat(int fd, struct socket *so, struct sockaddr *nam, 2819 struct thread *td, bool return_locked) 2820 { 2821 struct mtx *vplock; 2822 struct sockaddr_un *soun; 2823 struct vnode *vp; 2824 struct socket *so2; 2825 struct unpcb *unp, *unp2, *unp3; 2826 struct nameidata nd; 2827 char buf[SOCK_MAXADDRLEN]; 2828 struct sockaddr *sa; 2829 cap_rights_t rights; 2830 int error, len; 2831 bool connreq; 2832 2833 CURVNET_ASSERT_SET(); 2834 2835 if (nam->sa_family != AF_UNIX) 2836 return (EAFNOSUPPORT); 2837 if (nam->sa_len > sizeof(struct sockaddr_un)) 2838 return (EINVAL); 2839 len = nam->sa_len - offsetof(struct sockaddr_un, sun_path); 2840 if (len <= 0) 2841 return (EINVAL); 2842 soun = (struct sockaddr_un *)nam; 2843 bcopy(soun->sun_path, buf, len); 2844 buf[len] = 0; 2845 2846 error = 0; 2847 unp = sotounpcb(so); 2848 UNP_PCB_LOCK(unp); 2849 for (;;) { 2850 /* 2851 * Wait for connection state to stabilize. If a connection 2852 * already exists, give up. For datagram sockets, which permit 2853 * multiple consecutive connect(2) calls, upper layers are 2854 * responsible for disconnecting in advance of a subsequent 2855 * connect(2), but this is not synchronized with PCB connection 2856 * state. 2857 * 2858 * Also make sure that no threads are currently attempting to 2859 * lock the peer socket, to ensure that unp_conn cannot 2860 * transition between two valid sockets while locks are dropped. 
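 *
 * (The unp_pairbusy check below implements that: while another thread
 * holds the pair busy, we mark ourselves UNP_WAITING, sleep on the PCB
 * lock, and re-evaluate the connection state from scratch.)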
2861 */ 2862 if (SOLISTENING(so)) 2863 error = EOPNOTSUPP; 2864 else if (unp->unp_conn != NULL) 2865 error = EISCONN; 2866 else if ((unp->unp_flags & UNP_CONNECTING) != 0) { 2867 error = EALREADY; 2868 } 2869 if (error != 0) { 2870 UNP_PCB_UNLOCK(unp); 2871 return (error); 2872 } 2873 if (unp->unp_pairbusy > 0) { 2874 unp->unp_flags |= UNP_WAITING; 2875 mtx_sleep(unp, UNP_PCB_LOCKPTR(unp), 0, "unpeer", 0); 2876 continue; 2877 } 2878 break; 2879 } 2880 unp->unp_flags |= UNP_CONNECTING; 2881 UNP_PCB_UNLOCK(unp); 2882 2883 connreq = (so->so_proto->pr_flags & PR_CONNREQUIRED) != 0; 2884 if (connreq) 2885 sa = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK); 2886 else 2887 sa = NULL; 2888 NDINIT_ATRIGHTS(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, 2889 UIO_SYSSPACE, buf, fd, cap_rights_init_one(&rights, CAP_CONNECTAT)); 2890 error = namei(&nd); 2891 if (error) 2892 vp = NULL; 2893 else 2894 vp = nd.ni_vp; 2895 ASSERT_VOP_LOCKED(vp, "unp_connect"); 2896 if (error) 2897 goto bad; 2898 NDFREE_PNBUF(&nd); 2899 2900 if (vp->v_type != VSOCK) { 2901 error = ENOTSOCK; 2902 goto bad; 2903 } 2904 #ifdef MAC 2905 error = mac_vnode_check_open(td->td_ucred, vp, VWRITE | VREAD); 2906 if (error) 2907 goto bad; 2908 #endif 2909 error = VOP_ACCESS(vp, VWRITE, td->td_ucred, td); 2910 if (error) 2911 goto bad; 2912 2913 unp = sotounpcb(so); 2914 KASSERT(unp != NULL, ("unp_connect: unp == NULL")); 2915 2916 vplock = mtx_pool_find(unp_vp_mtxpool, vp); 2917 mtx_lock(vplock); 2918 VOP_UNP_CONNECT(vp, &unp2); 2919 if (unp2 == NULL) { 2920 error = ECONNREFUSED; 2921 goto bad2; 2922 } 2923 so2 = unp2->unp_socket; 2924 if (so->so_type != so2->so_type) { 2925 error = EPROTOTYPE; 2926 goto bad2; 2927 } 2928 if (connreq) { 2929 if (SOLISTENING(so2)) 2930 so2 = solisten_clone(so2); 2931 else 2932 so2 = NULL; 2933 if (so2 == NULL) { 2934 error = ECONNREFUSED; 2935 goto bad2; 2936 } 2937 if ((error = uipc_attach(so2, 0, NULL)) != 0) { 2938 sodealloc(so2); 2939 goto bad2; 2940 } 2941 unp3 = sotounpcb(so2); 2942 unp_pcb_lock_pair(unp2, unp3); 2943 if (unp2->unp_addr != NULL) { 2944 bcopy(unp2->unp_addr, sa, unp2->unp_addr->sun_len); 2945 unp3->unp_addr = (struct sockaddr_un *) sa; 2946 sa = NULL; 2947 } 2948 2949 unp_copy_peercred(td, unp3, unp, unp2); 2950 2951 UNP_PCB_UNLOCK(unp2); 2952 unp2 = unp3; 2953 2954 /* 2955 * It is safe to block on the PCB lock here since unp2 is 2956 * nascent and cannot be connected to any other sockets. 2957 */ 2958 UNP_PCB_LOCK(unp); 2959 #ifdef MAC 2960 mac_socketpeer_set_from_socket(so, so2); 2961 mac_socketpeer_set_from_socket(so2, so); 2962 #endif 2963 } else { 2964 unp_pcb_lock_pair(unp, unp2); 2965 } 2966 KASSERT(unp2 != NULL && so2 != NULL && unp2->unp_socket == so2 && 2967 sotounpcb(so2) == unp2, 2968 ("%s: unp2 %p so2 %p", __func__, unp2, so2)); 2969 unp_connect2(so, so2, connreq); 2970 if (connreq) 2971 (void)solisten_enqueue(so2, SS_ISCONNECTED); 2972 KASSERT((unp->unp_flags & UNP_CONNECTING) != 0, 2973 ("%s: unp %p has UNP_CONNECTING clear", __func__, unp)); 2974 unp->unp_flags &= ~UNP_CONNECTING; 2975 if (!return_locked) 2976 unp_pcb_unlock_pair(unp, unp2); 2977 bad2: 2978 mtx_unlock(vplock); 2979 bad: 2980 if (vp != NULL) { 2981 /* 2982 * If we are returning locked (called via uipc_sosend_dgram()), 2983 * we need to be sure that vput() won't sleep. This is 2984 * guaranteed by VOP_UNP_CONNECT() call above and unp2 lock. 2985 * SOCK_STREAM/SEQPACKET can't request return_locked (yet). 
2986 */ 2987 MPASS(!(return_locked && connreq)); 2988 vput(vp); 2989 } 2990 free(sa, M_SONAME); 2991 if (__predict_false(error)) { 2992 UNP_PCB_LOCK(unp); 2993 KASSERT((unp->unp_flags & UNP_CONNECTING) != 0, 2994 ("%s: unp %p has UNP_CONNECTING clear", __func__, unp)); 2995 unp->unp_flags &= ~UNP_CONNECTING; 2996 UNP_PCB_UNLOCK(unp); 2997 } 2998 return (error); 2999 } 3000 3001 /* 3002 * Set socket peer credentials at connection time. 3003 * 3004 * The client's PCB credentials are copied from its process structure. The 3005 * server's PCB credentials are copied from the socket on which it called 3006 * listen(2). uipc_listen cached that process's credentials at the time. 3007 */ 3008 void 3009 unp_copy_peercred(struct thread *td, struct unpcb *client_unp, 3010 struct unpcb *server_unp, struct unpcb *listen_unp) 3011 { 3012 cru2xt(td, &client_unp->unp_peercred); 3013 client_unp->unp_flags |= UNP_HAVEPC; 3014 3015 memcpy(&server_unp->unp_peercred, &listen_unp->unp_peercred, 3016 sizeof(server_unp->unp_peercred)); 3017 server_unp->unp_flags |= UNP_HAVEPC; 3018 client_unp->unp_flags |= (listen_unp->unp_flags & UNP_WANTCRED_MASK); 3019 } 3020 3021 /* 3022 * unix/stream & unix/seqpacket version of soisconnected(). 3023 * 3024 * The crucial thing we are doing here is setting up the uxst_peer linkage, 3025 * holding the unp and receive buffer locks of both sockets. The disconnect 3026 * procedure does the same. This gives us a safe way to access the peer in 3027 * send(2) and recv(2) during the socket lifetime. 3028 * 3029 * The less important thing is event notification of the fact that a socket is 3030 * now connected. It is unusual for software to put a socket into an event 3031 * mechanism before connect(2), but it is supposed to be supported. Note that 3032 * there cannot be any sleeping I/O on the socket yet, only presence in 3033 * select/poll/kevent. 3034 * 3035 * This function can be called via two call paths: 3036 * 1) socketpair(2) - in this case the socket has not yet been reported to 3037 * userland and can't have any event notification mechanisms set up. The 3038 * 'wakeup' boolean is always false. 3039 * 2) connect(2) of an existing socket to a recent clone of a listener: 3040 * 2.1) The socket that connect(2)s will have 'wakeup' true. An application 3041 * could have already put it into an event mechanism, in which case it 3042 * shall be reported as readable and as writable. 3043 * 2.2) The socket that was just cloned with solisten_clone(). Same as 1).
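 *
 * A rough userland sketch of case 2.1 (illustrative only; 's' is the
 * connecting socket, 'kq' an existing kqueue, 'sun' a filled-in
 * sockaddr_un; error handling omitted):
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, s, EVFILT_WRITE, EV_ADD, 0, 0, NULL);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);
 *	(void)connect(s, (struct sockaddr *)&sun, sizeof(sun));
 *	// once the connection is established, the socket is reported
 *	// as readable and as writable through the registered filter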
3044 */ 3045 static void 3046 unp_soisconnected(struct socket *so, bool wakeup) 3047 { 3048 struct socket *so2 = sotounpcb(so)->unp_conn->unp_socket; 3049 struct sockbuf *sb; 3050 3051 SOCK_LOCK_ASSERT(so); 3052 UNP_PCB_LOCK_ASSERT(sotounpcb(so)); 3053 UNP_PCB_LOCK_ASSERT(sotounpcb(so2)); 3054 SOCK_RECVBUF_LOCK_ASSERT(so); 3055 SOCK_RECVBUF_LOCK_ASSERT(so2); 3056 3057 MPASS(so->so_type == SOCK_STREAM || so->so_type == SOCK_SEQPACKET); 3058 MPASS((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING | 3059 SS_ISDISCONNECTING)) == 0); 3060 MPASS(so->so_qstate == SQ_NONE); 3061 3062 so->so_state &= ~SS_ISDISCONNECTED; 3063 so->so_state |= SS_ISCONNECTED; 3064 3065 sb = &so2->so_rcv; 3066 sb->uxst_peer = so; 3067 3068 if (wakeup) { 3069 KNOTE_LOCKED(&sb->sb_sel->si_note, 0); 3070 sb = &so->so_rcv; 3071 selwakeuppri(sb->sb_sel, PSOCK); 3072 SOCK_SENDBUF_LOCK_ASSERT(so); 3073 sb = &so->so_snd; 3074 selwakeuppri(sb->sb_sel, PSOCK); 3075 SOCK_SENDBUF_UNLOCK(so); 3076 } 3077 } 3078 3079 static void 3080 unp_connect2(struct socket *so, struct socket *so2, bool wakeup) 3081 { 3082 struct unpcb *unp; 3083 struct unpcb *unp2; 3084 3085 MPASS(so2->so_type == so->so_type); 3086 unp = sotounpcb(so); 3087 KASSERT(unp != NULL, ("unp_connect2: unp == NULL")); 3088 unp2 = sotounpcb(so2); 3089 KASSERT(unp2 != NULL, ("unp_connect2: unp2 == NULL")); 3090 3091 UNP_PCB_LOCK_ASSERT(unp); 3092 UNP_PCB_LOCK_ASSERT(unp2); 3093 KASSERT(unp->unp_conn == NULL, 3094 ("%s: socket %p is already connected", __func__, unp)); 3095 3096 unp->unp_conn = unp2; 3097 unp_pcb_hold(unp2); 3098 unp_pcb_hold(unp); 3099 switch (so->so_type) { 3100 case SOCK_DGRAM: 3101 UNP_REF_LIST_LOCK(); 3102 LIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_reflink); 3103 UNP_REF_LIST_UNLOCK(); 3104 soisconnected(so); 3105 break; 3106 3107 case SOCK_STREAM: 3108 case SOCK_SEQPACKET: 3109 KASSERT(unp2->unp_conn == NULL, 3110 ("%s: socket %p is already connected", __func__, unp2)); 3111 unp2->unp_conn = unp; 3112 SOCK_LOCK(so); 3113 SOCK_LOCK(so2); 3114 if (wakeup) /* Avoid LOR with receive buffer lock. */ 3115 SOCK_SENDBUF_LOCK(so); 3116 SOCK_RECVBUF_LOCK(so); 3117 SOCK_RECVBUF_LOCK(so2); 3118 unp_soisconnected(so, wakeup); /* Will unlock send buffer. 
*/ 3119 unp_soisconnected(so2, false); 3120 SOCK_RECVBUF_UNLOCK(so); 3121 SOCK_RECVBUF_UNLOCK(so2); 3122 SOCK_UNLOCK(so); 3123 SOCK_UNLOCK(so2); 3124 break; 3125 3126 default: 3127 panic("unp_connect2"); 3128 } 3129 } 3130 3131 static void 3132 unp_soisdisconnected(struct socket *so) 3133 { 3134 SOCK_LOCK_ASSERT(so); 3135 SOCK_RECVBUF_LOCK_ASSERT(so); 3136 MPASS(so->so_type == SOCK_STREAM || so->so_type == SOCK_SEQPACKET); 3137 MPASS(!SOLISTENING(so)); 3138 MPASS((so->so_state & (SS_ISCONNECTING | SS_ISDISCONNECTING | 3139 SS_ISDISCONNECTED)) == 0); 3140 MPASS(so->so_state & SS_ISCONNECTED); 3141 3142 so->so_state |= SS_ISDISCONNECTED; 3143 so->so_state &= ~SS_ISCONNECTED; 3144 so->so_rcv.uxst_peer = NULL; 3145 socantrcvmore_locked(so); 3146 } 3147 3148 static void 3149 unp_disconnect(struct unpcb *unp, struct unpcb *unp2) 3150 { 3151 struct socket *so, *so2; 3152 struct mbuf *m = NULL; 3153 #ifdef INVARIANTS 3154 struct unpcb *unptmp; 3155 #endif 3156 3157 UNP_PCB_LOCK_ASSERT(unp); 3158 UNP_PCB_LOCK_ASSERT(unp2); 3159 KASSERT(unp->unp_conn == unp2, 3160 ("%s: unpcb %p is not connected to %p", __func__, unp, unp2)); 3161 3162 unp->unp_conn = NULL; 3163 so = unp->unp_socket; 3164 so2 = unp2->unp_socket; 3165 switch (unp->unp_socket->so_type) { 3166 case SOCK_DGRAM: 3167 /* 3168 * Remove our send socket buffer from the peer's receive buffer. 3169 * Move the data to the receive buffer only if it is empty. 3170 * This is a protection against a scenario where a peer 3171 * connects, floods and disconnects, effectively blocking 3172 * sendto() from unconnected sockets. 3173 */ 3174 SOCK_RECVBUF_LOCK(so2); 3175 if (!STAILQ_EMPTY(&so->so_snd.uxdg_mb)) { 3176 TAILQ_REMOVE(&so2->so_rcv.uxdg_conns, &so->so_snd, 3177 uxdg_clist); 3178 if (__predict_true((so2->so_rcv.sb_state & 3179 SBS_CANTRCVMORE) == 0) && 3180 STAILQ_EMPTY(&so2->so_rcv.uxdg_mb)) { 3181 STAILQ_CONCAT(&so2->so_rcv.uxdg_mb, 3182 &so->so_snd.uxdg_mb); 3183 so2->so_rcv.uxdg_cc += so->so_snd.uxdg_cc; 3184 so2->so_rcv.uxdg_ctl += so->so_snd.uxdg_ctl; 3185 so2->so_rcv.uxdg_mbcnt += so->so_snd.uxdg_mbcnt; 3186 } else { 3187 m = STAILQ_FIRST(&so->so_snd.uxdg_mb); 3188 STAILQ_INIT(&so->so_snd.uxdg_mb); 3189 so2->so_rcv.sb_acc -= so->so_snd.uxdg_cc; 3190 so2->so_rcv.sb_ccc -= so->so_snd.uxdg_cc; 3191 so2->so_rcv.sb_ctl -= so->so_snd.uxdg_ctl; 3192 so2->so_rcv.sb_mbcnt -= so->so_snd.uxdg_mbcnt; 3193 } 3194 /* Note: so may reconnect. 
*/ 3195 so->so_snd.uxdg_cc = 0; 3196 so->so_snd.uxdg_ctl = 0; 3197 so->so_snd.uxdg_mbcnt = 0; 3198 } 3199 SOCK_RECVBUF_UNLOCK(so2); 3200 UNP_REF_LIST_LOCK(); 3201 #ifdef INVARIANTS 3202 LIST_FOREACH(unptmp, &unp2->unp_refs, unp_reflink) { 3203 if (unptmp == unp) 3204 break; 3205 } 3206 KASSERT(unptmp != NULL, 3207 ("%s: %p not found in reflist of %p", __func__, unp, unp2)); 3208 #endif 3209 LIST_REMOVE(unp, unp_reflink); 3210 UNP_REF_LIST_UNLOCK(); 3211 if (so) { 3212 SOCK_LOCK(so); 3213 so->so_state &= ~SS_ISCONNECTED; 3214 SOCK_UNLOCK(so); 3215 } 3216 break; 3217 3218 case SOCK_STREAM: 3219 case SOCK_SEQPACKET: 3220 SOCK_LOCK(so); 3221 SOCK_LOCK(so2); 3222 SOCK_RECVBUF_LOCK(so); 3223 SOCK_RECVBUF_LOCK(so2); 3224 unp_soisdisconnected(so); 3225 MPASS(unp2->unp_conn == unp); 3226 unp2->unp_conn = NULL; 3227 unp_soisdisconnected(so2); 3228 SOCK_UNLOCK(so); 3229 SOCK_UNLOCK(so2); 3230 break; 3231 } 3232 3233 if (unp == unp2) { 3234 unp_pcb_rele_notlast(unp); 3235 if (!unp_pcb_rele(unp)) 3236 UNP_PCB_UNLOCK(unp); 3237 } else { 3238 if (!unp_pcb_rele(unp)) 3239 UNP_PCB_UNLOCK(unp); 3240 if (!unp_pcb_rele(unp2)) 3241 UNP_PCB_UNLOCK(unp2); 3242 } 3243 3244 if (m != NULL) { 3245 unp_scan(m, unp_freerights); 3246 m_freemp(m); 3247 } 3248 } 3249 3250 /* 3251 * unp_pcblist() walks the global list of struct unpcb's to generate a 3252 * pointer list, bumping the refcount on each unpcb. It then copies them out 3253 * sequentially, validating the generation number on each to see if it has 3254 * been detached. All of this is necessary because copyout() may sleep on 3255 * disk I/O. 3256 */ 3257 static int 3258 unp_pcblist(SYSCTL_HANDLER_ARGS) 3259 { 3260 struct unpcb *unp, **unp_list; 3261 unp_gen_t gencnt; 3262 struct xunpgen *xug; 3263 struct unp_head *head; 3264 struct xunpcb *xu; 3265 u_int i; 3266 int error, n; 3267 3268 switch ((intptr_t)arg1) { 3269 case SOCK_STREAM: 3270 head = &unp_shead; 3271 break; 3272 3273 case SOCK_DGRAM: 3274 head = &unp_dhead; 3275 break; 3276 3277 case SOCK_SEQPACKET: 3278 head = &unp_sphead; 3279 break; 3280 3281 default: 3282 panic("unp_pcblist: arg1 %d", (int)(intptr_t)arg1); 3283 } 3284 3285 /* 3286 * The process of preparing the PCB list is too time-consuming and 3287 * resource-intensive to repeat twice on every request. 3288 */ 3289 if (req->oldptr == NULL) { 3290 n = unp_count; 3291 req->oldidx = 2 * (sizeof *xug) 3292 + (n + n/8) * sizeof(struct xunpcb); 3293 return (0); 3294 } 3295 3296 if (req->newptr != NULL) 3297 return (EPERM); 3298 3299 /* 3300 * OK, now we're committed to doing something. 3301 */ 3302 xug = malloc(sizeof(*xug), M_TEMP, M_WAITOK | M_ZERO); 3303 UNP_LINK_RLOCK(); 3304 gencnt = unp_gencnt; 3305 n = unp_count; 3306 UNP_LINK_RUNLOCK(); 3307 3308 xug->xug_len = sizeof *xug; 3309 xug->xug_count = n; 3310 xug->xug_gen = gencnt; 3311 xug->xug_sogen = so_gencnt; 3312 error = SYSCTL_OUT(req, xug, sizeof *xug); 3313 if (error) { 3314 free(xug, M_TEMP); 3315 return (error); 3316 } 3317 3318 unp_list = malloc(n * sizeof *unp_list, M_TEMP, M_WAITOK); 3319 3320 UNP_LINK_RLOCK(); 3321 for (unp = LIST_FIRST(head), i = 0; unp && i < n; 3322 unp = LIST_NEXT(unp, unp_link)) { 3323 UNP_PCB_LOCK(unp); 3324 if (unp->unp_gencnt <= gencnt) { 3325 if (cr_cansee(req->td->td_ucred, 3326 unp->unp_socket->so_cred)) { 3327 UNP_PCB_UNLOCK(unp); 3328 continue; 3329 } 3330 unp_list[i++] = unp; 3331 unp_pcb_hold(unp); 3332 } 3333 UNP_PCB_UNLOCK(unp); 3334 } 3335 UNP_LINK_RUNLOCK(); 3336 n = i; /* In case we lost some during malloc. 
*/ 3337 3338 error = 0; 3339 xu = malloc(sizeof(*xu), M_TEMP, M_WAITOK | M_ZERO); 3340 for (i = 0; i < n; i++) { 3341 unp = unp_list[i]; 3342 UNP_PCB_LOCK(unp); 3343 if (unp_pcb_rele(unp)) 3344 continue; 3345 3346 if (unp->unp_gencnt <= gencnt) { 3347 xu->xu_len = sizeof *xu; 3348 xu->xu_unpp = (uintptr_t)unp; 3349 /* 3350 * XXX - need more locking here to protect against 3351 * connect/disconnect races for SMP. 3352 */ 3353 if (unp->unp_addr != NULL) 3354 bcopy(unp->unp_addr, &xu->xu_addr, 3355 unp->unp_addr->sun_len); 3356 else 3357 bzero(&xu->xu_addr, sizeof(xu->xu_addr)); 3358 if (unp->unp_conn != NULL && 3359 unp->unp_conn->unp_addr != NULL) 3360 bcopy(unp->unp_conn->unp_addr, 3361 &xu->xu_caddr, 3362 unp->unp_conn->unp_addr->sun_len); 3363 else 3364 bzero(&xu->xu_caddr, sizeof(xu->xu_caddr)); 3365 xu->unp_vnode = (uintptr_t)unp->unp_vnode; 3366 xu->unp_conn = (uintptr_t)unp->unp_conn; 3367 xu->xu_firstref = (uintptr_t)LIST_FIRST(&unp->unp_refs); 3368 xu->xu_nextref = (uintptr_t)LIST_NEXT(unp, unp_reflink); 3369 xu->unp_gencnt = unp->unp_gencnt; 3370 sotoxsocket(unp->unp_socket, &xu->xu_socket); 3371 UNP_PCB_UNLOCK(unp); 3372 error = SYSCTL_OUT(req, xu, sizeof *xu); 3373 } else { 3374 UNP_PCB_UNLOCK(unp); 3375 } 3376 } 3377 free(xu, M_TEMP); 3378 if (!error) { 3379 /* 3380 * Give the user an updated idea of our state. If the 3381 * generation differs from what we told her before, she knows 3382 * that something happened while we were processing this 3383 * request, and it might be necessary to retry. 3384 */ 3385 xug->xug_gen = unp_gencnt; 3386 xug->xug_sogen = so_gencnt; 3387 xug->xug_count = unp_count; 3388 error = SYSCTL_OUT(req, xug, sizeof *xug); 3389 } 3390 free(unp_list, M_TEMP); 3391 free(xug, M_TEMP); 3392 return (error); 3393 } 3394 3395 SYSCTL_PROC(_net_local_dgram, OID_AUTO, pcblist, 3396 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, 3397 (void *)(intptr_t)SOCK_DGRAM, 0, unp_pcblist, "S,xunpcb", 3398 "List of active local datagram sockets"); 3399 SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist, 3400 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, 3401 (void *)(intptr_t)SOCK_STREAM, 0, unp_pcblist, "S,xunpcb", 3402 "List of active local stream sockets"); 3403 SYSCTL_PROC(_net_local_seqpacket, OID_AUTO, pcblist, 3404 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, 3405 (void *)(intptr_t)SOCK_SEQPACKET, 0, unp_pcblist, "S,xunpcb", 3406 "List of active local seqpacket sockets"); 3407 3408 static void 3409 unp_drop(struct unpcb *unp) 3410 { 3411 struct socket *so; 3412 struct unpcb *unp2; 3413 3414 /* 3415 * Regardless of whether the socket's peer dropped the connection 3416 * with this socket by aborting or disconnecting, POSIX requires 3417 * that ECONNRESET is returned on next connected send(2) in case of 3418 * a SOCK_DGRAM socket and EPIPE for SOCK_STREAM. 3419 */ 3420 UNP_PCB_LOCK(unp); 3421 if ((so = unp->unp_socket) != NULL) 3422 so->so_error = 3423 so->so_proto->pr_type == SOCK_DGRAM ? ECONNRESET : EPIPE; 3424 if ((unp2 = unp_pcb_lock_peer(unp)) != NULL) { 3425 /* Last reference dropped in unp_disconnect(). 
*/ 3426 unp_pcb_rele_notlast(unp); 3427 unp_disconnect(unp, unp2); 3428 } else if (!unp_pcb_rele(unp)) { 3429 UNP_PCB_UNLOCK(unp); 3430 } 3431 } 3432 3433 static void 3434 unp_freerights(struct filedescent **fdep, int fdcount) 3435 { 3436 struct file *fp; 3437 int i; 3438 3439 KASSERT(fdcount > 0, ("%s: fdcount %d", __func__, fdcount)); 3440 3441 for (i = 0; i < fdcount; i++) { 3442 fp = fdep[i]->fde_file; 3443 filecaps_free(&fdep[i]->fde_caps); 3444 unp_discard(fp); 3445 } 3446 free(fdep[0], M_FILECAPS); 3447 } 3448 3449 static int 3450 unp_externalize(struct mbuf *control, struct mbuf **controlp, int flags) 3451 { 3452 struct thread *td = curthread; /* XXX */ 3453 struct cmsghdr *cm = mtod(control, struct cmsghdr *); 3454 int i; 3455 int *fdp; 3456 struct filedesc *fdesc = td->td_proc->p_fd; 3457 struct filedescent **fdep; 3458 void *data; 3459 socklen_t clen = control->m_len, datalen; 3460 int error, newfds; 3461 u_int newlen; 3462 3463 UNP_LINK_UNLOCK_ASSERT(); 3464 3465 error = 0; 3466 if (controlp != NULL) /* controlp == NULL => free control messages */ 3467 *controlp = NULL; 3468 while (cm != NULL) { 3469 MPASS(clen >= sizeof(*cm) && clen >= cm->cmsg_len); 3470 3471 data = CMSG_DATA(cm); 3472 datalen = (caddr_t)cm + cm->cmsg_len - (caddr_t)data; 3473 if (cm->cmsg_level == SOL_SOCKET 3474 && cm->cmsg_type == SCM_RIGHTS) { 3475 newfds = datalen / sizeof(*fdep); 3476 if (newfds == 0) 3477 goto next; 3478 fdep = data; 3479 3480 /* If we're not outputting the descriptors free them. */ 3481 if (error || controlp == NULL) { 3482 unp_freerights(fdep, newfds); 3483 goto next; 3484 } 3485 FILEDESC_XLOCK(fdesc); 3486 3487 /* 3488 * Now change each pointer to an fd in the global 3489 * table to an integer that is the index to the local 3490 * fd table entry that we set up to point to the 3491 * global one we are transferring. 3492 */ 3493 newlen = newfds * sizeof(int); 3494 *controlp = sbcreatecontrol(NULL, newlen, 3495 SCM_RIGHTS, SOL_SOCKET, M_WAITOK); 3496 3497 fdp = (int *) 3498 CMSG_DATA(mtod(*controlp, struct cmsghdr *)); 3499 if ((error = fdallocn(td, 0, fdp, newfds))) { 3500 FILEDESC_XUNLOCK(fdesc); 3501 unp_freerights(fdep, newfds); 3502 m_freem(*controlp); 3503 *controlp = NULL; 3504 goto next; 3505 } 3506 for (i = 0; i < newfds; i++, fdp++) { 3507 _finstall(fdesc, fdep[i]->fde_file, *fdp, 3508 (flags & MSG_CMSG_CLOEXEC) != 0 ? O_CLOEXEC : 0, 3509 &fdep[i]->fde_caps); 3510 unp_externalize_fp(fdep[i]->fde_file); 3511 } 3512 3513 /* 3514 * The new type indicates that the mbuf data refers to 3515 * kernel resources that may need to be released before 3516 * the mbuf is freed. 3517 */ 3518 m_chtype(*controlp, MT_EXTCONTROL); 3519 FILEDESC_XUNLOCK(fdesc); 3520 free(fdep[0], M_FILECAPS); 3521 } else { 3522 /* We can just copy anything else across. 
*/ 3523 if (error || controlp == NULL) 3524 goto next; 3525 *controlp = sbcreatecontrol(NULL, datalen, 3526 cm->cmsg_type, cm->cmsg_level, M_WAITOK); 3527 bcopy(data, 3528 CMSG_DATA(mtod(*controlp, struct cmsghdr *)), 3529 datalen); 3530 } 3531 controlp = &(*controlp)->m_next; 3532 3533 next: 3534 if (CMSG_SPACE(datalen) < clen) { 3535 clen -= CMSG_SPACE(datalen); 3536 cm = (struct cmsghdr *) 3537 ((caddr_t)cm + CMSG_SPACE(datalen)); 3538 } else { 3539 clen = 0; 3540 cm = NULL; 3541 } 3542 } 3543 3544 m_freem(control); 3545 return (error); 3546 } 3547 3548 static void 3549 unp_zone_change(void *tag) 3550 { 3551 3552 uma_zone_set_max(unp_zone, maxsockets); 3553 } 3554 3555 #ifdef INVARIANTS 3556 static void 3557 unp_zdtor(void *mem, int size __unused, void *arg __unused) 3558 { 3559 struct unpcb *unp; 3560 3561 unp = mem; 3562 3563 KASSERT(LIST_EMPTY(&unp->unp_refs), 3564 ("%s: unpcb %p has lingering refs", __func__, unp)); 3565 KASSERT(unp->unp_socket == NULL, 3566 ("%s: unpcb %p has socket backpointer", __func__, unp)); 3567 KASSERT(unp->unp_vnode == NULL, 3568 ("%s: unpcb %p has vnode references", __func__, unp)); 3569 KASSERT(unp->unp_conn == NULL, 3570 ("%s: unpcb %p is still connected", __func__, unp)); 3571 KASSERT(unp->unp_addr == NULL, 3572 ("%s: unpcb %p has leaked addr", __func__, unp)); 3573 } 3574 #endif 3575 3576 static void 3577 unp_init(void *arg __unused) 3578 { 3579 uma_dtor dtor; 3580 3581 #ifdef INVARIANTS 3582 dtor = unp_zdtor; 3583 #else 3584 dtor = NULL; 3585 #endif 3586 unp_zone = uma_zcreate("unpcb", sizeof(struct unpcb), NULL, dtor, 3587 NULL, NULL, UMA_ALIGN_CACHE, 0); 3588 uma_zone_set_max(unp_zone, maxsockets); 3589 uma_zone_set_warning(unp_zone, "kern.ipc.maxsockets limit reached"); 3590 EVENTHANDLER_REGISTER(maxsockets_change, unp_zone_change, 3591 NULL, EVENTHANDLER_PRI_ANY); 3592 LIST_INIT(&unp_dhead); 3593 LIST_INIT(&unp_shead); 3594 LIST_INIT(&unp_sphead); 3595 SLIST_INIT(&unp_defers); 3596 TIMEOUT_TASK_INIT(taskqueue_thread, &unp_gc_task, 0, unp_gc, NULL); 3597 TASK_INIT(&unp_defer_task, 0, unp_process_defers, NULL); 3598 UNP_LINK_LOCK_INIT(); 3599 UNP_DEFERRED_LOCK_INIT(); 3600 unp_vp_mtxpool = mtx_pool_create("unp vp mtxpool", 32, MTX_DEF); 3601 } 3602 SYSINIT(unp_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_SECOND, unp_init, NULL); 3603 3604 static void 3605 unp_internalize_cleanup_rights(struct mbuf *control) 3606 { 3607 struct cmsghdr *cp; 3608 struct mbuf *m; 3609 void *data; 3610 socklen_t datalen; 3611 3612 for (m = control; m != NULL; m = m->m_next) { 3613 cp = mtod(m, struct cmsghdr *); 3614 if (cp->cmsg_level != SOL_SOCKET || 3615 cp->cmsg_type != SCM_RIGHTS) 3616 continue; 3617 data = CMSG_DATA(cp); 3618 datalen = (caddr_t)cp + cp->cmsg_len - (caddr_t)data; 3619 unp_freerights(data, datalen / sizeof(struct filedesc *)); 3620 } 3621 } 3622 3623 static int 3624 unp_internalize(struct mbuf *control, struct mchain *mc, struct thread *td) 3625 { 3626 struct proc *p; 3627 struct filedesc *fdesc; 3628 struct bintime *bt; 3629 struct cmsghdr *cm; 3630 struct cmsgcred *cmcred; 3631 struct mbuf *m; 3632 struct filedescent *fde, **fdep, *fdev; 3633 struct file *fp; 3634 struct timeval *tv; 3635 struct timespec *ts; 3636 void *data; 3637 socklen_t clen, datalen; 3638 int i, j, error, *fdp, oldfds; 3639 u_int newlen; 3640 3641 MPASS(control->m_next == NULL); /* COMPAT_OLDSOCK may violate */ 3642 UNP_LINK_UNLOCK_ASSERT(); 3643 3644 p = td->td_proc; 3645 fdesc = p->p_fd; 3646 error = 0; 3647 *mc = MCHAIN_INITIALIZER(mc); 3648 for (clen = control->m_len, cm = 
mtod(control, struct cmsghdr *), 3649 data = CMSG_DATA(cm); 3650 3651 clen >= sizeof(*cm) && cm->cmsg_level == SOL_SOCKET && 3652 clen >= cm->cmsg_len && cm->cmsg_len >= sizeof(*cm) && 3653 (char *)cm + cm->cmsg_len >= (char *)data; 3654 3655 clen -= min(CMSG_SPACE(datalen), clen), 3656 cm = (struct cmsghdr *) ((char *)cm + CMSG_SPACE(datalen)), 3657 data = CMSG_DATA(cm)) { 3658 datalen = (char *)cm + cm->cmsg_len - (char *)data; 3659 switch (cm->cmsg_type) { 3660 case SCM_CREDS: 3661 m = sbcreatecontrol(NULL, sizeof(*cmcred), SCM_CREDS, 3662 SOL_SOCKET, M_WAITOK); 3663 cmcred = (struct cmsgcred *) 3664 CMSG_DATA(mtod(m, struct cmsghdr *)); 3665 cmcred->cmcred_pid = p->p_pid; 3666 cmcred->cmcred_uid = td->td_ucred->cr_ruid; 3667 cmcred->cmcred_gid = td->td_ucred->cr_rgid; 3668 cmcred->cmcred_euid = td->td_ucred->cr_uid; 3669 cmcred->cmcred_ngroups = MIN(td->td_ucred->cr_ngroups, 3670 CMGROUP_MAX); 3671 for (i = 0; i < cmcred->cmcred_ngroups; i++) 3672 cmcred->cmcred_groups[i] = 3673 td->td_ucred->cr_groups[i]; 3674 break; 3675 3676 case SCM_RIGHTS: 3677 oldfds = datalen / sizeof (int); 3678 if (oldfds == 0) 3679 continue; 3680 /* On some machines sizeof(pointer) is bigger than 3681 * sizeof(int), so we need to check that the data fits 3682 * into a single mbuf. We could allocate several mbufs, 3683 * and unp_externalize() should even handle that 3684 * properly. But it is not worth complicating the code 3685 * for the insane scenario of passing over 200 file 3686 * descriptors at once. 3687 */ 3688 newlen = oldfds * sizeof(fdep[0]); 3689 if (CMSG_SPACE(newlen) > MCLBYTES) { 3690 error = EMSGSIZE; 3691 goto out; 3692 } 3693 /* 3694 * Check that all the FDs passed in refer to legal 3695 * files. If not, reject the entire operation. 3696 */ 3697 fdp = data; 3698 FILEDESC_SLOCK(fdesc); 3699 for (i = 0; i < oldfds; i++, fdp++) { 3700 fp = fget_noref(fdesc, *fdp); 3701 if (fp == NULL) { 3702 FILEDESC_SUNLOCK(fdesc); 3703 error = EBADF; 3704 goto out; 3705 } 3706 if (!(fp->f_ops->fo_flags & DFLAG_PASSABLE)) { 3707 FILEDESC_SUNLOCK(fdesc); 3708 error = EOPNOTSUPP; 3709 goto out; 3710 } 3711 } 3712 3713 /* 3714 * Now replace the integer FDs with pointers to the 3715 * file structure and capability rights. 3716 */ 3717 m = sbcreatecontrol(NULL, newlen, SCM_RIGHTS, 3718 SOL_SOCKET, M_WAITOK); 3719 fdp = data; 3720 for (i = 0; i < oldfds; i++, fdp++) { 3721 if (!fhold(fdesc->fd_ofiles[*fdp].fde_file)) { 3722 fdp = data; 3723 for (j = 0; j < i; j++, fdp++) { 3724 fdrop(fdesc->fd_ofiles[*fdp].
3725 fde_file, td); 3726 } 3727 FILEDESC_SUNLOCK(fdesc); 3728 error = EBADF; 3729 goto out; 3730 } 3731 } 3732 fdp = data; 3733 fdep = (struct filedescent **) 3734 CMSG_DATA(mtod(m, struct cmsghdr *)); 3735 fdev = malloc(sizeof(*fdev) * oldfds, M_FILECAPS, 3736 M_WAITOK); 3737 for (i = 0; i < oldfds; i++, fdev++, fdp++) { 3738 fde = &fdesc->fd_ofiles[*fdp]; 3739 fdep[i] = fdev; 3740 fdep[i]->fde_file = fde->fde_file; 3741 filecaps_copy(&fde->fde_caps, 3742 &fdep[i]->fde_caps, true); 3743 unp_internalize_fp(fdep[i]->fde_file); 3744 } 3745 FILEDESC_SUNLOCK(fdesc); 3746 break; 3747 3748 case SCM_TIMESTAMP: 3749 m = sbcreatecontrol(NULL, sizeof(*tv), SCM_TIMESTAMP, 3750 SOL_SOCKET, M_WAITOK); 3751 tv = (struct timeval *) 3752 CMSG_DATA(mtod(m, struct cmsghdr *)); 3753 microtime(tv); 3754 break; 3755 3756 case SCM_BINTIME: 3757 m = sbcreatecontrol(NULL, sizeof(*bt), SCM_BINTIME, 3758 SOL_SOCKET, M_WAITOK); 3759 bt = (struct bintime *) 3760 CMSG_DATA(mtod(m, struct cmsghdr *)); 3761 bintime(bt); 3762 break; 3763 3764 case SCM_REALTIME: 3765 m = sbcreatecontrol(NULL, sizeof(*ts), SCM_REALTIME, 3766 SOL_SOCKET, M_WAITOK); 3767 ts = (struct timespec *) 3768 CMSG_DATA(mtod(m, struct cmsghdr *)); 3769 nanotime(ts); 3770 break; 3771 3772 case SCM_MONOTONIC: 3773 m = sbcreatecontrol(NULL, sizeof(*ts), SCM_MONOTONIC, 3774 SOL_SOCKET, M_WAITOK); 3775 ts = (struct timespec *) 3776 CMSG_DATA(mtod(m, struct cmsghdr *)); 3777 nanouptime(ts); 3778 break; 3779 3780 default: 3781 error = EINVAL; 3782 goto out; 3783 } 3784 3785 mc_append(mc, m); 3786 } 3787 if (clen > 0) 3788 error = EINVAL; 3789 3790 out: 3791 if (error != 0) 3792 unp_internalize_cleanup_rights(mc_first(mc)); 3793 m_freem(control); 3794 return (error); 3795 } 3796 3797 static void 3798 unp_addsockcred(struct thread *td, struct mchain *mc, int mode) 3799 { 3800 struct mbuf *m, *n, *n_prev; 3801 const struct cmsghdr *cm; 3802 int ngroups, i, cmsgtype; 3803 size_t ctrlsz; 3804 3805 ngroups = MIN(td->td_ucred->cr_ngroups, CMGROUP_MAX); 3806 if (mode & UNP_WANTCRED_ALWAYS) { 3807 ctrlsz = SOCKCRED2SIZE(ngroups); 3808 cmsgtype = SCM_CREDS2; 3809 } else { 3810 ctrlsz = SOCKCREDSIZE(ngroups); 3811 cmsgtype = SCM_CREDS; 3812 } 3813 3814 /* XXXGL: uipc_sosend_*() need to be improved so that we can M_WAITOK */ 3815 m = sbcreatecontrol(NULL, ctrlsz, cmsgtype, SOL_SOCKET, M_NOWAIT); 3816 if (m == NULL) 3817 return; 3818 MPASS((m->m_flags & M_EXT) == 0 && m->m_next == NULL); 3819 3820 if (mode & UNP_WANTCRED_ALWAYS) { 3821 struct sockcred2 *sc; 3822 3823 sc = (void *)CMSG_DATA(mtod(m, struct cmsghdr *)); 3824 sc->sc_version = 0; 3825 sc->sc_pid = td->td_proc->p_pid; 3826 sc->sc_uid = td->td_ucred->cr_ruid; 3827 sc->sc_euid = td->td_ucred->cr_uid; 3828 sc->sc_gid = td->td_ucred->cr_rgid; 3829 sc->sc_egid = td->td_ucred->cr_gid; 3830 sc->sc_ngroups = ngroups; 3831 for (i = 0; i < sc->sc_ngroups; i++) 3832 sc->sc_groups[i] = td->td_ucred->cr_groups[i]; 3833 } else { 3834 struct sockcred *sc; 3835 3836 sc = (void *)CMSG_DATA(mtod(m, struct cmsghdr *)); 3837 sc->sc_uid = td->td_ucred->cr_ruid; 3838 sc->sc_euid = td->td_ucred->cr_uid; 3839 sc->sc_gid = td->td_ucred->cr_rgid; 3840 sc->sc_egid = td->td_ucred->cr_gid; 3841 sc->sc_ngroups = ngroups; 3842 for (i = 0; i < sc->sc_ngroups; i++) 3843 sc->sc_groups[i] = td->td_ucred->cr_groups[i]; 3844 } 3845 3846 /* 3847 * Unlink SCM_CREDS control messages (struct cmsgcred), since just 3848 * created SCM_CREDS control message (struct sockcred) has another 3849 * format. 
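 *
 * (The peer-supplied SCM_CREDS messages carry a struct cmsgcred built
 * by unp_internalize(), while the message created above carries a
 * struct sockcred; leaving both on the chain under the same cmsg_type
 * would be ambiguous for the receiver, hence the older ones are
 * dropped.)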
3850 */ 3851 if (!STAILQ_EMPTY(&mc->mc_q) && cmsgtype == SCM_CREDS) 3852 STAILQ_FOREACH_SAFE(n, &mc->mc_q, m_stailq, n_prev) { 3853 cm = mtod(n, struct cmsghdr *); 3854 if (cm->cmsg_level == SOL_SOCKET && 3855 cm->cmsg_type == SCM_CREDS) { 3856 mc_remove(mc, n); 3857 m_free(n); 3858 } 3859 } 3860 3861 /* Prepend it to the head. */ 3862 mc_prepend(mc, m); 3863 } 3864 3865 static struct unpcb * 3866 fptounp(struct file *fp) 3867 { 3868 struct socket *so; 3869 3870 if (fp->f_type != DTYPE_SOCKET) 3871 return (NULL); 3872 if ((so = fp->f_data) == NULL) 3873 return (NULL); 3874 if (so->so_proto->pr_domain != &localdomain) 3875 return (NULL); 3876 return sotounpcb(so); 3877 } 3878 3879 static void 3880 unp_discard(struct file *fp) 3881 { 3882 struct unp_defer *dr; 3883 3884 if (unp_externalize_fp(fp)) { 3885 dr = malloc(sizeof(*dr), M_TEMP, M_WAITOK); 3886 dr->ud_fp = fp; 3887 UNP_DEFERRED_LOCK(); 3888 SLIST_INSERT_HEAD(&unp_defers, dr, ud_link); 3889 UNP_DEFERRED_UNLOCK(); 3890 atomic_add_int(&unp_defers_count, 1); 3891 taskqueue_enqueue(taskqueue_thread, &unp_defer_task); 3892 } else 3893 closef_nothread(fp); 3894 } 3895 3896 static void 3897 unp_process_defers(void *arg __unused, int pending) 3898 { 3899 struct unp_defer *dr; 3900 SLIST_HEAD(, unp_defer) drl; 3901 int count; 3902 3903 SLIST_INIT(&drl); 3904 for (;;) { 3905 UNP_DEFERRED_LOCK(); 3906 if (SLIST_FIRST(&unp_defers) == NULL) { 3907 UNP_DEFERRED_UNLOCK(); 3908 break; 3909 } 3910 SLIST_SWAP(&unp_defers, &drl, unp_defer); 3911 UNP_DEFERRED_UNLOCK(); 3912 count = 0; 3913 while ((dr = SLIST_FIRST(&drl)) != NULL) { 3914 SLIST_REMOVE_HEAD(&drl, ud_link); 3915 closef_nothread(dr->ud_fp); 3916 free(dr, M_TEMP); 3917 count++; 3918 } 3919 atomic_add_int(&unp_defers_count, -count); 3920 } 3921 } 3922 3923 static void 3924 unp_internalize_fp(struct file *fp) 3925 { 3926 struct unpcb *unp; 3927 3928 UNP_LINK_WLOCK(); 3929 if ((unp = fptounp(fp)) != NULL) { 3930 unp->unp_file = fp; 3931 unp->unp_msgcount++; 3932 } 3933 unp_rights++; 3934 UNP_LINK_WUNLOCK(); 3935 } 3936 3937 static int 3938 unp_externalize_fp(struct file *fp) 3939 { 3940 struct unpcb *unp; 3941 int ret; 3942 3943 UNP_LINK_WLOCK(); 3944 if ((unp = fptounp(fp)) != NULL) { 3945 unp->unp_msgcount--; 3946 ret = 1; 3947 } else 3948 ret = 0; 3949 unp_rights--; 3950 UNP_LINK_WUNLOCK(); 3951 return (ret); 3952 } 3953 3954 /* 3955 * unp_defer indicates whether additional work has been defered for a future 3956 * pass through unp_gc(). It is thread local and does not require explicit 3957 * synchronization. 3958 */ 3959 static int unp_marked; 3960 3961 static void 3962 unp_remove_dead_ref(struct filedescent **fdep, int fdcount) 3963 { 3964 struct unpcb *unp; 3965 struct file *fp; 3966 int i; 3967 3968 /* 3969 * This function can only be called from the gc task. 3970 */ 3971 KASSERT(taskqueue_member(taskqueue_thread, curthread) != 0, 3972 ("%s: not on gc callout", __func__)); 3973 UNP_LINK_LOCK_ASSERT(); 3974 3975 for (i = 0; i < fdcount; i++) { 3976 fp = fdep[i]->fde_file; 3977 if ((unp = fptounp(fp)) == NULL) 3978 continue; 3979 if ((unp->unp_gcflag & UNPGC_DEAD) == 0) 3980 continue; 3981 unp->unp_gcrefs--; 3982 } 3983 } 3984 3985 static void 3986 unp_restore_undead_ref(struct filedescent **fdep, int fdcount) 3987 { 3988 struct unpcb *unp; 3989 struct file *fp; 3990 int i; 3991 3992 /* 3993 * This function can only be called from the gc task. 
3994 */ 3995 KASSERT(taskqueue_member(taskqueue_thread, curthread) != 0, 3996 ("%s: not on gc callout", __func__)); 3997 UNP_LINK_LOCK_ASSERT(); 3998 3999 for (i = 0; i < fdcount; i++) { 4000 fp = fdep[i]->fde_file; 4001 if ((unp = fptounp(fp)) == NULL) 4002 continue; 4003 if ((unp->unp_gcflag & UNPGC_DEAD) == 0) 4004 continue; 4005 unp->unp_gcrefs++; 4006 unp_marked++; 4007 } 4008 } 4009 4010 static void 4011 unp_scan_socket(struct socket *so, void (*op)(struct filedescent **, int)) 4012 { 4013 struct sockbuf *sb; 4014 4015 SOCK_LOCK_ASSERT(so); 4016 4017 if (sotounpcb(so)->unp_gcflag & UNPGC_IGNORE_RIGHTS) 4018 return; 4019 4020 SOCK_RECVBUF_LOCK(so); 4021 switch (so->so_type) { 4022 case SOCK_DGRAM: 4023 unp_scan(STAILQ_FIRST(&so->so_rcv.uxdg_mb), op); 4024 unp_scan(so->so_rcv.uxdg_peeked, op); 4025 TAILQ_FOREACH(sb, &so->so_rcv.uxdg_conns, uxdg_clist) 4026 unp_scan(STAILQ_FIRST(&sb->uxdg_mb), op); 4027 break; 4028 case SOCK_STREAM: 4029 case SOCK_SEQPACKET: 4030 unp_scan(STAILQ_FIRST(&so->so_rcv.uxst_mbq), op); 4031 break; 4032 } 4033 SOCK_RECVBUF_UNLOCK(so); 4034 } 4035 4036 static void 4037 unp_gc_scan(struct unpcb *unp, void (*op)(struct filedescent **, int)) 4038 { 4039 struct socket *so, *soa; 4040 4041 so = unp->unp_socket; 4042 SOCK_LOCK(so); 4043 if (SOLISTENING(so)) { 4044 /* 4045 * Mark all sockets in our accept queue. 4046 */ 4047 TAILQ_FOREACH(soa, &so->sol_comp, so_list) 4048 unp_scan_socket(soa, op); 4049 } else { 4050 /* 4051 * Mark all sockets we reference with RIGHTS. 4052 */ 4053 unp_scan_socket(so, op); 4054 } 4055 SOCK_UNLOCK(so); 4056 } 4057 4058 static int unp_recycled; 4059 SYSCTL_INT(_net_local, OID_AUTO, recycled, CTLFLAG_RD, &unp_recycled, 0, 4060 "Number of unreachable sockets claimed by the garbage collector."); 4061 4062 static int unp_taskcount; 4063 SYSCTL_INT(_net_local, OID_AUTO, taskcount, CTLFLAG_RD, &unp_taskcount, 0, 4064 "Number of times the garbage collector has run."); 4065 4066 SYSCTL_UINT(_net_local, OID_AUTO, sockcount, CTLFLAG_RD, &unp_count, 0, 4067 "Number of active local sockets."); 4068 4069 static void 4070 unp_gc(__unused void *arg, int pending) 4071 { 4072 struct unp_head *heads[] = { &unp_dhead, &unp_shead, &unp_sphead, 4073 NULL }; 4074 struct unp_head **head; 4075 struct unp_head unp_deadhead; /* List of potentially-dead sockets. */ 4076 struct file *f, **unref; 4077 struct unpcb *unp, *unptmp; 4078 int i, total, unp_unreachable; 4079 4080 LIST_INIT(&unp_deadhead); 4081 unp_taskcount++; 4082 UNP_LINK_RLOCK(); 4083 /* 4084 * First determine which sockets may be in cycles. 4085 */ 4086 unp_unreachable = 0; 4087 4088 for (head = heads; *head != NULL; head++) 4089 LIST_FOREACH(unp, *head, unp_link) { 4090 KASSERT((unp->unp_gcflag & ~UNPGC_IGNORE_RIGHTS) == 0, 4091 ("%s: unp %p has unexpected gc flags 0x%x", 4092 __func__, unp, (unsigned int)unp->unp_gcflag)); 4093 4094 f = unp->unp_file; 4095 4096 /* 4097 * Check for an unreachable socket potentially in a 4098 * cycle. It must be in a queue as indicated by 4099 * msgcount, and this must equal the file reference 4100 * count. Note that when msgcount is 0 the file is 4101 * NULL. 
4102 */ 4103 if (f != NULL && unp->unp_msgcount != 0 && 4104 refcount_load(&f->f_count) == unp->unp_msgcount) { 4105 LIST_INSERT_HEAD(&unp_deadhead, unp, unp_dead); 4106 unp->unp_gcflag |= UNPGC_DEAD; 4107 unp->unp_gcrefs = unp->unp_msgcount; 4108 unp_unreachable++; 4109 } 4110 } 4111 4112 /* 4113 * Scan all sockets previously marked as potentially being in a cycle 4114 * and remove the references each socket holds on any UNPGC_DEAD 4115 * sockets in its queue. After this step, all remaining references on 4116 * sockets marked UNPGC_DEAD should not be part of any cycle. 4117 */ 4118 LIST_FOREACH(unp, &unp_deadhead, unp_dead) 4119 unp_gc_scan(unp, unp_remove_dead_ref); 4120 4121 /* 4122 * If a socket still has a positive refcount, it cannot be in a 4123 * cycle. In this case increment the refcount of all its children iteratively. 4124 * Stop the scan once we do a complete loop without discovering 4125 * a new reachable socket. 4126 */ 4127 do { 4128 unp_marked = 0; 4129 LIST_FOREACH_SAFE(unp, &unp_deadhead, unp_dead, unptmp) 4130 if (unp->unp_gcrefs > 0) { 4131 unp->unp_gcflag &= ~UNPGC_DEAD; 4132 LIST_REMOVE(unp, unp_dead); 4133 KASSERT(unp_unreachable > 0, 4134 ("%s: unp_unreachable underflow.", 4135 __func__)); 4136 unp_unreachable--; 4137 unp_gc_scan(unp, unp_restore_undead_ref); 4138 } 4139 } while (unp_marked); 4140 4141 UNP_LINK_RUNLOCK(); 4142 4143 if (unp_unreachable == 0) 4144 return; 4145 4146 /* 4147 * Allocate space for a local array of dead unpcbs. 4148 * TODO: can this path be simplified by instead using the local 4149 * dead list at unp_deadhead, after taking out references 4150 * on the file object and/or unpcb and dropping the link lock? 4151 */ 4152 unref = malloc(unp_unreachable * sizeof(struct file *), 4153 M_TEMP, M_WAITOK); 4154 4155 /* 4156 * Iterate looking for sockets which have been specifically marked 4157 * as unreachable and store them locally. 4158 */ 4159 UNP_LINK_RLOCK(); 4160 total = 0; 4161 LIST_FOREACH(unp, &unp_deadhead, unp_dead) { 4162 KASSERT((unp->unp_gcflag & UNPGC_DEAD) != 0, 4163 ("%s: unp %p not marked UNPGC_DEAD", __func__, unp)); 4164 unp->unp_gcflag &= ~UNPGC_DEAD; 4165 f = unp->unp_file; 4166 if (unp->unp_msgcount == 0 || f == NULL || 4167 refcount_load(&f->f_count) != unp->unp_msgcount || 4168 !fhold(f)) 4169 continue; 4170 unref[total++] = f; 4171 KASSERT(total <= unp_unreachable, 4172 ("%s: incorrect unreachable count.", __func__)); 4173 } 4174 UNP_LINK_RUNLOCK(); 4175 4176 /* 4177 * Now flush all sockets, freeing rights. This will free the 4178 * struct files associated with these sockets but leave each socket 4179 * with one remaining ref. 4180 */ 4181 for (i = 0; i < total; i++) { 4182 struct socket *so; 4183 4184 so = unref[i]->f_data; 4185 CURVNET_SET(so->so_vnet); 4186 socantrcvmore(so); 4187 unp_dispose(so); 4188 CURVNET_RESTORE(); 4189 } 4190 4191 /* 4192 * And finally release the sockets so they can be reclaimed. 4193 */ 4194 for (i = 0; i < total; i++) 4195 fdrop(unref[i], NULL); 4196 unp_recycled += total; 4197 free(unref, M_TEMP); 4198 } 4199 4200 /* 4201 * Synchronize against unp_gc, which can trip over data as we are freeing it. 4202 */ 4203 static void 4204 unp_dispose(struct socket *so) 4205 { 4206 struct sockbuf *sb; 4207 struct unpcb *unp; 4208 struct mbuf *m; 4209 int error __diagused; 4210 4211 MPASS(!SOLISTENING(so)); 4212 4213 unp = sotounpcb(so); 4214 UNP_LINK_WLOCK(); 4215 unp->unp_gcflag |= UNPGC_IGNORE_RIGHTS; 4216 UNP_LINK_WUNLOCK(); 4217 4218 /* 4219 * Grab our special mbufs before calling sbrelease().
4220 */ 4221 error = SOCK_IO_RECV_LOCK(so, SBL_WAIT | SBL_NOINTR); 4222 MPASS(!error); 4223 SOCK_RECVBUF_LOCK(so); 4224 switch (so->so_type) { 4225 case SOCK_DGRAM: 4226 while ((sb = TAILQ_FIRST(&so->so_rcv.uxdg_conns)) != NULL) { 4227 STAILQ_CONCAT(&so->so_rcv.uxdg_mb, &sb->uxdg_mb); 4228 TAILQ_REMOVE(&so->so_rcv.uxdg_conns, sb, uxdg_clist); 4229 /* Note: socket of sb may reconnect. */ 4230 sb->uxdg_cc = sb->uxdg_ctl = sb->uxdg_mbcnt = 0; 4231 } 4232 sb = &so->so_rcv; 4233 if (sb->uxdg_peeked != NULL) { 4234 STAILQ_INSERT_HEAD(&sb->uxdg_mb, sb->uxdg_peeked, 4235 m_stailqpkt); 4236 sb->uxdg_peeked = NULL; 4237 } 4238 m = STAILQ_FIRST(&sb->uxdg_mb); 4239 STAILQ_INIT(&sb->uxdg_mb); 4240 break; 4241 case SOCK_STREAM: 4242 case SOCK_SEQPACKET: 4243 sb = &so->so_rcv; 4244 m = STAILQ_FIRST(&sb->uxst_mbq); 4245 STAILQ_INIT(&sb->uxst_mbq); 4246 sb->sb_acc = sb->sb_ccc = sb->sb_ctl = sb->sb_mbcnt = 0; 4247 /* 4248 * Trim M_NOTREADY mbufs from the chain we are about to free; 4249 * they are still referenced by the I/O thread. 4250 */ 4251 if (sb->uxst_fnrdy != NULL) { 4252 struct mbuf *n, *prev; 4253 4254 while (m != NULL && m->m_flags & M_NOTREADY) 4255 m = m->m_next; 4256 for (prev = n = m; n != NULL; n = n->m_next) { 4257 if (n->m_flags & M_NOTREADY) 4258 prev->m_next = n->m_next; 4259 else 4260 prev = n; 4261 } 4262 sb->uxst_fnrdy = NULL; 4263 } 4264 break; 4265 } 4266 /* 4267 * Mark sb with SBS_CANTRCVMORE. This is needed to prevent 4268 * uipc_sosend_*() or unp_disconnect() from adding more data to the socket. 4269 * We came here either through shutdown(2) or from the final sofree(). 4270 * The sofree() case is simple as it guarantees that no more sends will 4271 * happen, although we can still race with unp_disconnect() from our peer. 4272 * The shutdown(2) case is more exotic. It calls into 4273 * unp_dispose() only if the socket is SS_ISCONNECTED. This is possible if 4274 * we connect(2)ed this socket and also had it bound with 4275 * bind(2), receiving connections from other sockets. Because 4276 * uipc_shutdown() violates POSIX (see comment there) this applies to 4277 * SOCK_DGRAM as well. For SOCK_DGRAM, SBS_CANTRCVMORE affects 4278 * not only the peer we connect(2)ed to, but also all of 4279 * the peers that have connect(2)ed to us. Their sends would end up 4280 * with ENOBUFS.
4281 */ 4282 sb->sb_state |= SBS_CANTRCVMORE; 4283 (void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0, 4284 RLIM_INFINITY); 4285 SOCK_RECVBUF_UNLOCK(so); 4286 SOCK_IO_RECV_UNLOCK(so); 4287 4288 if (m != NULL) { 4289 unp_scan(m, unp_freerights); 4290 m_freemp(m); 4291 } 4292 } 4293 4294 static void 4295 unp_scan(struct mbuf *m0, void (*op)(struct filedescent **, int)) 4296 { 4297 struct mbuf *m; 4298 struct cmsghdr *cm; 4299 void *data; 4300 socklen_t clen, datalen; 4301 4302 while (m0 != NULL) { 4303 for (m = m0; m; m = m->m_next) { 4304 if (m->m_type != MT_CONTROL) 4305 continue; 4306 4307 cm = mtod(m, struct cmsghdr *); 4308 clen = m->m_len; 4309 4310 while (cm != NULL) { 4311 if (sizeof(*cm) > clen || cm->cmsg_len > clen) 4312 break; 4313 4314 data = CMSG_DATA(cm); 4315 datalen = (caddr_t)cm + cm->cmsg_len 4316 - (caddr_t)data; 4317 4318 if (cm->cmsg_level == SOL_SOCKET && 4319 cm->cmsg_type == SCM_RIGHTS) { 4320 (*op)(data, datalen / 4321 sizeof(struct filedescent *)); 4322 } 4323 4324 if (CMSG_SPACE(datalen) < clen) { 4325 clen -= CMSG_SPACE(datalen); 4326 cm = (struct cmsghdr *) 4327 ((caddr_t)cm + CMSG_SPACE(datalen)); 4328 } else { 4329 clen = 0; 4330 cm = NULL; 4331 } 4332 } 4333 } 4334 m0 = m0->m_nextpkt; 4335 } 4336 } 4337 4338 /* 4339 * Definitions of protocols supported in the LOCAL domain. 4340 */ 4341 static struct protosw streamproto = { 4342 .pr_type = SOCK_STREAM, 4343 .pr_flags = PR_CONNREQUIRED | PR_CAPATTACH | PR_SOCKBUF, 4344 .pr_ctloutput = &uipc_ctloutput, 4345 .pr_abort = uipc_abort, 4346 .pr_accept = uipc_peeraddr, 4347 .pr_attach = uipc_attach, 4348 .pr_bind = uipc_bind, 4349 .pr_bindat = uipc_bindat, 4350 .pr_connect = uipc_connect, 4351 .pr_connectat = uipc_connectat, 4352 .pr_connect2 = uipc_connect2, 4353 .pr_detach = uipc_detach, 4354 .pr_disconnect = uipc_disconnect, 4355 .pr_listen = uipc_listen, 4356 .pr_peeraddr = uipc_peeraddr, 4357 .pr_send = uipc_sendfile, 4358 .pr_sendfile_wait = uipc_sendfile_wait, 4359 .pr_ready = uipc_ready, 4360 .pr_sense = uipc_sense, 4361 .pr_shutdown = uipc_shutdown, 4362 .pr_sockaddr = uipc_sockaddr, 4363 .pr_sosend = uipc_sosend_stream_or_seqpacket, 4364 .pr_soreceive = uipc_soreceive_stream_or_seqpacket, 4365 .pr_sopoll = uipc_sopoll_stream_or_seqpacket, 4366 .pr_kqfilter = uipc_kqfilter_stream_or_seqpacket, 4367 .pr_close = uipc_close, 4368 .pr_chmod = uipc_chmod, 4369 }; 4370 4371 static struct protosw dgramproto = { 4372 .pr_type = SOCK_DGRAM, 4373 .pr_flags = PR_ATOMIC | PR_ADDR | PR_CAPATTACH | PR_SOCKBUF, 4374 .pr_ctloutput = &uipc_ctloutput, 4375 .pr_abort = uipc_abort, 4376 .pr_accept = uipc_peeraddr, 4377 .pr_attach = uipc_attach, 4378 .pr_bind = uipc_bind, 4379 .pr_bindat = uipc_bindat, 4380 .pr_connect = uipc_connect, 4381 .pr_connectat = uipc_connectat, 4382 .pr_connect2 = uipc_connect2, 4383 .pr_detach = uipc_detach, 4384 .pr_disconnect = uipc_disconnect, 4385 .pr_peeraddr = uipc_peeraddr, 4386 .pr_sosend = uipc_sosend_dgram, 4387 .pr_sense = uipc_sense, 4388 .pr_shutdown = uipc_shutdown, 4389 .pr_sockaddr = uipc_sockaddr, 4390 .pr_soreceive = uipc_soreceive_dgram, 4391 .pr_close = uipc_close, 4392 .pr_chmod = uipc_chmod, 4393 }; 4394 4395 static struct protosw seqpacketproto = { 4396 .pr_type = SOCK_SEQPACKET, 4397 .pr_flags = PR_CONNREQUIRED | PR_CAPATTACH | PR_SOCKBUF, 4398 .pr_ctloutput = &uipc_ctloutput, 4399 .pr_abort = uipc_abort, 4400 .pr_accept = uipc_peeraddr, 4401 .pr_attach = uipc_attach, 4402 .pr_bind = uipc_bind, 4403 .pr_bindat = uipc_bindat, 4404 .pr_connect = uipc_connect, 4405 
.pr_connectat = uipc_connectat, 4406 .pr_connect2 = uipc_connect2, 4407 .pr_detach = uipc_detach, 4408 .pr_disconnect = uipc_disconnect, 4409 .pr_listen = uipc_listen, 4410 .pr_peeraddr = uipc_peeraddr, 4411 .pr_sense = uipc_sense, 4412 .pr_shutdown = uipc_shutdown, 4413 .pr_sockaddr = uipc_sockaddr, 4414 .pr_sosend = uipc_sosend_stream_or_seqpacket, 4415 .pr_soreceive = uipc_soreceive_stream_or_seqpacket, 4416 .pr_sopoll = uipc_sopoll_stream_or_seqpacket, 4417 .pr_kqfilter = uipc_kqfilter_stream_or_seqpacket, 4418 .pr_close = uipc_close, 4419 .pr_chmod = uipc_chmod, 4420 }; 4421 4422 static struct domain localdomain = { 4423 .dom_family = AF_LOCAL, 4424 .dom_name = "local", 4425 .dom_externalize = unp_externalize, 4426 .dom_nprotosw = 3, 4427 .dom_protosw = { 4428 &streamproto, 4429 &dgramproto, 4430 &seqpacketproto, 4431 } 4432 }; 4433 DOMAIN_SET(local); 4434 4435 /* 4436 * A helper function called by VFS before socket-type vnode reclamation. 4437 * For an active vnode it clears unp_vnode pointer and decrements unp_vnode 4438 * use count. 4439 */ 4440 void 4441 vfs_unp_reclaim(struct vnode *vp) 4442 { 4443 struct unpcb *unp; 4444 int active; 4445 struct mtx *vplock; 4446 4447 ASSERT_VOP_ELOCKED(vp, "vfs_unp_reclaim"); 4448 KASSERT(vp->v_type == VSOCK, 4449 ("vfs_unp_reclaim: vp->v_type != VSOCK")); 4450 4451 active = 0; 4452 vplock = mtx_pool_find(unp_vp_mtxpool, vp); 4453 mtx_lock(vplock); 4454 VOP_UNP_CONNECT(vp, &unp); 4455 if (unp == NULL) 4456 goto done; 4457 UNP_PCB_LOCK(unp); 4458 if (unp->unp_vnode == vp) { 4459 VOP_UNP_DETACH(vp); 4460 unp->unp_vnode = NULL; 4461 active = 1; 4462 } 4463 UNP_PCB_UNLOCK(unp); 4464 done: 4465 mtx_unlock(vplock); 4466 if (active) 4467 vunref(vp); 4468 } 4469 4470 #ifdef DDB 4471 static void 4472 db_print_indent(int indent) 4473 { 4474 int i; 4475 4476 for (i = 0; i < indent; i++) 4477 db_printf(" "); 4478 } 4479 4480 static void 4481 db_print_unpflags(int unp_flags) 4482 { 4483 int comma; 4484 4485 comma = 0; 4486 if (unp_flags & UNP_HAVEPC) { 4487 db_printf("%sUNP_HAVEPC", comma ? ", " : ""); 4488 comma = 1; 4489 } 4490 if (unp_flags & UNP_WANTCRED_ALWAYS) { 4491 db_printf("%sUNP_WANTCRED_ALWAYS", comma ? ", " : ""); 4492 comma = 1; 4493 } 4494 if (unp_flags & UNP_WANTCRED_ONESHOT) { 4495 db_printf("%sUNP_WANTCRED_ONESHOT", comma ? ", " : ""); 4496 comma = 1; 4497 } 4498 if (unp_flags & UNP_CONNECTING) { 4499 db_printf("%sUNP_CONNECTING", comma ? ", " : ""); 4500 comma = 1; 4501 } 4502 if (unp_flags & UNP_BINDING) { 4503 db_printf("%sUNP_BINDING", comma ? ", " : ""); 4504 comma = 1; 4505 } 4506 } 4507 4508 static void 4509 db_print_xucred(int indent, struct xucred *xu) 4510 { 4511 int comma, i; 4512 4513 db_print_indent(indent); 4514 db_printf("cr_version: %u cr_uid: %u cr_pid: %d cr_ngroups: %d\n", 4515 xu->cr_version, xu->cr_uid, xu->cr_pid, xu->cr_ngroups); 4516 db_print_indent(indent); 4517 db_printf("cr_groups: "); 4518 comma = 0; 4519 for (i = 0; i < xu->cr_ngroups; i++) { 4520 db_printf("%s%u", comma ? 
", " : "", xu->cr_groups[i]); 4521 comma = 1; 4522 } 4523 db_printf("\n"); 4524 } 4525 4526 static void 4527 db_print_unprefs(int indent, struct unp_head *uh) 4528 { 4529 struct unpcb *unp; 4530 int counter; 4531 4532 counter = 0; 4533 LIST_FOREACH(unp, uh, unp_reflink) { 4534 if (counter % 4 == 0) 4535 db_print_indent(indent); 4536 db_printf("%p ", unp); 4537 if (counter % 4 == 3) 4538 db_printf("\n"); 4539 counter++; 4540 } 4541 if (counter != 0 && counter % 4 != 0) 4542 db_printf("\n"); 4543 } 4544 4545 DB_SHOW_COMMAND(unpcb, db_show_unpcb) 4546 { 4547 struct unpcb *unp; 4548 4549 if (!have_addr) { 4550 db_printf("usage: show unpcb <addr>\n"); 4551 return; 4552 } 4553 unp = (struct unpcb *)addr; 4554 4555 db_printf("unp_socket: %p unp_vnode: %p\n", unp->unp_socket, 4556 unp->unp_vnode); 4557 4558 db_printf("unp_ino: %ju unp_conn: %p\n", (uintmax_t)unp->unp_ino, 4559 unp->unp_conn); 4560 4561 db_printf("unp_refs:\n"); 4562 db_print_unprefs(2, &unp->unp_refs); 4563 4564 /* XXXRW: Would be nice to print the full address, if any. */ 4565 db_printf("unp_addr: %p\n", unp->unp_addr); 4566 4567 db_printf("unp_gencnt: %llu\n", 4568 (unsigned long long)unp->unp_gencnt); 4569 4570 db_printf("unp_flags: %x (", unp->unp_flags); 4571 db_print_unpflags(unp->unp_flags); 4572 db_printf(")\n"); 4573 4574 db_printf("unp_peercred:\n"); 4575 db_print_xucred(2, &unp->unp_peercred); 4576 4577 db_printf("unp_refcount: %u\n", unp->unp_refcount); 4578 } 4579 #endif 4580