1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1982, 1986, 1989, 1991, 1993 5 * The Regents of the University of California. All Rights Reserved. 6 * Copyright (c) 2004-2009 Robert N. M. Watson All Rights Reserved. 7 * Copyright (c) 2018 Matthew Macy 8 * Copyright (c) 2022-2025 Gleb Smirnoff <glebius@FreeBSD.org> 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 */ 34 35 /* 36 * UNIX Domain (Local) Sockets 37 * 38 * This is an implementation of UNIX (local) domain sockets. Each socket has 39 * an associated struct unpcb (UNIX protocol control block). Stream sockets 40 * may be connected to 0 or 1 other socket. Datagram sockets may be 41 * connected to 0, 1, or many other sockets. Sockets may be created and 42 * connected in pairs (socketpair(2)), or bound/connected to using the file 43 * system name space. For most purposes, only the receive socket buffer is 44 * used, as sending on one socket delivers directly to the receive socket 45 * buffer of a second socket. 46 * 47 * The implementation is substantially complicated by the fact that 48 * "ancillary data", such as file descriptors or credentials, may be passed 49 * across UNIX domain sockets. The potential for passing UNIX domain sockets 50 * over other UNIX domain sockets requires the implementation of a simple 51 * garbage collector to find and tear down cycles of disconnected sockets. 
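 *
 * As an illustration of the ancillary data mentioned above, a minimal
 * userland sketch of passing one file descriptor over a socketpair(2)
 * could look roughly as follows (not part of this file; 'sv' and
 * 'fd_to_pass' are assumed to come from socketpair(2) and the caller):
 *
 *	union {
 *		struct cmsghdr	hdr;
 *		char		buf[CMSG_SPACE(sizeof(int))];
 *	} cm;
 *	char byte = 0;
 *	struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cm.buf, .msg_controllen = sizeof(cm.buf),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_SOCKET;
 *	cmsg->cmsg_type = SCM_RIGHTS;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));
 *	(void)sendmsg(sv[0], &msg, 0);
 *
 * The receiver picks the descriptor up with recvmsg(2) and CMSG_DATA().
 * The kernel side of such transfers is implemented by unp_internalize()
 * and unp_externalize() below.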
 *
 * TODO:
 *	RDM
 *	rethink name space problems
 *	need a proper out-of-band
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
#include <sys/un.h>
#include <sys/unpcb.h>
#include <sys/vnode.h>

#include <net/vnet.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <security/mac/mac_framework.h>

#include <vm/uma.h>

MALLOC_DECLARE(M_FILECAPS);

static struct domain localdomain;

static uma_zone_t	unp_zone;
static unp_gen_t	unp_gencnt;	/* (l) */
static u_int		unp_count;	/* (l) Count of local sockets. */
static ino_t		unp_ino;	/* Prototype for fake inode numbers. */
static int		unp_rights;	/* (g) File descriptors in flight. */
static struct unp_head	unp_shead;	/* (l) List of stream sockets. */
static struct unp_head	unp_dhead;	/* (l) List of datagram sockets. */
static struct unp_head	unp_sphead;	/* (l) List of seqpacket sockets. */
static struct mtx_pool	*unp_vp_mtxpool;

struct unp_defer {
	SLIST_ENTRY(unp_defer) ud_link;
	struct file *ud_fp;
};
static SLIST_HEAD(, unp_defer) unp_defers;
static int unp_defers_count;

static const struct sockaddr sun_noname = {
	.sa_len = sizeof(sun_noname),
	.sa_family = AF_LOCAL,
};

/*
 * Garbage collection of cyclic file descriptor/socket references occurs
 * asynchronously in a taskqueue context in order to avoid recursion and
 * reentrance in the UNIX domain socket, file descriptor, and socket layer
 * code.  See unp_gc() for a full description.
 */
static struct timeout_task unp_gc_task;

/*
 * Closing UNIX domain sockets attached as SCM_RIGHTS is postponed to the
 * taskqueue, to avoid arbitrary recursion depth.  The attached sockets may
 * themselves have other sockets attached.
 */
static struct task	unp_defer_task;

/*
 * SOCK_STREAM and SOCK_SEQPACKET unix(4) sockets fully bypass the send
 * buffer, however the notion of a send buffer still makes sense for them.
 * Its size is the amount of space that a send(2) syscall may copyin(9)
 * before checking against the receive buffer of the peer.  Although the
 * copied-in data is not linked anywhere yet and is only pointed to by a
 * stack variable, effectively it is a buffer that needs to be sized.
 *
 * SOCK_DGRAM sockets really use the sendspace only as the maximum datagram
 * size, and don't really want to reserve the sendspace.  Their recvspace
 * should be large enough for at least one max-size datagram plus address.
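 *
 * Both groups of defaults are exported below under the net.local sysctl
 * tree and can be tuned at runtime, e.g. (a usage sketch, values arbitrary):
 *
 *	# sysctl net.local.dgram.maxdgram=16384
 *	# sysctl net.local.stream.recvspace=65536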
156 */ 157 #ifndef PIPSIZ 158 #define PIPSIZ 8192 159 #endif 160 static u_long unpst_sendspace = PIPSIZ; 161 static u_long unpst_recvspace = PIPSIZ; 162 static u_long unpdg_maxdgram = 8*1024; /* support 8KB syslog msgs */ 163 static u_long unpdg_recvspace = 16*1024; 164 static u_long unpsp_sendspace = PIPSIZ; 165 static u_long unpsp_recvspace = PIPSIZ; 166 167 static SYSCTL_NODE(_net, PF_LOCAL, local, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 168 "Local domain"); 169 static SYSCTL_NODE(_net_local, SOCK_STREAM, stream, 170 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 171 "SOCK_STREAM"); 172 static SYSCTL_NODE(_net_local, SOCK_DGRAM, dgram, 173 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 174 "SOCK_DGRAM"); 175 static SYSCTL_NODE(_net_local, SOCK_SEQPACKET, seqpacket, 176 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 177 "SOCK_SEQPACKET"); 178 179 SYSCTL_ULONG(_net_local_stream, OID_AUTO, sendspace, CTLFLAG_RW, 180 &unpst_sendspace, 0, "Default stream send space."); 181 SYSCTL_ULONG(_net_local_stream, OID_AUTO, recvspace, CTLFLAG_RW, 182 &unpst_recvspace, 0, "Default stream receive space."); 183 SYSCTL_ULONG(_net_local_dgram, OID_AUTO, maxdgram, CTLFLAG_RW, 184 &unpdg_maxdgram, 0, "Maximum datagram size."); 185 SYSCTL_ULONG(_net_local_dgram, OID_AUTO, recvspace, CTLFLAG_RW, 186 &unpdg_recvspace, 0, "Default datagram receive space."); 187 SYSCTL_ULONG(_net_local_seqpacket, OID_AUTO, maxseqpacket, CTLFLAG_RW, 188 &unpsp_sendspace, 0, "Default seqpacket send space."); 189 SYSCTL_ULONG(_net_local_seqpacket, OID_AUTO, recvspace, CTLFLAG_RW, 190 &unpsp_recvspace, 0, "Default seqpacket receive space."); 191 SYSCTL_INT(_net_local, OID_AUTO, inflight, CTLFLAG_RD, &unp_rights, 0, 192 "File descriptors in flight."); 193 SYSCTL_INT(_net_local, OID_AUTO, deferred, CTLFLAG_RD, 194 &unp_defers_count, 0, 195 "File descriptors deferred to taskqueue for close."); 196 197 /* 198 * Locking and synchronization: 199 * 200 * Several types of locks exist in the local domain socket implementation: 201 * - a global linkage lock 202 * - a global connection list lock 203 * - the mtxpool lock 204 * - per-unpcb mutexes 205 * 206 * The linkage lock protects the global socket lists, the generation number 207 * counter and garbage collector state. 208 * 209 * The connection list lock protects the list of referring sockets in a datagram 210 * socket PCB. This lock is also overloaded to protect a global list of 211 * sockets whose buffers contain socket references in the form of SCM_RIGHTS 212 * messages. To avoid recursion, such references are released by a dedicated 213 * thread. 214 * 215 * The mtxpool lock protects the vnode from being modified while referenced. 216 * Lock ordering rules require that it be acquired before any PCB locks. 217 * 218 * The unpcb lock (unp_mtx) protects the most commonly referenced fields in the 219 * unpcb. This includes the unp_conn field, which either links two connected 220 * PCBs together (for connected socket types) or points at the destination 221 * socket (for connectionless socket types). The operations of creating or 222 * destroying a connection therefore involve locking multiple PCBs. To avoid 223 * lock order reversals, in some cases this involves dropping a PCB lock and 224 * using a reference counter to maintain liveness. 225 * 226 * UNIX domain sockets each have an unpcb hung off of their so_pcb pointer, 227 * allocated in pr_attach() and freed in pr_detach(). The validity of that 228 * pointer is an invariant, so no lock is required to dereference the so_pcb 229 * pointer if a valid socket reference is held by the caller. 
In practice, 230 * this is always true during operations performed on a socket. Each unpcb 231 * has a back-pointer to its socket, unp_socket, which will be stable under 232 * the same circumstances. 233 * 234 * This pointer may only be safely dereferenced as long as a valid reference 235 * to the unpcb is held. Typically, this reference will be from the socket, 236 * or from another unpcb when the referring unpcb's lock is held (in order 237 * that the reference not be invalidated during use). For example, to follow 238 * unp->unp_conn->unp_socket, you need to hold a lock on unp_conn to guarantee 239 * that detach is not run clearing unp_socket. 240 * 241 * Blocking with UNIX domain sockets is a tricky issue: unlike most network 242 * protocols, bind() is a non-atomic operation, and connect() requires 243 * potential sleeping in the protocol, due to potentially waiting on local or 244 * distributed file systems. We try to separate "lookup" operations, which 245 * may sleep, and the IPC operations themselves, which typically can occur 246 * with relative atomicity as locks can be held over the entire operation. 247 * 248 * Another tricky issue is simultaneous multi-threaded or multi-process 249 * access to a single UNIX domain socket. These are handled by the flags 250 * UNP_CONNECTING and UNP_BINDING, which prevent concurrent connecting or 251 * binding, both of which involve dropping UNIX domain socket locks in order 252 * to perform namei() and other file system operations. 253 */ 254 static struct rwlock unp_link_rwlock; 255 static struct mtx unp_defers_lock; 256 257 #define UNP_LINK_LOCK_INIT() rw_init(&unp_link_rwlock, \ 258 "unp_link_rwlock") 259 260 #define UNP_LINK_LOCK_ASSERT() rw_assert(&unp_link_rwlock, \ 261 RA_LOCKED) 262 #define UNP_LINK_UNLOCK_ASSERT() rw_assert(&unp_link_rwlock, \ 263 RA_UNLOCKED) 264 265 #define UNP_LINK_RLOCK() rw_rlock(&unp_link_rwlock) 266 #define UNP_LINK_RUNLOCK() rw_runlock(&unp_link_rwlock) 267 #define UNP_LINK_WLOCK() rw_wlock(&unp_link_rwlock) 268 #define UNP_LINK_WUNLOCK() rw_wunlock(&unp_link_rwlock) 269 #define UNP_LINK_WLOCK_ASSERT() rw_assert(&unp_link_rwlock, \ 270 RA_WLOCKED) 271 #define UNP_LINK_WOWNED() rw_wowned(&unp_link_rwlock) 272 273 #define UNP_DEFERRED_LOCK_INIT() mtx_init(&unp_defers_lock, \ 274 "unp_defer", NULL, MTX_DEF) 275 #define UNP_DEFERRED_LOCK() mtx_lock(&unp_defers_lock) 276 #define UNP_DEFERRED_UNLOCK() mtx_unlock(&unp_defers_lock) 277 278 #define UNP_REF_LIST_LOCK() UNP_DEFERRED_LOCK(); 279 #define UNP_REF_LIST_UNLOCK() UNP_DEFERRED_UNLOCK(); 280 281 #define UNP_PCB_LOCK_INIT(unp) mtx_init(&(unp)->unp_mtx, \ 282 "unp", "unp", \ 283 MTX_DUPOK|MTX_DEF) 284 #define UNP_PCB_LOCK_DESTROY(unp) mtx_destroy(&(unp)->unp_mtx) 285 #define UNP_PCB_LOCKPTR(unp) (&(unp)->unp_mtx) 286 #define UNP_PCB_LOCK(unp) mtx_lock(&(unp)->unp_mtx) 287 #define UNP_PCB_TRYLOCK(unp) mtx_trylock(&(unp)->unp_mtx) 288 #define UNP_PCB_UNLOCK(unp) mtx_unlock(&(unp)->unp_mtx) 289 #define UNP_PCB_OWNED(unp) mtx_owned(&(unp)->unp_mtx) 290 #define UNP_PCB_LOCK_ASSERT(unp) mtx_assert(&(unp)->unp_mtx, MA_OWNED) 291 #define UNP_PCB_UNLOCK_ASSERT(unp) mtx_assert(&(unp)->unp_mtx, MA_NOTOWNED) 292 293 static int uipc_connect2(struct socket *, struct socket *); 294 static int uipc_ctloutput(struct socket *, struct sockopt *); 295 static int unp_connect(struct socket *, struct sockaddr *, 296 struct thread *); 297 static int unp_connectat(int, struct socket *, struct sockaddr *, 298 struct thread *, bool); 299 static void unp_connect2(struct socket *, struct 
socket *, bool); 300 static void unp_disconnect(struct unpcb *unp, struct unpcb *unp2); 301 static void unp_dispose(struct socket *so); 302 static void unp_drop(struct unpcb *); 303 static void unp_gc(__unused void *, int); 304 static void unp_scan(struct mbuf *, void (*)(struct filedescent **, int)); 305 static void unp_discard(struct file *); 306 static void unp_freerights(struct filedescent **, int); 307 static int unp_internalize(struct mbuf *, struct mchain *, 308 struct thread *); 309 static void unp_internalize_fp(struct file *); 310 static int unp_externalize(struct mbuf *, struct mbuf **, int); 311 static int unp_externalize_fp(struct file *); 312 static void unp_addsockcred(struct thread *, struct mchain *, int); 313 static void unp_process_defers(void * __unused, int); 314 315 static void uipc_wrknl_lock(void *); 316 static void uipc_wrknl_unlock(void *); 317 static void uipc_wrknl_assert_lock(void *, int); 318 319 static void 320 unp_pcb_hold(struct unpcb *unp) 321 { 322 u_int old __unused; 323 324 old = refcount_acquire(&unp->unp_refcount); 325 KASSERT(old > 0, ("%s: unpcb %p has no references", __func__, unp)); 326 } 327 328 static __result_use_check bool 329 unp_pcb_rele(struct unpcb *unp) 330 { 331 bool ret; 332 333 UNP_PCB_LOCK_ASSERT(unp); 334 335 if ((ret = refcount_release(&unp->unp_refcount))) { 336 UNP_PCB_UNLOCK(unp); 337 UNP_PCB_LOCK_DESTROY(unp); 338 uma_zfree(unp_zone, unp); 339 } 340 return (ret); 341 } 342 343 static void 344 unp_pcb_rele_notlast(struct unpcb *unp) 345 { 346 bool ret __unused; 347 348 ret = refcount_release(&unp->unp_refcount); 349 KASSERT(!ret, ("%s: unpcb %p has no references", __func__, unp)); 350 } 351 352 static void 353 unp_pcb_lock_pair(struct unpcb *unp, struct unpcb *unp2) 354 { 355 UNP_PCB_UNLOCK_ASSERT(unp); 356 UNP_PCB_UNLOCK_ASSERT(unp2); 357 358 if (unp == unp2) { 359 UNP_PCB_LOCK(unp); 360 } else if ((uintptr_t)unp2 > (uintptr_t)unp) { 361 UNP_PCB_LOCK(unp); 362 UNP_PCB_LOCK(unp2); 363 } else { 364 UNP_PCB_LOCK(unp2); 365 UNP_PCB_LOCK(unp); 366 } 367 } 368 369 static void 370 unp_pcb_unlock_pair(struct unpcb *unp, struct unpcb *unp2) 371 { 372 UNP_PCB_UNLOCK(unp); 373 if (unp != unp2) 374 UNP_PCB_UNLOCK(unp2); 375 } 376 377 /* 378 * Try to lock the connected peer of an already locked socket. In some cases 379 * this requires that we unlock the current socket. The pairbusy counter is 380 * used to block concurrent connection attempts while the lock is dropped. The 381 * caller must be careful to revalidate PCB state. 382 */ 383 static struct unpcb * 384 unp_pcb_lock_peer(struct unpcb *unp) 385 { 386 struct unpcb *unp2; 387 388 UNP_PCB_LOCK_ASSERT(unp); 389 unp2 = unp->unp_conn; 390 if (unp2 == NULL) 391 return (NULL); 392 if (__predict_false(unp == unp2)) 393 return (unp); 394 395 UNP_PCB_UNLOCK_ASSERT(unp2); 396 397 if (__predict_true(UNP_PCB_TRYLOCK(unp2))) 398 return (unp2); 399 if ((uintptr_t)unp2 > (uintptr_t)unp) { 400 UNP_PCB_LOCK(unp2); 401 return (unp2); 402 } 403 unp->unp_pairbusy++; 404 unp_pcb_hold(unp2); 405 UNP_PCB_UNLOCK(unp); 406 407 UNP_PCB_LOCK(unp2); 408 UNP_PCB_LOCK(unp); 409 KASSERT(unp->unp_conn == unp2 || unp->unp_conn == NULL, 410 ("%s: socket %p was reconnected", __func__, unp)); 411 if (--unp->unp_pairbusy == 0 && (unp->unp_flags & UNP_WAITING) != 0) { 412 unp->unp_flags &= ~UNP_WAITING; 413 wakeup(unp); 414 } 415 if (unp_pcb_rele(unp2)) { 416 /* unp2 is unlocked. 
*/ 417 return (NULL); 418 } 419 if (unp->unp_conn == NULL) { 420 UNP_PCB_UNLOCK(unp2); 421 return (NULL); 422 } 423 return (unp2); 424 } 425 426 /* 427 * Try to lock peer of our socket for purposes of sending data to it. 428 */ 429 static int 430 uipc_lock_peer(struct socket *so, struct unpcb **unp2) 431 { 432 struct unpcb *unp; 433 int error; 434 435 unp = sotounpcb(so); 436 UNP_PCB_LOCK(unp); 437 *unp2 = unp_pcb_lock_peer(unp); 438 if (__predict_false(so->so_error != 0)) { 439 error = so->so_error; 440 so->so_error = 0; 441 UNP_PCB_UNLOCK(unp); 442 if (*unp2 != NULL) 443 UNP_PCB_UNLOCK(*unp2); 444 return (error); 445 } 446 if (__predict_false(*unp2 == NULL)) { 447 /* 448 * Different error code for a previously connected socket and 449 * a never connected one. The SS_ISDISCONNECTED is set in the 450 * unp_soisdisconnected() and is synchronized by the pcb lock. 451 */ 452 error = so->so_state & SS_ISDISCONNECTED ? EPIPE : ENOTCONN; 453 UNP_PCB_UNLOCK(unp); 454 return (error); 455 } 456 UNP_PCB_UNLOCK(unp); 457 458 return (0); 459 } 460 461 static void 462 uipc_abort(struct socket *so) 463 { 464 struct unpcb *unp, *unp2; 465 466 unp = sotounpcb(so); 467 KASSERT(unp != NULL, ("uipc_abort: unp == NULL")); 468 UNP_PCB_UNLOCK_ASSERT(unp); 469 470 UNP_PCB_LOCK(unp); 471 unp2 = unp->unp_conn; 472 if (unp2 != NULL) { 473 unp_pcb_hold(unp2); 474 UNP_PCB_UNLOCK(unp); 475 unp_drop(unp2); 476 } else 477 UNP_PCB_UNLOCK(unp); 478 } 479 480 static int 481 uipc_attach(struct socket *so, int proto, struct thread *td) 482 { 483 u_long sendspace, recvspace; 484 struct unpcb *unp; 485 int error; 486 bool locked; 487 488 KASSERT(so->so_pcb == NULL, ("uipc_attach: so_pcb != NULL")); 489 switch (so->so_type) { 490 case SOCK_DGRAM: 491 STAILQ_INIT(&so->so_rcv.uxdg_mb); 492 STAILQ_INIT(&so->so_snd.uxdg_mb); 493 TAILQ_INIT(&so->so_rcv.uxdg_conns); 494 /* 495 * Since send buffer is either bypassed or is a part 496 * of one-to-many receive buffer, we assign both space 497 * limits to unpdg_recvspace. 498 */ 499 sendspace = recvspace = unpdg_recvspace; 500 break; 501 502 case SOCK_STREAM: 503 sendspace = unpst_sendspace; 504 recvspace = unpst_recvspace; 505 goto common; 506 507 case SOCK_SEQPACKET: 508 sendspace = unpsp_sendspace; 509 recvspace = unpsp_recvspace; 510 common: 511 /* 512 * XXXGL: we need to initialize the mutex with MTX_DUPOK. 513 * Ideally, protocols that have PR_SOCKBUF should be 514 * responsible for mutex initialization officially, and then 515 * this uglyness with mtx_destroy(); mtx_init(); would go away. 
516 */ 517 mtx_destroy(&so->so_rcv_mtx); 518 mtx_init(&so->so_rcv_mtx, "so_rcv", NULL, MTX_DEF | MTX_DUPOK); 519 knlist_init(&so->so_wrsel.si_note, so, uipc_wrknl_lock, 520 uipc_wrknl_unlock, uipc_wrknl_assert_lock); 521 STAILQ_INIT(&so->so_rcv.uxst_mbq); 522 break; 523 default: 524 panic("uipc_attach"); 525 } 526 error = soreserve(so, sendspace, recvspace); 527 if (error) 528 return (error); 529 unp = uma_zalloc(unp_zone, M_NOWAIT | M_ZERO); 530 if (unp == NULL) 531 return (ENOBUFS); 532 LIST_INIT(&unp->unp_refs); 533 UNP_PCB_LOCK_INIT(unp); 534 unp->unp_socket = so; 535 so->so_pcb = unp; 536 refcount_init(&unp->unp_refcount, 1); 537 unp->unp_mode = ACCESSPERMS; 538 539 if ((locked = UNP_LINK_WOWNED()) == false) 540 UNP_LINK_WLOCK(); 541 542 unp->unp_gencnt = ++unp_gencnt; 543 unp->unp_ino = ++unp_ino; 544 unp_count++; 545 switch (so->so_type) { 546 case SOCK_STREAM: 547 LIST_INSERT_HEAD(&unp_shead, unp, unp_link); 548 break; 549 550 case SOCK_DGRAM: 551 LIST_INSERT_HEAD(&unp_dhead, unp, unp_link); 552 break; 553 554 case SOCK_SEQPACKET: 555 LIST_INSERT_HEAD(&unp_sphead, unp, unp_link); 556 break; 557 558 default: 559 panic("uipc_attach"); 560 } 561 562 if (locked == false) 563 UNP_LINK_WUNLOCK(); 564 565 return (0); 566 } 567 568 static int 569 uipc_bindat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td) 570 { 571 struct sockaddr_un *soun = (struct sockaddr_un *)nam; 572 struct vattr vattr; 573 int error, namelen; 574 struct nameidata nd; 575 struct unpcb *unp; 576 struct vnode *vp; 577 struct mount *mp; 578 cap_rights_t rights; 579 char *buf; 580 mode_t mode; 581 582 if (nam->sa_family != AF_UNIX) 583 return (EAFNOSUPPORT); 584 585 unp = sotounpcb(so); 586 KASSERT(unp != NULL, ("uipc_bind: unp == NULL")); 587 588 if (soun->sun_len > sizeof(struct sockaddr_un)) 589 return (EINVAL); 590 namelen = soun->sun_len - offsetof(struct sockaddr_un, sun_path); 591 if (namelen <= 0) 592 return (EINVAL); 593 594 /* 595 * We don't allow simultaneous bind() calls on a single UNIX domain 596 * socket, so flag in-progress operations, and return an error if an 597 * operation is already in progress. 598 * 599 * Historically, we have not allowed a socket to be rebound, so this 600 * also returns an error. Not allowing re-binding simplifies the 601 * implementation and avoids a great many possible failure modes. 
602 */ 603 UNP_PCB_LOCK(unp); 604 if (unp->unp_vnode != NULL) { 605 UNP_PCB_UNLOCK(unp); 606 return (EINVAL); 607 } 608 if (unp->unp_flags & UNP_BINDING) { 609 UNP_PCB_UNLOCK(unp); 610 return (EALREADY); 611 } 612 unp->unp_flags |= UNP_BINDING; 613 mode = unp->unp_mode & ~td->td_proc->p_pd->pd_cmask; 614 UNP_PCB_UNLOCK(unp); 615 616 buf = malloc(namelen + 1, M_TEMP, M_WAITOK); 617 bcopy(soun->sun_path, buf, namelen); 618 buf[namelen] = 0; 619 620 restart: 621 NDINIT_ATRIGHTS(&nd, CREATE, NOFOLLOW | LOCKPARENT | NOCACHE, 622 UIO_SYSSPACE, buf, fd, cap_rights_init_one(&rights, CAP_BINDAT)); 623 /* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */ 624 error = namei(&nd); 625 if (error) 626 goto error; 627 vp = nd.ni_vp; 628 if (vp != NULL || vn_start_write(nd.ni_dvp, &mp, V_NOWAIT) != 0) { 629 NDFREE_PNBUF(&nd); 630 if (nd.ni_dvp == vp) 631 vrele(nd.ni_dvp); 632 else 633 vput(nd.ni_dvp); 634 if (vp != NULL) { 635 vrele(vp); 636 error = EADDRINUSE; 637 goto error; 638 } 639 error = vn_start_write(NULL, &mp, V_XSLEEP | V_PCATCH); 640 if (error) 641 goto error; 642 goto restart; 643 } 644 VATTR_NULL(&vattr); 645 vattr.va_type = VSOCK; 646 vattr.va_mode = mode; 647 #ifdef MAC 648 error = mac_vnode_check_create(td->td_ucred, nd.ni_dvp, &nd.ni_cnd, 649 &vattr); 650 #endif 651 if (error == 0) { 652 /* 653 * The prior lookup may have left LK_SHARED in cn_lkflags, 654 * and VOP_CREATE technically only requires the new vnode to 655 * be locked shared. Most filesystems will return the new vnode 656 * locked exclusive regardless, but we should explicitly 657 * specify that here since we require it and assert to that 658 * effect below. 659 */ 660 nd.ni_cnd.cn_lkflags = (nd.ni_cnd.cn_lkflags & ~LK_SHARED) | 661 LK_EXCLUSIVE; 662 error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr); 663 } 664 NDFREE_PNBUF(&nd); 665 if (error) { 666 VOP_VPUT_PAIR(nd.ni_dvp, NULL, true); 667 vn_finished_write(mp); 668 if (error == ERELOOKUP) 669 goto restart; 670 goto error; 671 } 672 vp = nd.ni_vp; 673 ASSERT_VOP_ELOCKED(vp, "uipc_bind"); 674 soun = (struct sockaddr_un *)sodupsockaddr(nam, M_WAITOK); 675 676 UNP_PCB_LOCK(unp); 677 VOP_UNP_BIND(vp, unp); 678 unp->unp_vnode = vp; 679 unp->unp_addr = soun; 680 unp->unp_flags &= ~UNP_BINDING; 681 UNP_PCB_UNLOCK(unp); 682 vref(vp); 683 VOP_VPUT_PAIR(nd.ni_dvp, &vp, true); 684 vn_finished_write(mp); 685 free(buf, M_TEMP); 686 return (0); 687 688 error: 689 UNP_PCB_LOCK(unp); 690 unp->unp_flags &= ~UNP_BINDING; 691 UNP_PCB_UNLOCK(unp); 692 free(buf, M_TEMP); 693 return (error); 694 } 695 696 static int 697 uipc_bind(struct socket *so, struct sockaddr *nam, struct thread *td) 698 { 699 700 return (uipc_bindat(AT_FDCWD, so, nam, td)); 701 } 702 703 static int 704 uipc_connect(struct socket *so, struct sockaddr *nam, struct thread *td) 705 { 706 int error; 707 708 KASSERT(td == curthread, ("uipc_connect: td != curthread")); 709 error = unp_connect(so, nam, td); 710 return (error); 711 } 712 713 static int 714 uipc_connectat(int fd, struct socket *so, struct sockaddr *nam, 715 struct thread *td) 716 { 717 int error; 718 719 KASSERT(td == curthread, ("uipc_connectat: td != curthread")); 720 error = unp_connectat(fd, so, nam, td, false); 721 return (error); 722 } 723 724 static void 725 uipc_close(struct socket *so) 726 { 727 struct unpcb *unp, *unp2; 728 struct vnode *vp = NULL; 729 struct mtx *vplock; 730 731 unp = sotounpcb(so); 732 KASSERT(unp != NULL, ("uipc_close: unp == NULL")); 733 734 vplock = NULL; 735 if ((vp = unp->unp_vnode) != NULL) { 736 vplock = 
mtx_pool_find(unp_vp_mtxpool, vp); 737 mtx_lock(vplock); 738 } 739 UNP_PCB_LOCK(unp); 740 if (vp && unp->unp_vnode == NULL) { 741 mtx_unlock(vplock); 742 vp = NULL; 743 } 744 if (vp != NULL) { 745 VOP_UNP_DETACH(vp); 746 unp->unp_vnode = NULL; 747 } 748 if ((unp2 = unp_pcb_lock_peer(unp)) != NULL) 749 unp_disconnect(unp, unp2); 750 else 751 UNP_PCB_UNLOCK(unp); 752 if (vp) { 753 mtx_unlock(vplock); 754 vrele(vp); 755 } 756 } 757 758 static int 759 uipc_chmod(struct socket *so, mode_t mode, struct ucred *cred __unused, 760 struct thread *td __unused) 761 { 762 struct unpcb *unp; 763 int error; 764 765 if ((mode & ~ACCESSPERMS) != 0) 766 return (EINVAL); 767 768 error = 0; 769 unp = sotounpcb(so); 770 UNP_PCB_LOCK(unp); 771 if (unp->unp_vnode != NULL || (unp->unp_flags & UNP_BINDING) != 0) 772 error = EINVAL; 773 else 774 unp->unp_mode = mode; 775 UNP_PCB_UNLOCK(unp); 776 return (error); 777 } 778 779 static int 780 uipc_connect2(struct socket *so1, struct socket *so2) 781 { 782 struct unpcb *unp, *unp2; 783 784 if (so1->so_type != so2->so_type) 785 return (EPROTOTYPE); 786 787 unp = so1->so_pcb; 788 KASSERT(unp != NULL, ("uipc_connect2: unp == NULL")); 789 unp2 = so2->so_pcb; 790 KASSERT(unp2 != NULL, ("uipc_connect2: unp2 == NULL")); 791 unp_pcb_lock_pair(unp, unp2); 792 unp_connect2(so1, so2, false); 793 unp_pcb_unlock_pair(unp, unp2); 794 795 return (0); 796 } 797 798 static void 799 uipc_detach(struct socket *so) 800 { 801 struct unpcb *unp, *unp2; 802 struct mtx *vplock; 803 struct vnode *vp; 804 int local_unp_rights; 805 806 unp = sotounpcb(so); 807 KASSERT(unp != NULL, ("uipc_detach: unp == NULL")); 808 809 vp = NULL; 810 vplock = NULL; 811 812 if (!SOLISTENING(so)) 813 unp_dispose(so); 814 815 UNP_LINK_WLOCK(); 816 LIST_REMOVE(unp, unp_link); 817 if (unp->unp_gcflag & UNPGC_DEAD) 818 LIST_REMOVE(unp, unp_dead); 819 unp->unp_gencnt = ++unp_gencnt; 820 --unp_count; 821 UNP_LINK_WUNLOCK(); 822 823 UNP_PCB_UNLOCK_ASSERT(unp); 824 restart: 825 if ((vp = unp->unp_vnode) != NULL) { 826 vplock = mtx_pool_find(unp_vp_mtxpool, vp); 827 mtx_lock(vplock); 828 } 829 UNP_PCB_LOCK(unp); 830 if (unp->unp_vnode != vp && unp->unp_vnode != NULL) { 831 if (vplock) 832 mtx_unlock(vplock); 833 UNP_PCB_UNLOCK(unp); 834 goto restart; 835 } 836 if ((vp = unp->unp_vnode) != NULL) { 837 VOP_UNP_DETACH(vp); 838 unp->unp_vnode = NULL; 839 } 840 if ((unp2 = unp_pcb_lock_peer(unp)) != NULL) 841 unp_disconnect(unp, unp2); 842 else 843 UNP_PCB_UNLOCK(unp); 844 845 UNP_REF_LIST_LOCK(); 846 while (!LIST_EMPTY(&unp->unp_refs)) { 847 struct unpcb *ref = LIST_FIRST(&unp->unp_refs); 848 849 unp_pcb_hold(ref); 850 UNP_REF_LIST_UNLOCK(); 851 852 MPASS(ref != unp); 853 UNP_PCB_UNLOCK_ASSERT(ref); 854 unp_drop(ref); 855 UNP_REF_LIST_LOCK(); 856 } 857 UNP_REF_LIST_UNLOCK(); 858 859 UNP_PCB_LOCK(unp); 860 local_unp_rights = unp_rights; 861 unp->unp_socket->so_pcb = NULL; 862 unp->unp_socket = NULL; 863 free(unp->unp_addr, M_SONAME); 864 unp->unp_addr = NULL; 865 if (!unp_pcb_rele(unp)) 866 UNP_PCB_UNLOCK(unp); 867 if (vp) { 868 mtx_unlock(vplock); 869 vrele(vp); 870 } 871 if (local_unp_rights) 872 taskqueue_enqueue_timeout(taskqueue_thread, &unp_gc_task, -1); 873 874 switch (so->so_type) { 875 case SOCK_STREAM: 876 case SOCK_SEQPACKET: 877 MPASS(SOLISTENING(so) || (STAILQ_EMPTY(&so->so_rcv.uxst_mbq) && 878 so->so_rcv.uxst_peer == NULL)); 879 break; 880 case SOCK_DGRAM: 881 /* 882 * Everything should have been unlinked/freed by unp_dispose() 883 * and/or unp_disconnect(). 
		 */
		MPASS(so->so_rcv.uxdg_peeked == NULL);
		MPASS(STAILQ_EMPTY(&so->so_rcv.uxdg_mb));
		MPASS(TAILQ_EMPTY(&so->so_rcv.uxdg_conns));
		MPASS(STAILQ_EMPTY(&so->so_snd.uxdg_mb));
	}
}

static int
uipc_disconnect(struct socket *so)
{
	struct unpcb *unp, *unp2;

	unp = sotounpcb(so);
	KASSERT(unp != NULL, ("uipc_disconnect: unp == NULL"));

	UNP_PCB_LOCK(unp);
	if ((unp2 = unp_pcb_lock_peer(unp)) != NULL)
		unp_disconnect(unp, unp2);
	else
		UNP_PCB_UNLOCK(unp);
	return (0);
}

static int
uipc_listen(struct socket *so, int backlog, struct thread *td)
{
	struct unpcb *unp;
	int error;

	MPASS(so->so_type != SOCK_DGRAM);

	/*
	 * Synchronize with concurrent connection attempts.
	 */
	error = 0;
	unp = sotounpcb(so);
	UNP_PCB_LOCK(unp);
	if (unp->unp_conn != NULL || (unp->unp_flags & UNP_CONNECTING) != 0)
		error = EINVAL;
	else if (unp->unp_vnode == NULL)
		error = EDESTADDRREQ;
	if (error != 0) {
		UNP_PCB_UNLOCK(unp);
		return (error);
	}

	SOCK_LOCK(so);
	error = solisten_proto_check(so);
	if (error == 0) {
		cru2xt(td, &unp->unp_peercred);
		if (!SOLISTENING(so)) {
			(void)chgsbsize(so->so_cred->cr_uidinfo,
			    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
			(void)chgsbsize(so->so_cred->cr_uidinfo,
			    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
		}
		solisten_proto(so, backlog);
	}
	SOCK_UNLOCK(so);
	UNP_PCB_UNLOCK(unp);
	return (error);
}

static int
uipc_peeraddr(struct socket *so, struct sockaddr *ret)
{
	struct unpcb *unp, *unp2;
	const struct sockaddr *sa;

	unp = sotounpcb(so);
	KASSERT(unp != NULL, ("uipc_peeraddr: unp == NULL"));

	UNP_PCB_LOCK(unp);
	unp2 = unp_pcb_lock_peer(unp);
	if (unp2 != NULL) {
		if (unp2->unp_addr != NULL)
			sa = (struct sockaddr *)unp2->unp_addr;
		else
			sa = &sun_noname;
		bcopy(sa, ret, sa->sa_len);
		unp_pcb_unlock_pair(unp, unp2);
	} else {
		UNP_PCB_UNLOCK(unp);
		sa = &sun_noname;
		bcopy(sa, ret, sa->sa_len);
	}
	return (0);
}

/*
 * A pr_sosend() call that passes an mbuf instead of a uio comes from a
 * kernel thread: NFS, netgraph(4) and other subsystems can call into socket
 * code.  This function conditions such an mbuf so that it can be safely put
 * onto a socket buffer, and calculates its char count and mbuf count.
 *
 * Note: we don't support receiving control data from a kernel thread.  Our
 * pr_sosend methods have MPASS() to check that.  This may change.
 */
static void
uipc_reset_kernel_mbuf(struct mbuf *m, struct mchain *mc)
{

	M_ASSERTPKTHDR(m);

	m_clrprotoflags(m);
	m_tag_delete_chain(m, NULL);
	m->m_pkthdr.rcvif = NULL;
	m->m_pkthdr.flowid = 0;
	m->m_pkthdr.csum_flags = 0;
	m->m_pkthdr.fibnum = 0;
	m->m_pkthdr.rsstype = 0;

	mc_init_m(mc, m);
	MPASS(m->m_pkthdr.len == mc->mc_len);
}

#ifdef SOCKBUF_DEBUG
static inline void
uipc_stream_sbcheck(struct sockbuf *sb)
{
	struct mbuf *d;
	u_int dacc, dccc, dctl, dmbcnt;
	bool notready = false;

	dacc = dccc = dctl = dmbcnt = 0;
	STAILQ_FOREACH(d, &sb->uxst_mbq, m_stailq) {
		if (d == sb->uxst_fnrdy) {
			MPASS(d->m_flags & M_NOTREADY);
			notready = true;
		}
		if (d->m_type == MT_CONTROL)
			dctl += d->m_len;
		else if (d->m_type == MT_DATA) {
			dccc += d->m_len;
			if (!notready)
				dacc += d->m_len;
		} else
			MPASS(0);
		dmbcnt += MSIZE;
		if (d->m_flags & M_EXT)
			dmbcnt += d->m_ext.ext_size;
		if (d->m_stailq.stqe_next == NULL)
			MPASS(sb->uxst_mbq.stqh_last == &d->m_stailq.stqe_next);
	}
	MPASS(sb->uxst_fnrdy == NULL || notready);
	MPASS(dacc == sb->sb_acc);
	MPASS(dccc == sb->sb_ccc);
	MPASS(dctl == sb->sb_ctl);
	MPASS(dmbcnt == sb->sb_mbcnt);
	(void)STAILQ_EMPTY(&sb->uxst_mbq);
}
#define	UIPC_STREAM_SBCHECK(sb)	uipc_stream_sbcheck(sb)
#else
#define	UIPC_STREAM_SBCHECK(sb)	do {} while (0)
#endif

/*
 * uipc_stream_sbspace() returns how much a writer can send, limited by char
 * count or mbuf memory use, whichever limit is hit first.
 *
 * An obvious and legitimate reason for a socket having more data than allowed
 * is lowering the limit with setsockopt(SO_RCVBUF) on an already full buffer.
 * Also, sb_mbcnt may overcommit sb_mbmax if a previous write observed
 * 'space < mbspace', but the mchain allocated to hold 'space' bytes of data
 * ended up with 'mc_mlen > mbspace'.  A typical scenario would be a full
 * buffer, a writer trying to push in a large write, and a slow reader that
 * reads just a few bytes at a time.  In that case the writer will keep
 * creating new mbufs with mc_split().  These mbufs will carry few bytes each,
 * but will all point at the same cluster, thus each adding the cluster size
 * to sb_mbcnt.  This means we may count the same cluster many times,
 * potentially underutilizing the socket buffer.  We aren't optimizing towards
 * inefficient readers.  The classic socket buffer had the same "feature".
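 *
 * As a worked example of the computation below: with sb_hiwat = 8192,
 * sb_ccc + sb_ctl = 5000, sb_mbmax = 65536 and sb_mbcnt = 64000, a writer
 * may queue at most min(8192 - 5000, 65536 - 64000) = 1536 more bytes.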
 */
static inline u_int
uipc_stream_sbspace(struct sockbuf *sb)
{
	u_int space, mbspace;

	if (__predict_true(sb->sb_hiwat >= sb->sb_ccc + sb->sb_ctl))
		space = sb->sb_hiwat - sb->sb_ccc - sb->sb_ctl;
	else
		return (0);
	if (__predict_true(sb->sb_mbmax >= sb->sb_mbcnt))
		mbspace = sb->sb_mbmax - sb->sb_mbcnt;
	else
		return (0);

	return (min(space, mbspace));
}

static int
uipc_sosend_stream_or_seqpacket(struct socket *so, struct sockaddr *addr,
    struct uio *uio0, struct mbuf *m, struct mbuf *c, int flags,
    struct thread *td)
{
	struct unpcb *unp2;
	struct socket *so2;
	struct sockbuf *sb;
	struct uio *uio;
	struct mchain mc, cmc;
	size_t resid, sent;
	bool nonblock, eor, aio;
	int error;

	MPASS((uio0 != NULL && m == NULL) || (m != NULL && uio0 == NULL));
	MPASS(m == NULL || c == NULL);

	if (__predict_false(flags & MSG_OOB))
		return (EOPNOTSUPP);

	nonblock = (so->so_state & SS_NBIO) ||
	    (flags & (MSG_DONTWAIT | MSG_NBIO));
	eor = flags & MSG_EOR;

	mc = MCHAIN_INITIALIZER(&mc);
	cmc = MCHAIN_INITIALIZER(&cmc);
	sent = 0;
	aio = false;

	if (m == NULL) {
		if (c != NULL && (error = unp_internalize(c, &cmc, td)))
			goto out;
		/*
		 * This function may read more data from the uio than it would
		 * then place on the socket.  That would leave the uio
		 * inconsistent upon return.  Normally the uio is allocated on
		 * the stack of the syscall thread and we don't care about
		 * leaving it consistent.  However, aio(9) will allocate a uio
		 * as part of the job and will use it to track progress.  We
		 * detect aio(9) by checking the SB_AIO_RUNNING flag.  It is
		 * safe to check it without the lock because it is set and
		 * cleared in the same taskqueue thread.
		 *
		 * This check can also produce a false positive: there is an
		 * aio(9) job and also a syscall we are serving right now.  No
		 * sane software does that; it would lead to a mess in the
		 * socket buffer, as aio(9) doesn't grab the I/O sx(9).  But
		 * syzkaller can create this mess.  For such a false positive
		 * our goal is just to not panic or leak memory.
		 */
		if (__predict_false(so->so_snd.sb_flags & SB_AIO_RUNNING)) {
			uio = cloneuio(uio0);
			aio = true;
		} else {
			uio = uio0;
			resid = uio->uio_resid;
		}
		/*
		 * Optimization for a case when our send fits into the receive
		 * buffer - do the copyin before taking any locks, sized to our
		 * send buffer.  Later copyins will also take into account
		 * space in the peer's receive buffer.
		 */
		error = mc_uiotomc(&mc, uio, so->so_snd.sb_hiwat, 0, M_WAITOK,
		    eor ? M_EOR : 0);
		if (__predict_false(error))
			goto out2;
	} else
		uipc_reset_kernel_mbuf(m, &mc);

	error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags));
	if (error)
		goto out2;

	if (__predict_false((error = uipc_lock_peer(so, &unp2)) != 0))
		goto out3;

	if (unp2->unp_flags & UNP_WANTCRED_MASK) {
		/*
		 * Credentials are passed only once on SOCK_STREAM and
		 * SOCK_SEQPACKET (LOCAL_CREDS => WANTCRED_ONESHOT), or
		 * forever (LOCAL_CREDS_PERSISTENT => WANTCRED_ALWAYS).
		 */
		unp_addsockcred(td, &cmc, unp2->unp_flags);
		unp2->unp_flags &= ~UNP_WANTCRED_ONESHOT;
	}

	/*
	 * Cycle through the data to send and available space in the peer's
	 * receive buffer.
Put a reference on the peer socket, so that it 1164 * doesn't get freed while we sbwait(). If peer goes away, we will 1165 * observe the SBS_CANTRCVMORE and our sorele() will finalize peer's 1166 * socket destruction. 1167 */ 1168 so2 = unp2->unp_socket; 1169 soref(so2); 1170 UNP_PCB_UNLOCK(unp2); 1171 sb = &so2->so_rcv; 1172 while (mc.mc_len + cmc.mc_len > 0) { 1173 struct mchain mcnext = MCHAIN_INITIALIZER(&mcnext); 1174 u_int space; 1175 1176 SOCK_RECVBUF_LOCK(so2); 1177 restart: 1178 UIPC_STREAM_SBCHECK(sb); 1179 if (__predict_false(cmc.mc_len > sb->sb_hiwat)) { 1180 SOCK_RECVBUF_UNLOCK(so2); 1181 error = EMSGSIZE; 1182 goto out4; 1183 } 1184 if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) { 1185 SOCK_RECVBUF_UNLOCK(so2); 1186 error = EPIPE; 1187 goto out4; 1188 } 1189 /* 1190 * Wait on the peer socket receive buffer until we have enough 1191 * space to put at least control. The data is a stream and can 1192 * be put partially, but control is really a datagram. 1193 */ 1194 space = uipc_stream_sbspace(sb); 1195 if (space < sb->sb_lowat || space < cmc.mc_len) { 1196 if (nonblock) { 1197 if (aio) 1198 sb->uxst_flags |= UXST_PEER_AIO; 1199 SOCK_RECVBUF_UNLOCK(so2); 1200 if (aio) { 1201 SOCK_SENDBUF_LOCK(so); 1202 so->so_snd.sb_ccc = 1203 so->so_snd.sb_hiwat - space; 1204 SOCK_SENDBUF_UNLOCK(so); 1205 } 1206 error = EWOULDBLOCK; 1207 goto out4; 1208 } 1209 if ((error = sbwait(so2, SO_RCV)) != 0) { 1210 SOCK_RECVBUF_UNLOCK(so2); 1211 goto out4; 1212 } else 1213 goto restart; 1214 } 1215 MPASS(space >= cmc.mc_len); 1216 space -= cmc.mc_len; 1217 if (space == 0) { 1218 /* There is space only to send control. */ 1219 MPASS(!STAILQ_EMPTY(&cmc.mc_q)); 1220 mcnext = mc; 1221 mc = MCHAIN_INITIALIZER(&mc); 1222 } else if (space < mc.mc_len) { 1223 /* Not enough space. */ 1224 if (__predict_false(mc_split(&mc, &mcnext, space, 1225 M_NOWAIT) == ENOMEM)) { 1226 /* 1227 * If allocation failed use M_WAITOK and merge 1228 * the chain back. Next time mc_split() will 1229 * easily split at the same place. Only if we 1230 * race with setsockopt(SO_RCVBUF) shrinking 1231 * sb_hiwat can this happen more than once. 1232 */ 1233 SOCK_RECVBUF_UNLOCK(so2); 1234 (void)mc_split(&mc, &mcnext, space, M_WAITOK); 1235 mc_concat(&mc, &mcnext); 1236 SOCK_RECVBUF_LOCK(so2); 1237 goto restart; 1238 } 1239 MPASS(mc.mc_len == space); 1240 } 1241 if (!STAILQ_EMPTY(&cmc.mc_q)) { 1242 STAILQ_CONCAT(&sb->uxst_mbq, &cmc.mc_q); 1243 sb->sb_ctl += cmc.mc_len; 1244 sb->sb_mbcnt += cmc.mc_mlen; 1245 cmc.mc_len = 0; 1246 } 1247 sent += mc.mc_len; 1248 if (sb->uxst_fnrdy == NULL) 1249 sb->sb_acc += mc.mc_len; 1250 sb->sb_ccc += mc.mc_len; 1251 sb->sb_mbcnt += mc.mc_mlen; 1252 STAILQ_CONCAT(&sb->uxst_mbq, &mc.mc_q); 1253 UIPC_STREAM_SBCHECK(sb); 1254 space = uipc_stream_sbspace(sb); 1255 sorwakeup_locked(so2); 1256 if (!STAILQ_EMPTY(&mcnext.mc_q)) { 1257 /* 1258 * Such assignment is unsafe in general, but it is 1259 * safe with !STAILQ_EMPTY(&mcnext.mc_q). In C++ we 1260 * could reload = for STAILQs :) 1261 */ 1262 mc = mcnext; 1263 } else if (uio != NULL && uio->uio_resid > 0) { 1264 /* 1265 * Copyin sum of peer's receive buffer space and our 1266 * sb_hiwat, which is our virtual send buffer size. 1267 * See comment above unpst_sendspace declaration. 
			 * We are reading sb_hiwat locklessly, because: a) we
			 * don't care about an application that does send(2)
			 * and setsockopt(2) racing internally, and b) an
			 * application that does this in sequence will see the
			 * correct value, since sbsetopt() uses the buffer lock
			 * and we have also already acquired it at least once.
			 */
			error = mc_uiotomc(&mc, uio, space +
			    atomic_load_int(&so->so_snd.sb_hiwat), 0, M_WAITOK,
			    eor ? M_EOR : 0);
			if (__predict_false(error))
				goto out4;
		} else
			mc = MCHAIN_INITIALIZER(&mc);
	}

	MPASS(STAILQ_EMPTY(&mc.mc_q));

	td->td_ru.ru_msgsnd++;
out4:
	sorele(so2);
out3:
	SOCK_IO_SEND_UNLOCK(so);
out2:
	if (aio) {
		freeuio(uio);
		uioadvance(uio0, sent);
	} else if (uio != NULL)
		uio->uio_resid = resid - sent;
	if (!mc_empty(&cmc))
		unp_scan(mc_first(&cmc), unp_freerights);
out:
	mc_freem(&mc);
	mc_freem(&cmc);

	return (error);
}

/*
 * Our version of sowakeup(), used by recv(2) and shutdown(2).
 *
 * @param so	Points to a connected stream socket with receive buffer locked
 *
 * In blocking mode the peer is sleeping on our receive buffer, and we just
 * need to wakeup(9) on it.  But to wake up various event engines, we need to
 * reach over to the peer's selinfo.  This can be done safely because the
 * socket buffer receive lock is protecting us from the peer going away.
 */
static void
uipc_wakeup(struct socket *so)
{
	struct sockbuf *sb = &so->so_rcv;
	struct selinfo *sel;

	SOCK_RECVBUF_LOCK_ASSERT(so);
	MPASS(sb->uxst_peer != NULL);

	sel = &sb->uxst_peer->so_wrsel;

	if (sb->uxst_flags & UXST_PEER_SEL) {
		selwakeuppri(sel, PSOCK);
		/*
		 * XXXGL: sowakeup() does SEL_WAITING() without locks.
		 */
		if (!SEL_WAITING(sel))
			sb->uxst_flags &= ~UXST_PEER_SEL;
	}
	if (sb->sb_flags & SB_WAIT) {
		sb->sb_flags &= ~SB_WAIT;
		wakeup(&sb->sb_acc);
	}
	KNOTE_LOCKED(&sel->si_note, 0);
	SOCK_RECVBUF_UNLOCK(so);
}

static void
uipc_cantrcvmore(struct socket *so)
{

	SOCK_RECVBUF_LOCK(so);
	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
	if (so->so_rcv.uxst_peer != NULL)
		uipc_wakeup(so);
	else
		SOCK_RECVBUF_UNLOCK(so);
}

static int
uipc_soreceive_stream_or_seqpacket(struct socket *so, struct sockaddr **psa,
    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
	struct sockbuf *sb = &so->so_rcv;
	struct mbuf *control, *m, *first, *last, *next;
	u_int ctl, space, datalen, mbcnt, lastlen;
	int error, flags;
	bool nonblock, waitall, peek;

	MPASS(mp0 == NULL);

	if (psa != NULL)
		*psa = NULL;
	if (controlp != NULL)
		*controlp = NULL;

	flags = flagsp != NULL ? *flagsp : 0;
	nonblock = (so->so_state & SS_NBIO) ||
	    (flags & (MSG_DONTWAIT | MSG_NBIO));
	peek = flags & MSG_PEEK;
	waitall = (flags & MSG_WAITALL) && !peek;

	/*
	 * This check may fail only on a socket that never went through
	 * connect(2).  We can check this locklessly, because: a) for a
	 * newborn socket we don't care about applications that may race
	 * internally between connect(2) and recv(2), and b) for a dying
	 * socket, if we miss an update by unp_soisdisconnected(), we would
	 * still get the check correct.
For dying socket we would observe SBS_CANTRCVMORE later. 1385 */ 1386 if (__predict_false((atomic_load_short(&so->so_state) & 1387 (SS_ISCONNECTED|SS_ISDISCONNECTED)) == 0)) 1388 return (ENOTCONN); 1389 1390 error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags)); 1391 if (__predict_false(error)) 1392 return (error); 1393 1394 restart: 1395 SOCK_RECVBUF_LOCK(so); 1396 UIPC_STREAM_SBCHECK(sb); 1397 while (sb->sb_acc < sb->sb_lowat && 1398 (sb->sb_ctl == 0 || controlp == NULL)) { 1399 if (so->so_error) { 1400 error = so->so_error; 1401 if (!peek) 1402 so->so_error = 0; 1403 SOCK_RECVBUF_UNLOCK(so); 1404 SOCK_IO_RECV_UNLOCK(so); 1405 return (error); 1406 } 1407 if (sb->sb_state & SBS_CANTRCVMORE) { 1408 SOCK_RECVBUF_UNLOCK(so); 1409 SOCK_IO_RECV_UNLOCK(so); 1410 return (0); 1411 } 1412 if (nonblock) { 1413 SOCK_RECVBUF_UNLOCK(so); 1414 SOCK_IO_RECV_UNLOCK(so); 1415 return (EWOULDBLOCK); 1416 } 1417 error = sbwait(so, SO_RCV); 1418 if (error) { 1419 SOCK_RECVBUF_UNLOCK(so); 1420 SOCK_IO_RECV_UNLOCK(so); 1421 return (error); 1422 } 1423 } 1424 1425 MPASS(STAILQ_FIRST(&sb->uxst_mbq)); 1426 MPASS(sb->sb_acc > 0 || sb->sb_ctl > 0); 1427 1428 mbcnt = 0; 1429 ctl = 0; 1430 first = STAILQ_FIRST(&sb->uxst_mbq); 1431 if (first->m_type == MT_CONTROL) { 1432 control = first; 1433 STAILQ_FOREACH_FROM(first, &sb->uxst_mbq, m_stailq) { 1434 if (first->m_type != MT_CONTROL) 1435 break; 1436 ctl += first->m_len; 1437 mbcnt += MSIZE; 1438 if (first->m_flags & M_EXT) 1439 mbcnt += first->m_ext.ext_size; 1440 } 1441 } else 1442 control = NULL; 1443 1444 /* 1445 * Find split point for the next copyout. On exit from the loop: 1446 * last == NULL - socket to be flushed 1447 * last != NULL 1448 * lastlen > last->m_len - uio to be filled, last to be adjusted 1449 * lastlen == 0 - MT_CONTROL, M_EOR or M_NOTREADY encountered 1450 */ 1451 space = uio->uio_resid; 1452 datalen = 0; 1453 for (m = first, last = sb->uxst_fnrdy, lastlen = 0; 1454 m != sb->uxst_fnrdy; 1455 m = STAILQ_NEXT(m, m_stailq)) { 1456 if (m->m_type != MT_DATA) { 1457 last = m; 1458 lastlen = 0; 1459 break; 1460 } 1461 if (space >= m->m_len) { 1462 space -= m->m_len; 1463 datalen += m->m_len; 1464 mbcnt += MSIZE; 1465 if (m->m_flags & M_EXT) 1466 mbcnt += m->m_ext.ext_size; 1467 if (m->m_flags & M_EOR) { 1468 last = STAILQ_NEXT(m, m_stailq); 1469 lastlen = 0; 1470 flags |= MSG_EOR; 1471 break; 1472 } 1473 } else { 1474 datalen += space; 1475 last = m; 1476 lastlen = space; 1477 break; 1478 } 1479 } 1480 1481 UIPC_STREAM_SBCHECK(sb); 1482 if (!peek) { 1483 if (last == NULL) 1484 STAILQ_INIT(&sb->uxst_mbq); 1485 else { 1486 STAILQ_FIRST(&sb->uxst_mbq) = last; 1487 MPASS(last->m_len > lastlen); 1488 last->m_len -= lastlen; 1489 last->m_data += lastlen; 1490 } 1491 MPASS(sb->sb_acc >= datalen); 1492 sb->sb_acc -= datalen; 1493 sb->sb_ccc -= datalen; 1494 MPASS(sb->sb_ctl >= ctl); 1495 sb->sb_ctl -= ctl; 1496 MPASS(sb->sb_mbcnt >= mbcnt); 1497 sb->sb_mbcnt -= mbcnt; 1498 UIPC_STREAM_SBCHECK(sb); 1499 if (__predict_true(sb->uxst_peer != NULL)) { 1500 struct unpcb *unp2; 1501 bool aio; 1502 1503 if ((aio = sb->uxst_flags & UXST_PEER_AIO)) 1504 sb->uxst_flags &= ~UXST_PEER_AIO; 1505 1506 uipc_wakeup(so); 1507 /* 1508 * XXXGL: need to go through uipc_lock_peer() after 1509 * the receive buffer lock dropped, it was protecting 1510 * us from unp_soisdisconnected(). The aio workarounds 1511 * should be refactored to the aio(4) side. 
1512 */ 1513 if (aio && uipc_lock_peer(so, &unp2) == 0) { 1514 struct socket *so2 = unp2->unp_socket; 1515 1516 SOCK_SENDBUF_LOCK(so2); 1517 so2->so_snd.sb_ccc -= datalen; 1518 sowakeup_aio(so2, SO_SND); 1519 SOCK_SENDBUF_UNLOCK(so2); 1520 UNP_PCB_UNLOCK(unp2); 1521 } 1522 } else 1523 SOCK_RECVBUF_UNLOCK(so); 1524 } else 1525 SOCK_RECVBUF_UNLOCK(so); 1526 1527 while (control != NULL && control->m_type == MT_CONTROL) { 1528 if (!peek) { 1529 /* 1530 * unp_externalize() failure must abort entire read(2). 1531 * Such failure should also free the problematic 1532 * control, but link back the remaining data to the head 1533 * of the buffer, so that socket is not left in a state 1534 * where it can't progress forward with reading. 1535 * Probability of such a failure is really low, so it 1536 * is fine that we need to perform pretty complex 1537 * operation here to reconstruct the buffer. 1538 */ 1539 error = unp_externalize(control, controlp, flags); 1540 control = m_free(control); 1541 if (__predict_false(error && control != NULL)) { 1542 struct mchain cmc; 1543 1544 mc_init_m(&cmc, control); 1545 1546 SOCK_RECVBUF_LOCK(so); 1547 MPASS(!(sb->sb_state & SBS_CANTRCVMORE)); 1548 1549 if (__predict_false(cmc.mc_len + sb->sb_ccc + 1550 sb->sb_ctl > sb->sb_hiwat)) { 1551 /* 1552 * Too bad, while unp_externalize() was 1553 * failing, the other side had filled 1554 * the buffer and we can't prepend data 1555 * back. Losing data! 1556 */ 1557 SOCK_RECVBUF_UNLOCK(so); 1558 SOCK_IO_RECV_UNLOCK(so); 1559 unp_scan(mc_first(&cmc), 1560 unp_freerights); 1561 mc_freem(&cmc); 1562 return (error); 1563 } 1564 1565 UIPC_STREAM_SBCHECK(sb); 1566 /* XXXGL: STAILQ_PREPEND */ 1567 STAILQ_CONCAT(&cmc.mc_q, &sb->uxst_mbq); 1568 STAILQ_SWAP(&cmc.mc_q, &sb->uxst_mbq, mbuf); 1569 1570 sb->sb_ctl = sb->sb_acc = sb->sb_ccc = 1571 sb->sb_mbcnt = 0; 1572 STAILQ_FOREACH(m, &sb->uxst_mbq, m_stailq) { 1573 if (m->m_type == MT_DATA) { 1574 sb->sb_acc += m->m_len; 1575 sb->sb_ccc += m->m_len; 1576 } else { 1577 sb->sb_ctl += m->m_len; 1578 } 1579 sb->sb_mbcnt += MSIZE; 1580 if (m->m_flags & M_EXT) 1581 sb->sb_mbcnt += 1582 m->m_ext.ext_size; 1583 } 1584 UIPC_STREAM_SBCHECK(sb); 1585 SOCK_RECVBUF_UNLOCK(so); 1586 SOCK_IO_RECV_UNLOCK(so); 1587 return (error); 1588 } 1589 if (controlp != NULL) { 1590 while (*controlp != NULL) 1591 controlp = &(*controlp)->m_next; 1592 } 1593 } else { 1594 /* 1595 * XXXGL 1596 * 1597 * In MSG_PEEK case control is not externalized. This 1598 * means we are leaking some kernel pointers to the 1599 * userland. They are useless to a law-abiding 1600 * application, but may be useful to a malware. This 1601 * is what the historical implementation in the 1602 * soreceive_generic() did. To be improved? 
1603 */ 1604 if (controlp != NULL) { 1605 *controlp = m_copym(control, 0, control->m_len, 1606 M_WAITOK); 1607 controlp = &(*controlp)->m_next; 1608 } 1609 control = STAILQ_NEXT(control, m_stailq); 1610 } 1611 } 1612 1613 for (m = first; m != last; m = next) { 1614 next = STAILQ_NEXT(m, m_stailq); 1615 error = uiomove(mtod(m, char *), m->m_len, uio); 1616 if (__predict_false(error)) { 1617 SOCK_IO_RECV_UNLOCK(so); 1618 if (!peek) 1619 for (; m != last; m = next) { 1620 next = STAILQ_NEXT(m, m_stailq); 1621 m_free(m); 1622 } 1623 return (error); 1624 } 1625 if (!peek) 1626 m_free(m); 1627 } 1628 if (last != NULL && lastlen > 0) { 1629 if (!peek) { 1630 MPASS(!(m->m_flags & M_PKTHDR)); 1631 MPASS(last->m_data - M_START(last) >= lastlen); 1632 error = uiomove(mtod(last, char *) - lastlen, 1633 lastlen, uio); 1634 } else 1635 error = uiomove(mtod(last, char *), lastlen, uio); 1636 if (__predict_false(error)) { 1637 SOCK_IO_RECV_UNLOCK(so); 1638 return (error); 1639 } 1640 } 1641 if (waitall && !(flags & MSG_EOR) && uio->uio_resid > 0) 1642 goto restart; 1643 SOCK_IO_RECV_UNLOCK(so); 1644 1645 if (flagsp != NULL) 1646 *flagsp |= flags; 1647 1648 uio->uio_td->td_ru.ru_msgrcv++; 1649 1650 return (0); 1651 } 1652 1653 static int 1654 uipc_sopoll_stream_or_seqpacket(struct socket *so, int events, 1655 struct thread *td) 1656 { 1657 struct unpcb *unp = sotounpcb(so); 1658 int revents; 1659 1660 UNP_PCB_LOCK(unp); 1661 if (SOLISTENING(so)) { 1662 /* The above check is safe, since conversion to listening uses 1663 * both protocol and socket lock. 1664 */ 1665 SOCK_LOCK(so); 1666 if (!(events & (POLLIN | POLLRDNORM))) 1667 revents = 0; 1668 else if (!TAILQ_EMPTY(&so->sol_comp)) 1669 revents = events & (POLLIN | POLLRDNORM); 1670 else if (so->so_error) 1671 revents = (events & (POLLIN | POLLRDNORM)) | POLLHUP; 1672 else { 1673 selrecord(td, &so->so_rdsel); 1674 revents = 0; 1675 } 1676 SOCK_UNLOCK(so); 1677 } else { 1678 if (so->so_state & SS_ISDISCONNECTED) 1679 revents = POLLHUP; 1680 else 1681 revents = 0; 1682 if (events & (POLLIN | POLLRDNORM | POLLRDHUP)) { 1683 SOCK_RECVBUF_LOCK(so); 1684 if (sbavail(&so->so_rcv) >= so->so_rcv.sb_lowat || 1685 so->so_error || so->so_rerror) 1686 revents |= events & (POLLIN | POLLRDNORM); 1687 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 1688 revents |= events & 1689 (POLLIN | POLLRDNORM | POLLRDHUP); 1690 if (!(revents & (POLLIN | POLLRDNORM | POLLRDHUP))) { 1691 selrecord(td, &so->so_rdsel); 1692 so->so_rcv.sb_flags |= SB_SEL; 1693 } 1694 SOCK_RECVBUF_UNLOCK(so); 1695 } 1696 if (events & (POLLOUT | POLLWRNORM)) { 1697 struct socket *so2 = so->so_rcv.uxst_peer; 1698 1699 if (so2 != NULL) { 1700 struct sockbuf *sb = &so2->so_rcv; 1701 1702 SOCK_RECVBUF_LOCK(so2); 1703 if (uipc_stream_sbspace(sb) >= sb->sb_lowat) 1704 revents |= events & 1705 (POLLOUT | POLLWRNORM); 1706 if (sb->sb_state & SBS_CANTRCVMORE) 1707 revents |= POLLHUP; 1708 if (!(revents & (POLLOUT | POLLWRNORM))) { 1709 so2->so_rcv.uxst_flags |= UXST_PEER_SEL; 1710 selrecord(td, &so->so_wrsel); 1711 } 1712 SOCK_RECVBUF_UNLOCK(so2); 1713 } else 1714 selrecord(td, &so->so_wrsel); 1715 } 1716 } 1717 UNP_PCB_UNLOCK(unp); 1718 return (revents); 1719 } 1720 1721 static void 1722 uipc_wrknl_lock(void *arg) 1723 { 1724 struct socket *so = arg; 1725 struct unpcb *unp = sotounpcb(so); 1726 1727 retry: 1728 if (SOLISTENING(so)) { 1729 SOLISTEN_LOCK(so); 1730 } else { 1731 UNP_PCB_LOCK(unp); 1732 if (__predict_false(SOLISTENING(so))) { 1733 UNP_PCB_UNLOCK(unp); 1734 goto retry; 1735 } 1736 if (so->so_rcv.uxst_peer 
!= NULL) 1737 SOCK_RECVBUF_LOCK(so->so_rcv.uxst_peer); 1738 } 1739 } 1740 1741 static void 1742 uipc_wrknl_unlock(void *arg) 1743 { 1744 struct socket *so = arg; 1745 struct unpcb *unp = sotounpcb(so); 1746 1747 if (SOLISTENING(so)) 1748 SOLISTEN_UNLOCK(so); 1749 else { 1750 if (so->so_rcv.uxst_peer != NULL) 1751 SOCK_RECVBUF_UNLOCK(so->so_rcv.uxst_peer); 1752 UNP_PCB_UNLOCK(unp); 1753 } 1754 } 1755 1756 static void 1757 uipc_wrknl_assert_lock(void *arg, int what) 1758 { 1759 struct socket *so = arg; 1760 1761 if (SOLISTENING(so)) { 1762 if (what == LA_LOCKED) 1763 SOLISTEN_LOCK_ASSERT(so); 1764 else 1765 SOLISTEN_UNLOCK_ASSERT(so); 1766 } else { 1767 /* 1768 * The pr_soreceive method will put a note without owning the 1769 * unp lock, so we can't assert it here. But we can safely 1770 * dereference uxst_peer pointer, since receive buffer lock 1771 * is assumed to be held here. 1772 */ 1773 if (what == LA_LOCKED && so->so_rcv.uxst_peer != NULL) 1774 SOCK_RECVBUF_LOCK_ASSERT(so->so_rcv.uxst_peer); 1775 } 1776 } 1777 1778 static void 1779 uipc_filt_sowdetach(struct knote *kn) 1780 { 1781 struct socket *so = kn->kn_fp->f_data; 1782 1783 uipc_wrknl_lock(so); 1784 knlist_remove(&so->so_wrsel.si_note, kn, 1); 1785 uipc_wrknl_unlock(so); 1786 } 1787 1788 static int 1789 uipc_filt_sowrite(struct knote *kn, long hint) 1790 { 1791 struct socket *so = kn->kn_fp->f_data, *so2; 1792 struct unpcb *unp = sotounpcb(so), *unp2 = unp->unp_conn; 1793 1794 if (SOLISTENING(so)) 1795 return (0); 1796 1797 if (unp2 == NULL) { 1798 if (so->so_state & SS_ISDISCONNECTED) { 1799 kn->kn_flags |= EV_EOF; 1800 kn->kn_fflags = so->so_error; 1801 return (1); 1802 } else 1803 return (0); 1804 } 1805 1806 so2 = unp2->unp_socket; 1807 SOCK_RECVBUF_LOCK_ASSERT(so2); 1808 kn->kn_data = uipc_stream_sbspace(&so2->so_rcv); 1809 1810 if (so2->so_rcv.sb_state & SBS_CANTRCVMORE) { 1811 /* 1812 * XXXGL: maybe kn->kn_flags |= EV_EOF ? 1813 */ 1814 return (1); 1815 } else if (kn->kn_sfflags & NOTE_LOWAT) 1816 return (kn->kn_data >= kn->kn_sdata); 1817 else 1818 return (kn->kn_data >= so2->so_rcv.sb_lowat); 1819 } 1820 1821 static int 1822 uipc_filt_soempty(struct knote *kn, long hint) 1823 { 1824 struct socket *so = kn->kn_fp->f_data, *so2; 1825 struct unpcb *unp = sotounpcb(so), *unp2 = unp->unp_conn; 1826 1827 if (SOLISTENING(so) || unp2 == NULL) 1828 return (1); 1829 1830 so2 = unp2->unp_socket; 1831 SOCK_RECVBUF_LOCK_ASSERT(so2); 1832 kn->kn_data = uipc_stream_sbspace(&so2->so_rcv); 1833 1834 return (kn->kn_data == 0 ? 
1 : 0); 1835 } 1836 1837 static const struct filterops uipc_write_filtops = { 1838 .f_isfd = 1, 1839 .f_detach = uipc_filt_sowdetach, 1840 .f_event = uipc_filt_sowrite, 1841 }; 1842 static const struct filterops uipc_empty_filtops = { 1843 .f_isfd = 1, 1844 .f_detach = uipc_filt_sowdetach, 1845 .f_event = uipc_filt_soempty, 1846 }; 1847 1848 static int 1849 uipc_kqfilter_stream_or_seqpacket(struct socket *so, struct knote *kn) 1850 { 1851 struct unpcb *unp = sotounpcb(so); 1852 struct knlist *knl; 1853 1854 switch (kn->kn_filter) { 1855 case EVFILT_READ: 1856 return (sokqfilter_generic(so, kn)); 1857 case EVFILT_WRITE: 1858 kn->kn_fop = &uipc_write_filtops; 1859 break; 1860 case EVFILT_EMPTY: 1861 kn->kn_fop = &uipc_empty_filtops; 1862 break; 1863 default: 1864 return (EINVAL); 1865 } 1866 1867 knl = &so->so_wrsel.si_note; 1868 UNP_PCB_LOCK(unp); 1869 if (SOLISTENING(so)) { 1870 SOLISTEN_LOCK(so); 1871 knlist_add(knl, kn, 1); 1872 SOLISTEN_UNLOCK(so); 1873 } else { 1874 struct socket *so2 = so->so_rcv.uxst_peer; 1875 1876 if (so2 != NULL) 1877 SOCK_RECVBUF_LOCK(so2); 1878 knlist_add(knl, kn, 1); 1879 if (so2 != NULL) 1880 SOCK_RECVBUF_UNLOCK(so2); 1881 } 1882 UNP_PCB_UNLOCK(unp); 1883 return (0); 1884 } 1885 1886 /* PF_UNIX/SOCK_DGRAM version of sbspace() */ 1887 static inline bool 1888 uipc_dgram_sbspace(struct sockbuf *sb, u_int cc, u_int mbcnt) 1889 { 1890 u_int bleft, mleft; 1891 1892 /* 1893 * Negative space may happen if send(2) is followed by 1894 * setsockopt(SO_SNDBUF/SO_RCVBUF) that shrinks maximum. 1895 */ 1896 if (__predict_false(sb->sb_hiwat < sb->uxdg_cc || 1897 sb->sb_mbmax < sb->uxdg_mbcnt)) 1898 return (false); 1899 1900 if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) 1901 return (false); 1902 1903 bleft = sb->sb_hiwat - sb->uxdg_cc; 1904 mleft = sb->sb_mbmax - sb->uxdg_mbcnt; 1905 1906 return (bleft >= cc && mleft >= mbcnt); 1907 } 1908 1909 /* 1910 * PF_UNIX/SOCK_DGRAM send 1911 * 1912 * Allocate a record consisting of 3 mbufs in the sequence of 1913 * from -> control -> data and append it to the socket buffer. 1914 * 1915 * The first mbuf carries sender's name and is a pkthdr that stores 1916 * overall length of datagram, its memory consumption and control length. 
1917 */ 1918 #define ctllen PH_loc.thirtytwo[1] 1919 _Static_assert(offsetof(struct pkthdr, memlen) + sizeof(u_int) <= 1920 offsetof(struct pkthdr, ctllen), "unix/dgram can not store ctllen"); 1921 static int 1922 uipc_sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio, 1923 struct mbuf *m, struct mbuf *c, int flags, struct thread *td) 1924 { 1925 struct unpcb *unp, *unp2; 1926 const struct sockaddr *from; 1927 struct socket *so2; 1928 struct sockbuf *sb; 1929 struct mchain cmc = MCHAIN_INITIALIZER(&cmc); 1930 struct mbuf *f; 1931 u_int cc, ctl, mbcnt; 1932 u_int dcc __diagused, dctl __diagused, dmbcnt __diagused; 1933 int error; 1934 1935 MPASS((uio != NULL && m == NULL) || (m != NULL && uio == NULL)); 1936 1937 error = 0; 1938 f = NULL; 1939 1940 if (__predict_false(flags & MSG_OOB)) { 1941 error = EOPNOTSUPP; 1942 goto out; 1943 } 1944 if (m == NULL) { 1945 if (__predict_false(uio->uio_resid > unpdg_maxdgram)) { 1946 error = EMSGSIZE; 1947 goto out; 1948 } 1949 m = m_uiotombuf(uio, M_WAITOK, 0, max_hdr, M_PKTHDR); 1950 if (__predict_false(m == NULL)) { 1951 error = EFAULT; 1952 goto out; 1953 } 1954 f = m_gethdr(M_WAITOK, MT_SONAME); 1955 cc = m->m_pkthdr.len; 1956 mbcnt = MSIZE + m->m_pkthdr.memlen; 1957 if (c != NULL && (error = unp_internalize(c, &cmc, td))) 1958 goto out; 1959 } else { 1960 struct mchain mc; 1961 1962 uipc_reset_kernel_mbuf(m, &mc); 1963 cc = mc.mc_len; 1964 mbcnt = mc.mc_mlen; 1965 if (__predict_false(m->m_pkthdr.len > unpdg_maxdgram)) { 1966 error = EMSGSIZE; 1967 goto out; 1968 } 1969 if ((f = m_gethdr(M_NOWAIT, MT_SONAME)) == NULL) { 1970 error = ENOBUFS; 1971 goto out; 1972 } 1973 } 1974 1975 unp = sotounpcb(so); 1976 MPASS(unp); 1977 1978 /* 1979 * XXXGL: would be cool to fully remove so_snd out of the equation 1980 * and avoid this lock, which is not only extraneous, but also being 1981 * released, thus still leaving possibility for a race. We can easily 1982 * handle SBS_CANTSENDMORE/SS_ISCONNECTED complement in unpcb, but it 1983 * is more difficult to invent something to handle so_error. 1984 */ 1985 error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags)); 1986 if (error) 1987 goto out2; 1988 SOCK_SENDBUF_LOCK(so); 1989 if (so->so_snd.sb_state & SBS_CANTSENDMORE) { 1990 SOCK_SENDBUF_UNLOCK(so); 1991 error = EPIPE; 1992 goto out3; 1993 } 1994 if (so->so_error != 0) { 1995 error = so->so_error; 1996 so->so_error = 0; 1997 SOCK_SENDBUF_UNLOCK(so); 1998 goto out3; 1999 } 2000 if (((so->so_state & SS_ISCONNECTED) == 0) && addr == NULL) { 2001 SOCK_SENDBUF_UNLOCK(so); 2002 error = EDESTADDRREQ; 2003 goto out3; 2004 } 2005 SOCK_SENDBUF_UNLOCK(so); 2006 2007 if (addr != NULL) { 2008 if ((error = unp_connectat(AT_FDCWD, so, addr, td, true))) 2009 goto out3; 2010 UNP_PCB_LOCK_ASSERT(unp); 2011 unp2 = unp->unp_conn; 2012 UNP_PCB_LOCK_ASSERT(unp2); 2013 } else { 2014 UNP_PCB_LOCK(unp); 2015 unp2 = unp_pcb_lock_peer(unp); 2016 if (unp2 == NULL) { 2017 UNP_PCB_UNLOCK(unp); 2018 error = ENOTCONN; 2019 goto out3; 2020 } 2021 } 2022 2023 if (unp2->unp_flags & UNP_WANTCRED_MASK) 2024 unp_addsockcred(td, &cmc, unp2->unp_flags); 2025 if (unp->unp_addr != NULL) 2026 from = (struct sockaddr *)unp->unp_addr; 2027 else 2028 from = &sun_noname; 2029 f->m_len = from->sa_len; 2030 MPASS(from->sa_len <= MLEN); 2031 bcopy(from, mtod(f, void *), from->sa_len); 2032 2033 /* 2034 * Concatenate mbufs: from -> control -> data. 2035 * Save overall cc and mbcnt in "from" mbuf. 
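	 *
	 * The pkthdr fields of "from" (len, memlen and the ctllen alias
	 * defined above) are what the receive path later uses to adjust the
	 * socket buffer counters without walking the whole chain.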
2036 */ 2037 if (!STAILQ_EMPTY(&cmc.mc_q)) { 2038 f->m_next = mc_first(&cmc); 2039 mc_last(&cmc)->m_next = m; 2040 /* XXXGL: This is dirty as well as rollback after ENOBUFS. */ 2041 STAILQ_INIT(&cmc.mc_q); 2042 } else 2043 f->m_next = m; 2044 m = NULL; 2045 ctl = f->m_len + cmc.mc_len; 2046 mbcnt += cmc.mc_mlen; 2047 #ifdef INVARIANTS 2048 dcc = dctl = dmbcnt = 0; 2049 for (struct mbuf *mb = f; mb != NULL; mb = mb->m_next) { 2050 if (mb->m_type == MT_DATA) 2051 dcc += mb->m_len; 2052 else 2053 dctl += mb->m_len; 2054 dmbcnt += MSIZE; 2055 if (mb->m_flags & M_EXT) 2056 dmbcnt += mb->m_ext.ext_size; 2057 } 2058 MPASS(dcc == cc); 2059 MPASS(dctl == ctl); 2060 MPASS(dmbcnt == mbcnt); 2061 #endif 2062 f->m_pkthdr.len = cc + ctl; 2063 f->m_pkthdr.memlen = mbcnt; 2064 f->m_pkthdr.ctllen = ctl; 2065 2066 /* 2067 * Destination socket buffer selection. 2068 * 2069 * Unconnected sends, when !(so->so_state & SS_ISCONNECTED) and the 2070 * destination address is supplied, create a temporary connection for 2071 * the run time of the function (see call to unp_connectat() above and 2072 * to unp_disconnect() below). We distinguish them by condition of 2073 * (addr != NULL). We intentionally avoid adding 'bool connected' for 2074 * that condition, since, again, through the run time of this code we 2075 * are always connected. For such "unconnected" sends, the destination 2076 * buffer would be the receive buffer of destination socket so2. 2077 * 2078 * For connected sends, data lands on the send buffer of the sender's 2079 * socket "so". Then, if we just added the very first datagram 2080 * on this send buffer, we need to add the send buffer on to the 2081 * receiving socket's buffer list. We put ourselves on top of the 2082 * list. Such logic gives infrequent senders priority over frequent 2083 * senders. 2084 * 2085 * Note on byte count management. As long as event methods kevent(2), 2086 * select(2) are not protocol specific (yet), we need to maintain 2087 * meaningful values on the receive buffer. So, the receive buffer 2088 * would accumulate counters from all connected buffers potentially 2089 * having sb_ccc > sb_hiwat or sb_mbcnt > sb_mbmax. 2090 */ 2091 so2 = unp2->unp_socket; 2092 sb = (addr == NULL) ? &so->so_snd : &so2->so_rcv; 2093 SOCK_RECVBUF_LOCK(so2); 2094 if (uipc_dgram_sbspace(sb, cc + ctl, mbcnt)) { 2095 if (addr == NULL && STAILQ_EMPTY(&sb->uxdg_mb)) 2096 TAILQ_INSERT_HEAD(&so2->so_rcv.uxdg_conns, &so->so_snd, 2097 uxdg_clist); 2098 STAILQ_INSERT_TAIL(&sb->uxdg_mb, f, m_stailqpkt); 2099 sb->uxdg_cc += cc + ctl; 2100 sb->uxdg_ctl += ctl; 2101 sb->uxdg_mbcnt += mbcnt; 2102 so2->so_rcv.sb_acc += cc + ctl; 2103 so2->so_rcv.sb_ccc += cc + ctl; 2104 so2->so_rcv.sb_ctl += ctl; 2105 so2->so_rcv.sb_mbcnt += mbcnt; 2106 sorwakeup_locked(so2); 2107 f = NULL; 2108 } else { 2109 soroverflow_locked(so2); 2110 error = ENOBUFS; 2111 if (f->m_next->m_type == MT_CONTROL) { 2112 STAILQ_FIRST(&cmc.mc_q) = f->m_next; 2113 f->m_next = NULL; 2114 } 2115 } 2116 2117 if (addr != NULL) 2118 unp_disconnect(unp, unp2); 2119 else 2120 unp_pcb_unlock_pair(unp, unp2); 2121 2122 td->td_ru.ru_msgsnd++; 2123 2124 out3: 2125 SOCK_IO_SEND_UNLOCK(so); 2126 out2: 2127 if (!mc_empty(&cmc)) 2128 unp_scan(mc_first(&cmc), unp_freerights); 2129 out: 2130 if (f) 2131 m_freem(f); 2132 mc_freem(&cmc); 2133 if (m) 2134 m_freem(m); 2135 2136 return (error); 2137 } 2138 2139 /* 2140 * PF_UNIX/SOCK_DGRAM receive with MSG_PEEK. 
2141 * The mbuf has already been unlinked from the uxdg_mb of socket buffer 2142 * and needs to be linked onto uxdg_peeked of receive socket buffer. 2143 */ 2144 static int 2145 uipc_peek_dgram(struct socket *so, struct mbuf *m, struct sockaddr **psa, 2146 struct uio *uio, struct mbuf **controlp, int *flagsp) 2147 { 2148 ssize_t len = 0; 2149 int error; 2150 2151 so->so_rcv.uxdg_peeked = m; 2152 so->so_rcv.uxdg_cc += m->m_pkthdr.len; 2153 so->so_rcv.uxdg_ctl += m->m_pkthdr.ctllen; 2154 so->so_rcv.uxdg_mbcnt += m->m_pkthdr.memlen; 2155 SOCK_RECVBUF_UNLOCK(so); 2156 2157 KASSERT(m->m_type == MT_SONAME, ("m->m_type == %d", m->m_type)); 2158 if (psa != NULL) 2159 *psa = sodupsockaddr(mtod(m, struct sockaddr *), M_WAITOK); 2160 2161 m = m->m_next; 2162 KASSERT(m, ("%s: no data or control after soname", __func__)); 2163 2164 /* 2165 * With MSG_PEEK the control isn't executed, just copied. 2166 */ 2167 while (m != NULL && m->m_type == MT_CONTROL) { 2168 if (controlp != NULL) { 2169 *controlp = m_copym(m, 0, m->m_len, M_WAITOK); 2170 controlp = &(*controlp)->m_next; 2171 } 2172 m = m->m_next; 2173 } 2174 KASSERT(m == NULL || m->m_type == MT_DATA, 2175 ("%s: not MT_DATA mbuf %p", __func__, m)); 2176 while (m != NULL && uio->uio_resid > 0) { 2177 len = uio->uio_resid; 2178 if (len > m->m_len) 2179 len = m->m_len; 2180 error = uiomove(mtod(m, char *), (int)len, uio); 2181 if (error) { 2182 SOCK_IO_RECV_UNLOCK(so); 2183 return (error); 2184 } 2185 if (len == m->m_len) 2186 m = m->m_next; 2187 } 2188 SOCK_IO_RECV_UNLOCK(so); 2189 2190 if (flagsp != NULL) { 2191 if (m != NULL) { 2192 if (*flagsp & MSG_TRUNC) { 2193 /* Report real length of the packet */ 2194 uio->uio_resid -= m_length(m, NULL) - len; 2195 } 2196 *flagsp |= MSG_TRUNC; 2197 } else 2198 *flagsp &= ~MSG_TRUNC; 2199 } 2200 2201 return (0); 2202 } 2203 2204 /* 2205 * PF_UNIX/SOCK_DGRAM receive 2206 */ 2207 static int 2208 uipc_soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio, 2209 struct mbuf **mp0, struct mbuf **controlp, int *flagsp) 2210 { 2211 struct sockbuf *sb = NULL; 2212 struct mbuf *m; 2213 int flags, error; 2214 ssize_t len = 0; 2215 bool nonblock; 2216 2217 MPASS(mp0 == NULL); 2218 2219 if (psa != NULL) 2220 *psa = NULL; 2221 if (controlp != NULL) 2222 *controlp = NULL; 2223 2224 flags = flagsp != NULL ? *flagsp : 0; 2225 nonblock = (so->so_state & SS_NBIO) || 2226 (flags & (MSG_DONTWAIT | MSG_NBIO)); 2227 2228 error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags)); 2229 if (__predict_false(error)) 2230 return (error); 2231 2232 /* 2233 * Loop blocking while waiting for a datagram. Prioritize connected 2234 * peers over unconnected sends. Set sb to selected socket buffer 2235 * containing an mbuf on exit from the wait loop. A datagram that 2236 * had already been peeked at has top priority. 
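	 *
	 * The loop condition below therefore checks, in order: a previously
	 * peeked datagram (uxdg_peeked), then send buffers of connected peers
	 * linked on uxdg_conns, then datagrams from unconnected sends queued
	 * directly on uxdg_mb.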
2237 */ 2238 SOCK_RECVBUF_LOCK(so); 2239 while ((m = so->so_rcv.uxdg_peeked) == NULL && 2240 (sb = TAILQ_FIRST(&so->so_rcv.uxdg_conns)) == NULL && 2241 (m = STAILQ_FIRST(&so->so_rcv.uxdg_mb)) == NULL) { 2242 if (so->so_error) { 2243 error = so->so_error; 2244 if (!(flags & MSG_PEEK)) 2245 so->so_error = 0; 2246 SOCK_RECVBUF_UNLOCK(so); 2247 SOCK_IO_RECV_UNLOCK(so); 2248 return (error); 2249 } 2250 if (so->so_rcv.sb_state & SBS_CANTRCVMORE || 2251 uio->uio_resid == 0) { 2252 SOCK_RECVBUF_UNLOCK(so); 2253 SOCK_IO_RECV_UNLOCK(so); 2254 return (0); 2255 } 2256 if (nonblock) { 2257 SOCK_RECVBUF_UNLOCK(so); 2258 SOCK_IO_RECV_UNLOCK(so); 2259 return (EWOULDBLOCK); 2260 } 2261 error = sbwait(so, SO_RCV); 2262 if (error) { 2263 SOCK_RECVBUF_UNLOCK(so); 2264 SOCK_IO_RECV_UNLOCK(so); 2265 return (error); 2266 } 2267 } 2268 2269 if (sb == NULL) 2270 sb = &so->so_rcv; 2271 else if (m == NULL) 2272 m = STAILQ_FIRST(&sb->uxdg_mb); 2273 else 2274 MPASS(m == so->so_rcv.uxdg_peeked); 2275 2276 MPASS(sb->uxdg_cc > 0); 2277 M_ASSERTPKTHDR(m); 2278 KASSERT(m->m_type == MT_SONAME, ("m->m_type == %d", m->m_type)); 2279 2280 if (uio->uio_td) 2281 uio->uio_td->td_ru.ru_msgrcv++; 2282 2283 if (__predict_true(m != so->so_rcv.uxdg_peeked)) { 2284 STAILQ_REMOVE_HEAD(&sb->uxdg_mb, m_stailqpkt); 2285 if (STAILQ_EMPTY(&sb->uxdg_mb) && sb != &so->so_rcv) 2286 TAILQ_REMOVE(&so->so_rcv.uxdg_conns, sb, uxdg_clist); 2287 } else 2288 so->so_rcv.uxdg_peeked = NULL; 2289 2290 sb->uxdg_cc -= m->m_pkthdr.len; 2291 sb->uxdg_ctl -= m->m_pkthdr.ctllen; 2292 sb->uxdg_mbcnt -= m->m_pkthdr.memlen; 2293 2294 if (__predict_false(flags & MSG_PEEK)) 2295 return (uipc_peek_dgram(so, m, psa, uio, controlp, flagsp)); 2296 2297 so->so_rcv.sb_acc -= m->m_pkthdr.len; 2298 so->so_rcv.sb_ccc -= m->m_pkthdr.len; 2299 so->so_rcv.sb_ctl -= m->m_pkthdr.ctllen; 2300 so->so_rcv.sb_mbcnt -= m->m_pkthdr.memlen; 2301 SOCK_RECVBUF_UNLOCK(so); 2302 2303 if (psa != NULL) 2304 *psa = sodupsockaddr(mtod(m, struct sockaddr *), M_WAITOK); 2305 m = m_free(m); 2306 KASSERT(m, ("%s: no data or control after soname", __func__)); 2307 2308 /* 2309 * Packet to copyout() is now in 'm' and it is disconnected from the 2310 * queue. 2311 * 2312 * Process one or more MT_CONTROL mbufs present before any data mbufs 2313 * in the first mbuf chain on the socket buffer. We call into the 2314 * unp_externalize() to perform externalization (or freeing if 2315 * controlp == NULL). In some cases there can be only MT_CONTROL mbufs 2316 * without MT_DATA mbufs. 
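	 *
	 * Illustrative userland counterpart (a sketch, not part of this file;
	 * 'fds' and 'nfds' are hypothetical): the externalized descriptors
	 * reach the application as an SCM_RIGHTS control message, e.g.
	 *
	 *	recvmsg(s, &msg, 0);
	 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
	 *	    cmsg = CMSG_NXTHDR(&msg, cmsg))
	 *		if (cmsg->cmsg_level == SOL_SOCKET &&
	 *		    cmsg->cmsg_type == SCM_RIGHTS)
	 *			memcpy(fds, CMSG_DATA(cmsg),
	 *			    nfds * sizeof(int));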
2317 */ 2318 while (m != NULL && m->m_type == MT_CONTROL) { 2319 error = unp_externalize(m, controlp, flags); 2320 m = m_free(m); 2321 if (error != 0) { 2322 SOCK_IO_RECV_UNLOCK(so); 2323 unp_scan(m, unp_freerights); 2324 m_freem(m); 2325 return (error); 2326 } 2327 if (controlp != NULL) { 2328 while (*controlp != NULL) 2329 controlp = &(*controlp)->m_next; 2330 } 2331 } 2332 KASSERT(m == NULL || m->m_type == MT_DATA, 2333 ("%s: not MT_DATA mbuf %p", __func__, m)); 2334 while (m != NULL && uio->uio_resid > 0) { 2335 len = uio->uio_resid; 2336 if (len > m->m_len) 2337 len = m->m_len; 2338 error = uiomove(mtod(m, char *), (int)len, uio); 2339 if (error) { 2340 SOCK_IO_RECV_UNLOCK(so); 2341 m_freem(m); 2342 return (error); 2343 } 2344 if (len == m->m_len) 2345 m = m_free(m); 2346 else { 2347 m->m_data += len; 2348 m->m_len -= len; 2349 } 2350 } 2351 SOCK_IO_RECV_UNLOCK(so); 2352 2353 if (m != NULL) { 2354 if (flagsp != NULL) { 2355 if (flags & MSG_TRUNC) { 2356 /* Report real length of the packet */ 2357 uio->uio_resid -= m_length(m, NULL); 2358 } 2359 *flagsp |= MSG_TRUNC; 2360 } 2361 m_freem(m); 2362 } else if (flagsp != NULL) 2363 *flagsp &= ~MSG_TRUNC; 2364 2365 return (0); 2366 } 2367 2368 static int 2369 uipc_sendfile_wait(struct socket *so, off_t need, int *space) 2370 { 2371 struct unpcb *unp2; 2372 struct socket *so2; 2373 struct sockbuf *sb; 2374 bool nonblock, sockref; 2375 int error; 2376 2377 MPASS(so->so_type == SOCK_STREAM); 2378 MPASS(need > 0); 2379 MPASS(space != NULL); 2380 2381 nonblock = so->so_state & SS_NBIO; 2382 sockref = false; 2383 2384 if (__predict_false((so->so_state & SS_ISCONNECTED) == 0)) 2385 return (ENOTCONN); 2386 2387 if (__predict_false((error = uipc_lock_peer(so, &unp2)) != 0)) 2388 return (error); 2389 2390 so2 = unp2->unp_socket; 2391 sb = &so2->so_rcv; 2392 SOCK_RECVBUF_LOCK(so2); 2393 UNP_PCB_UNLOCK(unp2); 2394 while ((*space = uipc_stream_sbspace(sb)) < need && 2395 (*space < so->so_snd.sb_hiwat / 2)) { 2396 UIPC_STREAM_SBCHECK(sb); 2397 if (nonblock) { 2398 SOCK_RECVBUF_UNLOCK(so2); 2399 return (EAGAIN); 2400 } 2401 if (!sockref) 2402 soref(so2); 2403 error = sbwait(so2, SO_RCV); 2404 if (error == 0 && 2405 __predict_false(sb->sb_state & SBS_CANTRCVMORE)) 2406 error = EPIPE; 2407 if (error) { 2408 SOCK_RECVBUF_UNLOCK(so2); 2409 sorele(so2); 2410 return (error); 2411 } 2412 } 2413 UIPC_STREAM_SBCHECK(sb); 2414 SOCK_RECVBUF_UNLOCK(so2); 2415 if (sockref) 2416 sorele(so2); 2417 2418 return (0); 2419 } 2420 2421 /* 2422 * Although this is a pr_send method, for unix(4) it is called only via 2423 * sendfile(2) path. This means we can be sure that mbufs are clear of 2424 * any extra flags and don't require any conditioning. 
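 *
 * Illustrative userland entry point (a sketch; the descriptors and sizes are
 * hypothetical):
 *
 *	sendfile(filefd, unixstreamfd, 0, nbytes, NULL, NULL, 0);
 *
 * Mbufs carrying file pages may arrive here not yet ready (PRUS_NOTREADY)
 * and are completed later through uipc_ready().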
2425 */ 2426 static int 2427 uipc_sendfile(struct socket *so, int flags, struct mbuf *m, 2428 struct sockaddr *from, struct mbuf *control, struct thread *td) 2429 { 2430 struct mchain mc; 2431 struct unpcb *unp2; 2432 struct socket *so2; 2433 struct sockbuf *sb; 2434 bool notready, wakeup; 2435 int error; 2436 2437 MPASS(so->so_type == SOCK_STREAM); 2438 MPASS(from == NULL && control == NULL); 2439 KASSERT(!(m->m_flags & M_EXTPG), 2440 ("unix(4): TLS sendfile(2) not supported")); 2441 2442 notready = flags & PRUS_NOTREADY; 2443 2444 if (__predict_false((so->so_state & SS_ISCONNECTED) == 0)) { 2445 error = ENOTCONN; 2446 goto out; 2447 } 2448 2449 if (__predict_false((error = uipc_lock_peer(so, &unp2)) != 0)) 2450 goto out; 2451 2452 mc_init_m(&mc, m); 2453 2454 so2 = unp2->unp_socket; 2455 sb = &so2->so_rcv; 2456 SOCK_RECVBUF_LOCK(so2); 2457 UNP_PCB_UNLOCK(unp2); 2458 UIPC_STREAM_SBCHECK(sb); 2459 sb->sb_ccc += mc.mc_len; 2460 sb->sb_mbcnt += mc.mc_mlen; 2461 if (sb->uxst_fnrdy == NULL) { 2462 if (notready) { 2463 wakeup = false; 2464 STAILQ_FOREACH(m, &mc.mc_q, m_stailq) { 2465 if (m->m_flags & M_NOTREADY) { 2466 sb->uxst_fnrdy = m; 2467 break; 2468 } else { 2469 sb->sb_acc += m->m_len; 2470 wakeup = true; 2471 } 2472 } 2473 } else { 2474 wakeup = true; 2475 sb->sb_acc += mc.mc_len; 2476 } 2477 } else { 2478 wakeup = false; 2479 } 2480 STAILQ_CONCAT(&sb->uxst_mbq, &mc.mc_q); 2481 UIPC_STREAM_SBCHECK(sb); 2482 if (wakeup) 2483 sorwakeup_locked(so2); 2484 else 2485 SOCK_RECVBUF_UNLOCK(so2); 2486 2487 return (0); 2488 out: 2489 /* 2490 * In case of not ready data, uipc_ready() is responsible 2491 * for freeing memory. 2492 */ 2493 if (m != NULL && !notready) 2494 m_freem(m); 2495 2496 return (error); 2497 } 2498 2499 static int 2500 uipc_sbready(struct sockbuf *sb, struct mbuf *m, int count) 2501 { 2502 bool blocker; 2503 2504 /* assert locked */ 2505 2506 blocker = (sb->uxst_fnrdy == m); 2507 STAILQ_FOREACH_FROM(m, &sb->uxst_mbq, m_stailq) { 2508 if (count > 0) { 2509 MPASS(m->m_flags & M_NOTREADY); 2510 m->m_flags &= ~M_NOTREADY; 2511 if (blocker) 2512 sb->sb_acc += m->m_len; 2513 count--; 2514 } else if (m->m_flags & M_NOTREADY) 2515 break; 2516 else if (blocker) 2517 sb->sb_acc += m->m_len; 2518 } 2519 if (blocker) { 2520 sb->uxst_fnrdy = m; 2521 return (0); 2522 } else 2523 return (EINPROGRESS); 2524 } 2525 2526 static bool 2527 uipc_ready_scan(struct socket *so, struct mbuf *m, int count, int *errorp) 2528 { 2529 struct mbuf *mb; 2530 struct sockbuf *sb; 2531 2532 SOCK_LOCK(so); 2533 if (SOLISTENING(so)) { 2534 SOCK_UNLOCK(so); 2535 return (false); 2536 } 2537 mb = NULL; 2538 sb = &so->so_rcv; 2539 SOCK_RECVBUF_LOCK(so); 2540 if (sb->uxst_fnrdy != NULL) { 2541 STAILQ_FOREACH(mb, &sb->uxst_mbq, m_stailq) { 2542 if (mb == m) { 2543 *errorp = uipc_sbready(sb, m, count); 2544 break; 2545 } 2546 } 2547 } 2548 SOCK_RECVBUF_UNLOCK(so); 2549 SOCK_UNLOCK(so); 2550 return (mb != NULL); 2551 } 2552 2553 static int 2554 uipc_ready(struct socket *so, struct mbuf *m, int count) 2555 { 2556 struct unpcb *unp, *unp2; 2557 int error; 2558 2559 MPASS(so->so_type == SOCK_STREAM); 2560 2561 if (__predict_true(uipc_lock_peer(so, &unp2) == 0)) { 2562 struct socket *so2; 2563 struct sockbuf *sb; 2564 2565 so2 = unp2->unp_socket; 2566 sb = &so2->so_rcv; 2567 SOCK_RECVBUF_LOCK(so2); 2568 UNP_PCB_UNLOCK(unp2); 2569 UIPC_STREAM_SBCHECK(sb); 2570 error = uipc_sbready(sb, m, count); 2571 UIPC_STREAM_SBCHECK(sb); 2572 if (error == 0) 2573 sorwakeup_locked(so2); 2574 else 2575 SOCK_RECVBUF_UNLOCK(so2); 2576 } else { 
2577 /* 2578 * The receiving socket has been disconnected, but may still 2579 * be valid. In this case, the not-ready mbufs are still 2580 * present in its socket buffer, so perform an exhaustive 2581 * search before giving up and freeing the mbufs. 2582 */ 2583 UNP_LINK_RLOCK(); 2584 LIST_FOREACH(unp, &unp_shead, unp_link) { 2585 if (uipc_ready_scan(unp->unp_socket, m, count, &error)) 2586 break; 2587 } 2588 UNP_LINK_RUNLOCK(); 2589 2590 if (unp == NULL) { 2591 for (int i = 0; i < count; i++) 2592 m = m_free(m); 2593 return (ECONNRESET); 2594 } 2595 } 2596 return (error); 2597 } 2598 2599 static int 2600 uipc_sense(struct socket *so, struct stat *sb) 2601 { 2602 struct unpcb *unp; 2603 2604 unp = sotounpcb(so); 2605 KASSERT(unp != NULL, ("uipc_sense: unp == NULL")); 2606 2607 sb->st_blksize = so->so_snd.sb_hiwat; 2608 sb->st_dev = NODEV; 2609 sb->st_ino = unp->unp_ino; 2610 return (0); 2611 } 2612 2613 static int 2614 uipc_shutdown(struct socket *so, enum shutdown_how how) 2615 { 2616 struct unpcb *unp = sotounpcb(so); 2617 int error; 2618 2619 SOCK_LOCK(so); 2620 if (SOLISTENING(so)) { 2621 if (how != SHUT_WR) { 2622 so->so_error = ECONNABORTED; 2623 solisten_wakeup(so); /* unlocks so */ 2624 } else 2625 SOCK_UNLOCK(so); 2626 return (ENOTCONN); 2627 } else if ((so->so_state & 2628 (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) { 2629 /* 2630 * POSIX mandates us to just return ENOTCONN when shutdown(2) is 2631 * invoked on a datagram sockets, however historically we would 2632 * actually tear socket down. This is known to be leveraged by 2633 * some applications to unblock process waiting in recv(2) by 2634 * other process that it shares that socket with. Try to meet 2635 * both backward-compatibility and POSIX requirements by forcing 2636 * ENOTCONN but still flushing buffers and performing wakeup(9). 2637 * 2638 * XXXGL: it remains unknown what applications expect this 2639 * behavior and is this isolated to unix/dgram or inet/dgram or 2640 * both. See: D10351, D3039. 
2641 */ 2642 error = ENOTCONN; 2643 if (so->so_type != SOCK_DGRAM) { 2644 SOCK_UNLOCK(so); 2645 return (error); 2646 } 2647 } else 2648 error = 0; 2649 SOCK_UNLOCK(so); 2650 2651 switch (how) { 2652 case SHUT_RD: 2653 if (so->so_type == SOCK_DGRAM) 2654 socantrcvmore(so); 2655 else 2656 uipc_cantrcvmore(so); 2657 unp_dispose(so); 2658 break; 2659 case SHUT_RDWR: 2660 if (so->so_type == SOCK_DGRAM) 2661 socantrcvmore(so); 2662 else 2663 uipc_cantrcvmore(so); 2664 unp_dispose(so); 2665 /* FALLTHROUGH */ 2666 case SHUT_WR: 2667 if (so->so_type == SOCK_DGRAM) { 2668 socantsendmore(so); 2669 } else { 2670 UNP_PCB_LOCK(unp); 2671 if (unp->unp_conn != NULL) 2672 uipc_cantrcvmore(unp->unp_conn->unp_socket); 2673 UNP_PCB_UNLOCK(unp); 2674 } 2675 } 2676 wakeup(&so->so_timeo); 2677 2678 return (error); 2679 } 2680 2681 static int 2682 uipc_sockaddr(struct socket *so, struct sockaddr *ret) 2683 { 2684 struct unpcb *unp; 2685 const struct sockaddr *sa; 2686 2687 unp = sotounpcb(so); 2688 KASSERT(unp != NULL, ("uipc_sockaddr: unp == NULL")); 2689 2690 UNP_PCB_LOCK(unp); 2691 if (unp->unp_addr != NULL) 2692 sa = (struct sockaddr *) unp->unp_addr; 2693 else 2694 sa = &sun_noname; 2695 bcopy(sa, ret, sa->sa_len); 2696 UNP_PCB_UNLOCK(unp); 2697 return (0); 2698 } 2699 2700 static int 2701 uipc_ctloutput(struct socket *so, struct sockopt *sopt) 2702 { 2703 struct unpcb *unp; 2704 struct xucred xu; 2705 int error, optval; 2706 2707 if (sopt->sopt_level != SOL_LOCAL) 2708 return (EINVAL); 2709 2710 unp = sotounpcb(so); 2711 KASSERT(unp != NULL, ("uipc_ctloutput: unp == NULL")); 2712 error = 0; 2713 switch (sopt->sopt_dir) { 2714 case SOPT_GET: 2715 switch (sopt->sopt_name) { 2716 case LOCAL_PEERCRED: 2717 UNP_PCB_LOCK(unp); 2718 if (unp->unp_flags & UNP_HAVEPC) 2719 xu = unp->unp_peercred; 2720 else { 2721 if (so->so_proto->pr_flags & PR_CONNREQUIRED) 2722 error = ENOTCONN; 2723 else 2724 error = EINVAL; 2725 } 2726 UNP_PCB_UNLOCK(unp); 2727 if (error == 0) 2728 error = sooptcopyout(sopt, &xu, sizeof(xu)); 2729 break; 2730 2731 case LOCAL_CREDS: 2732 /* Unlocked read. */ 2733 optval = unp->unp_flags & UNP_WANTCRED_ONESHOT ? 1 : 0; 2734 error = sooptcopyout(sopt, &optval, sizeof(optval)); 2735 break; 2736 2737 case LOCAL_CREDS_PERSISTENT: 2738 /* Unlocked read. */ 2739 optval = unp->unp_flags & UNP_WANTCRED_ALWAYS ? 
1 : 0; 2740 error = sooptcopyout(sopt, &optval, sizeof(optval)); 2741 break; 2742 2743 default: 2744 error = EOPNOTSUPP; 2745 break; 2746 } 2747 break; 2748 2749 case SOPT_SET: 2750 switch (sopt->sopt_name) { 2751 case LOCAL_CREDS: 2752 case LOCAL_CREDS_PERSISTENT: 2753 error = sooptcopyin(sopt, &optval, sizeof(optval), 2754 sizeof(optval)); 2755 if (error) 2756 break; 2757 2758 #define OPTSET(bit, exclusive) do { \ 2759 UNP_PCB_LOCK(unp); \ 2760 if (optval) { \ 2761 if ((unp->unp_flags & (exclusive)) != 0) { \ 2762 UNP_PCB_UNLOCK(unp); \ 2763 error = EINVAL; \ 2764 break; \ 2765 } \ 2766 unp->unp_flags |= (bit); \ 2767 } else \ 2768 unp->unp_flags &= ~(bit); \ 2769 UNP_PCB_UNLOCK(unp); \ 2770 } while (0) 2771 2772 switch (sopt->sopt_name) { 2773 case LOCAL_CREDS: 2774 OPTSET(UNP_WANTCRED_ONESHOT, UNP_WANTCRED_ALWAYS); 2775 break; 2776 2777 case LOCAL_CREDS_PERSISTENT: 2778 OPTSET(UNP_WANTCRED_ALWAYS, UNP_WANTCRED_ONESHOT); 2779 break; 2780 2781 default: 2782 break; 2783 } 2784 break; 2785 #undef OPTSET 2786 default: 2787 error = ENOPROTOOPT; 2788 break; 2789 } 2790 break; 2791 2792 default: 2793 error = EOPNOTSUPP; 2794 break; 2795 } 2796 return (error); 2797 } 2798 2799 static int 2800 unp_connect(struct socket *so, struct sockaddr *nam, struct thread *td) 2801 { 2802 2803 return (unp_connectat(AT_FDCWD, so, nam, td, false)); 2804 } 2805 2806 static int 2807 unp_connectat(int fd, struct socket *so, struct sockaddr *nam, 2808 struct thread *td, bool return_locked) 2809 { 2810 struct mtx *vplock; 2811 struct sockaddr_un *soun; 2812 struct vnode *vp; 2813 struct socket *so2; 2814 struct unpcb *unp, *unp2, *unp3; 2815 struct nameidata nd; 2816 char buf[SOCK_MAXADDRLEN]; 2817 struct sockaddr *sa; 2818 cap_rights_t rights; 2819 int error, len; 2820 bool connreq; 2821 2822 CURVNET_ASSERT_SET(); 2823 2824 if (nam->sa_family != AF_UNIX) 2825 return (EAFNOSUPPORT); 2826 if (nam->sa_len > sizeof(struct sockaddr_un)) 2827 return (EINVAL); 2828 len = nam->sa_len - offsetof(struct sockaddr_un, sun_path); 2829 if (len <= 0) 2830 return (EINVAL); 2831 soun = (struct sockaddr_un *)nam; 2832 bcopy(soun->sun_path, buf, len); 2833 buf[len] = 0; 2834 2835 error = 0; 2836 unp = sotounpcb(so); 2837 UNP_PCB_LOCK(unp); 2838 for (;;) { 2839 /* 2840 * Wait for connection state to stabilize. If a connection 2841 * already exists, give up. For datagram sockets, which permit 2842 * multiple consecutive connect(2) calls, upper layers are 2843 * responsible for disconnecting in advance of a subsequent 2844 * connect(2), but this is not synchronized with PCB connection 2845 * state. 2846 * 2847 * Also make sure that no threads are currently attempting to 2848 * lock the peer socket, to ensure that unp_conn cannot 2849 * transition between two valid sockets while locks are dropped. 
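		 *
		 * Illustrative userland sequence (a sketch; the addresses are
		 * hypothetical): a SOCK_DGRAM socket may be re-targeted by
		 * consecutive connect(2) calls, e.g. connect(fd, &a, alen)
		 * followed later by connect(fd, &b, blen); the disconnect from
		 * the first peer is done by the upper layers before this
		 * function runs again.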
2850 */ 2851 if (SOLISTENING(so)) 2852 error = EOPNOTSUPP; 2853 else if (unp->unp_conn != NULL) 2854 error = EISCONN; 2855 else if ((unp->unp_flags & UNP_CONNECTING) != 0) { 2856 error = EALREADY; 2857 } 2858 if (error != 0) { 2859 UNP_PCB_UNLOCK(unp); 2860 return (error); 2861 } 2862 if (unp->unp_pairbusy > 0) { 2863 unp->unp_flags |= UNP_WAITING; 2864 mtx_sleep(unp, UNP_PCB_LOCKPTR(unp), 0, "unpeer", 0); 2865 continue; 2866 } 2867 break; 2868 } 2869 unp->unp_flags |= UNP_CONNECTING; 2870 UNP_PCB_UNLOCK(unp); 2871 2872 connreq = (so->so_proto->pr_flags & PR_CONNREQUIRED) != 0; 2873 if (connreq) 2874 sa = malloc(sizeof(struct sockaddr_un), M_SONAME, M_WAITOK); 2875 else 2876 sa = NULL; 2877 NDINIT_ATRIGHTS(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, 2878 UIO_SYSSPACE, buf, fd, cap_rights_init_one(&rights, CAP_CONNECTAT)); 2879 error = namei(&nd); 2880 if (error) 2881 vp = NULL; 2882 else 2883 vp = nd.ni_vp; 2884 ASSERT_VOP_LOCKED(vp, "unp_connect"); 2885 if (error) 2886 goto bad; 2887 NDFREE_PNBUF(&nd); 2888 2889 if (vp->v_type != VSOCK) { 2890 error = ENOTSOCK; 2891 goto bad; 2892 } 2893 #ifdef MAC 2894 error = mac_vnode_check_open(td->td_ucred, vp, VWRITE | VREAD); 2895 if (error) 2896 goto bad; 2897 #endif 2898 error = VOP_ACCESS(vp, VWRITE, td->td_ucred, td); 2899 if (error) 2900 goto bad; 2901 2902 unp = sotounpcb(so); 2903 KASSERT(unp != NULL, ("unp_connect: unp == NULL")); 2904 2905 vplock = mtx_pool_find(unp_vp_mtxpool, vp); 2906 mtx_lock(vplock); 2907 VOP_UNP_CONNECT(vp, &unp2); 2908 if (unp2 == NULL) { 2909 error = ECONNREFUSED; 2910 goto bad2; 2911 } 2912 so2 = unp2->unp_socket; 2913 if (so->so_type != so2->so_type) { 2914 error = EPROTOTYPE; 2915 goto bad2; 2916 } 2917 if (connreq) { 2918 if (SOLISTENING(so2)) 2919 so2 = solisten_clone(so2); 2920 else 2921 so2 = NULL; 2922 if (so2 == NULL) { 2923 error = ECONNREFUSED; 2924 goto bad2; 2925 } 2926 if ((error = uipc_attach(so2, 0, NULL)) != 0) { 2927 sodealloc(so2); 2928 goto bad2; 2929 } 2930 unp3 = sotounpcb(so2); 2931 unp_pcb_lock_pair(unp2, unp3); 2932 if (unp2->unp_addr != NULL) { 2933 bcopy(unp2->unp_addr, sa, unp2->unp_addr->sun_len); 2934 unp3->unp_addr = (struct sockaddr_un *) sa; 2935 sa = NULL; 2936 } 2937 2938 unp_copy_peercred(td, unp3, unp, unp2); 2939 2940 UNP_PCB_UNLOCK(unp2); 2941 unp2 = unp3; 2942 2943 /* 2944 * It is safe to block on the PCB lock here since unp2 is 2945 * nascent and cannot be connected to any other sockets. 2946 */ 2947 UNP_PCB_LOCK(unp); 2948 #ifdef MAC 2949 mac_socketpeer_set_from_socket(so, so2); 2950 mac_socketpeer_set_from_socket(so2, so); 2951 #endif 2952 } else { 2953 unp_pcb_lock_pair(unp, unp2); 2954 } 2955 KASSERT(unp2 != NULL && so2 != NULL && unp2->unp_socket == so2 && 2956 sotounpcb(so2) == unp2, 2957 ("%s: unp2 %p so2 %p", __func__, unp2, so2)); 2958 unp_connect2(so, so2, connreq); 2959 if (connreq) 2960 (void)solisten_enqueue(so2, SS_ISCONNECTED); 2961 KASSERT((unp->unp_flags & UNP_CONNECTING) != 0, 2962 ("%s: unp %p has UNP_CONNECTING clear", __func__, unp)); 2963 unp->unp_flags &= ~UNP_CONNECTING; 2964 if (!return_locked) 2965 unp_pcb_unlock_pair(unp, unp2); 2966 bad2: 2967 mtx_unlock(vplock); 2968 bad: 2969 if (vp != NULL) { 2970 /* 2971 * If we are returning locked (called via uipc_sosend_dgram()), 2972 * we need to be sure that vput() won't sleep. This is 2973 * guaranteed by VOP_UNP_CONNECT() call above and unp2 lock. 2974 * SOCK_STREAM/SEQPACKET can't request return_locked (yet). 
2975 */ 2976 MPASS(!(return_locked && connreq)); 2977 vput(vp); 2978 } 2979 free(sa, M_SONAME); 2980 if (__predict_false(error)) { 2981 UNP_PCB_LOCK(unp); 2982 KASSERT((unp->unp_flags & UNP_CONNECTING) != 0, 2983 ("%s: unp %p has UNP_CONNECTING clear", __func__, unp)); 2984 unp->unp_flags &= ~UNP_CONNECTING; 2985 UNP_PCB_UNLOCK(unp); 2986 } 2987 return (error); 2988 } 2989 2990 /* 2991 * Set socket peer credentials at connection time. 2992 * 2993 * The client's PCB credentials are copied from its process structure. The 2994 * server's PCB credentials are copied from the socket on which it called 2995 * listen(2). uipc_listen cached that process's credentials at the time. 2996 */ 2997 void 2998 unp_copy_peercred(struct thread *td, struct unpcb *client_unp, 2999 struct unpcb *server_unp, struct unpcb *listen_unp) 3000 { 3001 cru2xt(td, &client_unp->unp_peercred); 3002 client_unp->unp_flags |= UNP_HAVEPC; 3003 3004 memcpy(&server_unp->unp_peercred, &listen_unp->unp_peercred, 3005 sizeof(server_unp->unp_peercred)); 3006 server_unp->unp_flags |= UNP_HAVEPC; 3007 client_unp->unp_flags |= (listen_unp->unp_flags & UNP_WANTCRED_MASK); 3008 } 3009 3010 /* 3011 * unix/stream & unix/seqpacket version of soisconnected(). 3012 * 3013 * The crucial thing we are doing here is setting up the uxst_peer linkage, 3014 * holding unp and receive buffer locks of the both sockets. The disconnect 3015 * procedure does the same. This gives as a safe way to access the peer in the 3016 * send(2) and recv(2) during the socket lifetime. 3017 * 3018 * The less important thing is event notification of the fact that a socket is 3019 * now connected. It is unusual for a software to put a socket into event 3020 * mechanism before connect(2), but is supposed to be supported. Note that 3021 * there can not be any sleeping I/O on the socket, yet, only presence in the 3022 * select/poll/kevent. 3023 * 3024 * This function can be called via two call paths: 3025 * 1) socketpair(2) - in this case socket has not been yet reported to userland 3026 * and just can't have any event notifications mechanisms set up. The 3027 * 'wakeup' boolean is always false. 3028 * 2) connect(2) of existing socket to a recent clone of a listener: 3029 * 2.1) Socket that connect(2)s will have 'wakeup' true. An application 3030 * could have already put it into event mechanism, is it shall be 3031 * reported as readable and as writable. 3032 * 2.2) Socket that was just cloned with solisten_clone(). Same as 1). 
3033 */ 3034 static void 3035 unp_soisconnected(struct socket *so, bool wakeup) 3036 { 3037 struct socket *so2 = sotounpcb(so)->unp_conn->unp_socket; 3038 struct sockbuf *sb; 3039 3040 SOCK_LOCK_ASSERT(so); 3041 UNP_PCB_LOCK_ASSERT(sotounpcb(so)); 3042 UNP_PCB_LOCK_ASSERT(sotounpcb(so2)); 3043 SOCK_RECVBUF_LOCK_ASSERT(so); 3044 SOCK_RECVBUF_LOCK_ASSERT(so2); 3045 3046 MPASS(so->so_type == SOCK_STREAM || so->so_type == SOCK_SEQPACKET); 3047 MPASS((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING | 3048 SS_ISDISCONNECTING)) == 0); 3049 MPASS(so->so_qstate == SQ_NONE); 3050 3051 so->so_state &= ~SS_ISDISCONNECTED; 3052 so->so_state |= SS_ISCONNECTED; 3053 3054 sb = &so2->so_rcv; 3055 sb->uxst_peer = so; 3056 3057 if (wakeup) { 3058 KNOTE_LOCKED(&sb->sb_sel->si_note, 0); 3059 sb = &so->so_rcv; 3060 selwakeuppri(sb->sb_sel, PSOCK); 3061 SOCK_SENDBUF_LOCK_ASSERT(so); 3062 sb = &so->so_snd; 3063 selwakeuppri(sb->sb_sel, PSOCK); 3064 SOCK_SENDBUF_UNLOCK(so); 3065 } 3066 } 3067 3068 static void 3069 unp_connect2(struct socket *so, struct socket *so2, bool wakeup) 3070 { 3071 struct unpcb *unp; 3072 struct unpcb *unp2; 3073 3074 MPASS(so2->so_type == so->so_type); 3075 unp = sotounpcb(so); 3076 KASSERT(unp != NULL, ("unp_connect2: unp == NULL")); 3077 unp2 = sotounpcb(so2); 3078 KASSERT(unp2 != NULL, ("unp_connect2: unp2 == NULL")); 3079 3080 UNP_PCB_LOCK_ASSERT(unp); 3081 UNP_PCB_LOCK_ASSERT(unp2); 3082 KASSERT(unp->unp_conn == NULL, 3083 ("%s: socket %p is already connected", __func__, unp)); 3084 3085 unp->unp_conn = unp2; 3086 unp_pcb_hold(unp2); 3087 unp_pcb_hold(unp); 3088 switch (so->so_type) { 3089 case SOCK_DGRAM: 3090 UNP_REF_LIST_LOCK(); 3091 LIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_reflink); 3092 UNP_REF_LIST_UNLOCK(); 3093 soisconnected(so); 3094 break; 3095 3096 case SOCK_STREAM: 3097 case SOCK_SEQPACKET: 3098 KASSERT(unp2->unp_conn == NULL, 3099 ("%s: socket %p is already connected", __func__, unp2)); 3100 unp2->unp_conn = unp; 3101 SOCK_LOCK(so); 3102 SOCK_LOCK(so2); 3103 if (wakeup) /* Avoid LOR with receive buffer lock. */ 3104 SOCK_SENDBUF_LOCK(so); 3105 SOCK_RECVBUF_LOCK(so); 3106 SOCK_RECVBUF_LOCK(so2); 3107 unp_soisconnected(so, wakeup); /* Will unlock send buffer. 
*/ 3108 unp_soisconnected(so2, false); 3109 SOCK_RECVBUF_UNLOCK(so); 3110 SOCK_RECVBUF_UNLOCK(so2); 3111 SOCK_UNLOCK(so); 3112 SOCK_UNLOCK(so2); 3113 break; 3114 3115 default: 3116 panic("unp_connect2"); 3117 } 3118 } 3119 3120 static void 3121 unp_soisdisconnected(struct socket *so) 3122 { 3123 SOCK_LOCK_ASSERT(so); 3124 SOCK_RECVBUF_LOCK_ASSERT(so); 3125 MPASS(so->so_type == SOCK_STREAM || so->so_type == SOCK_SEQPACKET); 3126 MPASS(!SOLISTENING(so)); 3127 MPASS((so->so_state & (SS_ISCONNECTING | SS_ISDISCONNECTING | 3128 SS_ISDISCONNECTED)) == 0); 3129 MPASS(so->so_state & SS_ISCONNECTED); 3130 3131 so->so_state |= SS_ISDISCONNECTED; 3132 so->so_state &= ~SS_ISCONNECTED; 3133 so->so_rcv.uxst_peer = NULL; 3134 socantrcvmore_locked(so); 3135 } 3136 3137 static void 3138 unp_disconnect(struct unpcb *unp, struct unpcb *unp2) 3139 { 3140 struct socket *so, *so2; 3141 struct mbuf *m = NULL; 3142 #ifdef INVARIANTS 3143 struct unpcb *unptmp; 3144 #endif 3145 3146 UNP_PCB_LOCK_ASSERT(unp); 3147 UNP_PCB_LOCK_ASSERT(unp2); 3148 KASSERT(unp->unp_conn == unp2, 3149 ("%s: unpcb %p is not connected to %p", __func__, unp, unp2)); 3150 3151 unp->unp_conn = NULL; 3152 so = unp->unp_socket; 3153 so2 = unp2->unp_socket; 3154 switch (unp->unp_socket->so_type) { 3155 case SOCK_DGRAM: 3156 /* 3157 * Remove our send socket buffer from the peer's receive buffer. 3158 * Move the data to the receive buffer only if it is empty. 3159 * This is a protection against a scenario where a peer 3160 * connects, floods and disconnects, effectively blocking 3161 * sendto() from unconnected sockets. 3162 */ 3163 SOCK_RECVBUF_LOCK(so2); 3164 if (!STAILQ_EMPTY(&so->so_snd.uxdg_mb)) { 3165 TAILQ_REMOVE(&so2->so_rcv.uxdg_conns, &so->so_snd, 3166 uxdg_clist); 3167 if (__predict_true((so2->so_rcv.sb_state & 3168 SBS_CANTRCVMORE) == 0) && 3169 STAILQ_EMPTY(&so2->so_rcv.uxdg_mb)) { 3170 STAILQ_CONCAT(&so2->so_rcv.uxdg_mb, 3171 &so->so_snd.uxdg_mb); 3172 so2->so_rcv.uxdg_cc += so->so_snd.uxdg_cc; 3173 so2->so_rcv.uxdg_ctl += so->so_snd.uxdg_ctl; 3174 so2->so_rcv.uxdg_mbcnt += so->so_snd.uxdg_mbcnt; 3175 } else { 3176 m = STAILQ_FIRST(&so->so_snd.uxdg_mb); 3177 STAILQ_INIT(&so->so_snd.uxdg_mb); 3178 so2->so_rcv.sb_acc -= so->so_snd.uxdg_cc; 3179 so2->so_rcv.sb_ccc -= so->so_snd.uxdg_cc; 3180 so2->so_rcv.sb_ctl -= so->so_snd.uxdg_ctl; 3181 so2->so_rcv.sb_mbcnt -= so->so_snd.uxdg_mbcnt; 3182 } 3183 /* Note: so may reconnect. 
*/ 3184 so->so_snd.uxdg_cc = 0; 3185 so->so_snd.uxdg_ctl = 0; 3186 so->so_snd.uxdg_mbcnt = 0; 3187 } 3188 SOCK_RECVBUF_UNLOCK(so2); 3189 UNP_REF_LIST_LOCK(); 3190 #ifdef INVARIANTS 3191 LIST_FOREACH(unptmp, &unp2->unp_refs, unp_reflink) { 3192 if (unptmp == unp) 3193 break; 3194 } 3195 KASSERT(unptmp != NULL, 3196 ("%s: %p not found in reflist of %p", __func__, unp, unp2)); 3197 #endif 3198 LIST_REMOVE(unp, unp_reflink); 3199 UNP_REF_LIST_UNLOCK(); 3200 if (so) { 3201 SOCK_LOCK(so); 3202 so->so_state &= ~SS_ISCONNECTED; 3203 SOCK_UNLOCK(so); 3204 } 3205 break; 3206 3207 case SOCK_STREAM: 3208 case SOCK_SEQPACKET: 3209 SOCK_LOCK(so); 3210 SOCK_LOCK(so2); 3211 SOCK_RECVBUF_LOCK(so); 3212 SOCK_RECVBUF_LOCK(so2); 3213 unp_soisdisconnected(so); 3214 MPASS(unp2->unp_conn == unp); 3215 unp2->unp_conn = NULL; 3216 unp_soisdisconnected(so2); 3217 SOCK_UNLOCK(so); 3218 SOCK_UNLOCK(so2); 3219 break; 3220 } 3221 3222 if (unp == unp2) { 3223 unp_pcb_rele_notlast(unp); 3224 if (!unp_pcb_rele(unp)) 3225 UNP_PCB_UNLOCK(unp); 3226 } else { 3227 if (!unp_pcb_rele(unp)) 3228 UNP_PCB_UNLOCK(unp); 3229 if (!unp_pcb_rele(unp2)) 3230 UNP_PCB_UNLOCK(unp2); 3231 } 3232 3233 if (m != NULL) { 3234 unp_scan(m, unp_freerights); 3235 m_freemp(m); 3236 } 3237 } 3238 3239 /* 3240 * unp_pcblist() walks the global list of struct unpcb's to generate a 3241 * pointer list, bumping the refcount on each unpcb. It then copies them out 3242 * sequentially, validating the generation number on each to see if it has 3243 * been detached. All of this is necessary because copyout() may sleep on 3244 * disk I/O. 3245 */ 3246 static int 3247 unp_pcblist(SYSCTL_HANDLER_ARGS) 3248 { 3249 struct unpcb *unp, **unp_list; 3250 unp_gen_t gencnt; 3251 struct xunpgen *xug; 3252 struct unp_head *head; 3253 struct xunpcb *xu; 3254 u_int i; 3255 int error, n; 3256 3257 switch ((intptr_t)arg1) { 3258 case SOCK_STREAM: 3259 head = &unp_shead; 3260 break; 3261 3262 case SOCK_DGRAM: 3263 head = &unp_dhead; 3264 break; 3265 3266 case SOCK_SEQPACKET: 3267 head = &unp_sphead; 3268 break; 3269 3270 default: 3271 panic("unp_pcblist: arg1 %d", (int)(intptr_t)arg1); 3272 } 3273 3274 /* 3275 * The process of preparing the PCB list is too time-consuming and 3276 * resource-intensive to repeat twice on every request. 3277 */ 3278 if (req->oldptr == NULL) { 3279 n = unp_count; 3280 req->oldidx = 2 * (sizeof *xug) 3281 + (n + n/8) * sizeof(struct xunpcb); 3282 return (0); 3283 } 3284 3285 if (req->newptr != NULL) 3286 return (EPERM); 3287 3288 /* 3289 * OK, now we're committed to doing something. 3290 */ 3291 xug = malloc(sizeof(*xug), M_TEMP, M_WAITOK | M_ZERO); 3292 UNP_LINK_RLOCK(); 3293 gencnt = unp_gencnt; 3294 n = unp_count; 3295 UNP_LINK_RUNLOCK(); 3296 3297 xug->xug_len = sizeof *xug; 3298 xug->xug_count = n; 3299 xug->xug_gen = gencnt; 3300 xug->xug_sogen = so_gencnt; 3301 error = SYSCTL_OUT(req, xug, sizeof *xug); 3302 if (error) { 3303 free(xug, M_TEMP); 3304 return (error); 3305 } 3306 3307 unp_list = malloc(n * sizeof *unp_list, M_TEMP, M_WAITOK); 3308 3309 UNP_LINK_RLOCK(); 3310 for (unp = LIST_FIRST(head), i = 0; unp && i < n; 3311 unp = LIST_NEXT(unp, unp_link)) { 3312 UNP_PCB_LOCK(unp); 3313 if (unp->unp_gencnt <= gencnt) { 3314 if (cr_cansee(req->td->td_ucred, 3315 unp->unp_socket->so_cred)) { 3316 UNP_PCB_UNLOCK(unp); 3317 continue; 3318 } 3319 unp_list[i++] = unp; 3320 unp_pcb_hold(unp); 3321 } 3322 UNP_PCB_UNLOCK(unp); 3323 } 3324 UNP_LINK_RUNLOCK(); 3325 n = i; /* In case we lost some during malloc. 
*/ 3326 3327 error = 0; 3328 xu = malloc(sizeof(*xu), M_TEMP, M_WAITOK | M_ZERO); 3329 for (i = 0; i < n; i++) { 3330 unp = unp_list[i]; 3331 UNP_PCB_LOCK(unp); 3332 if (unp_pcb_rele(unp)) 3333 continue; 3334 3335 if (unp->unp_gencnt <= gencnt) { 3336 xu->xu_len = sizeof *xu; 3337 xu->xu_unpp = (uintptr_t)unp; 3338 /* 3339 * XXX - need more locking here to protect against 3340 * connect/disconnect races for SMP. 3341 */ 3342 if (unp->unp_addr != NULL) 3343 bcopy(unp->unp_addr, &xu->xu_addr, 3344 unp->unp_addr->sun_len); 3345 else 3346 bzero(&xu->xu_addr, sizeof(xu->xu_addr)); 3347 if (unp->unp_conn != NULL && 3348 unp->unp_conn->unp_addr != NULL) 3349 bcopy(unp->unp_conn->unp_addr, 3350 &xu->xu_caddr, 3351 unp->unp_conn->unp_addr->sun_len); 3352 else 3353 bzero(&xu->xu_caddr, sizeof(xu->xu_caddr)); 3354 xu->unp_vnode = (uintptr_t)unp->unp_vnode; 3355 xu->unp_conn = (uintptr_t)unp->unp_conn; 3356 xu->xu_firstref = (uintptr_t)LIST_FIRST(&unp->unp_refs); 3357 xu->xu_nextref = (uintptr_t)LIST_NEXT(unp, unp_reflink); 3358 xu->unp_gencnt = unp->unp_gencnt; 3359 sotoxsocket(unp->unp_socket, &xu->xu_socket); 3360 UNP_PCB_UNLOCK(unp); 3361 error = SYSCTL_OUT(req, xu, sizeof *xu); 3362 } else { 3363 UNP_PCB_UNLOCK(unp); 3364 } 3365 } 3366 free(xu, M_TEMP); 3367 if (!error) { 3368 /* 3369 * Give the user an updated idea of our state. If the 3370 * generation differs from what we told her before, she knows 3371 * that something happened while we were processing this 3372 * request, and it might be necessary to retry. 3373 */ 3374 xug->xug_gen = unp_gencnt; 3375 xug->xug_sogen = so_gencnt; 3376 xug->xug_count = unp_count; 3377 error = SYSCTL_OUT(req, xug, sizeof *xug); 3378 } 3379 free(unp_list, M_TEMP); 3380 free(xug, M_TEMP); 3381 return (error); 3382 } 3383 3384 SYSCTL_PROC(_net_local_dgram, OID_AUTO, pcblist, 3385 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, 3386 (void *)(intptr_t)SOCK_DGRAM, 0, unp_pcblist, "S,xunpcb", 3387 "List of active local datagram sockets"); 3388 SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist, 3389 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, 3390 (void *)(intptr_t)SOCK_STREAM, 0, unp_pcblist, "S,xunpcb", 3391 "List of active local stream sockets"); 3392 SYSCTL_PROC(_net_local_seqpacket, OID_AUTO, pcblist, 3393 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, 3394 (void *)(intptr_t)SOCK_SEQPACKET, 0, unp_pcblist, "S,xunpcb", 3395 "List of active local seqpacket sockets"); 3396 3397 static void 3398 unp_drop(struct unpcb *unp) 3399 { 3400 struct socket *so; 3401 struct unpcb *unp2; 3402 3403 /* 3404 * Regardless of whether the socket's peer dropped the connection 3405 * with this socket by aborting or disconnecting, POSIX requires 3406 * that ECONNRESET is returned on next connected send(2) in case of 3407 * a SOCK_DGRAM socket and EPIPE for SOCK_STREAM. 3408 */ 3409 UNP_PCB_LOCK(unp); 3410 if ((so = unp->unp_socket) != NULL) 3411 so->so_error = 3412 so->so_proto->pr_type == SOCK_DGRAM ? ECONNRESET : EPIPE; 3413 if ((unp2 = unp_pcb_lock_peer(unp)) != NULL) { 3414 /* Last reference dropped in unp_disconnect(). 
*/ 3415 unp_pcb_rele_notlast(unp); 3416 unp_disconnect(unp, unp2); 3417 } else if (!unp_pcb_rele(unp)) { 3418 UNP_PCB_UNLOCK(unp); 3419 } 3420 } 3421 3422 static void 3423 unp_freerights(struct filedescent **fdep, int fdcount) 3424 { 3425 struct file *fp; 3426 int i; 3427 3428 KASSERT(fdcount > 0, ("%s: fdcount %d", __func__, fdcount)); 3429 3430 for (i = 0; i < fdcount; i++) { 3431 fp = fdep[i]->fde_file; 3432 filecaps_free(&fdep[i]->fde_caps); 3433 unp_discard(fp); 3434 } 3435 free(fdep[0], M_FILECAPS); 3436 } 3437 3438 static int 3439 unp_externalize(struct mbuf *control, struct mbuf **controlp, int flags) 3440 { 3441 struct thread *td = curthread; /* XXX */ 3442 struct cmsghdr *cm = mtod(control, struct cmsghdr *); 3443 int i; 3444 int *fdp; 3445 struct filedesc *fdesc = td->td_proc->p_fd; 3446 struct filedescent **fdep; 3447 void *data; 3448 socklen_t clen = control->m_len, datalen; 3449 int error, newfds; 3450 u_int newlen; 3451 3452 UNP_LINK_UNLOCK_ASSERT(); 3453 3454 error = 0; 3455 if (controlp != NULL) /* controlp == NULL => free control messages */ 3456 *controlp = NULL; 3457 while (cm != NULL) { 3458 MPASS(clen >= sizeof(*cm) && clen >= cm->cmsg_len); 3459 3460 data = CMSG_DATA(cm); 3461 datalen = (caddr_t)cm + cm->cmsg_len - (caddr_t)data; 3462 if (cm->cmsg_level == SOL_SOCKET 3463 && cm->cmsg_type == SCM_RIGHTS) { 3464 newfds = datalen / sizeof(*fdep); 3465 if (newfds == 0) 3466 goto next; 3467 fdep = data; 3468 3469 /* If we're not outputting the descriptors free them. */ 3470 if (error || controlp == NULL) { 3471 unp_freerights(fdep, newfds); 3472 goto next; 3473 } 3474 FILEDESC_XLOCK(fdesc); 3475 3476 /* 3477 * Now change each pointer to an fd in the global 3478 * table to an integer that is the index to the local 3479 * fd table entry that we set up to point to the 3480 * global one we are transferring. 3481 */ 3482 newlen = newfds * sizeof(int); 3483 *controlp = sbcreatecontrol(NULL, newlen, 3484 SCM_RIGHTS, SOL_SOCKET, M_WAITOK); 3485 3486 fdp = (int *) 3487 CMSG_DATA(mtod(*controlp, struct cmsghdr *)); 3488 if ((error = fdallocn(td, 0, fdp, newfds))) { 3489 FILEDESC_XUNLOCK(fdesc); 3490 unp_freerights(fdep, newfds); 3491 m_freem(*controlp); 3492 *controlp = NULL; 3493 goto next; 3494 } 3495 for (i = 0; i < newfds; i++, fdp++) { 3496 _finstall(fdesc, fdep[i]->fde_file, *fdp, 3497 (flags & MSG_CMSG_CLOEXEC) != 0 ? O_CLOEXEC : 0, 3498 &fdep[i]->fde_caps); 3499 unp_externalize_fp(fdep[i]->fde_file); 3500 } 3501 3502 /* 3503 * The new type indicates that the mbuf data refers to 3504 * kernel resources that may need to be released before 3505 * the mbuf is freed. 3506 */ 3507 m_chtype(*controlp, MT_EXTCONTROL); 3508 FILEDESC_XUNLOCK(fdesc); 3509 free(fdep[0], M_FILECAPS); 3510 } else { 3511 /* We can just copy anything else across. 
*/ 3512 if (error || controlp == NULL) 3513 goto next; 3514 *controlp = sbcreatecontrol(NULL, datalen, 3515 cm->cmsg_type, cm->cmsg_level, M_WAITOK); 3516 bcopy(data, 3517 CMSG_DATA(mtod(*controlp, struct cmsghdr *)), 3518 datalen); 3519 } 3520 controlp = &(*controlp)->m_next; 3521 3522 next: 3523 if (CMSG_SPACE(datalen) < clen) { 3524 clen -= CMSG_SPACE(datalen); 3525 cm = (struct cmsghdr *) 3526 ((caddr_t)cm + CMSG_SPACE(datalen)); 3527 } else { 3528 clen = 0; 3529 cm = NULL; 3530 } 3531 } 3532 3533 return (error); 3534 } 3535 3536 static void 3537 unp_zone_change(void *tag) 3538 { 3539 3540 uma_zone_set_max(unp_zone, maxsockets); 3541 } 3542 3543 #ifdef INVARIANTS 3544 static void 3545 unp_zdtor(void *mem, int size __unused, void *arg __unused) 3546 { 3547 struct unpcb *unp; 3548 3549 unp = mem; 3550 3551 KASSERT(LIST_EMPTY(&unp->unp_refs), 3552 ("%s: unpcb %p has lingering refs", __func__, unp)); 3553 KASSERT(unp->unp_socket == NULL, 3554 ("%s: unpcb %p has socket backpointer", __func__, unp)); 3555 KASSERT(unp->unp_vnode == NULL, 3556 ("%s: unpcb %p has vnode references", __func__, unp)); 3557 KASSERT(unp->unp_conn == NULL, 3558 ("%s: unpcb %p is still connected", __func__, unp)); 3559 KASSERT(unp->unp_addr == NULL, 3560 ("%s: unpcb %p has leaked addr", __func__, unp)); 3561 } 3562 #endif 3563 3564 static void 3565 unp_init(void *arg __unused) 3566 { 3567 uma_dtor dtor; 3568 3569 #ifdef INVARIANTS 3570 dtor = unp_zdtor; 3571 #else 3572 dtor = NULL; 3573 #endif 3574 unp_zone = uma_zcreate("unpcb", sizeof(struct unpcb), NULL, dtor, 3575 NULL, NULL, UMA_ALIGN_CACHE, 0); 3576 uma_zone_set_max(unp_zone, maxsockets); 3577 uma_zone_set_warning(unp_zone, "kern.ipc.maxsockets limit reached"); 3578 EVENTHANDLER_REGISTER(maxsockets_change, unp_zone_change, 3579 NULL, EVENTHANDLER_PRI_ANY); 3580 LIST_INIT(&unp_dhead); 3581 LIST_INIT(&unp_shead); 3582 LIST_INIT(&unp_sphead); 3583 SLIST_INIT(&unp_defers); 3584 TIMEOUT_TASK_INIT(taskqueue_thread, &unp_gc_task, 0, unp_gc, NULL); 3585 TASK_INIT(&unp_defer_task, 0, unp_process_defers, NULL); 3586 UNP_LINK_LOCK_INIT(); 3587 UNP_DEFERRED_LOCK_INIT(); 3588 unp_vp_mtxpool = mtx_pool_create("unp vp mtxpool", 32, MTX_DEF); 3589 } 3590 SYSINIT(unp_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_SECOND, unp_init, NULL); 3591 3592 static void 3593 unp_internalize_cleanup_rights(struct mbuf *control) 3594 { 3595 struct cmsghdr *cp; 3596 struct mbuf *m; 3597 void *data; 3598 socklen_t datalen; 3599 3600 for (m = control; m != NULL; m = m->m_next) { 3601 cp = mtod(m, struct cmsghdr *); 3602 if (cp->cmsg_level != SOL_SOCKET || 3603 cp->cmsg_type != SCM_RIGHTS) 3604 continue; 3605 data = CMSG_DATA(cp); 3606 datalen = (caddr_t)cp + cp->cmsg_len - (caddr_t)data; 3607 unp_freerights(data, datalen / sizeof(struct filedesc *)); 3608 } 3609 } 3610 3611 static int 3612 unp_internalize(struct mbuf *control, struct mchain *mc, struct thread *td) 3613 { 3614 struct proc *p; 3615 struct filedesc *fdesc; 3616 struct bintime *bt; 3617 struct cmsghdr *cm; 3618 struct cmsgcred *cmcred; 3619 struct mbuf *m; 3620 struct filedescent *fde, **fdep, *fdev; 3621 struct file *fp; 3622 struct timeval *tv; 3623 struct timespec *ts; 3624 void *data; 3625 socklen_t clen, datalen; 3626 int i, j, error, *fdp, oldfds; 3627 u_int newlen; 3628 3629 MPASS(control->m_next == NULL); /* COMPAT_OLDSOCK may violate */ 3630 UNP_LINK_UNLOCK_ASSERT(); 3631 3632 p = td->td_proc; 3633 fdesc = p->p_fd; 3634 error = 0; 3635 *mc = MCHAIN_INITIALIZER(mc); 3636 for (clen = control->m_len, cm = mtod(control, struct cmsghdr 
*), 3637 data = CMSG_DATA(cm); 3638 3639 clen >= sizeof(*cm) && cm->cmsg_level == SOL_SOCKET && 3640 clen >= cm->cmsg_len && cm->cmsg_len >= sizeof(*cm) && 3641 (char *)cm + cm->cmsg_len >= (char *)data; 3642 3643 clen -= min(CMSG_SPACE(datalen), clen), 3644 cm = (struct cmsghdr *) ((char *)cm + CMSG_SPACE(datalen)), 3645 data = CMSG_DATA(cm)) { 3646 datalen = (char *)cm + cm->cmsg_len - (char *)data; 3647 switch (cm->cmsg_type) { 3648 case SCM_CREDS: 3649 m = sbcreatecontrol(NULL, sizeof(*cmcred), SCM_CREDS, 3650 SOL_SOCKET, M_WAITOK); 3651 cmcred = (struct cmsgcred *) 3652 CMSG_DATA(mtod(m, struct cmsghdr *)); 3653 cmcred->cmcred_pid = p->p_pid; 3654 cmcred->cmcred_uid = td->td_ucred->cr_ruid; 3655 cmcred->cmcred_gid = td->td_ucred->cr_rgid; 3656 cmcred->cmcred_euid = td->td_ucred->cr_uid; 3657 cmcred->cmcred_ngroups = MIN(td->td_ucred->cr_ngroups, 3658 CMGROUP_MAX); 3659 for (i = 0; i < cmcred->cmcred_ngroups; i++) 3660 cmcred->cmcred_groups[i] = 3661 td->td_ucred->cr_groups[i]; 3662 break; 3663 3664 case SCM_RIGHTS: 3665 oldfds = datalen / sizeof (int); 3666 if (oldfds == 0) 3667 continue; 3668 /* On some machines sizeof pointer is bigger than 3669 * sizeof int, so we need to check if data fits into 3670 * single mbuf. We could allocate several mbufs, and 3671 * unp_externalize() should even properly handle that. 3672 * But it is not worth to complicate the code for an 3673 * insane scenario of passing over 200 file descriptors 3674 * at once. 3675 */ 3676 newlen = oldfds * sizeof(fdep[0]); 3677 if (CMSG_SPACE(newlen) > MCLBYTES) { 3678 error = EMSGSIZE; 3679 goto out; 3680 } 3681 /* 3682 * Check that all the FDs passed in refer to legal 3683 * files. If not, reject the entire operation. 3684 */ 3685 fdp = data; 3686 FILEDESC_SLOCK(fdesc); 3687 for (i = 0; i < oldfds; i++, fdp++) { 3688 fp = fget_noref(fdesc, *fdp); 3689 if (fp == NULL) { 3690 FILEDESC_SUNLOCK(fdesc); 3691 error = EBADF; 3692 goto out; 3693 } 3694 if (!(fp->f_ops->fo_flags & DFLAG_PASSABLE)) { 3695 FILEDESC_SUNLOCK(fdesc); 3696 error = EOPNOTSUPP; 3697 goto out; 3698 } 3699 } 3700 3701 /* 3702 * Now replace the integer FDs with pointers to the 3703 * file structure and capability rights. 3704 */ 3705 m = sbcreatecontrol(NULL, newlen, SCM_RIGHTS, 3706 SOL_SOCKET, M_WAITOK); 3707 fdp = data; 3708 for (i = 0; i < oldfds; i++, fdp++) { 3709 if (!fhold(fdesc->fd_ofiles[*fdp].fde_file)) { 3710 fdp = data; 3711 for (j = 0; j < i; j++, fdp++) { 3712 fdrop(fdesc->fd_ofiles[*fdp]. 
3713 fde_file, td); 3714 } 3715 FILEDESC_SUNLOCK(fdesc); 3716 error = EBADF; 3717 goto out; 3718 } 3719 } 3720 fdp = data; 3721 fdep = (struct filedescent **) 3722 CMSG_DATA(mtod(m, struct cmsghdr *)); 3723 fdev = malloc(sizeof(*fdev) * oldfds, M_FILECAPS, 3724 M_WAITOK); 3725 for (i = 0; i < oldfds; i++, fdev++, fdp++) { 3726 fde = &fdesc->fd_ofiles[*fdp]; 3727 fdep[i] = fdev; 3728 fdep[i]->fde_file = fde->fde_file; 3729 filecaps_copy(&fde->fde_caps, 3730 &fdep[i]->fde_caps, true); 3731 unp_internalize_fp(fdep[i]->fde_file); 3732 } 3733 FILEDESC_SUNLOCK(fdesc); 3734 break; 3735 3736 case SCM_TIMESTAMP: 3737 m = sbcreatecontrol(NULL, sizeof(*tv), SCM_TIMESTAMP, 3738 SOL_SOCKET, M_WAITOK); 3739 tv = (struct timeval *) 3740 CMSG_DATA(mtod(m, struct cmsghdr *)); 3741 microtime(tv); 3742 break; 3743 3744 case SCM_BINTIME: 3745 m = sbcreatecontrol(NULL, sizeof(*bt), SCM_BINTIME, 3746 SOL_SOCKET, M_WAITOK); 3747 bt = (struct bintime *) 3748 CMSG_DATA(mtod(m, struct cmsghdr *)); 3749 bintime(bt); 3750 break; 3751 3752 case SCM_REALTIME: 3753 m = sbcreatecontrol(NULL, sizeof(*ts), SCM_REALTIME, 3754 SOL_SOCKET, M_WAITOK); 3755 ts = (struct timespec *) 3756 CMSG_DATA(mtod(m, struct cmsghdr *)); 3757 nanotime(ts); 3758 break; 3759 3760 case SCM_MONOTONIC: 3761 m = sbcreatecontrol(NULL, sizeof(*ts), SCM_MONOTONIC, 3762 SOL_SOCKET, M_WAITOK); 3763 ts = (struct timespec *) 3764 CMSG_DATA(mtod(m, struct cmsghdr *)); 3765 nanouptime(ts); 3766 break; 3767 3768 default: 3769 error = EINVAL; 3770 goto out; 3771 } 3772 3773 mc_append(mc, m); 3774 } 3775 if (clen > 0) 3776 error = EINVAL; 3777 3778 out: 3779 if (error != 0) 3780 unp_internalize_cleanup_rights(mc_first(mc)); 3781 m_freem(control); 3782 return (error); 3783 } 3784 3785 static void 3786 unp_addsockcred(struct thread *td, struct mchain *mc, int mode) 3787 { 3788 struct mbuf *m, *n, *n_prev; 3789 const struct cmsghdr *cm; 3790 int ngroups, i, cmsgtype; 3791 size_t ctrlsz; 3792 3793 ngroups = MIN(td->td_ucred->cr_ngroups, CMGROUP_MAX); 3794 if (mode & UNP_WANTCRED_ALWAYS) { 3795 ctrlsz = SOCKCRED2SIZE(ngroups); 3796 cmsgtype = SCM_CREDS2; 3797 } else { 3798 ctrlsz = SOCKCREDSIZE(ngroups); 3799 cmsgtype = SCM_CREDS; 3800 } 3801 3802 /* XXXGL: uipc_sosend_*() need to be improved so that we can M_WAITOK */ 3803 m = sbcreatecontrol(NULL, ctrlsz, cmsgtype, SOL_SOCKET, M_NOWAIT); 3804 if (m == NULL) 3805 return; 3806 MPASS((m->m_flags & M_EXT) == 0 && m->m_next == NULL); 3807 3808 if (mode & UNP_WANTCRED_ALWAYS) { 3809 struct sockcred2 *sc; 3810 3811 sc = (void *)CMSG_DATA(mtod(m, struct cmsghdr *)); 3812 sc->sc_version = 0; 3813 sc->sc_pid = td->td_proc->p_pid; 3814 sc->sc_uid = td->td_ucred->cr_ruid; 3815 sc->sc_euid = td->td_ucred->cr_uid; 3816 sc->sc_gid = td->td_ucred->cr_rgid; 3817 sc->sc_egid = td->td_ucred->cr_gid; 3818 sc->sc_ngroups = ngroups; 3819 for (i = 0; i < sc->sc_ngroups; i++) 3820 sc->sc_groups[i] = td->td_ucred->cr_groups[i]; 3821 } else { 3822 struct sockcred *sc; 3823 3824 sc = (void *)CMSG_DATA(mtod(m, struct cmsghdr *)); 3825 sc->sc_uid = td->td_ucred->cr_ruid; 3826 sc->sc_euid = td->td_ucred->cr_uid; 3827 sc->sc_gid = td->td_ucred->cr_rgid; 3828 sc->sc_egid = td->td_ucred->cr_gid; 3829 sc->sc_ngroups = ngroups; 3830 for (i = 0; i < sc->sc_ngroups; i++) 3831 sc->sc_groups[i] = td->td_ucred->cr_groups[i]; 3832 } 3833 3834 /* 3835 * Unlink SCM_CREDS control messages (struct cmsgcred), since just 3836 * created SCM_CREDS control message (struct sockcred) has another 3837 * format. 
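	 *
	 * For reference: with LOCAL_CREDS the receiver sees SCM_CREDS
	 * carrying a struct sockcred, while with LOCAL_CREDS_PERSISTENT it
	 * sees SCM_CREDS2 carrying a struct sockcred2 (per the cmsgtype
	 * selection above); only the former shares its message type with
	 * sender-supplied cmsgcred messages and thus needs the unlinking.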
3838 */ 3839 if (!STAILQ_EMPTY(&mc->mc_q) && cmsgtype == SCM_CREDS) 3840 STAILQ_FOREACH_SAFE(n, &mc->mc_q, m_stailq, n_prev) { 3841 cm = mtod(n, struct cmsghdr *); 3842 if (cm->cmsg_level == SOL_SOCKET && 3843 cm->cmsg_type == SCM_CREDS) { 3844 mc_remove(mc, n); 3845 m_free(n); 3846 } 3847 } 3848 3849 /* Prepend it to the head. */ 3850 mc_prepend(mc, m); 3851 } 3852 3853 static struct unpcb * 3854 fptounp(struct file *fp) 3855 { 3856 struct socket *so; 3857 3858 if (fp->f_type != DTYPE_SOCKET) 3859 return (NULL); 3860 if ((so = fp->f_data) == NULL) 3861 return (NULL); 3862 if (so->so_proto->pr_domain != &localdomain) 3863 return (NULL); 3864 return sotounpcb(so); 3865 } 3866 3867 static void 3868 unp_discard(struct file *fp) 3869 { 3870 struct unp_defer *dr; 3871 3872 if (unp_externalize_fp(fp)) { 3873 dr = malloc(sizeof(*dr), M_TEMP, M_WAITOK); 3874 dr->ud_fp = fp; 3875 UNP_DEFERRED_LOCK(); 3876 SLIST_INSERT_HEAD(&unp_defers, dr, ud_link); 3877 UNP_DEFERRED_UNLOCK(); 3878 atomic_add_int(&unp_defers_count, 1); 3879 taskqueue_enqueue(taskqueue_thread, &unp_defer_task); 3880 } else 3881 closef_nothread(fp); 3882 } 3883 3884 static void 3885 unp_process_defers(void *arg __unused, int pending) 3886 { 3887 struct unp_defer *dr; 3888 SLIST_HEAD(, unp_defer) drl; 3889 int count; 3890 3891 SLIST_INIT(&drl); 3892 for (;;) { 3893 UNP_DEFERRED_LOCK(); 3894 if (SLIST_FIRST(&unp_defers) == NULL) { 3895 UNP_DEFERRED_UNLOCK(); 3896 break; 3897 } 3898 SLIST_SWAP(&unp_defers, &drl, unp_defer); 3899 UNP_DEFERRED_UNLOCK(); 3900 count = 0; 3901 while ((dr = SLIST_FIRST(&drl)) != NULL) { 3902 SLIST_REMOVE_HEAD(&drl, ud_link); 3903 closef_nothread(dr->ud_fp); 3904 free(dr, M_TEMP); 3905 count++; 3906 } 3907 atomic_add_int(&unp_defers_count, -count); 3908 } 3909 } 3910 3911 static void 3912 unp_internalize_fp(struct file *fp) 3913 { 3914 struct unpcb *unp; 3915 3916 UNP_LINK_WLOCK(); 3917 if ((unp = fptounp(fp)) != NULL) { 3918 unp->unp_file = fp; 3919 unp->unp_msgcount++; 3920 } 3921 unp_rights++; 3922 UNP_LINK_WUNLOCK(); 3923 } 3924 3925 static int 3926 unp_externalize_fp(struct file *fp) 3927 { 3928 struct unpcb *unp; 3929 int ret; 3930 3931 UNP_LINK_WLOCK(); 3932 if ((unp = fptounp(fp)) != NULL) { 3933 unp->unp_msgcount--; 3934 ret = 1; 3935 } else 3936 ret = 0; 3937 unp_rights--; 3938 UNP_LINK_WUNLOCK(); 3939 return (ret); 3940 } 3941 3942 /* 3943 * unp_defer indicates whether additional work has been defered for a future 3944 * pass through unp_gc(). It is thread local and does not require explicit 3945 * synchronization. 3946 */ 3947 static int unp_marked; 3948 3949 static void 3950 unp_remove_dead_ref(struct filedescent **fdep, int fdcount) 3951 { 3952 struct unpcb *unp; 3953 struct file *fp; 3954 int i; 3955 3956 /* 3957 * This function can only be called from the gc task. 3958 */ 3959 KASSERT(taskqueue_member(taskqueue_thread, curthread) != 0, 3960 ("%s: not on gc callout", __func__)); 3961 UNP_LINK_LOCK_ASSERT(); 3962 3963 for (i = 0; i < fdcount; i++) { 3964 fp = fdep[i]->fde_file; 3965 if ((unp = fptounp(fp)) == NULL) 3966 continue; 3967 if ((unp->unp_gcflag & UNPGC_DEAD) == 0) 3968 continue; 3969 unp->unp_gcrefs--; 3970 } 3971 } 3972 3973 static void 3974 unp_restore_undead_ref(struct filedescent **fdep, int fdcount) 3975 { 3976 struct unpcb *unp; 3977 struct file *fp; 3978 int i; 3979 3980 /* 3981 * This function can only be called from the gc task. 
3982 */ 3983 KASSERT(taskqueue_member(taskqueue_thread, curthread) != 0, 3984 ("%s: not on gc callout", __func__)); 3985 UNP_LINK_LOCK_ASSERT(); 3986 3987 for (i = 0; i < fdcount; i++) { 3988 fp = fdep[i]->fde_file; 3989 if ((unp = fptounp(fp)) == NULL) 3990 continue; 3991 if ((unp->unp_gcflag & UNPGC_DEAD) == 0) 3992 continue; 3993 unp->unp_gcrefs++; 3994 unp_marked++; 3995 } 3996 } 3997 3998 static void 3999 unp_scan_socket(struct socket *so, void (*op)(struct filedescent **, int)) 4000 { 4001 struct sockbuf *sb; 4002 4003 SOCK_LOCK_ASSERT(so); 4004 4005 if (sotounpcb(so)->unp_gcflag & UNPGC_IGNORE_RIGHTS) 4006 return; 4007 4008 SOCK_RECVBUF_LOCK(so); 4009 switch (so->so_type) { 4010 case SOCK_DGRAM: 4011 unp_scan(STAILQ_FIRST(&so->so_rcv.uxdg_mb), op); 4012 unp_scan(so->so_rcv.uxdg_peeked, op); 4013 TAILQ_FOREACH(sb, &so->so_rcv.uxdg_conns, uxdg_clist) 4014 unp_scan(STAILQ_FIRST(&sb->uxdg_mb), op); 4015 break; 4016 case SOCK_STREAM: 4017 case SOCK_SEQPACKET: 4018 unp_scan(STAILQ_FIRST(&so->so_rcv.uxst_mbq), op); 4019 break; 4020 } 4021 SOCK_RECVBUF_UNLOCK(so); 4022 } 4023 4024 static void 4025 unp_gc_scan(struct unpcb *unp, void (*op)(struct filedescent **, int)) 4026 { 4027 struct socket *so, *soa; 4028 4029 so = unp->unp_socket; 4030 SOCK_LOCK(so); 4031 if (SOLISTENING(so)) { 4032 /* 4033 * Mark all sockets in our accept queue. 4034 */ 4035 TAILQ_FOREACH(soa, &so->sol_comp, so_list) 4036 unp_scan_socket(soa, op); 4037 } else { 4038 /* 4039 * Mark all sockets we reference with RIGHTS. 4040 */ 4041 unp_scan_socket(so, op); 4042 } 4043 SOCK_UNLOCK(so); 4044 } 4045 4046 static int unp_recycled; 4047 SYSCTL_INT(_net_local, OID_AUTO, recycled, CTLFLAG_RD, &unp_recycled, 0, 4048 "Number of unreachable sockets claimed by the garbage collector."); 4049 4050 static int unp_taskcount; 4051 SYSCTL_INT(_net_local, OID_AUTO, taskcount, CTLFLAG_RD, &unp_taskcount, 0, 4052 "Number of times the garbage collector has run."); 4053 4054 SYSCTL_UINT(_net_local, OID_AUTO, sockcount, CTLFLAG_RD, &unp_count, 0, 4055 "Number of active local sockets."); 4056 4057 static void 4058 unp_gc(__unused void *arg, int pending) 4059 { 4060 struct unp_head *heads[] = { &unp_dhead, &unp_shead, &unp_sphead, 4061 NULL }; 4062 struct unp_head **head; 4063 struct unp_head unp_deadhead; /* List of potentially-dead sockets. */ 4064 struct file *f, **unref; 4065 struct unpcb *unp, *unptmp; 4066 int i, total, unp_unreachable; 4067 4068 LIST_INIT(&unp_deadhead); 4069 unp_taskcount++; 4070 UNP_LINK_RLOCK(); 4071 /* 4072 * First determine which sockets may be in cycles. 4073 */ 4074 unp_unreachable = 0; 4075 4076 for (head = heads; *head != NULL; head++) 4077 LIST_FOREACH(unp, *head, unp_link) { 4078 KASSERT((unp->unp_gcflag & ~UNPGC_IGNORE_RIGHTS) == 0, 4079 ("%s: unp %p has unexpected gc flags 0x%x", 4080 __func__, unp, (unsigned int)unp->unp_gcflag)); 4081 4082 f = unp->unp_file; 4083 4084 /* 4085 * Check for an unreachable socket potentially in a 4086 * cycle. It must be in a queue as indicated by 4087 * msgcount, and this must equal the file reference 4088 * count. Note that when msgcount is 0 the file is 4089 * NULL. 
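			 * In that case every reference to the file comes from
			 * a socket buffer, i.e. the descriptor is reachable
			 * only through in-flight SCM_RIGHTS messages and may
			 * be part of a cycle.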
4090 */ 4091 if (f != NULL && unp->unp_msgcount != 0 && 4092 refcount_load(&f->f_count) == unp->unp_msgcount) { 4093 LIST_INSERT_HEAD(&unp_deadhead, unp, unp_dead); 4094 unp->unp_gcflag |= UNPGC_DEAD; 4095 unp->unp_gcrefs = unp->unp_msgcount; 4096 unp_unreachable++; 4097 } 4098 } 4099 4100 /* 4101 * Scan all sockets previously marked as potentially being in a cycle 4102 * and remove the references each socket holds on any UNPGC_DEAD 4103 * sockets in its queue. After this step, all remaining references on 4104 * sockets marked UNPGC_DEAD should not be part of any cycle. 4105 */ 4106 LIST_FOREACH(unp, &unp_deadhead, unp_dead) 4107 unp_gc_scan(unp, unp_remove_dead_ref); 4108 4109 /* 4110 * If a socket still has a non-negative refcount, it cannot be in a 4111 * cycle. In this case increment refcount of all children iteratively. 4112 * Stop the scan once we do a complete loop without discovering 4113 * a new reachable socket. 4114 */ 4115 do { 4116 unp_marked = 0; 4117 LIST_FOREACH_SAFE(unp, &unp_deadhead, unp_dead, unptmp) 4118 if (unp->unp_gcrefs > 0) { 4119 unp->unp_gcflag &= ~UNPGC_DEAD; 4120 LIST_REMOVE(unp, unp_dead); 4121 KASSERT(unp_unreachable > 0, 4122 ("%s: unp_unreachable underflow.", 4123 __func__)); 4124 unp_unreachable--; 4125 unp_gc_scan(unp, unp_restore_undead_ref); 4126 } 4127 } while (unp_marked); 4128 4129 UNP_LINK_RUNLOCK(); 4130 4131 if (unp_unreachable == 0) 4132 return; 4133 4134 /* 4135 * Allocate space for a local array of dead unpcbs. 4136 * TODO: can this path be simplified by instead using the local 4137 * dead list at unp_deadhead, after taking out references 4138 * on the file object and/or unpcb and dropping the link lock? 4139 */ 4140 unref = malloc(unp_unreachable * sizeof(struct file *), 4141 M_TEMP, M_WAITOK); 4142 4143 /* 4144 * Iterate looking for sockets which have been specifically marked 4145 * as unreachable and store them locally. 4146 */ 4147 UNP_LINK_RLOCK(); 4148 total = 0; 4149 LIST_FOREACH(unp, &unp_deadhead, unp_dead) { 4150 KASSERT((unp->unp_gcflag & UNPGC_DEAD) != 0, 4151 ("%s: unp %p not marked UNPGC_DEAD", __func__, unp)); 4152 unp->unp_gcflag &= ~UNPGC_DEAD; 4153 f = unp->unp_file; 4154 if (unp->unp_msgcount == 0 || f == NULL || 4155 refcount_load(&f->f_count) != unp->unp_msgcount || 4156 !fhold(f)) 4157 continue; 4158 unref[total++] = f; 4159 KASSERT(total <= unp_unreachable, 4160 ("%s: incorrect unreachable count.", __func__)); 4161 } 4162 UNP_LINK_RUNLOCK(); 4163 4164 /* 4165 * Now flush all sockets, free'ing rights. This will free the 4166 * struct files associated with these sockets but leave each socket 4167 * with one remaining ref. 4168 */ 4169 for (i = 0; i < total; i++) { 4170 struct socket *so; 4171 4172 so = unref[i]->f_data; 4173 CURVNET_SET(so->so_vnet); 4174 socantrcvmore(so); 4175 unp_dispose(so); 4176 CURVNET_RESTORE(); 4177 } 4178 4179 /* 4180 * And finally release the sockets so they can be reclaimed. 4181 */ 4182 for (i = 0; i < total; i++) 4183 fdrop(unref[i], NULL); 4184 unp_recycled += total; 4185 free(unref, M_TEMP); 4186 } 4187 4188 /* 4189 * Synchronize against unp_gc, which can trip over data as we are freeing it. 4190 */ 4191 static void 4192 unp_dispose(struct socket *so) 4193 { 4194 struct sockbuf *sb; 4195 struct unpcb *unp; 4196 struct mbuf *m; 4197 int error __diagused; 4198 4199 MPASS(!SOLISTENING(so)); 4200 4201 unp = sotounpcb(so); 4202 UNP_LINK_WLOCK(); 4203 unp->unp_gcflag |= UNPGC_IGNORE_RIGHTS; 4204 UNP_LINK_WUNLOCK(); 4205 4206 /* 4207 * Grab our special mbufs before calling sbrelease(). 
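	 * unix(4) sockets keep received data on the uxdg_mb/uxst_mbq chains
	 * rather than on the generic sb_mb chain, so a plain socket buffer
	 * teardown would not find it; collect it here so that any SCM_RIGHTS
	 * it carries can be released via unp_freerights().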
	 */
	error = SOCK_IO_RECV_LOCK(so, SBL_WAIT | SBL_NOINTR);
	MPASS(!error);
	SOCK_RECVBUF_LOCK(so);
	switch (so->so_type) {
	case SOCK_DGRAM:
		while ((sb = TAILQ_FIRST(&so->so_rcv.uxdg_conns)) != NULL) {
			STAILQ_CONCAT(&so->so_rcv.uxdg_mb, &sb->uxdg_mb);
			TAILQ_REMOVE(&so->so_rcv.uxdg_conns, sb, uxdg_clist);
			/* Note: socket of sb may reconnect. */
			sb->uxdg_cc = sb->uxdg_ctl = sb->uxdg_mbcnt = 0;
		}
		sb = &so->so_rcv;
		if (sb->uxdg_peeked != NULL) {
			STAILQ_INSERT_HEAD(&sb->uxdg_mb, sb->uxdg_peeked,
			    m_stailqpkt);
			sb->uxdg_peeked = NULL;
		}
		m = STAILQ_FIRST(&sb->uxdg_mb);
		STAILQ_INIT(&sb->uxdg_mb);
		break;
	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		sb = &so->so_rcv;
		m = STAILQ_FIRST(&sb->uxst_mbq);
		STAILQ_INIT(&sb->uxst_mbq);
		sb->sb_acc = sb->sb_ccc = sb->sb_ctl = sb->sb_mbcnt = 0;
		/*
		 * Trim M_NOTREADY buffers from the chain to be freed; they
		 * are still referenced by the I/O thread.
		 */
		if (sb->uxst_fnrdy != NULL) {
			struct mbuf *n, *prev;

			while (m != NULL && m->m_flags & M_NOTREADY)
				m = m->m_next;
			for (prev = n = m; n != NULL; n = n->m_next) {
				if (n->m_flags & M_NOTREADY)
					prev->m_next = n->m_next;
				else
					prev = n;
			}
			sb->uxst_fnrdy = NULL;
		}
		break;
	}
	/*
	 * Mark sb with SBS_CANTRCVMORE.  This is needed to prevent
	 * uipc_sosend_*() or unp_disconnect() from adding more data to the
	 * socket.  We came here either through shutdown(2) or from the final
	 * sofree().  The sofree() case is simple as it guarantees that no
	 * more sends will happen, however we can race with unp_disconnect()
	 * from our peer.  The shutdown(2) case is more exotic.  It would call
	 * into unp_dispose() only if the socket is SS_ISCONNECTED.  This is
	 * possible if we did connect(2) on this socket and we also had it
	 * bound with bind(2) and receive connections from other sockets.
	 * Because uipc_shutdown() violates POSIX (see comment there) this
	 * applies to SOCK_DGRAM as well.  For SOCK_DGRAM this SBS_CANTRCVMORE
	 * will affect not only the peer we connect(2)ed to, but also all of
	 * the peers who had connect(2)ed to us.  Their sends would end up
	 * with ENOBUFS.
4269 */ 4270 sb->sb_state |= SBS_CANTRCVMORE; 4271 (void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0, 4272 RLIM_INFINITY); 4273 SOCK_RECVBUF_UNLOCK(so); 4274 SOCK_IO_RECV_UNLOCK(so); 4275 4276 if (m != NULL) { 4277 unp_scan(m, unp_freerights); 4278 m_freemp(m); 4279 } 4280 } 4281 4282 static void 4283 unp_scan(struct mbuf *m0, void (*op)(struct filedescent **, int)) 4284 { 4285 struct mbuf *m; 4286 struct cmsghdr *cm; 4287 void *data; 4288 socklen_t clen, datalen; 4289 4290 while (m0 != NULL) { 4291 for (m = m0; m; m = m->m_next) { 4292 if (m->m_type != MT_CONTROL) 4293 continue; 4294 4295 cm = mtod(m, struct cmsghdr *); 4296 clen = m->m_len; 4297 4298 while (cm != NULL) { 4299 if (sizeof(*cm) > clen || cm->cmsg_len > clen) 4300 break; 4301 4302 data = CMSG_DATA(cm); 4303 datalen = (caddr_t)cm + cm->cmsg_len 4304 - (caddr_t)data; 4305 4306 if (cm->cmsg_level == SOL_SOCKET && 4307 cm->cmsg_type == SCM_RIGHTS) { 4308 (*op)(data, datalen / 4309 sizeof(struct filedescent *)); 4310 } 4311 4312 if (CMSG_SPACE(datalen) < clen) { 4313 clen -= CMSG_SPACE(datalen); 4314 cm = (struct cmsghdr *) 4315 ((caddr_t)cm + CMSG_SPACE(datalen)); 4316 } else { 4317 clen = 0; 4318 cm = NULL; 4319 } 4320 } 4321 } 4322 m0 = m0->m_nextpkt; 4323 } 4324 } 4325 4326 /* 4327 * Definitions of protocols supported in the LOCAL domain. 4328 */ 4329 static struct protosw streamproto = { 4330 .pr_type = SOCK_STREAM, 4331 .pr_flags = PR_CONNREQUIRED | PR_CAPATTACH | PR_SOCKBUF, 4332 .pr_ctloutput = &uipc_ctloutput, 4333 .pr_abort = uipc_abort, 4334 .pr_accept = uipc_peeraddr, 4335 .pr_attach = uipc_attach, 4336 .pr_bind = uipc_bind, 4337 .pr_bindat = uipc_bindat, 4338 .pr_connect = uipc_connect, 4339 .pr_connectat = uipc_connectat, 4340 .pr_connect2 = uipc_connect2, 4341 .pr_detach = uipc_detach, 4342 .pr_disconnect = uipc_disconnect, 4343 .pr_listen = uipc_listen, 4344 .pr_peeraddr = uipc_peeraddr, 4345 .pr_send = uipc_sendfile, 4346 .pr_sendfile_wait = uipc_sendfile_wait, 4347 .pr_ready = uipc_ready, 4348 .pr_sense = uipc_sense, 4349 .pr_shutdown = uipc_shutdown, 4350 .pr_sockaddr = uipc_sockaddr, 4351 .pr_sosend = uipc_sosend_stream_or_seqpacket, 4352 .pr_soreceive = uipc_soreceive_stream_or_seqpacket, 4353 .pr_sopoll = uipc_sopoll_stream_or_seqpacket, 4354 .pr_kqfilter = uipc_kqfilter_stream_or_seqpacket, 4355 .pr_close = uipc_close, 4356 .pr_chmod = uipc_chmod, 4357 }; 4358 4359 static struct protosw dgramproto = { 4360 .pr_type = SOCK_DGRAM, 4361 .pr_flags = PR_ATOMIC | PR_ADDR | PR_CAPATTACH | PR_SOCKBUF, 4362 .pr_ctloutput = &uipc_ctloutput, 4363 .pr_abort = uipc_abort, 4364 .pr_accept = uipc_peeraddr, 4365 .pr_attach = uipc_attach, 4366 .pr_bind = uipc_bind, 4367 .pr_bindat = uipc_bindat, 4368 .pr_connect = uipc_connect, 4369 .pr_connectat = uipc_connectat, 4370 .pr_connect2 = uipc_connect2, 4371 .pr_detach = uipc_detach, 4372 .pr_disconnect = uipc_disconnect, 4373 .pr_peeraddr = uipc_peeraddr, 4374 .pr_sosend = uipc_sosend_dgram, 4375 .pr_sense = uipc_sense, 4376 .pr_shutdown = uipc_shutdown, 4377 .pr_sockaddr = uipc_sockaddr, 4378 .pr_soreceive = uipc_soreceive_dgram, 4379 .pr_close = uipc_close, 4380 .pr_chmod = uipc_chmod, 4381 }; 4382 4383 static struct protosw seqpacketproto = { 4384 .pr_type = SOCK_SEQPACKET, 4385 .pr_flags = PR_CONNREQUIRED | PR_CAPATTACH | PR_SOCKBUF, 4386 .pr_ctloutput = &uipc_ctloutput, 4387 .pr_abort = uipc_abort, 4388 .pr_accept = uipc_peeraddr, 4389 .pr_attach = uipc_attach, 4390 .pr_bind = uipc_bind, 4391 .pr_bindat = uipc_bindat, 4392 .pr_connect = uipc_connect, 4393 
.pr_connectat = uipc_connectat, 4394 .pr_connect2 = uipc_connect2, 4395 .pr_detach = uipc_detach, 4396 .pr_disconnect = uipc_disconnect, 4397 .pr_listen = uipc_listen, 4398 .pr_peeraddr = uipc_peeraddr, 4399 .pr_sense = uipc_sense, 4400 .pr_shutdown = uipc_shutdown, 4401 .pr_sockaddr = uipc_sockaddr, 4402 .pr_sosend = uipc_sosend_stream_or_seqpacket, 4403 .pr_soreceive = uipc_soreceive_stream_or_seqpacket, 4404 .pr_sopoll = uipc_sopoll_stream_or_seqpacket, 4405 .pr_kqfilter = uipc_kqfilter_stream_or_seqpacket, 4406 .pr_close = uipc_close, 4407 .pr_chmod = uipc_chmod, 4408 }; 4409 4410 static struct domain localdomain = { 4411 .dom_family = AF_LOCAL, 4412 .dom_name = "local", 4413 .dom_nprotosw = 3, 4414 .dom_protosw = { 4415 &streamproto, 4416 &dgramproto, 4417 &seqpacketproto, 4418 } 4419 }; 4420 DOMAIN_SET(local); 4421 4422 /* 4423 * A helper function called by VFS before socket-type vnode reclamation. 4424 * For an active vnode it clears unp_vnode pointer and decrements unp_vnode 4425 * use count. 4426 */ 4427 void 4428 vfs_unp_reclaim(struct vnode *vp) 4429 { 4430 struct unpcb *unp; 4431 int active; 4432 struct mtx *vplock; 4433 4434 ASSERT_VOP_ELOCKED(vp, "vfs_unp_reclaim"); 4435 KASSERT(vp->v_type == VSOCK, 4436 ("vfs_unp_reclaim: vp->v_type != VSOCK")); 4437 4438 active = 0; 4439 vplock = mtx_pool_find(unp_vp_mtxpool, vp); 4440 mtx_lock(vplock); 4441 VOP_UNP_CONNECT(vp, &unp); 4442 if (unp == NULL) 4443 goto done; 4444 UNP_PCB_LOCK(unp); 4445 if (unp->unp_vnode == vp) { 4446 VOP_UNP_DETACH(vp); 4447 unp->unp_vnode = NULL; 4448 active = 1; 4449 } 4450 UNP_PCB_UNLOCK(unp); 4451 done: 4452 mtx_unlock(vplock); 4453 if (active) 4454 vunref(vp); 4455 } 4456 4457 #ifdef DDB 4458 static void 4459 db_print_indent(int indent) 4460 { 4461 int i; 4462 4463 for (i = 0; i < indent; i++) 4464 db_printf(" "); 4465 } 4466 4467 static void 4468 db_print_unpflags(int unp_flags) 4469 { 4470 int comma; 4471 4472 comma = 0; 4473 if (unp_flags & UNP_HAVEPC) { 4474 db_printf("%sUNP_HAVEPC", comma ? ", " : ""); 4475 comma = 1; 4476 } 4477 if (unp_flags & UNP_WANTCRED_ALWAYS) { 4478 db_printf("%sUNP_WANTCRED_ALWAYS", comma ? ", " : ""); 4479 comma = 1; 4480 } 4481 if (unp_flags & UNP_WANTCRED_ONESHOT) { 4482 db_printf("%sUNP_WANTCRED_ONESHOT", comma ? ", " : ""); 4483 comma = 1; 4484 } 4485 if (unp_flags & UNP_CONNECTING) { 4486 db_printf("%sUNP_CONNECTING", comma ? ", " : ""); 4487 comma = 1; 4488 } 4489 if (unp_flags & UNP_BINDING) { 4490 db_printf("%sUNP_BINDING", comma ? ", " : ""); 4491 comma = 1; 4492 } 4493 } 4494 4495 static void 4496 db_print_xucred(int indent, struct xucred *xu) 4497 { 4498 int comma, i; 4499 4500 db_print_indent(indent); 4501 db_printf("cr_version: %u cr_uid: %u cr_pid: %d cr_ngroups: %d\n", 4502 xu->cr_version, xu->cr_uid, xu->cr_pid, xu->cr_ngroups); 4503 db_print_indent(indent); 4504 db_printf("cr_groups: "); 4505 comma = 0; 4506 for (i = 0; i < xu->cr_ngroups; i++) { 4507 db_printf("%s%u", comma ? 
", " : "", xu->cr_groups[i]); 4508 comma = 1; 4509 } 4510 db_printf("\n"); 4511 } 4512 4513 static void 4514 db_print_unprefs(int indent, struct unp_head *uh) 4515 { 4516 struct unpcb *unp; 4517 int counter; 4518 4519 counter = 0; 4520 LIST_FOREACH(unp, uh, unp_reflink) { 4521 if (counter % 4 == 0) 4522 db_print_indent(indent); 4523 db_printf("%p ", unp); 4524 if (counter % 4 == 3) 4525 db_printf("\n"); 4526 counter++; 4527 } 4528 if (counter != 0 && counter % 4 != 0) 4529 db_printf("\n"); 4530 } 4531 4532 DB_SHOW_COMMAND(unpcb, db_show_unpcb) 4533 { 4534 struct unpcb *unp; 4535 4536 if (!have_addr) { 4537 db_printf("usage: show unpcb <addr>\n"); 4538 return; 4539 } 4540 unp = (struct unpcb *)addr; 4541 4542 db_printf("unp_socket: %p unp_vnode: %p\n", unp->unp_socket, 4543 unp->unp_vnode); 4544 4545 db_printf("unp_ino: %ju unp_conn: %p\n", (uintmax_t)unp->unp_ino, 4546 unp->unp_conn); 4547 4548 db_printf("unp_refs:\n"); 4549 db_print_unprefs(2, &unp->unp_refs); 4550 4551 /* XXXRW: Would be nice to print the full address, if any. */ 4552 db_printf("unp_addr: %p\n", unp->unp_addr); 4553 4554 db_printf("unp_gencnt: %llu\n", 4555 (unsigned long long)unp->unp_gencnt); 4556 4557 db_printf("unp_flags: %x (", unp->unp_flags); 4558 db_print_unpflags(unp->unp_flags); 4559 db_printf(")\n"); 4560 4561 db_printf("unp_peercred:\n"); 4562 db_print_xucred(2, &unp->unp_peercred); 4563 4564 db_printf("unp_refcount: %u\n", unp->unp_refcount); 4565 } 4566 #endif 4567