/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2004 The FreeBSD Foundation
 * Copyright (c) 2004-2008 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 */

/*
 * Comments on the socket life cycle:
 *
 * soalloc() sets up socket layer state for a socket, called only by
 * socreate() and sonewconn().  Socket layer private.
 *
 * sodealloc() tears down socket layer state for a socket, called only by
 * sofree() and sonewconn().  Socket layer private.
 *
 * pru_attach() associates protocol layer state with an allocated socket;
 * called only once, may fail, aborting socket allocation.  This is called
 * from socreate() and sonewconn().  Socket layer private.
 *
 * pru_detach() disassociates protocol layer state from an attached socket,
 * and will be called exactly once for sockets in which pru_attach() has
 * been successfully called.  If pru_attach() returned an error,
 * pru_detach() will not be called.  Socket layer private.
 *
 * pru_abort() and pru_close() notify the protocol layer that the last
 * consumer of a socket is starting to tear down the socket, and that the
 * protocol should terminate the connection.  Historically, pru_abort() also
 * detached protocol state from the socket state, but this is no longer the
 * case.
 *
 * socreate() creates a socket and attaches protocol state.  This is a public
 * interface that may be used by socket layer consumers to create new
 * sockets.
 *
 * sonewconn() creates a socket and attaches protocol state.  This is a
 * public interface that may be used by protocols to create new sockets when
 * a new connection is received and will be available for accept() on a
 * listen socket.
 *
 * soclose() destroys a socket after possibly waiting for it to disconnect.
 * This is a public interface that socket consumers should use to close and
 * release a socket when done with it.
 *
 * soabort() destroys a socket without waiting for it to disconnect (used
 * only for incoming connections that are already partially or fully
 * connected).  This is used internally by the socket layer when clearing
 * listen socket queues (due to overflow or close on the listen socket), but
 * is also a public interface protocols may use to abort connections in
 * their incomplete listen queues should they no longer be required.  Sockets
 * placed in completed connection listen queues should not be aborted for
 * reasons described in the comment above the soclose() implementation.  This
 * is not a general purpose close routine, and except in the specific
 * circumstances described here, should not be used.
 *
 * sofree() will free a socket and its protocol state if all references on
 * the socket have been released, and is the routine called to attempt to
 * free a socket when a reference is removed.  This is a socket layer
 * private interface.
 *
 * NOTE: In addition to socreate() and soclose(), which provide a single
 * socket reference to the consumer to be managed as required, there are two
 * calls to explicitly manage socket references, soref(), and sorele().
 * Currently, these are generally required only when transitioning a socket
 * from a listen queue to a file descriptor, in order to prevent garbage
 * collection of the socket at an untimely moment.  For a number of reasons,
 * these interfaces are not preferred, and should be avoided.
 *
 * NOTE: With regard to VNETs the general rule is that callers do not set
 * curvnet.  Exceptions to this rule include soabort(), sodisconnect(),
 * sofree() (and with that sorele(), sotryfree()), as well as sonewconn()
 * and sorflush(), which are usually called from a pre-set VNET context.
 * sopoll() currently does not need a VNET context to be set.
 */
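/*
 * Example (an illustrative sketch, not part of the original file): a
 * typical in-kernel consumer drives the public half of this life cycle
 * with just socreate() and soclose().  The thread pointer "td" and its
 * credentials are assumed to come from the caller's context.
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
 *	    td->td_ucred, td);
 *	if (error != 0)
 *		return (error);
 *	... use sobind()/soconnect()/sosend()/soreceive() on so ...
 *	error = soclose(so);
 */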
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_sctp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/hhook.h>
#include <sys/kernel.h>
#include <sys/khelp.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <net/route.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>
#include <sys/jail.h>
#include <sys/syslog.h>
#include <netinet/in.h>

#include <net/vnet.h>

#include <security/mac/mac_framework.h>

#include <vm/uma.h>

#ifdef COMPAT_FREEBSD32
#include <sys/mount.h>
#include <sys/sysent.h>
#include <compat/freebsd32/freebsd32.h>
#endif

static int	soreceive_rcvoob(struct socket *so, struct uio *uio,
		    int flags);
static void	so_rdknl_lock(void *);
static void	so_rdknl_unlock(void *);
static void	so_rdknl_assert_locked(void *);
static void	so_rdknl_assert_unlocked(void *);
static void	so_wrknl_lock(void *);
static void	so_wrknl_unlock(void *);
static void	so_wrknl_assert_locked(void *);
static void	so_wrknl_assert_unlocked(void *);

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_soempty(struct knote *kn, long hint);
static int inline hhook_run_socket(struct socket *so, void *hctx, int32_t h_id);
fo_kqfilter_t	soo_kqfilter;

static struct filterops soread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_sordetach,
	.f_event = filt_soread,
};
static struct filterops sowrite_filtops = {
	.f_isfd = 1,
	.f_detach = filt_sowdetach,
	.f_event = filt_sowrite,
};
static struct filterops soempty_filtops = {
	.f_isfd = 1,
	.f_detach = filt_sowdetach,
	.f_event = filt_soempty,
};

so_gen_t so_gencnt;	/* generation count for sockets */

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

#define	VNET_SO_ASSERT(so)						\
	VNET_ASSERT(curvnet != NULL,					\
	    ("%s:%d curvnet is NULL, so=%p", __func__, __LINE__, (so)));

VNET_DEFINE(struct hhook_head *, socket_hhh[HHOOK_SOCKET_LAST + 1]);
#define	V_socket_hhh	VNET(socket_hhh)

/*
 * Limit on the number of connections in the listen queue waiting
 * for accept(2).
 * NB: The original sysctl somaxconn is still available but hidden
 * to prevent confusion about the actual purpose of this number.
 */
static u_int somaxconn = SOMAXCONN;

static int
sysctl_somaxconn(SYSCTL_HANDLER_ARGS)
{
	int error;
	int val;

	val = somaxconn;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	/*
	 * The purpose of the UINT_MAX / 3 limit is to keep the formula
	 *	3 * so_qlimit / 2
	 * used below in sonewconn() from overflowing an unsigned int.
	 */

	if (val < 1 || val > UINT_MAX / 3)
		return (EINVAL);

	somaxconn = val;
	return (0);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, soacceptqueue, CTLTYPE_UINT | CTLFLAG_RW,
    0, sizeof(int), sysctl_somaxconn, "I",
    "Maximum listen socket pending connection accept queue size");
SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_SKIP,
    0, sizeof(int), sysctl_somaxconn, "I",
    "Maximum listen socket pending connection accept queue size (compat)");

static int numopensockets;
SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
    &numopensockets, 0, "Number of open sockets");

/*
 * accept_mtx locks down per-socket fields relating to accept queues.  See
 * socketvar.h for an annotation of the protected fields of struct socket.
 */
struct mtx accept_mtx;
MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);

/*
 * so_global_mtx protects the so_gencnt and numopensockets globals, and the
 * per-socket so_gencnt field.
 */
static struct mtx so_global_mtx;
MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_global", MTX_DEF);

/*
 * General IPC sysctl name space, used by sockets and a variety of other IPC
 * types.
 */
SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");
/*
 * Initialize the socket subsystem and set up the socket
 * memory allocator.
 */
static uma_zone_t socket_zone;
int	maxsockets;

static void
socket_zone_change(void *tag)
{

	maxsockets = uma_zone_set_max(socket_zone, maxsockets);
}

static void
socket_hhook_register(int subtype)
{

	if (hhook_head_register(HHOOK_TYPE_SOCKET, subtype,
	    &V_socket_hhh[subtype],
	    HHOOK_NOWAIT|HHOOK_HEADISINVNET) != 0)
		printf("%s: WARNING: unable to register hook\n", __func__);
}

static void
socket_hhook_deregister(int subtype)
{

	if (hhook_head_deregister(V_socket_hhh[subtype]) != 0)
		printf("%s: WARNING: unable to deregister hook\n", __func__);
}

static void
socket_init(void *tag)
{

	socket_zone = uma_zcreate("socket", sizeof(struct socket), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	maxsockets = uma_zone_set_max(socket_zone, maxsockets);
	uma_zone_set_warning(socket_zone, "kern.ipc.maxsockets limit reached");
	EVENTHANDLER_REGISTER(maxsockets_change, socket_zone_change, NULL,
	    EVENTHANDLER_PRI_FIRST);
}
SYSINIT(socket, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY, socket_init, NULL);

static void
socket_vnet_init(const void *unused __unused)
{
	int i;

	/* We expect a contiguous range */
	for (i = 0; i <= HHOOK_SOCKET_LAST; i++)
		socket_hhook_register(i);
}
VNET_SYSINIT(socket_vnet_init, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY,
    socket_vnet_init, NULL);

static void
socket_vnet_uninit(const void *unused __unused)
{
	int i;

	for (i = 0; i <= HHOOK_SOCKET_LAST; i++)
		socket_hhook_deregister(i);
}
VNET_SYSUNINIT(socket_vnet_uninit, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY,
    socket_vnet_uninit, NULL);

/*
 * Initialise maxsockets.  This SYSINIT must be run after
 * tunable_mbinit().
 */
static void
init_maxsockets(void *ignored)
{

	TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
	maxsockets = imax(maxsockets, maxfiles);
}
SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);

/*
 * Sysctl to get and set the maximum global sockets limit.  Notify protocols
 * of the change so that they can update their dependent limits as required.
 */
static int
sysctl_maxsockets(SYSCTL_HANDLER_ARGS)
{
	int error, newmaxsockets;

	newmaxsockets = maxsockets;
	error = sysctl_handle_int(oidp, &newmaxsockets, 0, req);
	if (error == 0 && req->newptr) {
		if (newmaxsockets > maxsockets &&
		    newmaxsockets <= maxfiles) {
			maxsockets = newmaxsockets;
			EVENTHANDLER_INVOKE(maxsockets_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets, CTLTYPE_INT|CTLFLAG_RW,
    &maxsockets, 0, sysctl_maxsockets, "IU",
    "Maximum number of sockets available");
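/*
 * Example (an illustrative sketch, not part of the original file): a
 * subsystem that sizes a table from maxsockets can track changes by
 * registering for the maxsockets_change event, just as socket_init()
 * does above.  The handler and table-resize function names below are
 * hypothetical.
 *
 *	static void
 *	example_maxsockets_change(void *arg __unused)
 *	{
 *		example_resize_table(maxsockets);
 *	}
 *	EVENTHANDLER_REGISTER(maxsockets_change, example_maxsockets_change,
 *	    NULL, EVENTHANDLER_PRI_ANY);
 */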
/*
 * Socket operation routines.  These routines are called by the routines in
 * sys_socket.c or from a system process, and implement the semantics of
 * socket operations by switching out to the protocol specific routines.
 */

/*
 * Get a socket structure from our zone, and initialize it.  Note that it
 * would probably be better to allocate socket and PCB at the same time, but
 * I'm not convinced that all the protocols can be easily modified to do
 * this.
 *
 * soalloc() returns a socket with a ref count of 0.
 */
static struct socket *
soalloc(struct vnet *vnet)
{
	struct socket *so;

	so = uma_zalloc(socket_zone, M_NOWAIT | M_ZERO);
	if (so == NULL)
		return (NULL);
#ifdef MAC
	if (mac_socket_init(so, M_NOWAIT) != 0) {
		uma_zfree(socket_zone, so);
		return (NULL);
	}
#endif
	if (khelp_init_osd(HELPER_CLASS_SOCKET, &so->osd)) {
		uma_zfree(socket_zone, so);
		return (NULL);
	}

	/*
	 * The socket locking protocol allows locking 2 sockets at a time,
	 * however, the first one must be a listening socket.  WITNESS lacks
	 * a feature to change class of an existing lock, so we use DUPOK.
	 */
	mtx_init(&so->so_lock, "socket", NULL, MTX_DEF | MTX_DUPOK);
	SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
	SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
	so->so_rcv.sb_sel = &so->so_rdsel;
	so->so_snd.sb_sel = &so->so_wrsel;
	sx_init(&so->so_snd.sb_sx, "so_snd_sx");
	sx_init(&so->so_rcv.sb_sx, "so_rcv_sx");
	TAILQ_INIT(&so->so_snd.sb_aiojobq);
	TAILQ_INIT(&so->so_rcv.sb_aiojobq);
	TASK_INIT(&so->so_snd.sb_aiotask, 0, soaio_snd, so);
	TASK_INIT(&so->so_rcv.sb_aiotask, 0, soaio_rcv, so);
#ifdef VIMAGE
	VNET_ASSERT(vnet != NULL, ("%s:%d vnet is NULL, so=%p",
	    __func__, __LINE__, so));
	so->so_vnet = vnet;
#endif
	/* We shouldn't need the so_global_mtx */
	if (hhook_run_socket(so, NULL, HHOOK_SOCKET_CREATE)) {
		/* Do we need more comprehensive error returns? */
		uma_zfree(socket_zone, so);
		return (NULL);
	}
	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	++numopensockets;
#ifdef VIMAGE
	vnet->vnet_sockcnt++;
#endif
	mtx_unlock(&so_global_mtx);

	return (so);
}

/*
 * Free the storage associated with a socket at the socket layer, tear down
 * locks, labels, etc.  All protocol state is assumed already to have been
 * torn down (and possibly never set up) by the caller.
 */
static void
sodealloc(struct socket *so)
{

	KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
	KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL"));

	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	--numopensockets;	/* Could be below, but faster here. */
#ifdef VIMAGE
	VNET_ASSERT(so->so_vnet != NULL, ("%s:%d so_vnet is NULL, so=%p",
	    __func__, __LINE__, so));
	so->so_vnet->vnet_sockcnt--;
#endif
	mtx_unlock(&so_global_mtx);
#ifdef MAC
	mac_socket_destroy(so);
#endif
	hhook_run_socket(so, NULL, HHOOK_SOCKET_CLOSE);

	crfree(so->so_cred);
	khelp_destroy_osd(&so->osd);
	if (SOLISTENING(so)) {
		if (so->sol_accept_filter != NULL)
			accept_filt_setopt(so, NULL);
	} else {
		if (so->so_rcv.sb_hiwat)
			(void)chgsbsize(so->so_cred->cr_uidinfo,
			    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
		if (so->so_snd.sb_hiwat)
			(void)chgsbsize(so->so_cred->cr_uidinfo,
			    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
		sx_destroy(&so->so_snd.sb_sx);
		sx_destroy(&so->so_rcv.sb_sx);
		SOCKBUF_LOCK_DESTROY(&so->so_snd);
		SOCKBUF_LOCK_DESTROY(&so->so_rcv);
	}
	mtx_destroy(&so->so_lock);
	uma_zfree(socket_zone, so);
}
/*
 * socreate returns a socket with a ref count of 1.  The socket should be
 * closed with soclose().
 */
int
socreate(int dom, struct socket **aso, int type, int proto,
    struct ucred *cred, struct thread *td)
{
	struct protosw *prp;
	struct socket *so;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL) {
		/* No support for domain. */
		if (pffinddomain(dom) == NULL)
			return (EAFNOSUPPORT);
		/* No support for socket type. */
		if (proto == 0 && type != 0)
			return (EPROTOTYPE);
		return (EPROTONOSUPPORT);
	}
	if (prp->pr_usrreqs->pru_attach == NULL ||
	    prp->pr_usrreqs->pru_attach == pru_attach_notsupp)
		return (EPROTONOSUPPORT);

	if (prison_check_af(cred, prp->pr_domain->dom_family) != 0)
		return (EPROTONOSUPPORT);

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(CRED_TO_VNET(cred));
	if (so == NULL)
		return (ENOBUFS);

	so->so_type = type;
	so->so_cred = crhold(cred);
	if ((prp->pr_domain->dom_family == PF_INET) ||
	    (prp->pr_domain->dom_family == PF_INET6) ||
	    (prp->pr_domain->dom_family == PF_ROUTE))
		so->so_fibnum = td->td_proc->p_fibnum;
	else
		so->so_fibnum = 0;
	so->so_proto = prp;
#ifdef MAC
	mac_socket_create(cred, so);
#endif
	knlist_init(&so->so_rdsel.si_note, so, so_rdknl_lock, so_rdknl_unlock,
	    so_rdknl_assert_locked, so_rdknl_assert_unlocked);
	knlist_init(&so->so_wrsel.si_note, so, so_wrknl_lock, so_wrknl_unlock,
	    so_wrknl_assert_locked, so_wrknl_assert_unlocked);
	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
	 */
	CURVNET_SET(so->so_vnet);
	error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
	CURVNET_RESTORE();
	if (error) {
		sodealloc(so);
		return (error);
	}
	soref(so);
	*aso = so;
	return (0);
}

#ifdef REGRESSION
static int regression_sonewconn_earlytest = 1;
SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW,
    &regression_sonewconn_earlytest, 0, "Perform early sonewconn limit test");
#endif
/*
 * When an attempt at a new connection is noted on a socket which accepts
 * connections, sonewconn is called.  If the connection is possible (subject
 * to space constraints, etc.) then we allocate a new structure, properly
 * linked into the data structure of the original socket, and return this.
 * Connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED.
 *
 * Note: the ref count on the socket is 0 on return.
 */
struct socket *
sonewconn(struct socket *head, int connstatus)
{
	static struct timeval lastover;
	static struct timeval overinterval = { 60, 0 };
	static int overcount;

	struct socket *so;
	u_int over;

	SOLISTEN_LOCK(head);
	over = (head->sol_qlen > 3 * head->sol_qlimit / 2);
	SOLISTEN_UNLOCK(head);
#ifdef REGRESSION
	if (regression_sonewconn_earlytest && over) {
#else
	if (over) {
#endif
		overcount++;

		if (ratecheck(&lastover, &overinterval)) {
			log(LOG_DEBUG, "%s: pcb %p: Listen queue overflow: "
			    "%i already in queue awaiting acceptance "
			    "(%d occurrences)\n",
			    __func__, head->so_pcb, head->sol_qlen, overcount);

			overcount = 0;
		}

		return (NULL);
	}
	VNET_ASSERT(head->so_vnet != NULL, ("%s: so %p vnet is NULL",
	    __func__, head));
	so = soalloc(head->so_vnet);
	if (so == NULL) {
		log(LOG_DEBUG, "%s: pcb %p: New socket allocation failure: "
		    "limit reached or out of memory\n",
		    __func__, head->so_pcb);
		return (NULL);
	}
	so->so_listen = head;
	so->so_type = head->so_type;
	so->so_linger = head->so_linger;
	so->so_state = head->so_state | SS_NOFDREF;
	so->so_fibnum = head->so_fibnum;
	so->so_proto = head->so_proto;
	so->so_cred = crhold(head->so_cred);
#ifdef MAC
	mac_socket_newconn(head, so);
#endif
	knlist_init(&so->so_rdsel.si_note, so, so_rdknl_lock, so_rdknl_unlock,
	    so_rdknl_assert_locked, so_rdknl_assert_unlocked);
	knlist_init(&so->so_wrsel.si_note, so, so_wrknl_lock, so_wrknl_unlock,
	    so_wrknl_assert_locked, so_wrknl_assert_unlocked);
	VNET_SO_ASSERT(head);
	if (soreserve(so, head->sol_sbsnd_hiwat, head->sol_sbrcv_hiwat)) {
		sodealloc(so);
		log(LOG_DEBUG, "%s: pcb %p: soreserve() failed\n",
		    __func__, head->so_pcb);
		return (NULL);
	}
	if ((*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
		sodealloc(so);
		log(LOG_DEBUG, "%s: pcb %p: pru_attach() failed\n",
		    __func__, head->so_pcb);
		return (NULL);
	}
	so->so_rcv.sb_lowat = head->sol_sbrcv_lowat;
	so->so_snd.sb_lowat = head->sol_sbsnd_lowat;
	so->so_rcv.sb_timeo = head->sol_sbrcv_timeo;
	so->so_snd.sb_timeo = head->sol_sbsnd_timeo;
	so->so_rcv.sb_flags |= head->sol_sbrcv_flags & SB_AUTOSIZE;
	so->so_snd.sb_flags |= head->sol_sbsnd_flags & SB_AUTOSIZE;

	SOLISTEN_LOCK(head);
	if (head->sol_accept_filter != NULL)
		connstatus = 0;
	so->so_state |= connstatus;
	so->so_options = head->so_options & ~SO_ACCEPTCONN;
	soref(head);	/* A socket on (in)complete queue refs head. */
	if (connstatus) {
		TAILQ_INSERT_TAIL(&head->sol_comp, so, so_list);
		so->so_qstate = SQ_COMP;
		head->sol_qlen++;
		solisten_wakeup(head);	/* unlocks */
	} else {
		/*
		 * Keep removing sockets from the head until there's room for
		 * us to insert on the tail.  In pre-locking revisions, this
		 * was a simple if (), but as we could be racing with other
		 * threads and soabort() requires dropping locks, we must
		 * loop waiting for the condition to be true.
		 */
		while (head->sol_incqlen > head->sol_qlimit) {
			struct socket *sp;

			sp = TAILQ_FIRST(&head->sol_incomp);
			TAILQ_REMOVE(&head->sol_incomp, sp, so_list);
			head->sol_incqlen--;
			SOCK_LOCK(sp);
			sp->so_qstate = SQ_NONE;
			sp->so_listen = NULL;
			SOCK_UNLOCK(sp);
			sorele(head);	/* does SOLISTEN_UNLOCK, head stays */
			soabort(sp);
			SOLISTEN_LOCK(head);
		}
		TAILQ_INSERT_TAIL(&head->sol_incomp, so, so_list);
		so->so_qstate = SQ_INCOMP;
		head->sol_incqlen++;
		SOLISTEN_UNLOCK(head);
	}
	return (so);
}

#ifdef SCTP
/*
 * Socket part of sctp_peeloff().  Detach a new socket from an
 * association.  The new socket is returned with a reference.
 */
struct socket *
sopeeloff(struct socket *head)
{
	struct socket *so;

	VNET_ASSERT(head->so_vnet != NULL, ("%s:%d so_vnet is NULL, head=%p",
	    __func__, __LINE__, head));
	so = soalloc(head->so_vnet);
	if (so == NULL) {
		log(LOG_DEBUG, "%s: pcb %p: New socket allocation failure: "
		    "limit reached or out of memory\n",
		    __func__, head->so_pcb);
		return (NULL);
	}
	so->so_type = head->so_type;
	so->so_options = head->so_options;
	so->so_linger = head->so_linger;
	so->so_state = (head->so_state & SS_NBIO) | SS_ISCONNECTED;
	so->so_fibnum = head->so_fibnum;
	so->so_proto = head->so_proto;
	so->so_cred = crhold(head->so_cred);
#ifdef MAC
	mac_socket_newconn(head, so);
#endif
	knlist_init(&so->so_rdsel.si_note, so, so_rdknl_lock, so_rdknl_unlock,
	    so_rdknl_assert_locked, so_rdknl_assert_unlocked);
	knlist_init(&so->so_wrsel.si_note, so, so_wrknl_lock, so_wrknl_unlock,
	    so_wrknl_assert_locked, so_wrknl_assert_unlocked);
	VNET_SO_ASSERT(head);
	if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat)) {
		sodealloc(so);
		log(LOG_DEBUG, "%s: pcb %p: soreserve() failed\n",
		    __func__, head->so_pcb);
		return (NULL);
	}
	if ((*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
		sodealloc(so);
		log(LOG_DEBUG, "%s: pcb %p: pru_attach() failed\n",
		    __func__, head->so_pcb);
		return (NULL);
	}
	so->so_rcv.sb_lowat = head->so_rcv.sb_lowat;
	so->so_snd.sb_lowat = head->so_snd.sb_lowat;
	so->so_rcv.sb_timeo = head->so_rcv.sb_timeo;
	so->so_snd.sb_timeo = head->so_snd.sb_timeo;
	so->so_rcv.sb_flags |= head->so_rcv.sb_flags & SB_AUTOSIZE;
	so->so_snd.sb_flags |= head->so_snd.sb_flags & SB_AUTOSIZE;

	soref(so);

	return (so);
}
#endif	/* SCTP */

int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	CURVNET_SET(so->so_vnet);
	error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td);
	CURVNET_RESTORE();
	return (error);
}

int
sobindat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	CURVNET_SET(so->so_vnet);
	error = (*so->so_proto->pr_usrreqs->pru_bindat)(fd, so, nam, td);
	CURVNET_RESTORE();
	return (error);
}
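/*
 * Example (an illustrative sketch, not part of the original file): an
 * in-kernel consumer binds by filling in a sockaddr and handing it to
 * sobind().  The port and address values below are arbitrary.
 *
 *	struct sockaddr_in sin;
 *	int error;
 *
 *	bzero(&sin, sizeof(sin));
 *	sin.sin_len = sizeof(sin);
 *	sin.sin_family = AF_INET;
 *	sin.sin_port = htons(8080);
 *	sin.sin_addr.s_addr = htonl(INADDR_ANY);
 *	error = sobind(so, (struct sockaddr *)&sin, td);
 */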
/*
 * solisten() transitions a socket from a non-listening state to a listening
 * state, but can also be used to update the listen queue depth on an
 * existing listen socket.  The protocol will call back into the sockets
 * layer using solisten_proto_check() and solisten_proto() to check and set
 * socket-layer listen state.  Callbacks are used so that the protocol can
 * acquire both protocol and socket layer locks in whatever order is required
 * by the protocol.
 *
 * Protocol implementors are advised to hold the socket lock across the
 * socket-layer test and set to avoid races at the socket layer.
 */
int
solisten(struct socket *so, int backlog, struct thread *td)
{
	int error;

	CURVNET_SET(so->so_vnet);
	error = (*so->so_proto->pr_usrreqs->pru_listen)(so, backlog, td);
	CURVNET_RESTORE();
	return (error);
}

int
solisten_proto_check(struct socket *so)
{

	SOCK_LOCK_ASSERT(so);

	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
	    SS_ISDISCONNECTING))
		return (EINVAL);
	return (0);
}

void
solisten_proto(struct socket *so, int backlog)
{
	int sbrcv_lowat, sbsnd_lowat;
	u_int sbrcv_hiwat, sbsnd_hiwat;
	short sbrcv_flags, sbsnd_flags;
	sbintime_t sbrcv_timeo, sbsnd_timeo;

	SOCK_LOCK_ASSERT(so);

	if (SOLISTENING(so))
		goto listening;

	/*
	 * Change this socket to listening state.
	 */
	sbrcv_lowat = so->so_rcv.sb_lowat;
	sbsnd_lowat = so->so_snd.sb_lowat;
	sbrcv_hiwat = so->so_rcv.sb_hiwat;
	sbsnd_hiwat = so->so_snd.sb_hiwat;
	sbrcv_flags = so->so_rcv.sb_flags;
	sbsnd_flags = so->so_snd.sb_flags;
	sbrcv_timeo = so->so_rcv.sb_timeo;
	sbsnd_timeo = so->so_snd.sb_timeo;

	sbdestroy(&so->so_snd, so);
	sbdestroy(&so->so_rcv, so);
	sx_destroy(&so->so_snd.sb_sx);
	sx_destroy(&so->so_rcv.sb_sx);
	SOCKBUF_LOCK_DESTROY(&so->so_snd);
	SOCKBUF_LOCK_DESTROY(&so->so_rcv);

#ifdef INVARIANTS
	bzero(&so->so_rcv,
	    sizeof(struct socket) - offsetof(struct socket, so_rcv));
#endif

	so->sol_sbrcv_lowat = sbrcv_lowat;
	so->sol_sbsnd_lowat = sbsnd_lowat;
	so->sol_sbrcv_hiwat = sbrcv_hiwat;
	so->sol_sbsnd_hiwat = sbsnd_hiwat;
	so->sol_sbrcv_flags = sbrcv_flags;
	so->sol_sbsnd_flags = sbsnd_flags;
	so->sol_sbrcv_timeo = sbrcv_timeo;
	so->sol_sbsnd_timeo = sbsnd_timeo;

	so->sol_qlen = so->sol_incqlen = 0;
	TAILQ_INIT(&so->sol_incomp);
	TAILQ_INIT(&so->sol_comp);

	so->sol_accept_filter = NULL;
	so->sol_accept_filter_arg = NULL;
	so->sol_accept_filter_str = NULL;

	so->sol_upcall = NULL;
	so->sol_upcallarg = NULL;

	so->so_options |= SO_ACCEPTCONN;

listening:
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->sol_qlimit = backlog;
}

/*
 * Wakeup listeners/subsystems once we have a complete connection.
 * Enters with lock, returns unlocked.
 */
void
solisten_wakeup(struct socket *sol)
{

	if (sol->sol_upcall != NULL)
		(void)sol->sol_upcall(sol, sol->sol_upcallarg, M_NOWAIT);
	else {
		selwakeuppri(&sol->so_rdsel, PSOCK);
		KNOTE_LOCKED(&sol->so_rdsel.si_note, 0);
	}
	SOLISTEN_UNLOCK(sol);
	wakeup_one(&sol->sol_comp);
	if ((sol->so_state & SS_ASYNC) && sol->so_sigio != NULL)
		pgsigio(&sol->so_sigio, SIGIO, 0);
}
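/*
 * Example (an illustrative sketch, not part of the original file): a
 * protocol's pru_listen method typically takes its own lock first, then
 * the socket lock, and uses the two callbacks above while both are held.
 * The protocol lock macros and function name are hypothetical, and error
 * handling is reduced to the essentials.
 *
 *	static int
 *	example_pru_listen(struct socket *so, int backlog, struct thread *td)
 *	{
 *		int error;
 *
 *		EXAMPLE_PROTO_LOCK();
 *		SOCK_LOCK(so);
 *		error = solisten_proto_check(so);
 *		if (error == 0)
 *			solisten_proto(so, backlog);
 *		SOCK_UNLOCK(so);
 *		EXAMPLE_PROTO_UNLOCK();
 *		return (error);
 *	}
 */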
/*
 * Return a single connection off a listening socket queue.  The main
 * consumer of the function is kern_accept4().  Some modules that do their
 * own accept management also use it.
 *
 * Listening socket must be locked on entry and is returned unlocked on
 * return.
 * The flags argument is a set of accept4(2) flags and ACCEPT4_INHERIT.
 */
int
solisten_dequeue(struct socket *head, struct socket **ret, int flags)
{
	struct socket *so;
	int error;

	SOLISTEN_LOCK_ASSERT(head);

	while (!(head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->sol_comp) &&
	    head->so_error == 0) {
		error = msleep(&head->sol_comp, &head->so_lock, PSOCK | PCATCH,
		    "accept", 0);
		if (error != 0) {
			SOLISTEN_UNLOCK(head);
			return (error);
		}
	}
	if (head->so_error) {
		error = head->so_error;
		head->so_error = 0;
	} else if ((head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->sol_comp))
		error = EWOULDBLOCK;
	else
		error = 0;
	if (error) {
		SOLISTEN_UNLOCK(head);
		return (error);
	}
	so = TAILQ_FIRST(&head->sol_comp);
	SOCK_LOCK(so);
	KASSERT(so->so_qstate == SQ_COMP,
	    ("%s: so %p not SQ_COMP", __func__, so));
	soref(so);
	head->sol_qlen--;
	so->so_qstate = SQ_NONE;
	so->so_listen = NULL;
	TAILQ_REMOVE(&head->sol_comp, so, so_list);
	if (flags & ACCEPT4_INHERIT)
		so->so_state |= (head->so_state & SS_NBIO);
	else
		so->so_state |= (flags & SOCK_NONBLOCK) ? SS_NBIO : 0;
	SOCK_UNLOCK(so);
	sorele(head);

	*ret = so;
	return (0);
}
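/*
 * Example (an illustrative sketch, not part of the original file): a
 * module doing its own accept management, as mentioned above, locks the
 * listening socket and pulls one completed connection off its queue; the
 * function returns with the listening socket unlocked.
 *
 *	struct socket *so;
 *	int error;
 *
 *	SOLISTEN_LOCK(head);
 *	error = solisten_dequeue(head, &so, 0);
 *	if (error == 0)
 *		... so now holds a referenced, accepted connection ...
 */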
/*
 * Evaluate the reference count and named references on a socket; if no
 * references remain, free it.  This should be called whenever a reference is
 * released, such as in sorele(), but also when named reference flags are
 * cleared in socket or protocol code.
 *
 * sofree() will free the socket if:
 *
 * - There are no outstanding file descriptor references or related consumers
 *   (so_count == 0).
 *
 * - The socket has been closed by user space, if ever open (SS_NOFDREF).
 *
 * - The protocol does not have an outstanding strong reference on the socket
 *   (SS_PROTOREF).
 *
 * - The socket is not in a completed connection queue, so a process has been
 *   notified that it is present.  If it is removed, the user process may
 *   block in accept() despite select() saying the socket was ready.
 */
void
sofree(struct socket *so)
{
	struct protosw *pr = so->so_proto;

	SOCK_LOCK_ASSERT(so);

	if ((so->so_state & SS_NOFDREF) == 0 || so->so_count != 0 ||
	    (so->so_state & SS_PROTOREF) || (so->so_qstate == SQ_COMP)) {
		SOCK_UNLOCK(so);
		return;
	}

	if (!SOLISTENING(so) && so->so_qstate == SQ_INCOMP) {
		struct socket *sol;

		sol = so->so_listen;
		KASSERT(sol, ("%s: so %p on incomp of NULL", __func__, so));

		/*
		 * To solve the race between the close of a listening socket
		 * and a socket on its incomplete queue, we need to lock
		 * both.  The order is first listening socket, then regular.
		 * Since we don't have SS_NOFDREF, nor SS_PROTOREF, this
		 * function and the listening socket are the only pointers
		 * to so.  To preserve so and sol, we reference both and then
		 * relock.
		 * After relock the socket may not move to so_comp since it
		 * doesn't have a PCB already, but it may be removed from
		 * so_incomp.  If that happens, we share responsibility for
		 * freeing the socket, but soclose() has already removed
		 * it from the queue.
		 */
		soref(sol);
		soref(so);
		SOCK_UNLOCK(so);
		SOLISTEN_LOCK(sol);
		SOCK_LOCK(so);
		if (so->so_qstate == SQ_INCOMP) {
			KASSERT(so->so_listen == sol,
			    ("%s: so %p migrated out of sol %p",
			    __func__, so, sol));
			TAILQ_REMOVE(&sol->sol_incomp, so, so_list);
			sol->sol_incqlen--;
			/* This is guaranteed not to be the last. */
			refcount_release(&sol->so_count);
			so->so_qstate = SQ_NONE;
			so->so_listen = NULL;
		} else
			KASSERT(so->so_listen == NULL,
			    ("%s: so %p not on (in)comp with so_listen",
			    __func__, so));
		sorele(sol);
		KASSERT(so->so_count == 1,
		    ("%s: so %p count %u", __func__, so, so->so_count));
		so->so_count = 0;
	}
	if (SOLISTENING(so))
		so->so_error = ECONNABORTED;
	SOCK_UNLOCK(so);

	if (so->so_dtor != NULL)
		so->so_dtor(so);

	VNET_SO_ASSERT(so);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
		(*pr->pr_domain->dom_dispose)(so);
	if (pr->pr_usrreqs->pru_detach != NULL)
		(*pr->pr_usrreqs->pru_detach)(so);

	/*
	 * From this point on, we assume that no other references to this
	 * socket exist anywhere else in the stack.  Therefore, no locks need
	 * to be acquired or held.
	 *
	 * We used to do a lot of socket buffer and socket locking here, as
	 * well as invoke sorflush() and perform wakeups.  The direct call to
	 * dom_dispose() and sbrelease_internal() are an inlining of what was
	 * necessary from sorflush().
	 *
	 * Notice that the socket buffer and kqueue state are torn down
	 * before calling pru_detach.  This means that protocols should not
	 * assume they can perform socket wakeups, etc, in their detach code.
	 */
	if (!SOLISTENING(so)) {
		sbdestroy(&so->so_snd, so);
		sbdestroy(&so->so_rcv, so);
	}
	seldrain(&so->so_rdsel);
	seldrain(&so->so_wrsel);
	knlist_destroy(&so->so_rdsel.si_note);
	knlist_destroy(&so->so_wrsel.si_note);
	sodealloc(so);
}
/*
 * Close a socket on last file table reference removal.  Initiate disconnect
 * if connected.  Free socket when disconnect complete.
 *
 * This function will sorele() the socket.  Note that soclose() may be
 * called prior to the ref count reaching zero.  The actual socket structure
 * will not be freed until the ref count reaches zero.
 */
int
soclose(struct socket *so)
{
	struct accept_queue lqueue;
	bool listening;
	int error = 0;

	KASSERT(!(so->so_state & SS_NOFDREF), ("soclose: SS_NOFDREF on enter"));

	CURVNET_SET(so->so_vnet);
	funsetown(&so->so_sigio);
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error) {
				if (error == ENOTCONN)
					error = 0;
				goto drop;
			}
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo,
				    PSOCK | PCATCH, "soclos",
				    so->so_linger * hz);
				if (error)
					break;
			}
		}
	}

drop:
	if (so->so_proto->pr_usrreqs->pru_close != NULL)
		(*so->so_proto->pr_usrreqs->pru_close)(so);

	SOCK_LOCK(so);
	if ((listening = (so->so_options & SO_ACCEPTCONN))) {
		struct socket *sp;

		TAILQ_INIT(&lqueue);
		TAILQ_SWAP(&lqueue, &so->sol_incomp, socket, so_list);
		TAILQ_CONCAT(&lqueue, &so->sol_comp, so_list);

		so->sol_qlen = so->sol_incqlen = 0;

		TAILQ_FOREACH(sp, &lqueue, so_list) {
			SOCK_LOCK(sp);
			sp->so_qstate = SQ_NONE;
			sp->so_listen = NULL;
			SOCK_UNLOCK(sp);
			/* Guaranteed not to be the last. */
			refcount_release(&so->so_count);
		}
	}
	KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF"));
	so->so_state |= SS_NOFDREF;
	sorele(so);
	if (listening) {
		struct socket *sp;

		TAILQ_FOREACH(sp, &lqueue, so_list) {
			SOCK_LOCK(sp);
			if (sp->so_count == 0) {
				SOCK_UNLOCK(sp);
				soabort(sp);
			} else
				/* sp is now in sofree() */
				SOCK_UNLOCK(sp);
		}
	}
	CURVNET_RESTORE();
	return (error);
}
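/*
 * Example (an illustrative sketch, not part of the original file): a
 * consumer that wants soclose() to wait in the linger loop above can
 * enable SO_LINGER first.  sosetopt() is defined later in this file; the
 * 5-second linger value below is arbitrary.
 *
 *	struct sockopt sopt;
 *	struct linger l;
 *	int error;
 *
 *	l.l_onoff = 1;
 *	l.l_linger = 5;
 *	bzero(&sopt, sizeof(sopt));
 *	sopt.sopt_dir = SOPT_SET;
 *	sopt.sopt_level = SOL_SOCKET;
 *	sopt.sopt_name = SO_LINGER;
 *	sopt.sopt_val = &l;
 *	sopt.sopt_valsize = sizeof(l);
 *	error = sosetopt(so, &sopt);
 *	...
 *	error = soclose(so);	(sleeps "soclos" for up to l_linger seconds)
 */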
/*
 * soabort() is used to abruptly tear down a connection, such as when a
 * resource limit is reached (listen queue depth exceeded), or if a listen
 * socket is closed while there are sockets waiting to be accepted.
 *
 * This interface is tricky, because it is called on an unreferenced socket,
 * and must be called only by a thread that has actually removed the socket
 * from the listen queue it was on, or races with other threads are risked.
 *
 * This interface will call into the protocol code, so must not be called
 * with any socket locks held.  Protocols do call it while holding their own
 * recursible protocol mutexes, but this is something that should be subject
 * to review in the future.
 */
void
soabort(struct socket *so)
{

	/*
	 * In as much as is possible, assert that no references to this
	 * socket are held.  This is not quite the same as asserting that the
	 * current thread is responsible for arranging for no references, but
	 * is as close as we can get for now.
	 */
	KASSERT(so->so_count == 0, ("soabort: so_count"));
	KASSERT((so->so_state & SS_PROTOREF) == 0, ("soabort: SS_PROTOREF"));
	KASSERT(so->so_state & SS_NOFDREF, ("soabort: !SS_NOFDREF"));
	KASSERT(so->so_qstate == SQ_NONE, ("soabort: !SQ_NONE"));
	VNET_SO_ASSERT(so);

	if (so->so_proto->pr_usrreqs->pru_abort != NULL)
		(*so->so_proto->pr_usrreqs->pru_abort)(so);
	SOCK_LOCK(so);
	sofree(so);
}

int
soaccept(struct socket *so, struct sockaddr **nam)
{
	int error;

	SOCK_LOCK(so);
	KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF"));
	so->so_state &= ~SS_NOFDREF;
	SOCK_UNLOCK(so);

	CURVNET_SET(so->so_vnet);
	error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
	CURVNET_RESTORE();
	return (error);
}

int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
{

	return (soconnectat(AT_FDCWD, so, nam, td));
}

int
soconnectat(int fd, struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);

	CURVNET_SET(so->so_vnet);
	/*
	 * If the protocol is connection-based, we can only connect once.
	 * Otherwise, if connected, try to disconnect first.  This allows
	 * the user to disconnect by connecting to, e.g., a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from previous connection from
		 * biting us.
		 */
		so->so_error = 0;
		if (fd == AT_FDCWD) {
			error = (*so->so_proto->pr_usrreqs->pru_connect)(so,
			    nam, td);
		} else {
			error = (*so->so_proto->pr_usrreqs->pru_connectat)(fd,
			    so, nam, td);
		}
	}
	CURVNET_RESTORE();

	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
	int error;

	CURVNET_SET(so1->so_vnet);
	error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2);
	CURVNET_RESTORE();
	return (error);
}

int
sodisconnect(struct socket *so)
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);
	if (so->so_state & SS_ISDISCONNECTING)
		return (EALREADY);
	VNET_SO_ASSERT(so);
	error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
	return (error);
}
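/*
 * Example (an illustrative sketch, not part of the original file):
 * soconnect() only initiates the connection; a caller that needs a
 * synchronous connect typically sleeps on so_timeo until
 * SS_ISCONNECTING clears, similar to the loop in the connect(2)
 * implementation.  The sockaddr is assumed to have been prepared as in
 * the sobind() sketch above.
 *
 *	error = soconnect(so, (struct sockaddr *)&sin, td);
 *	if (error == 0) {
 *		SOCK_LOCK(so);
 *		while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0)
 *			error = msleep(&so->so_timeo, &so->so_lock,
 *			    PSOCK | PCATCH, "soconn", 0);
 *		SOCK_UNLOCK(so);
 *	}
 */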
#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)

int
sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	long space;
	ssize_t resid;
	int clen = 0, error, dontroute;

	KASSERT(so->so_type == SOCK_DGRAM, ("sosend_dgram: !SOCK_DGRAM"));
	KASSERT(so->so_proto->pr_flags & PR_ATOMIC,
	    ("sosend_dgram: !PR_ATOMIC"));

	if (uio != NULL)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.  However, space must be
	 * signed, as it might be less than 0 if we over-committed, and we
	 * must use a signed comparison of space and resid.  On the other
	 * hand, a negative resid causes us to loop sending 0-length
	 * segments to the protocol.
	 */
	if (resid < 0) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0;
	if (td != NULL)
		td->td_ru.ru_msgsnd++;
	if (control != NULL)
		clen = control->m_len;

	SOCKBUF_LOCK(&so->so_snd);
	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		SOCKBUF_UNLOCK(&so->so_snd);
		error = EPIPE;
		goto out;
	}
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		SOCKBUF_UNLOCK(&so->so_snd);
		goto out;
	}
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		/*
		 * `sendto' and `sendmsg' are allowed on a connection-based
		 * socket if it supports implied connect.  Return ENOTCONN if
		 * not connected and no address is supplied.
		 */
		if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
		    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
			if ((so->so_state & SS_ISCONFIRMING) == 0 &&
			    !(resid == 0 && clen != 0)) {
				SOCKBUF_UNLOCK(&so->so_snd);
				error = ENOTCONN;
				goto out;
			}
		} else if (addr == NULL) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED)
				error = ENOTCONN;
			else
				error = EDESTADDRREQ;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto out;
		}
	}

	/*
	 * Do we need MSG_OOB support in SOCK_DGRAM?  Signs here may be a
	 * problem and need fixing.
	 */
	space = sbspace(&so->so_snd);
	if (flags & MSG_OOB)
		space += 1024;
	space -= clen;
	SOCKBUF_UNLOCK(&so->so_snd);
	if (resid > space) {
		error = EMSGSIZE;
		goto out;
	}
	if (uio == NULL) {
		resid = 0;
		if (flags & MSG_EOR)
			top->m_flags |= M_EOR;
	} else {
		/*
		 * Copy the data from userland into an mbuf chain.
		 * If no data is to be copied in, a single empty mbuf
		 * is returned.
		 */
		top = m_uiotombuf(uio, M_WAITOK, space, max_hdr,
		    (M_PKTHDR | ((flags & MSG_EOR) ? M_EOR : 0)));
		if (top == NULL) {
			error = EFAULT;	/* only possible error */
			goto out;
		}
		space -= resid - uio->uio_resid;
		resid = uio->uio_resid;
	}
	KASSERT(resid == 0, ("sosend_dgram: resid != 0"));
	/*
	 * XXXRW: Frobbing SO_DONTROUTE here is even worse without sblock
	 * than with.
	 */
	if (dontroute) {
		SOCK_LOCK(so);
		so->so_options |= SO_DONTROUTE;
		SOCK_UNLOCK(so);
	}
	/*
	 * XXX all the SBS_CANTSENDMORE checks previously done could be out
	 * of date.  We could have received a reset packet in an interrupt or
	 * maybe we slept while doing page faults in uiomove() etc.  We could
	 * probably recheck again inside the locking protection here, but
	 * there are probably other places that this also happens.  We must
	 * rethink this.
	 */
	VNET_SO_ASSERT(so);
	error = (*so->so_proto->pr_usrreqs->pru_send)(so,
	    (flags & MSG_OOB) ? PRUS_OOB :
	/*
	 * If the user set MSG_EOF, the protocol understands this flag, and
	 * there is nothing left to send, then use PRUS_EOF instead of
	 * plain PRUS_SEND semantics.
	 */
	    ((flags & MSG_EOF) &&
	     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
	     (resid <= 0)) ?
		PRUS_EOF :
	    /* If there is more to send, set PRUS_MORETOCOME. */
	    (flags & MSG_MORETOCOME) ||
	    (resid > 0 && space > 0) ?
		PRUS_MORETOCOME : 0,
	    top, addr, control, td);
	if (dontroute) {
		SOCK_LOCK(so);
		so->so_options &= ~SO_DONTROUTE;
		SOCK_UNLOCK(so);
	}
	clen = 0;
	control = NULL;
	top = NULL;
out:
	if (top != NULL)
		m_freem(top);
	if (control != NULL)
		m_freem(control);
	return (error);
}

/*
 * Send on a socket.  If send must go all at once and message is larger than
 * send buffering, then hard error.  Lock against other senders.  If must go
 * all at once and not enough room now, then inform user that this would
 * block and do nothing.  Otherwise, if nonblocking, send as much as
 * possible.  The data to be sent is described by "uio" if nonzero, otherwise
 * by the mbuf chain "top" (which must be null if uio is not).  Data provided
 * in mbuf chain must be small enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers must check for short
 * counts if EINTR/ERESTART are returned.  Data and control buffers are freed
 * on return.
 */
int
sosend_generic(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	long space;
	ssize_t resid;
	int clen = 0, error, dontroute;
	int atomic = sosendallatonce(so) || top;

	if (uio != NULL)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.  However, space must be
	 * signed, as it might be less than 0 if we over-committed, and we
	 * must use a signed comparison of space and resid.  On the other
	 * hand, a negative resid causes us to loop sending 0-length
	 * segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td != NULL)
		td->td_ru.ru_msgsnd++;
	if (control != NULL)
		clen = control->m_len;

	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

restart:
	do {
		SOCKBUF_LOCK(&so->so_snd);
		if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
			SOCKBUF_UNLOCK(&so->so_snd);
			error = EPIPE;
			goto release;
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a
			 * connection-based socket if it supports implied
			 * connect.  Return ENOTCONN if not connected and no
			 * address is supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0)) {
					SOCKBUF_UNLOCK(&so->so_snd);
					error = ENOTCONN;
					goto release;
				}
			} else if (addr == NULL) {
				SOCKBUF_UNLOCK(&so->so_snd);
				if (so->so_proto->pr_flags & PR_CONNREQUIRED)
					error = ENOTCONN;
				else
					error = EDESTADDRREQ;
				goto release;
			}
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat) {
			SOCKBUF_UNLOCK(&so->so_snd);
			error = EMSGSIZE;
			goto release;
		}
		if (space < resid + clen &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if ((so->so_state & SS_NBIO) ||
			    (flags & (MSG_NBIO | MSG_DONTWAIT)) != 0) {
				SOCKBUF_UNLOCK(&so->so_snd);
				error = EWOULDBLOCK;
				goto release;
			}
			error = sbwait(&so->so_snd);
			SOCKBUF_UNLOCK(&so->so_snd);
			if (error)
				goto release;
			goto restart;
		}
		SOCKBUF_UNLOCK(&so->so_snd);
		space -= clen;
		do {
			if (uio == NULL) {
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else {
				/*
				 * Copy the data from userland into an mbuf
				 * chain.  If resid is 0, which can happen
				 * only if we have control to send, then
				 * a single empty mbuf is returned.  This
				 * is a workaround to prevent protocol send
				 * methods from panicking.
				 */
				top = m_uiotombuf(uio, M_WAITOK, space,
				    (atomic ? max_hdr : 0),
				    (atomic ? M_PKTHDR : 0) |
				    ((flags & MSG_EOR) ? M_EOR : 0));
				if (top == NULL) {
					error = EFAULT;	/* only possible error */
					goto release;
				}
				space -= resid - uio->uio_resid;
				resid = uio->uio_resid;
			}
			if (dontroute) {
				SOCK_LOCK(so);
				so->so_options |= SO_DONTROUTE;
				SOCK_UNLOCK(so);
			}
			/*
			 * XXX all the SBS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We
			 * could probably recheck again inside the locking
			 * protection here, but there are probably other
			 * places that this also happens.  We must rethink
			 * this.
			 */
			VNET_SO_ASSERT(so);
			error = (*so->so_proto->pr_usrreqs->pru_send)(so,
			    (flags & MSG_OOB) ? PRUS_OOB :
			/*
			 * If the user set MSG_EOF, the protocol understands
			 * this flag, and there is nothing left to send, then
			 * use PRUS_EOF instead of plain PRUS_SEND semantics.
			 */
			    ((flags & MSG_EOF) &&
			     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			     (resid <= 0)) ?
				PRUS_EOF :
			    /* If there is more to send, set PRUS_MORETOCOME. */
			    (flags & MSG_MORETOCOME) ||
			    (resid > 0 && space > 0) ?
				PRUS_MORETOCOME : 0,
			    top, addr, control, td);
			if (dontroute) {
				SOCK_LOCK(so);
				so->so_options &= ~SO_DONTROUTE;
				SOCK_UNLOCK(so);
			}
			clen = 0;
			control = NULL;
			top = NULL;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	sbunlock(&so->so_snd);
out:
	if (top != NULL)
		m_freem(top);
	if (control != NULL)
		m_freem(control);
	return (error);
}

int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	int error;

	CURVNET_SET(so->so_vnet);
	if (!SOLISTENING(so))
		error = so->so_proto->pr_usrreqs->pru_sosend(so, addr, uio,
		    top, control, flags, td);
	else {
		m_freem(top);
		m_freem(control);
		error = ENOTCONN;
	}
	CURVNET_RESTORE();
	return (error);
}

/*
 * The part of soreceive() that implements reading non-inline out-of-band
 * data from a socket.  For more complete comments, see soreceive(), from
 * which this code originated.
 *
 * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is
 * unable to return an mbuf chain to the caller.
 */
static int
soreceive_rcvoob(struct socket *so, struct uio *uio, int flags)
{
	struct protosw *pr = so->so_proto;
	struct mbuf *m;
	int error;

	KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0"));
	VNET_SO_ASSERT(so);

	m = m_get(M_WAITOK, MT_DATA);
	error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
	if (error)
		goto bad;
	do {
		error = uiomove(mtod(m, void *),
		    (int) min(uio->uio_resid, m->m_len), uio);
		m = m_free(m);
	} while (uio->uio_resid && error == 0 && m);
bad:
	if (m != NULL)
		m_freem(m);
	return (error);
}

/*
 * Following replacement or removal of the first mbuf on the first mbuf chain
 * of a socket buffer, push necessary state changes back into the socket
 * buffer so that other consumers see the values consistently.  'nextrecord'
 * is the caller's locally stored value of the original value of
 * sb->sb_mb->m_nextpkt which must be restored when the lead mbuf changes.
 * NOTE: 'nextrecord' may be NULL.
 */
static __inline void
sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord)
{

	SOCKBUF_LOCK_ASSERT(sb);
	/*
	 * First, update for the new value of nextrecord.  If necessary, make
	 * it the first record.
	 */
	if (sb->sb_mb != NULL)
		sb->sb_mb->m_nextpkt = nextrecord;
	else
		sb->sb_mb = nextrecord;

	/*
	 * Now update any dependent socket buffer fields to reflect the new
	 * state.  This is an expanded inline of SB_EMPTY_FIXUP(), with the
	 * addition of a second clause that takes care of the case where
	 * sb_mb has been updated, but remains the last record.
	 */
	if (sb->sb_mb == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (sb->sb_mb->m_nextpkt == NULL)
		sb->sb_lastrecord = sb->sb_mb;
}
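/*
 * Worked example (illustrative, not part of the original file): suppose
 * the receive buffer holds two records R1 and R2, with R1 == sb_mb and
 * R2 == R1->m_nextpkt.  A reader caches nextrecord = R2, frees R1's lead
 * mbuf with m_free(), and then calls sockbuf_pushsync(sb, R2).  If R1
 * still has mbufs, sb_mb now points at the new lead mbuf and its
 * m_nextpkt is reset to R2; if R1 is gone entirely, sb_mb becomes R2;
 * and if R2 was NULL, sb_mbtail and sb_lastrecord are fixed up to match
 * the now-empty or shortened buffer.
 */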
/*
 * Implement receive operations on a socket.  We depend on the way that
 * records are added to the sockbuf by sbappend.  In particular, each record
 * (mbufs linked through m_next) must begin with an address if the protocol
 * so specifies, followed by an optional mbuf or mbufs containing ancillary
 * data, and then zero or more mbufs of data.  In order to allow parallelism
 * between network receive and copying to user space, as well as avoid
 * sleeping with a mutex held, we release the socket buffer mutex during the
 * user space copy.  Although the sockbuf is locked, new data may still be
 * appended, and thus we must maintain consistency of the sockbuf during that
 * time.
 *
 * The caller may receive the data as a single mbuf chain by supplying an
 * mbuf **mp0 for use in returning the chain.  The uio is then used only for
 * the count in uio_resid.
 */
int
soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, **mp;
	int flags, error, offset;
	ssize_t len;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff, type = 0;
	ssize_t orig_resid = uio->uio_resid;

	mp = mp0;
	if (psa != NULL)
		*psa = NULL;
	if (controlp != NULL)
		*controlp = NULL;
	if (flagsp != NULL)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB)
		return (soreceive_rcvoob(so, uio, flags));
	if (mp != NULL)
		*mp = NULL;
	if ((pr->pr_flags & PR_WANTRCVD) && (so->so_state & SS_ISCONFIRMING)
	    && uio->uio_resid) {
		VNET_SO_ASSERT(so);
		(*pr->pr_usrreqs->pru_rcvd)(so, 0);
	}

	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		return (error);

restart:
	SOCKBUF_LOCK(&so->so_rcv);
	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more (subject
	 * to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_DONTWAIT is not set
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    sbavail(&so->so_rcv) < uio->uio_resid) &&
	    sbavail(&so->so_rcv) < so->so_rcv.sb_lowat &&
	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !sbavail(&so->so_rcv),
		    ("receive: m == %p sbavail == %u",
		    m, sbavail(&so->so_rcv)));
		if (so->so_error) {
			if (m != NULL)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			SOCKBUF_UNLOCK(&so->so_rcv);
			goto release;
		}
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			if (m == NULL) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				goto release;
			} else
				goto dontblock;
		}
		for (; m != NULL; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			goto release;
		}
		if ((so->so_state & SS_NBIO) ||
		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			error = EWOULDBLOCK;
			goto release;
		}
		SBLASTRECORDCHK(&so->so_rcv);
		SBLASTMBUFCHK(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		SOCKBUF_UNLOCK(&so->so_rcv);
		if (error)
			goto release;
		goto restart;
	}
dontblock:
	/*
	 * From this point onward, we maintain 'nextrecord' as a cache of the
	 * pointer to the next record in the socket buffer.  We must keep the
	 * various socket buffer pointers and local stack versions of the
	 * pointers in sync, pushing out modifications before dropping the
	 * socket buffer mutex, and re-reading them when picking it up.
	 *
	 * Otherwise, we will race with the network stack appending new data
	 * or records onto the socket buffer by using inconsistent/stale
	 * versions of the field, possibly resulting in socket buffer
	 * corruption.
	 *
	 * By holding the high-level sblock(), we prevent simultaneous
	 * readers from pulling off the front of the socket buffer.
	 */
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (uio->uio_td)
		uio->uio_td->td_ru.ru_msgrcv++;
	KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME,
		    ("m->m_type == %d", m->m_type));
		orig_resid = 0;
		if (psa != NULL)
			*psa = sodupsockaddr(mtod(m, struct sockaddr *),
			    M_NOWAIT);
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			so->so_rcv.sb_mb = m_free(m);
			m = so->so_rcv.sb_mb;
			sockbuf_pushsync(&so->so_rcv, nextrecord);
		}
	}

	/*
	 * Process one or more MT_CONTROL mbufs present before any data mbufs
	 * in the first mbuf chain on the socket buffer.  If MSG_PEEK, we
	 * just copy the data; if !MSG_PEEK, we call into the protocol to
	 * perform externalization (or freeing if controlp == NULL).
	 */
 */
	if (m != NULL && m->m_type == MT_CONTROL) {
		struct mbuf *cm = NULL, *cmn;
		struct mbuf **cme = &cm;

		do {
			if (flags & MSG_PEEK) {
				if (controlp != NULL) {
					*controlp = m_copym(m, 0, m->m_len,
					    M_NOWAIT);
					controlp = &(*controlp)->m_next;
				}
				m = m->m_next;
			} else {
				sbfree(&so->so_rcv, m);
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = NULL;
				*cme = m;
				cme = &(*cme)->m_next;
				m = so->so_rcv.sb_mb;
			}
		} while (m != NULL && m->m_type == MT_CONTROL);
		if ((flags & MSG_PEEK) == 0)
			sockbuf_pushsync(&so->so_rcv, nextrecord);
		while (cm != NULL) {
			cmn = cm->m_next;
			cm->m_next = NULL;
			if (pr->pr_domain->dom_externalize != NULL) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				VNET_SO_ASSERT(so);
				error = (*pr->pr_domain->dom_externalize)
				    (cm, controlp, flags);
				SOCKBUF_LOCK(&so->so_rcv);
			} else if (controlp != NULL)
				*controlp = cm;
			else
				m_freem(cm);
			if (controlp != NULL) {
				orig_resid = 0;
				while (*controlp != NULL)
					controlp = &(*controlp)->m_next;
			}
			cm = cmn;
		}
		if (m != NULL)
			nextrecord = so->so_rcv.sb_mb->m_nextpkt;
		else
			nextrecord = so->so_rcv.sb_mb;
		orig_resid = 0;
	}
	if (m != NULL) {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(m->m_nextpkt == nextrecord,
			    ("soreceive: post-control, nextrecord !sync"));
			if (nextrecord == NULL) {
				KASSERT(so->so_rcv.sb_mb == m,
				    ("soreceive: post-control, sb_mb!=m"));
				KASSERT(so->so_rcv.sb_lastrecord == m,
				    ("soreceive: post-control, lastrecord!=m"));
			}
		}
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	} else {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(so->so_rcv.sb_mb == nextrecord,
			    ("soreceive: sb_mb != nextrecord"));
			if (so->so_rcv.sb_mb == NULL) {
				KASSERT(so->so_rcv.sb_lastrecord == NULL,
				    ("soreceive: sb_lastrecord != NULL"));
			}
		}
	}
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);

	/*
	 * Now continue to read any data mbufs off of the head of the socket
	 * buffer until the read request is satisfied.  Note that 'type' is
	 * used to store the type of any mbuf reads that have happened so far
	 * such that soreceive() can stop reading if the type changes, which
	 * causes soreceive() to return only one of regular data and inline
	 * out-of-band data in a single socket receive operation.
	 */
	moff = 0;
	offset = 0;
	while (m != NULL && !(m->m_flags & M_NOTAVAIL) && uio->uio_resid > 0
	    && error == 0) {
		/*
		 * If the type of mbuf has changed since the last mbuf
		 * examined ('type'), end the receive operation.
		 */
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (m->m_type == MT_OOBDATA || m->m_type == MT_CONTROL) {
			if (type != m->m_type)
				break;
		} else if (type == MT_OOBDATA)
			break;
		else
			KASSERT(m->m_type == MT_DATA,
			    ("m->m_type == %d", m->m_type));
		so->so_rcv.sb_state &= ~SBS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.  Otherwise copy
		 * them out via the uio, then free.
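		 *
		 * As an illustrative sketch (hypothetical consumer code, not
		 * a fragment of this file), an in-kernel caller that wants
		 * the mbufs themselves, e.g. an NFS-style service thread,
		 * passes mp0 and a uio that carries only a byte count:
		 *
		 *	struct uio auio;
		 *	struct mbuf *m0 = NULL;
		 *	int rflags = MSG_DONTWAIT;
		 *
		 *	bzero(&auio, sizeof(auio));
		 *	auio.uio_resid = 1024 * 1024;
		 *	error = soreceive(so, NULL, &auio, &m0, NULL, &rflags);
		 *
		 * On success m0 owns the received chain and no copy to user
		 * space has occurred.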
		 *
		 * The sockbuf must remain consistent here (sb_mb pointing at
		 * the current mbuf, its m_nextpkt at the next record)
		 * whenever we drop the mutex for the copy; we must pick up
		 * any data the protocol appended once we reacquire it.
		 */
		if (mp == NULL) {
			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
			SBLASTRECORDCHK(&so->so_rcv);
			SBLASTMBUFCHK(&so->so_rcv);
			SOCKBUF_UNLOCK(&so->so_rcv);
			error = uiomove(mtod(m, char *) + moff, (int)len, uio);
			SOCKBUF_LOCK(&so->so_rcv);
			if (error) {
				/*
				 * The MT_SONAME mbuf has already been removed
				 * from the record, so it is necessary to
				 * remove the data mbufs, if any, to preserve
				 * the invariant in the case of PR_ADDR that
				 * requires MT_SONAME mbufs at the head of
				 * each record.
				 */
				if (pr->pr_flags & PR_ATOMIC &&
				    ((flags & MSG_PEEK) == 0))
					(void)sbdroprecord_locked(&so->so_rcv);
				SOCKBUF_UNLOCK(&so->so_rcv);
				goto release;
			}
		} else
			uio->uio_resid -= len;
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp != NULL) {
					m->m_nextpkt = NULL;
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					so->so_rcv.sb_mb = m_free(m);
					m = so->so_rcv.sb_mb;
				}
				sockbuf_pushsync(&so->so_rcv, nextrecord);
				SBLASTRECORDCHK(&so->so_rcv);
				SBLASTMBUFCHK(&so->so_rcv);
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp != NULL) {
					if (flags & MSG_DONTWAIT) {
						*mp = m_copym(m, 0, len,
						    M_NOWAIT);
						if (*mp == NULL) {
							/*
							 * m_copym() couldn't
							 * allocate an mbuf.
							 * Adjust uio_resid back
							 * (it was adjusted
							 * down by len bytes,
							 * which we didn't end
							 * up "copying" over).
							 */
							uio->uio_resid += len;
							break;
						}
					} else {
						SOCKBUF_UNLOCK(&so->so_rcv);
						*mp = m_copym(m, 0, len,
						    M_WAITOK);
						SOCKBUF_LOCK(&so->so_rcv);
					}
				}
				sbcut_locked(&so->so_rcv, len);
			}
		}
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_rcv.sb_state |= SBS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket), we
		 * must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return with a
		 * short count but without error.  Keep sockbuf locked
		 * against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && nextrecord == NULL) {
			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
			if (so->so_error ||
			    so->so_rcv.sb_state & SBS_CANTRCVMORE)
				break;
			/*
			 * Notify the protocol that some data has been
			 * drained before blocking.
			 */
			if (pr->pr_flags & PR_WANTRCVD) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				VNET_SO_ASSERT(so);
				(*pr->pr_usrreqs->pru_rcvd)(so, flags);
				SOCKBUF_LOCK(&so->so_rcv);
			}
			SBLASTRECORDCHK(&so->so_rcv);
			SBLASTMBUFCHK(&so->so_rcv);
			/*
			 * We could receive some data while we were notifying
			 * the protocol.
Skip blocking in this case. 2104 */ 2105 if (so->so_rcv.sb_mb == NULL) { 2106 error = sbwait(&so->so_rcv); 2107 if (error) { 2108 SOCKBUF_UNLOCK(&so->so_rcv); 2109 goto release; 2110 } 2111 } 2112 m = so->so_rcv.sb_mb; 2113 if (m != NULL) 2114 nextrecord = m->m_nextpkt; 2115 } 2116 } 2117 2118 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 2119 if (m != NULL && pr->pr_flags & PR_ATOMIC) { 2120 flags |= MSG_TRUNC; 2121 if ((flags & MSG_PEEK) == 0) 2122 (void) sbdroprecord_locked(&so->so_rcv); 2123 } 2124 if ((flags & MSG_PEEK) == 0) { 2125 if (m == NULL) { 2126 /* 2127 * First part is an inline SB_EMPTY_FIXUP(). Second 2128 * part makes sure sb_lastrecord is up-to-date if 2129 * there is still data in the socket buffer. 2130 */ 2131 so->so_rcv.sb_mb = nextrecord; 2132 if (so->so_rcv.sb_mb == NULL) { 2133 so->so_rcv.sb_mbtail = NULL; 2134 so->so_rcv.sb_lastrecord = NULL; 2135 } else if (nextrecord->m_nextpkt == NULL) 2136 so->so_rcv.sb_lastrecord = nextrecord; 2137 } 2138 SBLASTRECORDCHK(&so->so_rcv); 2139 SBLASTMBUFCHK(&so->so_rcv); 2140 /* 2141 * If soreceive() is being done from the socket callback, 2142 * then don't need to generate ACK to peer to update window, 2143 * since ACK will be generated on return to TCP. 2144 */ 2145 if (!(flags & MSG_SOCALLBCK) && 2146 (pr->pr_flags & PR_WANTRCVD)) { 2147 SOCKBUF_UNLOCK(&so->so_rcv); 2148 VNET_SO_ASSERT(so); 2149 (*pr->pr_usrreqs->pru_rcvd)(so, flags); 2150 SOCKBUF_LOCK(&so->so_rcv); 2151 } 2152 } 2153 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 2154 if (orig_resid == uio->uio_resid && orig_resid && 2155 (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) { 2156 SOCKBUF_UNLOCK(&so->so_rcv); 2157 goto restart; 2158 } 2159 SOCKBUF_UNLOCK(&so->so_rcv); 2160 2161 if (flagsp != NULL) 2162 *flagsp |= flags; 2163 release: 2164 sbunlock(&so->so_rcv); 2165 return (error); 2166 } 2167 2168 /* 2169 * Optimized version of soreceive() for stream (TCP) sockets. 2170 */ 2171 int 2172 soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio, 2173 struct mbuf **mp0, struct mbuf **controlp, int *flagsp) 2174 { 2175 int len = 0, error = 0, flags, oresid; 2176 struct sockbuf *sb; 2177 struct mbuf *m, *n = NULL; 2178 2179 /* We only do stream sockets. */ 2180 if (so->so_type != SOCK_STREAM) 2181 return (EINVAL); 2182 if (psa != NULL) 2183 *psa = NULL; 2184 if (flagsp != NULL) 2185 flags = *flagsp &~ MSG_EOR; 2186 else 2187 flags = 0; 2188 if (controlp != NULL) 2189 *controlp = NULL; 2190 if (flags & MSG_OOB) 2191 return (soreceive_rcvoob(so, uio, flags)); 2192 if (mp0 != NULL) 2193 *mp0 = NULL; 2194 2195 sb = &so->so_rcv; 2196 2197 /* Prevent other readers from entering the socket. */ 2198 error = sblock(sb, SBLOCKWAIT(flags)); 2199 if (error) 2200 goto out; 2201 SOCKBUF_LOCK(sb); 2202 2203 /* Easy one, no space to copyout anything. */ 2204 if (uio->uio_resid == 0) { 2205 error = EINVAL; 2206 goto out; 2207 } 2208 oresid = uio->uio_resid; 2209 2210 /* We will never ever get anything unless we are or were connected. */ 2211 if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) { 2212 error = ENOTCONN; 2213 goto out; 2214 } 2215 2216 restart: 2217 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 2218 2219 /* Abort if socket has reported problems. */ 2220 if (so->so_error) { 2221 if (sbavail(sb) > 0) 2222 goto deliver; 2223 if (oresid > uio->uio_resid) 2224 goto out; 2225 error = so->so_error; 2226 if (!(flags & MSG_PEEK)) 2227 so->so_error = 0; 2228 goto out; 2229 } 2230 2231 /* Door is closed. Deliver what is left, if any. 
*/ 2232 if (sb->sb_state & SBS_CANTRCVMORE) { 2233 if (sbavail(sb) > 0) 2234 goto deliver; 2235 else 2236 goto out; 2237 } 2238 2239 /* Socket buffer is empty and we shall not block. */ 2240 if (sbavail(sb) == 0 && 2241 ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) { 2242 error = EAGAIN; 2243 goto out; 2244 } 2245 2246 /* Socket buffer got some data that we shall deliver now. */ 2247 if (sbavail(sb) > 0 && !(flags & MSG_WAITALL) && 2248 ((so->so_state & SS_NBIO) || 2249 (flags & (MSG_DONTWAIT|MSG_NBIO)) || 2250 sbavail(sb) >= sb->sb_lowat || 2251 sbavail(sb) >= uio->uio_resid || 2252 sbavail(sb) >= sb->sb_hiwat) ) { 2253 goto deliver; 2254 } 2255 2256 /* On MSG_WAITALL we must wait until all data or error arrives. */ 2257 if ((flags & MSG_WAITALL) && 2258 (sbavail(sb) >= uio->uio_resid || sbavail(sb) >= sb->sb_hiwat)) 2259 goto deliver; 2260 2261 /* 2262 * Wait and block until (more) data comes in. 2263 * NB: Drops the sockbuf lock during wait. 2264 */ 2265 error = sbwait(sb); 2266 if (error) 2267 goto out; 2268 goto restart; 2269 2270 deliver: 2271 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 2272 KASSERT(sbavail(sb) > 0, ("%s: sockbuf empty", __func__)); 2273 KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__)); 2274 2275 /* Statistics. */ 2276 if (uio->uio_td) 2277 uio->uio_td->td_ru.ru_msgrcv++; 2278 2279 /* Fill uio until full or current end of socket buffer is reached. */ 2280 len = min(uio->uio_resid, sbavail(sb)); 2281 if (mp0 != NULL) { 2282 /* Dequeue as many mbufs as possible. */ 2283 if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) { 2284 if (*mp0 == NULL) 2285 *mp0 = sb->sb_mb; 2286 else 2287 m_cat(*mp0, sb->sb_mb); 2288 for (m = sb->sb_mb; 2289 m != NULL && m->m_len <= len; 2290 m = m->m_next) { 2291 KASSERT(!(m->m_flags & M_NOTAVAIL), 2292 ("%s: m %p not available", __func__, m)); 2293 len -= m->m_len; 2294 uio->uio_resid -= m->m_len; 2295 sbfree(sb, m); 2296 n = m; 2297 } 2298 n->m_next = NULL; 2299 sb->sb_mb = m; 2300 sb->sb_lastrecord = sb->sb_mb; 2301 if (sb->sb_mb == NULL) 2302 SB_EMPTY_FIXUP(sb); 2303 } 2304 /* Copy the remainder. */ 2305 if (len > 0) { 2306 KASSERT(sb->sb_mb != NULL, 2307 ("%s: len > 0 && sb->sb_mb empty", __func__)); 2308 2309 m = m_copym(sb->sb_mb, 0, len, M_NOWAIT); 2310 if (m == NULL) 2311 len = 0; /* Don't flush data from sockbuf. */ 2312 else 2313 uio->uio_resid -= len; 2314 if (*mp0 != NULL) 2315 m_cat(*mp0, m); 2316 else 2317 *mp0 = m; 2318 if (*mp0 == NULL) { 2319 error = ENOBUFS; 2320 goto out; 2321 } 2322 } 2323 } else { 2324 /* NB: Must unlock socket buffer as uiomove may sleep. */ 2325 SOCKBUF_UNLOCK(sb); 2326 error = m_mbuftouio(uio, sb->sb_mb, len); 2327 SOCKBUF_LOCK(sb); 2328 if (error) 2329 goto out; 2330 } 2331 SBLASTRECORDCHK(sb); 2332 SBLASTMBUFCHK(sb); 2333 2334 /* 2335 * Remove the delivered data from the socket buffer unless we 2336 * were only peeking. 2337 */ 2338 if (!(flags & MSG_PEEK)) { 2339 if (len > 0) 2340 sbdrop_locked(sb, len); 2341 2342 /* Notify protocol that we drained some data. */ 2343 if ((so->so_proto->pr_flags & PR_WANTRCVD) && 2344 (((flags & MSG_WAITALL) && uio->uio_resid > 0) || 2345 !(flags & MSG_SOCALLBCK))) { 2346 SOCKBUF_UNLOCK(sb); 2347 VNET_SO_ASSERT(so); 2348 (*so->so_proto->pr_usrreqs->pru_rcvd)(so, flags); 2349 SOCKBUF_LOCK(sb); 2350 } 2351 } 2352 2353 /* 2354 * For MSG_WAITALL we may have to loop again and wait for 2355 * more data to come in. 
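	 * (This is the recv(2) MSG_WAITALL contract as seen from userland:
	 * for example, recv(s, buf, sizeof(buf), MSG_WAITALL) on a stream
	 * socket normally returns short only on EOF, a pending error, or a
	 * caught signal.)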
2356 */ 2357 if ((flags & MSG_WAITALL) && uio->uio_resid > 0) 2358 goto restart; 2359 out: 2360 SOCKBUF_LOCK_ASSERT(sb); 2361 SBLASTRECORDCHK(sb); 2362 SBLASTMBUFCHK(sb); 2363 SOCKBUF_UNLOCK(sb); 2364 sbunlock(sb); 2365 return (error); 2366 } 2367 2368 /* 2369 * Optimized version of soreceive() for simple datagram cases from userspace. 2370 * Unlike in the stream case, we're able to drop a datagram if copyout() 2371 * fails, and because we handle datagrams atomically, we don't need to use a 2372 * sleep lock to prevent I/O interlacing. 2373 */ 2374 int 2375 soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio, 2376 struct mbuf **mp0, struct mbuf **controlp, int *flagsp) 2377 { 2378 struct mbuf *m, *m2; 2379 int flags, error; 2380 ssize_t len; 2381 struct protosw *pr = so->so_proto; 2382 struct mbuf *nextrecord; 2383 2384 if (psa != NULL) 2385 *psa = NULL; 2386 if (controlp != NULL) 2387 *controlp = NULL; 2388 if (flagsp != NULL) 2389 flags = *flagsp &~ MSG_EOR; 2390 else 2391 flags = 0; 2392 2393 /* 2394 * For any complicated cases, fall back to the full 2395 * soreceive_generic(). 2396 */ 2397 if (mp0 != NULL || (flags & MSG_PEEK) || (flags & MSG_OOB)) 2398 return (soreceive_generic(so, psa, uio, mp0, controlp, 2399 flagsp)); 2400 2401 /* 2402 * Enforce restrictions on use. 2403 */ 2404 KASSERT((pr->pr_flags & PR_WANTRCVD) == 0, 2405 ("soreceive_dgram: wantrcvd")); 2406 KASSERT(pr->pr_flags & PR_ATOMIC, ("soreceive_dgram: !atomic")); 2407 KASSERT((so->so_rcv.sb_state & SBS_RCVATMARK) == 0, 2408 ("soreceive_dgram: SBS_RCVATMARK")); 2409 KASSERT((so->so_proto->pr_flags & PR_CONNREQUIRED) == 0, 2410 ("soreceive_dgram: P_CONNREQUIRED")); 2411 2412 /* 2413 * Loop blocking while waiting for a datagram. 2414 */ 2415 SOCKBUF_LOCK(&so->so_rcv); 2416 while ((m = so->so_rcv.sb_mb) == NULL) { 2417 KASSERT(sbavail(&so->so_rcv) == 0, 2418 ("soreceive_dgram: sb_mb NULL but sbavail %u", 2419 sbavail(&so->so_rcv))); 2420 if (so->so_error) { 2421 error = so->so_error; 2422 so->so_error = 0; 2423 SOCKBUF_UNLOCK(&so->so_rcv); 2424 return (error); 2425 } 2426 if (so->so_rcv.sb_state & SBS_CANTRCVMORE || 2427 uio->uio_resid == 0) { 2428 SOCKBUF_UNLOCK(&so->so_rcv); 2429 return (0); 2430 } 2431 if ((so->so_state & SS_NBIO) || 2432 (flags & (MSG_DONTWAIT|MSG_NBIO))) { 2433 SOCKBUF_UNLOCK(&so->so_rcv); 2434 return (EWOULDBLOCK); 2435 } 2436 SBLASTRECORDCHK(&so->so_rcv); 2437 SBLASTMBUFCHK(&so->so_rcv); 2438 error = sbwait(&so->so_rcv); 2439 if (error) { 2440 SOCKBUF_UNLOCK(&so->so_rcv); 2441 return (error); 2442 } 2443 } 2444 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 2445 2446 if (uio->uio_td) 2447 uio->uio_td->td_ru.ru_msgrcv++; 2448 SBLASTRECORDCHK(&so->so_rcv); 2449 SBLASTMBUFCHK(&so->so_rcv); 2450 nextrecord = m->m_nextpkt; 2451 if (nextrecord == NULL) { 2452 KASSERT(so->so_rcv.sb_lastrecord == m, 2453 ("soreceive_dgram: lastrecord != m")); 2454 } 2455 2456 KASSERT(so->so_rcv.sb_mb->m_nextpkt == nextrecord, 2457 ("soreceive_dgram: m_nextpkt != nextrecord")); 2458 2459 /* 2460 * Pull 'm' and its chain off the front of the packet queue. 2461 */ 2462 so->so_rcv.sb_mb = NULL; 2463 sockbuf_pushsync(&so->so_rcv, nextrecord); 2464 2465 /* 2466 * Walk 'm's chain and free that many bytes from the socket buffer. 2467 */ 2468 for (m2 = m; m2 != NULL; m2 = m2->m_next) 2469 sbfree(&so->so_rcv, m2); 2470 2471 /* 2472 * Do a few last checks before we let go of the lock. 
 */
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);
	SOCKBUF_UNLOCK(&so->so_rcv);

	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME,
		    ("m->m_type == %d", m->m_type));
		if (psa != NULL)
			*psa = sodupsockaddr(mtod(m, struct sockaddr *),
			    M_NOWAIT);
		m = m_free(m);
	}
	if (m == NULL) {
		/* XXXRW: Can this happen? */
		return (0);
	}

	/*
	 * Packet to copyout() is now in 'm' and it is disconnected from the
	 * queue.
	 *
	 * Process one or more MT_CONTROL mbufs present before any data mbufs
	 * in the first mbuf chain on the socket buffer.  We call into the
	 * protocol to perform externalization (or freeing if controlp ==
	 * NULL).  In some cases there can be only MT_CONTROL mbufs without
	 * MT_DATA mbufs.
	 */
	if (m->m_type == MT_CONTROL) {
		struct mbuf *cm = NULL, *cmn;
		struct mbuf **cme = &cm;

		do {
			m2 = m->m_next;
			m->m_next = NULL;
			*cme = m;
			cme = &(*cme)->m_next;
			m = m2;
		} while (m != NULL && m->m_type == MT_CONTROL);
		while (cm != NULL) {
			cmn = cm->m_next;
			cm->m_next = NULL;
			if (pr->pr_domain->dom_externalize != NULL) {
				error = (*pr->pr_domain->dom_externalize)
				    (cm, controlp, flags);
			} else if (controlp != NULL)
				*controlp = cm;
			else
				m_freem(cm);
			if (controlp != NULL) {
				while (*controlp != NULL)
					controlp = &(*controlp)->m_next;
			}
			cm = cmn;
		}
	}
	KASSERT(m == NULL || m->m_type == MT_DATA,
	    ("soreceive_dgram: !data"));
	while (m != NULL && uio->uio_resid > 0) {
		len = uio->uio_resid;
		if (len > m->m_len)
			len = m->m_len;
		error = uiomove(mtod(m, char *), (int)len, uio);
		if (error) {
			m_freem(m);
			return (error);
		}
		if (len == m->m_len)
			m = m_free(m);
		else {
			m->m_data += len;
			m->m_len -= len;
		}
	}
	if (m != NULL) {
		flags |= MSG_TRUNC;
		m_freem(m);
	}
	if (flagsp != NULL)
		*flagsp |= flags;
	return (0);
}

int
soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
	int error;

	CURVNET_SET(so->so_vnet);
	if (!SOLISTENING(so))
		error = (so->so_proto->pr_usrreqs->pru_soreceive(so, psa, uio,
		    mp0, controlp, flagsp));
	else
		error = ENOTCONN;
	CURVNET_RESTORE();
	return (error);
}

int
soshutdown(struct socket *so, int how)
{
	struct protosw *pr = so->so_proto;
	int error, soerror_enotconn;

	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	soerror_enotconn = 0;
	if ((so->so_state &
	    (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) {
		/*
		 * POSIX mandates that we return ENOTCONN when shutdown(2) is
		 * invoked on an unconnected datagram socket; historically,
		 * however, we would actually tear the socket down.  That
		 * behaviour is known to be leveraged by some applications,
		 * where one process unblocks another process waiting in
		 * recvXXX(2) on a socket the two share.  Try to meet both
		 * the backward-compatibility and the POSIX requirements by
		 * forcing ENOTCONN while still asking the protocol to
		 * perform pru_shutdown().
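		 *
		 * As an illustration of the idiom (a userland sketch, not
		 * code from this file): thread A blocks in
		 *
		 *	recvfrom(s, buf, sizeof(buf), 0, NULL, NULL);
		 *
		 * and thread B later calls
		 *
		 *	shutdown(s, SHUT_RD);
		 *
		 * B's shutdown(2) fails with ENOTCONN as POSIX requires, but
		 * A's recvfrom(2) still returns 0, preserving the historic
		 * unblocking behaviour.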
2592 */ 2593 if (so->so_type != SOCK_DGRAM && !SOLISTENING(so)) 2594 return (ENOTCONN); 2595 soerror_enotconn = 1; 2596 } 2597 2598 if (SOLISTENING(so)) { 2599 if (how != SHUT_WR) { 2600 SOLISTEN_LOCK(so); 2601 so->so_error = ECONNABORTED; 2602 solisten_wakeup(so); /* unlocks so */ 2603 } 2604 goto done; 2605 } 2606 2607 CURVNET_SET(so->so_vnet); 2608 if (pr->pr_usrreqs->pru_flush != NULL) 2609 (*pr->pr_usrreqs->pru_flush)(so, how); 2610 if (how != SHUT_WR) 2611 sorflush(so); 2612 if (how != SHUT_RD) { 2613 error = (*pr->pr_usrreqs->pru_shutdown)(so); 2614 wakeup(&so->so_timeo); 2615 CURVNET_RESTORE(); 2616 return ((error == 0 && soerror_enotconn) ? ENOTCONN : error); 2617 } 2618 wakeup(&so->so_timeo); 2619 CURVNET_RESTORE(); 2620 2621 done: 2622 return (soerror_enotconn ? ENOTCONN : 0); 2623 } 2624 2625 void 2626 sorflush(struct socket *so) 2627 { 2628 struct sockbuf *sb = &so->so_rcv; 2629 struct protosw *pr = so->so_proto; 2630 struct socket aso; 2631 2632 VNET_SO_ASSERT(so); 2633 2634 /* 2635 * In order to avoid calling dom_dispose with the socket buffer mutex 2636 * held, and in order to generally avoid holding the lock for a long 2637 * time, we make a copy of the socket buffer and clear the original 2638 * (except locks, state). The new socket buffer copy won't have 2639 * initialized locks so we can only call routines that won't use or 2640 * assert those locks. 2641 * 2642 * Dislodge threads currently blocked in receive and wait to acquire 2643 * a lock against other simultaneous readers before clearing the 2644 * socket buffer. Don't let our acquire be interrupted by a signal 2645 * despite any existing socket disposition on interruptable waiting. 2646 */ 2647 socantrcvmore(so); 2648 (void) sblock(sb, SBL_WAIT | SBL_NOINTR); 2649 2650 /* 2651 * Invalidate/clear most of the sockbuf structure, but leave selinfo 2652 * and mutex data unchanged. 2653 */ 2654 SOCKBUF_LOCK(sb); 2655 bzero(&aso, sizeof(aso)); 2656 aso.so_pcb = so->so_pcb; 2657 bcopy(&sb->sb_startzero, &aso.so_rcv.sb_startzero, 2658 sizeof(*sb) - offsetof(struct sockbuf, sb_startzero)); 2659 bzero(&sb->sb_startzero, 2660 sizeof(*sb) - offsetof(struct sockbuf, sb_startzero)); 2661 SOCKBUF_UNLOCK(sb); 2662 sbunlock(sb); 2663 2664 /* 2665 * Dispose of special rights and flush the copied socket. Don't call 2666 * any unsafe routines (that rely on locks being initialized) on aso. 2667 */ 2668 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL) 2669 (*pr->pr_domain->dom_dispose)(&aso); 2670 sbrelease_internal(&aso.so_rcv, so); 2671 } 2672 2673 /* 2674 * Wrapper for Socket established helper hook. 2675 * Parameters: socket, context of the hook point, hook id. 2676 */ 2677 static int inline 2678 hhook_run_socket(struct socket *so, void *hctx, int32_t h_id) 2679 { 2680 struct socket_hhook_data hhook_data = { 2681 .so = so, 2682 .hctx = hctx, 2683 .m = NULL, 2684 .status = 0 2685 }; 2686 2687 CURVNET_SET(so->so_vnet); 2688 HHOOKS_RUN_IF(V_socket_hhh[h_id], &hhook_data, &so->osd); 2689 CURVNET_RESTORE(); 2690 2691 /* Ugly but needed, since hhooks return void for now */ 2692 return (hhook_data.status); 2693 } 2694 2695 /* 2696 * Perhaps this routine, and sooptcopyout(), below, ought to come in an 2697 * additional variant to handle the case where the option value needs to be 2698 * some kind of integer, but not a specific size. In addition to their use 2699 * here, these functions are also called by the protocol-level pr_ctloutput() 2700 * routines. 
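 *
 * For example, a protocol's pr_ctloutput handler typically uses the pair
 * as follows (an illustrative sketch; foo_ctloutput and FOO_KNOB are
 * hypothetical):
 *
 *	static int
 *	foo_ctloutput(struct socket *so, struct sockopt *sopt)
 *	{
 *		int error, optval;
 *
 *		if (sopt->sopt_name != FOO_KNOB)
 *			return (ENOPROTOOPT);
 *		if (sopt->sopt_dir == SOPT_SET) {
 *			error = sooptcopyin(sopt, &optval, sizeof(optval),
 *			    sizeof(optval));
 *			if (error == 0 && optval < 0)
 *				error = EINVAL;
 *			(on success, apply optval to the protocol state)
 *		} else {
 *			optval = (current value of the knob);
 *			error = sooptcopyout(sopt, &optval, sizeof(optval));
 *		}
 *		return (error);
 *	}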
2701 */ 2702 int 2703 sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen) 2704 { 2705 size_t valsize; 2706 2707 /* 2708 * If the user gives us more than we wanted, we ignore it, but if we 2709 * don't get the minimum length the caller wants, we return EINVAL. 2710 * On success, sopt->sopt_valsize is set to however much we actually 2711 * retrieved. 2712 */ 2713 if ((valsize = sopt->sopt_valsize) < minlen) 2714 return EINVAL; 2715 if (valsize > len) 2716 sopt->sopt_valsize = valsize = len; 2717 2718 if (sopt->sopt_td != NULL) 2719 return (copyin(sopt->sopt_val, buf, valsize)); 2720 2721 bcopy(sopt->sopt_val, buf, valsize); 2722 return (0); 2723 } 2724 2725 /* 2726 * Kernel version of setsockopt(2). 2727 * 2728 * XXX: optlen is size_t, not socklen_t 2729 */ 2730 int 2731 so_setsockopt(struct socket *so, int level, int optname, void *optval, 2732 size_t optlen) 2733 { 2734 struct sockopt sopt; 2735 2736 sopt.sopt_level = level; 2737 sopt.sopt_name = optname; 2738 sopt.sopt_dir = SOPT_SET; 2739 sopt.sopt_val = optval; 2740 sopt.sopt_valsize = optlen; 2741 sopt.sopt_td = NULL; 2742 return (sosetopt(so, &sopt)); 2743 } 2744 2745 int 2746 sosetopt(struct socket *so, struct sockopt *sopt) 2747 { 2748 int error, optval; 2749 struct linger l; 2750 struct timeval tv; 2751 sbintime_t val; 2752 uint32_t val32; 2753 #ifdef MAC 2754 struct mac extmac; 2755 #endif 2756 2757 CURVNET_SET(so->so_vnet); 2758 error = 0; 2759 if (sopt->sopt_level != SOL_SOCKET) { 2760 if (so->so_proto->pr_ctloutput != NULL) 2761 error = (*so->so_proto->pr_ctloutput)(so, sopt); 2762 else 2763 error = ENOPROTOOPT; 2764 } else { 2765 switch (sopt->sopt_name) { 2766 case SO_ACCEPTFILTER: 2767 error = accept_filt_setopt(so, sopt); 2768 if (error) 2769 goto bad; 2770 break; 2771 2772 case SO_LINGER: 2773 error = sooptcopyin(sopt, &l, sizeof l, sizeof l); 2774 if (error) 2775 goto bad; 2776 2777 SOCK_LOCK(so); 2778 so->so_linger = l.l_linger; 2779 if (l.l_onoff) 2780 so->so_options |= SO_LINGER; 2781 else 2782 so->so_options &= ~SO_LINGER; 2783 SOCK_UNLOCK(so); 2784 break; 2785 2786 case SO_DEBUG: 2787 case SO_KEEPALIVE: 2788 case SO_DONTROUTE: 2789 case SO_USELOOPBACK: 2790 case SO_BROADCAST: 2791 case SO_REUSEADDR: 2792 case SO_REUSEPORT: 2793 case SO_REUSEPORT_LB: 2794 case SO_OOBINLINE: 2795 case SO_TIMESTAMP: 2796 case SO_BINTIME: 2797 case SO_NOSIGPIPE: 2798 case SO_NO_DDP: 2799 case SO_NO_OFFLOAD: 2800 error = sooptcopyin(sopt, &optval, sizeof optval, 2801 sizeof optval); 2802 if (error) 2803 goto bad; 2804 SOCK_LOCK(so); 2805 if (optval) 2806 so->so_options |= sopt->sopt_name; 2807 else 2808 so->so_options &= ~sopt->sopt_name; 2809 SOCK_UNLOCK(so); 2810 break; 2811 2812 case SO_SETFIB: 2813 error = sooptcopyin(sopt, &optval, sizeof optval, 2814 sizeof optval); 2815 if (error) 2816 goto bad; 2817 2818 if (optval < 0 || optval >= rt_numfibs) { 2819 error = EINVAL; 2820 goto bad; 2821 } 2822 if (((so->so_proto->pr_domain->dom_family == PF_INET) || 2823 (so->so_proto->pr_domain->dom_family == PF_INET6) || 2824 (so->so_proto->pr_domain->dom_family == PF_ROUTE))) 2825 so->so_fibnum = optval; 2826 else 2827 so->so_fibnum = 0; 2828 break; 2829 2830 case SO_USER_COOKIE: 2831 error = sooptcopyin(sopt, &val32, sizeof val32, 2832 sizeof val32); 2833 if (error) 2834 goto bad; 2835 so->so_user_cookie = val32; 2836 break; 2837 2838 case SO_SNDBUF: 2839 case SO_RCVBUF: 2840 case SO_SNDLOWAT: 2841 case SO_RCVLOWAT: 2842 error = sooptcopyin(sopt, &optval, sizeof optval, 2843 sizeof optval); 2844 if (error) 2845 goto bad; 
2846 2847 /* 2848 * Values < 1 make no sense for any of these options, 2849 * so disallow them. 2850 */ 2851 if (optval < 1) { 2852 error = EINVAL; 2853 goto bad; 2854 } 2855 2856 error = sbsetopt(so, sopt->sopt_name, optval); 2857 break; 2858 2859 case SO_SNDTIMEO: 2860 case SO_RCVTIMEO: 2861 #ifdef COMPAT_FREEBSD32 2862 if (SV_CURPROC_FLAG(SV_ILP32)) { 2863 struct timeval32 tv32; 2864 2865 error = sooptcopyin(sopt, &tv32, sizeof tv32, 2866 sizeof tv32); 2867 CP(tv32, tv, tv_sec); 2868 CP(tv32, tv, tv_usec); 2869 } else 2870 #endif 2871 error = sooptcopyin(sopt, &tv, sizeof tv, 2872 sizeof tv); 2873 if (error) 2874 goto bad; 2875 if (tv.tv_sec < 0 || tv.tv_usec < 0 || 2876 tv.tv_usec >= 1000000) { 2877 error = EDOM; 2878 goto bad; 2879 } 2880 if (tv.tv_sec > INT32_MAX) 2881 val = SBT_MAX; 2882 else 2883 val = tvtosbt(tv); 2884 switch (sopt->sopt_name) { 2885 case SO_SNDTIMEO: 2886 so->so_snd.sb_timeo = val; 2887 break; 2888 case SO_RCVTIMEO: 2889 so->so_rcv.sb_timeo = val; 2890 break; 2891 } 2892 break; 2893 2894 case SO_LABEL: 2895 #ifdef MAC 2896 error = sooptcopyin(sopt, &extmac, sizeof extmac, 2897 sizeof extmac); 2898 if (error) 2899 goto bad; 2900 error = mac_setsockopt_label(sopt->sopt_td->td_ucred, 2901 so, &extmac); 2902 #else 2903 error = EOPNOTSUPP; 2904 #endif 2905 break; 2906 2907 case SO_TS_CLOCK: 2908 error = sooptcopyin(sopt, &optval, sizeof optval, 2909 sizeof optval); 2910 if (error) 2911 goto bad; 2912 if (optval < 0 || optval > SO_TS_CLOCK_MAX) { 2913 error = EINVAL; 2914 goto bad; 2915 } 2916 so->so_ts_clock = optval; 2917 break; 2918 2919 case SO_MAX_PACING_RATE: 2920 error = sooptcopyin(sopt, &val32, sizeof(val32), 2921 sizeof(val32)); 2922 if (error) 2923 goto bad; 2924 so->so_max_pacing_rate = val32; 2925 break; 2926 2927 default: 2928 if (V_socket_hhh[HHOOK_SOCKET_OPT]->hhh_nhooks > 0) 2929 error = hhook_run_socket(so, sopt, 2930 HHOOK_SOCKET_OPT); 2931 else 2932 error = ENOPROTOOPT; 2933 break; 2934 } 2935 if (error == 0 && so->so_proto->pr_ctloutput != NULL) 2936 (void)(*so->so_proto->pr_ctloutput)(so, sopt); 2937 } 2938 bad: 2939 CURVNET_RESTORE(); 2940 return (error); 2941 } 2942 2943 /* 2944 * Helper routine for getsockopt. 2945 */ 2946 int 2947 sooptcopyout(struct sockopt *sopt, const void *buf, size_t len) 2948 { 2949 int error; 2950 size_t valsize; 2951 2952 error = 0; 2953 2954 /* 2955 * Documented get behavior is that we always return a value, possibly 2956 * truncated to fit in the user's buffer. Traditional behavior is 2957 * that we always tell the user precisely how much we copied, rather 2958 * than something useful like the total amount we had available for 2959 * her. Note that this interface is not idempotent; the entire 2960 * answer must be generated ahead of time. 
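 *
 * For example, a getsockopt(2) caller that passes *optlen == 2 for an
 * option whose full value is a 4-byte int gets the first two bytes of
 * that int and sees *optlen set to 2 on return; the truncation itself
 * is not reported as an error.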
2961 */ 2962 valsize = min(len, sopt->sopt_valsize); 2963 sopt->sopt_valsize = valsize; 2964 if (sopt->sopt_val != NULL) { 2965 if (sopt->sopt_td != NULL) 2966 error = copyout(buf, sopt->sopt_val, valsize); 2967 else 2968 bcopy(buf, sopt->sopt_val, valsize); 2969 } 2970 return (error); 2971 } 2972 2973 int 2974 sogetopt(struct socket *so, struct sockopt *sopt) 2975 { 2976 int error, optval; 2977 struct linger l; 2978 struct timeval tv; 2979 #ifdef MAC 2980 struct mac extmac; 2981 #endif 2982 2983 CURVNET_SET(so->so_vnet); 2984 error = 0; 2985 if (sopt->sopt_level != SOL_SOCKET) { 2986 if (so->so_proto->pr_ctloutput != NULL) 2987 error = (*so->so_proto->pr_ctloutput)(so, sopt); 2988 else 2989 error = ENOPROTOOPT; 2990 CURVNET_RESTORE(); 2991 return (error); 2992 } else { 2993 switch (sopt->sopt_name) { 2994 case SO_ACCEPTFILTER: 2995 error = accept_filt_getopt(so, sopt); 2996 break; 2997 2998 case SO_LINGER: 2999 SOCK_LOCK(so); 3000 l.l_onoff = so->so_options & SO_LINGER; 3001 l.l_linger = so->so_linger; 3002 SOCK_UNLOCK(so); 3003 error = sooptcopyout(sopt, &l, sizeof l); 3004 break; 3005 3006 case SO_USELOOPBACK: 3007 case SO_DONTROUTE: 3008 case SO_DEBUG: 3009 case SO_KEEPALIVE: 3010 case SO_REUSEADDR: 3011 case SO_REUSEPORT: 3012 case SO_REUSEPORT_LB: 3013 case SO_BROADCAST: 3014 case SO_OOBINLINE: 3015 case SO_ACCEPTCONN: 3016 case SO_TIMESTAMP: 3017 case SO_BINTIME: 3018 case SO_NOSIGPIPE: 3019 optval = so->so_options & sopt->sopt_name; 3020 integer: 3021 error = sooptcopyout(sopt, &optval, sizeof optval); 3022 break; 3023 3024 case SO_DOMAIN: 3025 optval = so->so_proto->pr_domain->dom_family; 3026 goto integer; 3027 3028 case SO_TYPE: 3029 optval = so->so_type; 3030 goto integer; 3031 3032 case SO_PROTOCOL: 3033 optval = so->so_proto->pr_protocol; 3034 goto integer; 3035 3036 case SO_ERROR: 3037 SOCK_LOCK(so); 3038 optval = so->so_error; 3039 so->so_error = 0; 3040 SOCK_UNLOCK(so); 3041 goto integer; 3042 3043 case SO_SNDBUF: 3044 optval = SOLISTENING(so) ? so->sol_sbsnd_hiwat : 3045 so->so_snd.sb_hiwat; 3046 goto integer; 3047 3048 case SO_RCVBUF: 3049 optval = SOLISTENING(so) ? so->sol_sbrcv_hiwat : 3050 so->so_rcv.sb_hiwat; 3051 goto integer; 3052 3053 case SO_SNDLOWAT: 3054 optval = SOLISTENING(so) ? so->sol_sbsnd_lowat : 3055 so->so_snd.sb_lowat; 3056 goto integer; 3057 3058 case SO_RCVLOWAT: 3059 optval = SOLISTENING(so) ? so->sol_sbrcv_lowat : 3060 so->so_rcv.sb_lowat; 3061 goto integer; 3062 3063 case SO_SNDTIMEO: 3064 case SO_RCVTIMEO: 3065 tv = sbttotv(sopt->sopt_name == SO_SNDTIMEO ? 
3066 so->so_snd.sb_timeo : so->so_rcv.sb_timeo); 3067 #ifdef COMPAT_FREEBSD32 3068 if (SV_CURPROC_FLAG(SV_ILP32)) { 3069 struct timeval32 tv32; 3070 3071 CP(tv, tv32, tv_sec); 3072 CP(tv, tv32, tv_usec); 3073 error = sooptcopyout(sopt, &tv32, sizeof tv32); 3074 } else 3075 #endif 3076 error = sooptcopyout(sopt, &tv, sizeof tv); 3077 break; 3078 3079 case SO_LABEL: 3080 #ifdef MAC 3081 error = sooptcopyin(sopt, &extmac, sizeof(extmac), 3082 sizeof(extmac)); 3083 if (error) 3084 goto bad; 3085 error = mac_getsockopt_label(sopt->sopt_td->td_ucred, 3086 so, &extmac); 3087 if (error) 3088 goto bad; 3089 error = sooptcopyout(sopt, &extmac, sizeof extmac); 3090 #else 3091 error = EOPNOTSUPP; 3092 #endif 3093 break; 3094 3095 case SO_PEERLABEL: 3096 #ifdef MAC 3097 error = sooptcopyin(sopt, &extmac, sizeof(extmac), 3098 sizeof(extmac)); 3099 if (error) 3100 goto bad; 3101 error = mac_getsockopt_peerlabel( 3102 sopt->sopt_td->td_ucred, so, &extmac); 3103 if (error) 3104 goto bad; 3105 error = sooptcopyout(sopt, &extmac, sizeof extmac); 3106 #else 3107 error = EOPNOTSUPP; 3108 #endif 3109 break; 3110 3111 case SO_LISTENQLIMIT: 3112 optval = SOLISTENING(so) ? so->sol_qlimit : 0; 3113 goto integer; 3114 3115 case SO_LISTENQLEN: 3116 optval = SOLISTENING(so) ? so->sol_qlen : 0; 3117 goto integer; 3118 3119 case SO_LISTENINCQLEN: 3120 optval = SOLISTENING(so) ? so->sol_incqlen : 0; 3121 goto integer; 3122 3123 case SO_TS_CLOCK: 3124 optval = so->so_ts_clock; 3125 goto integer; 3126 3127 case SO_MAX_PACING_RATE: 3128 optval = so->so_max_pacing_rate; 3129 goto integer; 3130 3131 default: 3132 if (V_socket_hhh[HHOOK_SOCKET_OPT]->hhh_nhooks > 0) 3133 error = hhook_run_socket(so, sopt, 3134 HHOOK_SOCKET_OPT); 3135 else 3136 error = ENOPROTOOPT; 3137 break; 3138 } 3139 } 3140 #ifdef MAC 3141 bad: 3142 #endif 3143 CURVNET_RESTORE(); 3144 return (error); 3145 } 3146 3147 int 3148 soopt_getm(struct sockopt *sopt, struct mbuf **mp) 3149 { 3150 struct mbuf *m, *m_prev; 3151 int sopt_size = sopt->sopt_valsize; 3152 3153 MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA); 3154 if (m == NULL) 3155 return ENOBUFS; 3156 if (sopt_size > MLEN) { 3157 MCLGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT); 3158 if ((m->m_flags & M_EXT) == 0) { 3159 m_free(m); 3160 return ENOBUFS; 3161 } 3162 m->m_len = min(MCLBYTES, sopt_size); 3163 } else { 3164 m->m_len = min(MLEN, sopt_size); 3165 } 3166 sopt_size -= m->m_len; 3167 *mp = m; 3168 m_prev = m; 3169 3170 while (sopt_size) { 3171 MGET(m, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA); 3172 if (m == NULL) { 3173 m_freem(*mp); 3174 return ENOBUFS; 3175 } 3176 if (sopt_size > MLEN) { 3177 MCLGET(m, sopt->sopt_td != NULL ? 
M_WAITOK :
			    M_NOWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				m_freem(*mp);
				return ENOBUFS;
			}
			m->m_len = min(MCLBYTES, sopt_size);
		} else {
			m->m_len = min(MLEN, sopt_size);
		}
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return (0);
}

int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;

	if (sopt->sopt_val == NULL)
		return (0);
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_td != NULL) {
			int error;

			error = copyin(sopt->sopt_val, mtod(m, char *),
			    m->m_len);
			if (error != 0) {
				m_freem(m0);
				return (error);
			}
		} else
			bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
		sopt->sopt_valsize -= m->m_len;
		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
		m = m->m_next;
	}
	/*
	 * The chain should have been allocated large enough by the
	 * ip6_sooptmcopyin() caller; anything left over is a bug.
	 */
	if (m != NULL)
		panic("ip6_sooptmcopyin");
	return (0);
}

int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;

	if (sopt->sopt_val == NULL)
		return (0);
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_td != NULL) {
			int error;

			error = copyout(mtod(m, char *), sopt->sopt_val,
			    m->m_len);
			if (error != 0) {
				m_freem(m0);
				return (error);
			}
		} else
			bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
		sopt->sopt_valsize -= m->m_len;
		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* The user-supplied buffer was too small for the value. */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return (0);
}

/*
 * sohasoutofband(): protocol notifies socket layer of the arrival of new
 * out-of-band data, which will then notify socket consumers.
 */
void
sohasoutofband(struct socket *so)
{

	if (so->so_sigio != NULL)
		pgsigio(&so->so_sigio, SIGURG, 0);
	selwakeuppri(&so->so_rdsel, PSOCK);
}

int
sopoll(struct socket *so, int events, struct ucred *active_cred,
    struct thread *td)
{

	/*
	 * We do not need to set or assert curvnet as long as everyone uses
	 * sopoll_generic().
3278 */ 3279 return (so->so_proto->pr_usrreqs->pru_sopoll(so, events, active_cred, 3280 td)); 3281 } 3282 3283 int 3284 sopoll_generic(struct socket *so, int events, struct ucred *active_cred, 3285 struct thread *td) 3286 { 3287 int revents; 3288 3289 SOCK_LOCK(so); 3290 if (SOLISTENING(so)) { 3291 if (!(events & (POLLIN | POLLRDNORM))) 3292 revents = 0; 3293 else if (!TAILQ_EMPTY(&so->sol_comp)) 3294 revents = events & (POLLIN | POLLRDNORM); 3295 else if ((events & POLLINIGNEOF) == 0 && so->so_error) 3296 revents = (events & (POLLIN | POLLRDNORM)) | POLLHUP; 3297 else { 3298 selrecord(td, &so->so_rdsel); 3299 revents = 0; 3300 } 3301 } else { 3302 revents = 0; 3303 SOCKBUF_LOCK(&so->so_snd); 3304 SOCKBUF_LOCK(&so->so_rcv); 3305 if (events & (POLLIN | POLLRDNORM)) 3306 if (soreadabledata(so)) 3307 revents |= events & (POLLIN | POLLRDNORM); 3308 if (events & (POLLOUT | POLLWRNORM)) 3309 if (sowriteable(so)) 3310 revents |= events & (POLLOUT | POLLWRNORM); 3311 if (events & (POLLPRI | POLLRDBAND)) 3312 if (so->so_oobmark || 3313 (so->so_rcv.sb_state & SBS_RCVATMARK)) 3314 revents |= events & (POLLPRI | POLLRDBAND); 3315 if ((events & POLLINIGNEOF) == 0) { 3316 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 3317 revents |= events & (POLLIN | POLLRDNORM); 3318 if (so->so_snd.sb_state & SBS_CANTSENDMORE) 3319 revents |= POLLHUP; 3320 } 3321 } 3322 if (revents == 0) { 3323 if (events & 3324 (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) { 3325 selrecord(td, &so->so_rdsel); 3326 so->so_rcv.sb_flags |= SB_SEL; 3327 } 3328 if (events & (POLLOUT | POLLWRNORM)) { 3329 selrecord(td, &so->so_wrsel); 3330 so->so_snd.sb_flags |= SB_SEL; 3331 } 3332 } 3333 SOCKBUF_UNLOCK(&so->so_rcv); 3334 SOCKBUF_UNLOCK(&so->so_snd); 3335 } 3336 SOCK_UNLOCK(so); 3337 return (revents); 3338 } 3339 3340 int 3341 soo_kqfilter(struct file *fp, struct knote *kn) 3342 { 3343 struct socket *so = kn->kn_fp->f_data; 3344 struct sockbuf *sb; 3345 struct knlist *knl; 3346 3347 switch (kn->kn_filter) { 3348 case EVFILT_READ: 3349 kn->kn_fop = &soread_filtops; 3350 knl = &so->so_rdsel.si_note; 3351 sb = &so->so_rcv; 3352 break; 3353 case EVFILT_WRITE: 3354 kn->kn_fop = &sowrite_filtops; 3355 knl = &so->so_wrsel.si_note; 3356 sb = &so->so_snd; 3357 break; 3358 case EVFILT_EMPTY: 3359 kn->kn_fop = &soempty_filtops; 3360 knl = &so->so_wrsel.si_note; 3361 sb = &so->so_snd; 3362 break; 3363 default: 3364 return (EINVAL); 3365 } 3366 3367 SOCK_LOCK(so); 3368 if (SOLISTENING(so)) { 3369 knlist_add(knl, kn, 1); 3370 } else { 3371 SOCKBUF_LOCK(sb); 3372 knlist_add(knl, kn, 1); 3373 sb->sb_flags |= SB_KNOTE; 3374 SOCKBUF_UNLOCK(sb); 3375 } 3376 SOCK_UNLOCK(so); 3377 return (0); 3378 } 3379 3380 /* 3381 * Some routines that return EOPNOTSUPP for entry points that are not 3382 * supported by a protocol. Fill in as needed. 
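 *
 * A protocol that does not implement a given entry point simply plugs
 * the matching stub into its pr_usrreqs, e.g. (an illustrative sketch;
 * foo_usrreqs is hypothetical):
 *
 *	static struct pr_usrreqs foo_usrreqs = {
 *		.pru_accept =		pru_accept_notsupp,
 *		.pru_connect2 =		pru_connect2_notsupp,
 *		.pru_rcvoob =		pru_rcvoob_notsupp,
 *	};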
3383 */ 3384 int 3385 pru_accept_notsupp(struct socket *so, struct sockaddr **nam) 3386 { 3387 3388 return EOPNOTSUPP; 3389 } 3390 3391 int 3392 pru_aio_queue_notsupp(struct socket *so, struct kaiocb *job) 3393 { 3394 3395 return EOPNOTSUPP; 3396 } 3397 3398 int 3399 pru_attach_notsupp(struct socket *so, int proto, struct thread *td) 3400 { 3401 3402 return EOPNOTSUPP; 3403 } 3404 3405 int 3406 pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td) 3407 { 3408 3409 return EOPNOTSUPP; 3410 } 3411 3412 int 3413 pru_bindat_notsupp(int fd, struct socket *so, struct sockaddr *nam, 3414 struct thread *td) 3415 { 3416 3417 return EOPNOTSUPP; 3418 } 3419 3420 int 3421 pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td) 3422 { 3423 3424 return EOPNOTSUPP; 3425 } 3426 3427 int 3428 pru_connectat_notsupp(int fd, struct socket *so, struct sockaddr *nam, 3429 struct thread *td) 3430 { 3431 3432 return EOPNOTSUPP; 3433 } 3434 3435 int 3436 pru_connect2_notsupp(struct socket *so1, struct socket *so2) 3437 { 3438 3439 return EOPNOTSUPP; 3440 } 3441 3442 int 3443 pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data, 3444 struct ifnet *ifp, struct thread *td) 3445 { 3446 3447 return EOPNOTSUPP; 3448 } 3449 3450 int 3451 pru_disconnect_notsupp(struct socket *so) 3452 { 3453 3454 return EOPNOTSUPP; 3455 } 3456 3457 int 3458 pru_listen_notsupp(struct socket *so, int backlog, struct thread *td) 3459 { 3460 3461 return EOPNOTSUPP; 3462 } 3463 3464 int 3465 pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam) 3466 { 3467 3468 return EOPNOTSUPP; 3469 } 3470 3471 int 3472 pru_rcvd_notsupp(struct socket *so, int flags) 3473 { 3474 3475 return EOPNOTSUPP; 3476 } 3477 3478 int 3479 pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags) 3480 { 3481 3482 return EOPNOTSUPP; 3483 } 3484 3485 int 3486 pru_send_notsupp(struct socket *so, int flags, struct mbuf *m, 3487 struct sockaddr *addr, struct mbuf *control, struct thread *td) 3488 { 3489 3490 return EOPNOTSUPP; 3491 } 3492 3493 int 3494 pru_ready_notsupp(struct socket *so, struct mbuf *m, int count) 3495 { 3496 3497 return (EOPNOTSUPP); 3498 } 3499 3500 /* 3501 * This isn't really a ``null'' operation, but it's the default one and 3502 * doesn't do anything destructive. 
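 * Its visible effect is that fstat(2) on a socket reports the send
 * buffer's high-water mark as st_blksize.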
3503 */ 3504 int 3505 pru_sense_null(struct socket *so, struct stat *sb) 3506 { 3507 3508 sb->st_blksize = so->so_snd.sb_hiwat; 3509 return 0; 3510 } 3511 3512 int 3513 pru_shutdown_notsupp(struct socket *so) 3514 { 3515 3516 return EOPNOTSUPP; 3517 } 3518 3519 int 3520 pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam) 3521 { 3522 3523 return EOPNOTSUPP; 3524 } 3525 3526 int 3527 pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio, 3528 struct mbuf *top, struct mbuf *control, int flags, struct thread *td) 3529 { 3530 3531 return EOPNOTSUPP; 3532 } 3533 3534 int 3535 pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr, 3536 struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp) 3537 { 3538 3539 return EOPNOTSUPP; 3540 } 3541 3542 int 3543 pru_sopoll_notsupp(struct socket *so, int events, struct ucred *cred, 3544 struct thread *td) 3545 { 3546 3547 return EOPNOTSUPP; 3548 } 3549 3550 static void 3551 filt_sordetach(struct knote *kn) 3552 { 3553 struct socket *so = kn->kn_fp->f_data; 3554 3555 so_rdknl_lock(so); 3556 knlist_remove(&so->so_rdsel.si_note, kn, 1); 3557 if (!SOLISTENING(so) && knlist_empty(&so->so_rdsel.si_note)) 3558 so->so_rcv.sb_flags &= ~SB_KNOTE; 3559 so_rdknl_unlock(so); 3560 } 3561 3562 /*ARGSUSED*/ 3563 static int 3564 filt_soread(struct knote *kn, long hint) 3565 { 3566 struct socket *so; 3567 3568 so = kn->kn_fp->f_data; 3569 3570 if (SOLISTENING(so)) { 3571 SOCK_LOCK_ASSERT(so); 3572 kn->kn_data = so->sol_qlen; 3573 if (so->so_error) { 3574 kn->kn_flags |= EV_EOF; 3575 kn->kn_fflags = so->so_error; 3576 return (1); 3577 } 3578 return (!TAILQ_EMPTY(&so->sol_comp)); 3579 } 3580 3581 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 3582 3583 kn->kn_data = sbavail(&so->so_rcv) - so->so_rcv.sb_ctl; 3584 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 3585 kn->kn_flags |= EV_EOF; 3586 kn->kn_fflags = so->so_error; 3587 return (1); 3588 } else if (so->so_error) /* temporary udp error */ 3589 return (1); 3590 3591 if (kn->kn_sfflags & NOTE_LOWAT) { 3592 if (kn->kn_data >= kn->kn_sdata) 3593 return (1); 3594 } else if (sbavail(&so->so_rcv) >= so->so_rcv.sb_lowat) 3595 return (1); 3596 3597 /* This hook returning non-zero indicates an event, not error */ 3598 return (hhook_run_socket(so, NULL, HHOOK_FILT_SOREAD)); 3599 } 3600 3601 static void 3602 filt_sowdetach(struct knote *kn) 3603 { 3604 struct socket *so = kn->kn_fp->f_data; 3605 3606 so_wrknl_lock(so); 3607 knlist_remove(&so->so_wrsel.si_note, kn, 1); 3608 if (!SOLISTENING(so) && knlist_empty(&so->so_wrsel.si_note)) 3609 so->so_snd.sb_flags &= ~SB_KNOTE; 3610 so_wrknl_unlock(so); 3611 } 3612 3613 /*ARGSUSED*/ 3614 static int 3615 filt_sowrite(struct knote *kn, long hint) 3616 { 3617 struct socket *so; 3618 3619 so = kn->kn_fp->f_data; 3620 3621 if (SOLISTENING(so)) 3622 return (0); 3623 3624 SOCKBUF_LOCK_ASSERT(&so->so_snd); 3625 kn->kn_data = sbspace(&so->so_snd); 3626 3627 hhook_run_socket(so, kn, HHOOK_FILT_SOWRITE); 3628 3629 if (so->so_snd.sb_state & SBS_CANTSENDMORE) { 3630 kn->kn_flags |= EV_EOF; 3631 kn->kn_fflags = so->so_error; 3632 return (1); 3633 } else if (so->so_error) /* temporary udp error */ 3634 return (1); 3635 else if (((so->so_state & SS_ISCONNECTED) == 0) && 3636 (so->so_proto->pr_flags & PR_CONNREQUIRED)) 3637 return (0); 3638 else if (kn->kn_sfflags & NOTE_LOWAT) 3639 return (kn->kn_data >= kn->kn_sdata); 3640 else 3641 return (kn->kn_data >= so->so_snd.sb_lowat); 3642 } 3643 3644 static int 3645 filt_soempty(struct knote *kn, long hint) 3646 { 
	struct socket *so;

	so = kn->kn_fp->f_data;

	if (SOLISTENING(so))
		return (1);

	SOCKBUF_LOCK_ASSERT(&so->so_snd);
	kn->kn_data = sbused(&so->so_snd);

	if (kn->kn_data == 0)
		return (1);
	else
		return (0);
}

int
socheckuid(struct socket *so, uid_t uid)
{

	if (so == NULL)
		return (EPERM);
	if (so->so_cred->cr_uid != uid)
		return (EPERM);
	return (0);
}

/*
 * These functions are used by protocols to notify the socket layer (and its
 * consumers) of state changes in the sockets driven by protocol-side events.
 */

/*
 * Procedures to manipulate state flags of socket and do appropriate wakeups.
 *
 * Normal sequence from the active (originating) side is that
 * soisconnecting() is called during processing of connect() call, resulting
 * in an eventual call to soisconnected() if/when the connection is
 * established.  When the connection is torn down soisdisconnecting() is
 * called during processing of disconnect() call, and soisdisconnected() is
 * called when the connection to the peer is totally severed.  The semantics
 * of these routines are such that connectionless protocols can call
 * soisconnected() and soisdisconnected() only, bypassing the in-progress
 * calls when setting up a ``connection'' takes no time.
 *
 * From the passive side, a socket is created with two queues of sockets:
 * so_incomp for connections in progress and so_comp for connections already
 * made and awaiting user acceptance.  As a protocol is preparing incoming
 * connections, it creates a socket structure queued on so_incomp by calling
 * sonewconn().  When the connection is established, soisconnected() is
 * called, and transfers the socket structure to so_comp, making it available
 * to accept().
 *
 * If a socket is closed with sockets on either so_incomp or so_comp, these
 * sockets are dropped.
 *
 * If higher-level protocols are implemented in the kernel, the wakeups done
 * here will sometimes cause software-interrupt process scheduling.
 */
void
soisconnecting(struct socket *so)
{

	SOCK_LOCK(so);
	so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= SS_ISCONNECTING;
	SOCK_UNLOCK(so);
}

void
soisconnected(struct socket *so)
{

	SOCK_LOCK(so);
	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
	so->so_state |= SS_ISCONNECTED;

	if (so->so_qstate == SQ_INCOMP) {
		struct socket *head = so->so_listen;
		int ret;

		KASSERT(head, ("%s: so %p on incomp of NULL", __func__, so));
		/*
		 * When promoting a socket from the incomplete queue to the
		 * complete queue, we need to take the locks in the reverse
		 * of the usual order.  We first trylock, and if that fails
		 * we go the hard way: leave a reference behind, lock in the
		 * proper order, and recheck consistency once both locks are
		 * held.
		 */
		if (__predict_false(SOLISTEN_TRYLOCK(head) == 0)) {
			soref(head);
			SOCK_UNLOCK(so);
			SOLISTEN_LOCK(head);
			SOCK_LOCK(so);
			if (__predict_false(head != so->so_listen)) {
				/*
				 * The socket left the listen queue; most
				 * likely we lost a race with close(2) on the
				 * listening socket, and the socket is about
				 * to be soabort()ed.
				 */
				SOCK_UNLOCK(so);
				sorele(head);
				return;
			}
			/* Not the last reference, as 'so' itself holds one.
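			 * A bare refcount_release() therefore cannot drop
			 * head's count to zero here, which is why the
			 * heavier sorele() path is not needed.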
*/ 3752 refcount_release(&head->so_count); 3753 } 3754 again: 3755 if ((so->so_options & SO_ACCEPTFILTER) == 0) { 3756 TAILQ_REMOVE(&head->sol_incomp, so, so_list); 3757 head->sol_incqlen--; 3758 TAILQ_INSERT_TAIL(&head->sol_comp, so, so_list); 3759 head->sol_qlen++; 3760 so->so_qstate = SQ_COMP; 3761 SOCK_UNLOCK(so); 3762 solisten_wakeup(head); /* unlocks */ 3763 } else { 3764 SOCKBUF_LOCK(&so->so_rcv); 3765 soupcall_set(so, SO_RCV, 3766 head->sol_accept_filter->accf_callback, 3767 head->sol_accept_filter_arg); 3768 so->so_options &= ~SO_ACCEPTFILTER; 3769 ret = head->sol_accept_filter->accf_callback(so, 3770 head->sol_accept_filter_arg, M_NOWAIT); 3771 if (ret == SU_ISCONNECTED) { 3772 soupcall_clear(so, SO_RCV); 3773 SOCKBUF_UNLOCK(&so->so_rcv); 3774 goto again; 3775 } 3776 SOCKBUF_UNLOCK(&so->so_rcv); 3777 SOCK_UNLOCK(so); 3778 SOLISTEN_UNLOCK(head); 3779 } 3780 return; 3781 } 3782 SOCK_UNLOCK(so); 3783 wakeup(&so->so_timeo); 3784 sorwakeup(so); 3785 sowwakeup(so); 3786 } 3787 3788 void 3789 soisdisconnecting(struct socket *so) 3790 { 3791 3792 SOCK_LOCK(so); 3793 so->so_state &= ~SS_ISCONNECTING; 3794 so->so_state |= SS_ISDISCONNECTING; 3795 3796 if (!SOLISTENING(so)) { 3797 SOCKBUF_LOCK(&so->so_rcv); 3798 socantrcvmore_locked(so); 3799 SOCKBUF_LOCK(&so->so_snd); 3800 socantsendmore_locked(so); 3801 } 3802 SOCK_UNLOCK(so); 3803 wakeup(&so->so_timeo); 3804 } 3805 3806 void 3807 soisdisconnected(struct socket *so) 3808 { 3809 3810 SOCK_LOCK(so); 3811 so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING); 3812 so->so_state |= SS_ISDISCONNECTED; 3813 3814 if (!SOLISTENING(so)) { 3815 SOCK_UNLOCK(so); 3816 SOCKBUF_LOCK(&so->so_rcv); 3817 socantrcvmore_locked(so); 3818 SOCKBUF_LOCK(&so->so_snd); 3819 sbdrop_locked(&so->so_snd, sbused(&so->so_snd)); 3820 socantsendmore_locked(so); 3821 } else 3822 SOCK_UNLOCK(so); 3823 wakeup(&so->so_timeo); 3824 } 3825 3826 /* 3827 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME. 3828 */ 3829 struct sockaddr * 3830 sodupsockaddr(const struct sockaddr *sa, int mflags) 3831 { 3832 struct sockaddr *sa2; 3833 3834 sa2 = malloc(sa->sa_len, M_SONAME, mflags); 3835 if (sa2) 3836 bcopy(sa, sa2, sa->sa_len); 3837 return sa2; 3838 } 3839 3840 /* 3841 * Register per-socket destructor. 3842 */ 3843 void 3844 sodtor_set(struct socket *so, so_dtor_t *func) 3845 { 3846 3847 SOCK_LOCK_ASSERT(so); 3848 so->so_dtor = func; 3849 } 3850 3851 /* 3852 * Register per-socket buffer upcalls. 
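 *
 * An upcall lets an in-kernel consumer learn of socket buffer activity
 * without sleeping in soreceive()/sosend().  A sketch of typical use
 * (foo_soupcall and its softc are hypothetical):
 *
 *	static int
 *	foo_soupcall(struct socket *so, void *arg, int waitflag)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		taskqueue_enqueue(sc->sc_tq, &sc->sc_task);
 *		return (SU_OK);
 *	}
 *
 *	SOCKBUF_LOCK(&so->so_rcv);
 *	soupcall_set(so, SO_RCV, foo_soupcall, sc);
 *	SOCKBUF_UNLOCK(&so->so_rcv);
 *
 * The upcall is invoked with the corresponding sockbuf lock held, so it
 * must not sleep; deferring real work to a taskqueue, as above, is the
 * usual pattern.  SU_ISCONNECTED is reserved for accept filters.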
3853 */ 3854 void 3855 soupcall_set(struct socket *so, int which, so_upcall_t func, void *arg) 3856 { 3857 struct sockbuf *sb; 3858 3859 KASSERT(!SOLISTENING(so), ("%s: so %p listening", __func__, so)); 3860 3861 switch (which) { 3862 case SO_RCV: 3863 sb = &so->so_rcv; 3864 break; 3865 case SO_SND: 3866 sb = &so->so_snd; 3867 break; 3868 default: 3869 panic("soupcall_set: bad which"); 3870 } 3871 SOCKBUF_LOCK_ASSERT(sb); 3872 sb->sb_upcall = func; 3873 sb->sb_upcallarg = arg; 3874 sb->sb_flags |= SB_UPCALL; 3875 } 3876 3877 void 3878 soupcall_clear(struct socket *so, int which) 3879 { 3880 struct sockbuf *sb; 3881 3882 KASSERT(!SOLISTENING(so), ("%s: so %p listening", __func__, so)); 3883 3884 switch (which) { 3885 case SO_RCV: 3886 sb = &so->so_rcv; 3887 break; 3888 case SO_SND: 3889 sb = &so->so_snd; 3890 break; 3891 default: 3892 panic("soupcall_clear: bad which"); 3893 } 3894 SOCKBUF_LOCK_ASSERT(sb); 3895 KASSERT(sb->sb_upcall != NULL, 3896 ("%s: so %p no upcall to clear", __func__, so)); 3897 sb->sb_upcall = NULL; 3898 sb->sb_upcallarg = NULL; 3899 sb->sb_flags &= ~SB_UPCALL; 3900 } 3901 3902 void 3903 solisten_upcall_set(struct socket *so, so_upcall_t func, void *arg) 3904 { 3905 3906 SOLISTEN_LOCK_ASSERT(so); 3907 so->sol_upcall = func; 3908 so->sol_upcallarg = arg; 3909 } 3910 3911 static void 3912 so_rdknl_lock(void *arg) 3913 { 3914 struct socket *so = arg; 3915 3916 if (SOLISTENING(so)) 3917 SOCK_LOCK(so); 3918 else 3919 SOCKBUF_LOCK(&so->so_rcv); 3920 } 3921 3922 static void 3923 so_rdknl_unlock(void *arg) 3924 { 3925 struct socket *so = arg; 3926 3927 if (SOLISTENING(so)) 3928 SOCK_UNLOCK(so); 3929 else 3930 SOCKBUF_UNLOCK(&so->so_rcv); 3931 } 3932 3933 static void 3934 so_rdknl_assert_locked(void *arg) 3935 { 3936 struct socket *so = arg; 3937 3938 if (SOLISTENING(so)) 3939 SOCK_LOCK_ASSERT(so); 3940 else 3941 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 3942 } 3943 3944 static void 3945 so_rdknl_assert_unlocked(void *arg) 3946 { 3947 struct socket *so = arg; 3948 3949 if (SOLISTENING(so)) 3950 SOCK_UNLOCK_ASSERT(so); 3951 else 3952 SOCKBUF_UNLOCK_ASSERT(&so->so_rcv); 3953 } 3954 3955 static void 3956 so_wrknl_lock(void *arg) 3957 { 3958 struct socket *so = arg; 3959 3960 if (SOLISTENING(so)) 3961 SOCK_LOCK(so); 3962 else 3963 SOCKBUF_LOCK(&so->so_snd); 3964 } 3965 3966 static void 3967 so_wrknl_unlock(void *arg) 3968 { 3969 struct socket *so = arg; 3970 3971 if (SOLISTENING(so)) 3972 SOCK_UNLOCK(so); 3973 else 3974 SOCKBUF_UNLOCK(&so->so_snd); 3975 } 3976 3977 static void 3978 so_wrknl_assert_locked(void *arg) 3979 { 3980 struct socket *so = arg; 3981 3982 if (SOLISTENING(so)) 3983 SOCK_LOCK_ASSERT(so); 3984 else 3985 SOCKBUF_LOCK_ASSERT(&so->so_snd); 3986 } 3987 3988 static void 3989 so_wrknl_assert_unlocked(void *arg) 3990 { 3991 struct socket *so = arg; 3992 3993 if (SOLISTENING(so)) 3994 SOCK_UNLOCK_ASSERT(so); 3995 else 3996 SOCKBUF_UNLOCK_ASSERT(&so->so_snd); 3997 } 3998 3999 /* 4000 * Create an external-format (``xsocket'') structure using the information in 4001 * the kernel-format socket structure pointed to by so. This is done to 4002 * reduce the spew of irrelevant information over this interface, to isolate 4003 * user code from changes in the kernel structure, and potentially to provide 4004 * information-hiding if we decide that some of this information should be 4005 * hidden from users. 
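 *
 * (The resulting xsocket structures are what monitoring tools such as
 * netstat(1) and sockstat(1) ultimately consume through the kernel's
 * pcblist sysctls.)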
4006 */ 4007 void 4008 sotoxsocket(struct socket *so, struct xsocket *xso) 4009 { 4010 4011 bzero(xso, sizeof(*xso)); 4012 xso->xso_len = sizeof *xso; 4013 xso->xso_so = (uintptr_t)so; 4014 xso->so_type = so->so_type; 4015 xso->so_options = so->so_options; 4016 xso->so_linger = so->so_linger; 4017 xso->so_state = so->so_state; 4018 xso->so_pcb = (uintptr_t)so->so_pcb; 4019 xso->xso_protocol = so->so_proto->pr_protocol; 4020 xso->xso_family = so->so_proto->pr_domain->dom_family; 4021 xso->so_timeo = so->so_timeo; 4022 xso->so_error = so->so_error; 4023 xso->so_uid = so->so_cred->cr_uid; 4024 xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0; 4025 if (SOLISTENING(so)) { 4026 xso->so_qlen = so->sol_qlen; 4027 xso->so_incqlen = so->sol_incqlen; 4028 xso->so_qlimit = so->sol_qlimit; 4029 xso->so_oobmark = 0; 4030 } else { 4031 xso->so_state |= so->so_qstate; 4032 xso->so_qlen = xso->so_incqlen = xso->so_qlimit = 0; 4033 xso->so_oobmark = so->so_oobmark; 4034 sbtoxsockbuf(&so->so_snd, &xso->so_snd); 4035 sbtoxsockbuf(&so->so_rcv, &xso->so_rcv); 4036 } 4037 } 4038 4039 struct sockbuf * 4040 so_sockbuf_rcv(struct socket *so) 4041 { 4042 4043 return (&so->so_rcv); 4044 } 4045 4046 struct sockbuf * 4047 so_sockbuf_snd(struct socket *so) 4048 { 4049 4050 return (&so->so_snd); 4051 } 4052 4053 int 4054 so_state_get(const struct socket *so) 4055 { 4056 4057 return (so->so_state); 4058 } 4059 4060 void 4061 so_state_set(struct socket *so, int val) 4062 { 4063 4064 so->so_state = val; 4065 } 4066 4067 int 4068 so_options_get(const struct socket *so) 4069 { 4070 4071 return (so->so_options); 4072 } 4073 4074 void 4075 so_options_set(struct socket *so, int val) 4076 { 4077 4078 so->so_options = val; 4079 } 4080 4081 int 4082 so_error_get(const struct socket *so) 4083 { 4084 4085 return (so->so_error); 4086 } 4087 4088 void 4089 so_error_set(struct socket *so, int val) 4090 { 4091 4092 so->so_error = val; 4093 } 4094 4095 int 4096 so_linger_get(const struct socket *so) 4097 { 4098 4099 return (so->so_linger); 4100 } 4101 4102 void 4103 so_linger_set(struct socket *so, int val) 4104 { 4105 4106 so->so_linger = val; 4107 } 4108 4109 struct protosw * 4110 so_protosw_get(const struct socket *so) 4111 { 4112 4113 return (so->so_proto); 4114 } 4115 4116 void 4117 so_protosw_set(struct socket *so, struct protosw *val) 4118 { 4119 4120 so->so_proto = val; 4121 } 4122 4123 void 4124 so_sorwakeup(struct socket *so) 4125 { 4126 4127 sorwakeup(so); 4128 } 4129 4130 void 4131 so_sowwakeup(struct socket *so) 4132 { 4133 4134 sowwakeup(so); 4135 } 4136 4137 void 4138 so_sorwakeup_locked(struct socket *so) 4139 { 4140 4141 sorwakeup_locked(so); 4142 } 4143 4144 void 4145 so_sowwakeup_locked(struct socket *so) 4146 { 4147 4148 sowwakeup_locked(so); 4149 } 4150 4151 void 4152 so_lock(struct socket *so) 4153 { 4154 4155 SOCK_LOCK(so); 4156 } 4157 4158 void 4159 so_unlock(struct socket *so) 4160 { 4161 4162 SOCK_UNLOCK(so); 4163 } 4164