/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2004 The FreeBSD Foundation
 * Copyright (c) 2004-2008 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 */

/*
 * Comments on the socket life cycle:
 *
 * soalloc() sets up socket layer state for a socket, called only by
 * socreate() and sonewconn().  Socket layer private.
 *
 * sodealloc() tears down socket layer state for a socket, called only by
 * sofree() and sonewconn().  Socket layer private.
 *
 * pru_attach() associates protocol layer state with an allocated socket;
 * called only once, may fail, aborting socket allocation.  This is called
 * from socreate() and sonewconn().  Socket layer private.
 *
 * pru_detach() disassociates protocol layer state from an attached socket,
 * and will be called exactly once for sockets in which pru_attach() has
 * been successfully called.  If pru_attach() returned an error,
 * pru_detach() will not be called.  Socket layer private.
 *
 * pru_abort() and pru_close() notify the protocol layer that the last
 * consumer of a socket is starting to tear down the socket, and that the
 * protocol should terminate the connection.  Historically, pru_abort() also
 * detached protocol state from the socket state, but this is no longer the
 * case.
 *
 * socreate() creates a socket and attaches protocol state.  This is a public
 * interface that may be used by socket layer consumers to create new
 * sockets.
 *
 * sonewconn() creates a socket and attaches protocol state.  This is a
 * public interface that may be used by protocols to create new sockets when
 * a new connection is received and will be available for accept() on a
 * listen socket.
 *
 * soclose() destroys a socket after possibly waiting for it to disconnect.
 * This is a public interface that socket consumers should use to close and
 * release a socket when done with it.
 *
 * soabort() destroys a socket without waiting for it to disconnect (used
 * only for incoming connections that are already partially or fully
 * connected).  This is used internally by the socket layer when clearing
 * listen socket queues (due to overflow or close on the listen socket), but
 * is also a public interface protocols may use to abort connections in
 * their incomplete listen queues should they no longer be required.  Sockets
 * placed in completed connection listen queues should not be aborted for
 * reasons described in the comment above the soclose() implementation.  This
 * is not a general purpose close routine, and except in the specific
 * circumstances described here, should not be used.
 *
 * sofree() will free a socket and its protocol state if all references on
 * the socket have been released, and is the interface used by the socket
 * layer to attempt to free a socket when a reference is removed.  This is a
 * socket layer private interface.
 *
 * NOTE: In addition to socreate() and soclose(), which provide a single
 * socket reference to the consumer to be managed as required, there are two
 * calls to explicitly manage socket references, soref() and sorele().
 * Currently, these are generally required only when transitioning a socket
 * from a listen queue to a file descriptor, in order to prevent garbage
 * collection of the socket at an untimely moment.  For a number of reasons,
 * these interfaces are not preferred, and should be avoided.
 *
 * NOTE: With regard to VNETs the general rule is that callers do not set
 * curvnet.  Exceptions to this rule include soabort(), sodisconnect(),
 * sofree() (and with that sorele(), sotryfree()), as well as sonewconn()
 * and sorflush(), which are usually called from a pre-set VNET context.
 * sopoll() currently does not need a VNET context to be set.
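 *
 * As a rough illustration of the consumer-facing half of this cycle (a
 * sketch, not code from this file; error handling is abbreviated and the
 * address setup is hypothetical), a kernel consumer typically does:
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_DGRAM, 0, td->td_ucred, td);
 *	if (error)
 *		return (error);
 *	error = sobind(so, (struct sockaddr *)&sin, td);
 *	...
 *	soclose(so);	/* drops the single reference from socreate() */
 *
 * socreate() returns a socket holding one reference; soclose() notifies the
 * protocol via pru_close() and releases that reference, after which
 * sofree()/sodealloc() may run.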
101 */ 102 103 #include <sys/cdefs.h> 104 __FBSDID("$FreeBSD$"); 105 106 #include "opt_inet.h" 107 #include "opt_inet6.h" 108 #include "opt_zero.h" 109 #include "opt_compat.h" 110 111 #include <sys/param.h> 112 #include <sys/systm.h> 113 #include <sys/fcntl.h> 114 #include <sys/limits.h> 115 #include <sys/lock.h> 116 #include <sys/mac.h> 117 #include <sys/malloc.h> 118 #include <sys/mbuf.h> 119 #include <sys/mutex.h> 120 #include <sys/domain.h> 121 #include <sys/file.h> /* for struct knote */ 122 #include <sys/kernel.h> 123 #include <sys/event.h> 124 #include <sys/eventhandler.h> 125 #include <sys/poll.h> 126 #include <sys/proc.h> 127 #include <sys/protosw.h> 128 #include <sys/socket.h> 129 #include <sys/socketvar.h> 130 #include <sys/resourcevar.h> 131 #include <net/route.h> 132 #include <sys/signalvar.h> 133 #include <sys/stat.h> 134 #include <sys/sx.h> 135 #include <sys/sysctl.h> 136 #include <sys/uio.h> 137 #include <sys/jail.h> 138 139 #include <net/vnet.h> 140 141 #include <security/mac/mac_framework.h> 142 143 #include <vm/uma.h> 144 145 #ifdef COMPAT_FREEBSD32 146 #include <sys/mount.h> 147 #include <sys/sysent.h> 148 #include <compat/freebsd32/freebsd32.h> 149 #endif 150 151 static int soreceive_rcvoob(struct socket *so, struct uio *uio, 152 int flags); 153 154 static void filt_sordetach(struct knote *kn); 155 static int filt_soread(struct knote *kn, long hint); 156 static void filt_sowdetach(struct knote *kn); 157 static int filt_sowrite(struct knote *kn, long hint); 158 static int filt_solisten(struct knote *kn, long hint); 159 160 static struct filterops solisten_filtops = { 161 .f_isfd = 1, 162 .f_detach = filt_sordetach, 163 .f_event = filt_solisten, 164 }; 165 static struct filterops soread_filtops = { 166 .f_isfd = 1, 167 .f_detach = filt_sordetach, 168 .f_event = filt_soread, 169 }; 170 static struct filterops sowrite_filtops = { 171 .f_isfd = 1, 172 .f_detach = filt_sowdetach, 173 .f_event = filt_sowrite, 174 }; 175 176 uma_zone_t socket_zone; 177 so_gen_t so_gencnt; /* generation count for sockets */ 178 179 int maxsockets; 180 181 MALLOC_DEFINE(M_SONAME, "soname", "socket name"); 182 MALLOC_DEFINE(M_PCB, "pcb", "protocol control block"); 183 184 #define VNET_SO_ASSERT(so) \ 185 VNET_ASSERT(curvnet != NULL, \ 186 ("%s:%d curvnet is NULL, so=%p", __func__, __LINE__, (so))); 187 188 static int somaxconn = SOMAXCONN; 189 static int sysctl_somaxconn(SYSCTL_HANDLER_ARGS); 190 /* XXX: we dont have SYSCTL_USHORT */ 191 SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLTYPE_UINT | CTLFLAG_RW, 192 0, sizeof(int), sysctl_somaxconn, "I", "Maximum pending socket connection " 193 "queue size"); 194 static int numopensockets; 195 SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD, 196 &numopensockets, 0, "Number of open sockets"); 197 #ifdef ZERO_COPY_SOCKETS 198 /* These aren't static because they're used in other files. */ 199 int so_zero_copy_send = 1; 200 int so_zero_copy_receive = 1; 201 SYSCTL_NODE(_kern_ipc, OID_AUTO, zero_copy, CTLFLAG_RD, 0, 202 "Zero copy controls"); 203 SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, receive, CTLFLAG_RW, 204 &so_zero_copy_receive, 0, "Enable zero copy receive"); 205 SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, send, CTLFLAG_RW, 206 &so_zero_copy_send, 0, "Enable zero copy send"); 207 #endif /* ZERO_COPY_SOCKETS */ 208 209 /* 210 * accept_mtx locks down per-socket fields relating to accept queues. See 211 * socketvar.h for an annotation of the protected fields of struct socket. 
212 */ 213 struct mtx accept_mtx; 214 MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF); 215 216 /* 217 * so_global_mtx protects so_gencnt, numopensockets, and the per-socket 218 * so_gencnt field. 219 */ 220 static struct mtx so_global_mtx; 221 MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_glabel", MTX_DEF); 222 223 /* 224 * General IPC sysctl name space, used by sockets and a variety of other IPC 225 * types. 226 */ 227 SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC"); 228 229 /* 230 * Sysctl to get and set the maximum global sockets limit. Notify protocols 231 * of the change so that they can update their dependent limits as required. 232 */ 233 static int 234 sysctl_maxsockets(SYSCTL_HANDLER_ARGS) 235 { 236 int error, newmaxsockets; 237 238 newmaxsockets = maxsockets; 239 error = sysctl_handle_int(oidp, &newmaxsockets, 0, req); 240 if (error == 0 && req->newptr) { 241 if (newmaxsockets > maxsockets) { 242 maxsockets = newmaxsockets; 243 if (maxsockets > ((maxfiles / 4) * 3)) { 244 maxfiles = (maxsockets * 5) / 4; 245 maxfilesperproc = (maxfiles * 9) / 10; 246 } 247 EVENTHANDLER_INVOKE(maxsockets_change); 248 } else 249 error = EINVAL; 250 } 251 return (error); 252 } 253 254 SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets, CTLTYPE_INT|CTLFLAG_RW, 255 &maxsockets, 0, sysctl_maxsockets, "IU", 256 "Maximum number of sockets avaliable"); 257 258 /* 259 * Initialise maxsockets. This SYSINIT must be run after 260 * tunable_mbinit(). 261 */ 262 static void 263 init_maxsockets(void *ignored) 264 { 265 266 TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets); 267 maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters)); 268 } 269 SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL); 270 271 /* 272 * Socket operation routines. These routines are called by the routines in 273 * sys_socket.c or from a system process, and implement the semantics of 274 * socket operations by switching out to the protocol specific routines. 275 */ 276 277 /* 278 * Get a socket structure from our zone, and initialize it. Note that it 279 * would probably be better to allocate socket and PCB at the same time, but 280 * I'm not convinced that all the protocols can be easily modified to do 281 * this. 282 * 283 * soalloc() returns a socket with a ref count of 0. 284 */ 285 static struct socket * 286 soalloc(struct vnet *vnet) 287 { 288 struct socket *so; 289 290 so = uma_zalloc(socket_zone, M_NOWAIT | M_ZERO); 291 if (so == NULL) 292 return (NULL); 293 #ifdef MAC 294 if (mac_socket_init(so, M_NOWAIT) != 0) { 295 uma_zfree(socket_zone, so); 296 return (NULL); 297 } 298 #endif 299 SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd"); 300 SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv"); 301 sx_init(&so->so_snd.sb_sx, "so_snd_sx"); 302 sx_init(&so->so_rcv.sb_sx, "so_rcv_sx"); 303 TAILQ_INIT(&so->so_aiojobq); 304 mtx_lock(&so_global_mtx); 305 so->so_gencnt = ++so_gencnt; 306 ++numopensockets; 307 #ifdef VIMAGE 308 VNET_ASSERT(vnet != NULL, ("%s:%d vnet is NULL, so=%p", 309 __func__, __LINE__, so)); 310 vnet->vnet_sockcnt++; 311 so->so_vnet = vnet; 312 #endif 313 mtx_unlock(&so_global_mtx); 314 return (so); 315 } 316 317 /* 318 * Free the storage associated with a socket at the socket layer, tear down 319 * locks, labels, etc. All protocol state is assumed already to have been 320 * torn down (and possibly never set up) by the caller. 
321 */ 322 static void 323 sodealloc(struct socket *so) 324 { 325 326 KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count)); 327 KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL")); 328 329 mtx_lock(&so_global_mtx); 330 so->so_gencnt = ++so_gencnt; 331 --numopensockets; /* Could be below, but faster here. */ 332 #ifdef VIMAGE 333 VNET_ASSERT(so->so_vnet != NULL, ("%s:%d so_vnet is NULL, so=%p", 334 __func__, __LINE__, so)); 335 so->so_vnet->vnet_sockcnt--; 336 #endif 337 mtx_unlock(&so_global_mtx); 338 if (so->so_rcv.sb_hiwat) 339 (void)chgsbsize(so->so_cred->cr_uidinfo, 340 &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY); 341 if (so->so_snd.sb_hiwat) 342 (void)chgsbsize(so->so_cred->cr_uidinfo, 343 &so->so_snd.sb_hiwat, 0, RLIM_INFINITY); 344 #ifdef INET 345 /* remove acccept filter if one is present. */ 346 if (so->so_accf != NULL) 347 do_setopt_accept_filter(so, NULL); 348 #endif 349 #ifdef MAC 350 mac_socket_destroy(so); 351 #endif 352 crfree(so->so_cred); 353 sx_destroy(&so->so_snd.sb_sx); 354 sx_destroy(&so->so_rcv.sb_sx); 355 SOCKBUF_LOCK_DESTROY(&so->so_snd); 356 SOCKBUF_LOCK_DESTROY(&so->so_rcv); 357 uma_zfree(socket_zone, so); 358 } 359 360 /* 361 * socreate returns a socket with a ref count of 1. The socket should be 362 * closed with soclose(). 363 */ 364 int 365 socreate(int dom, struct socket **aso, int type, int proto, 366 struct ucred *cred, struct thread *td) 367 { 368 struct protosw *prp; 369 struct socket *so; 370 int error; 371 372 if (proto) 373 prp = pffindproto(dom, proto, type); 374 else 375 prp = pffindtype(dom, type); 376 377 if (prp == NULL || prp->pr_usrreqs->pru_attach == NULL || 378 prp->pr_usrreqs->pru_attach == pru_attach_notsupp) 379 return (EPROTONOSUPPORT); 380 381 if (prison_check_af(cred, prp->pr_domain->dom_family) != 0) 382 return (EPROTONOSUPPORT); 383 384 if (prp->pr_type != type) 385 return (EPROTOTYPE); 386 so = soalloc(CRED_TO_VNET(cred)); 387 if (so == NULL) 388 return (ENOBUFS); 389 390 TAILQ_INIT(&so->so_incomp); 391 TAILQ_INIT(&so->so_comp); 392 so->so_type = type; 393 so->so_cred = crhold(cred); 394 if ((prp->pr_domain->dom_family == PF_INET) || 395 (prp->pr_domain->dom_family == PF_INET6) || 396 (prp->pr_domain->dom_family == PF_ROUTE)) 397 so->so_fibnum = td->td_proc->p_fibnum; 398 else 399 so->so_fibnum = 0; 400 so->so_proto = prp; 401 #ifdef MAC 402 mac_socket_create(cred, so); 403 #endif 404 knlist_init_mtx(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv)); 405 knlist_init_mtx(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd)); 406 so->so_count = 1; 407 /* 408 * Auto-sizing of socket buffers is managed by the protocols and 409 * the appropriate flags must be set in the pru_attach function. 410 */ 411 CURVNET_SET(so->so_vnet); 412 error = (*prp->pr_usrreqs->pru_attach)(so, proto, td); 413 CURVNET_RESTORE(); 414 if (error) { 415 KASSERT(so->so_count == 1, ("socreate: so_count %d", 416 so->so_count)); 417 so->so_count = 0; 418 sodealloc(so); 419 return (error); 420 } 421 *aso = so; 422 return (0); 423 } 424 425 #ifdef REGRESSION 426 static int regression_sonewconn_earlytest = 1; 427 SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW, 428 ®ression_sonewconn_earlytest, 0, "Perform early sonewconn limit test"); 429 #endif 430 431 /* 432 * When an attempt at a new connection is noted on a socket which accepts 433 * connections, sonewconn is called. If the connection is possible (subject 434 * to space constraints, etc.) 
then we allocate a new structure, propoerly 435 * linked into the data structure of the original socket, and return this. 436 * Connstatus may be 0, or SO_ISCONFIRMING, or SO_ISCONNECTED. 437 * 438 * Note: the ref count on the socket is 0 on return. 439 */ 440 struct socket * 441 sonewconn(struct socket *head, int connstatus) 442 { 443 struct socket *so; 444 int over; 445 446 ACCEPT_LOCK(); 447 over = (head->so_qlen > 3 * head->so_qlimit / 2); 448 ACCEPT_UNLOCK(); 449 #ifdef REGRESSION 450 if (regression_sonewconn_earlytest && over) 451 #else 452 if (over) 453 #endif 454 return (NULL); 455 VNET_ASSERT(head->so_vnet != NULL, ("%s:%d so_vnet is NULL, head=%p", 456 __func__, __LINE__, head)); 457 so = soalloc(head->so_vnet); 458 if (so == NULL) 459 return (NULL); 460 if ((head->so_options & SO_ACCEPTFILTER) != 0) 461 connstatus = 0; 462 so->so_head = head; 463 so->so_type = head->so_type; 464 so->so_options = head->so_options &~ SO_ACCEPTCONN; 465 so->so_linger = head->so_linger; 466 so->so_state = head->so_state | SS_NOFDREF; 467 so->so_fibnum = head->so_fibnum; 468 so->so_proto = head->so_proto; 469 so->so_cred = crhold(head->so_cred); 470 #ifdef MAC 471 mac_socket_newconn(head, so); 472 #endif 473 knlist_init_mtx(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv)); 474 knlist_init_mtx(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd)); 475 VNET_SO_ASSERT(head); 476 if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat) || 477 (*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) { 478 sodealloc(so); 479 return (NULL); 480 } 481 so->so_rcv.sb_lowat = head->so_rcv.sb_lowat; 482 so->so_snd.sb_lowat = head->so_snd.sb_lowat; 483 so->so_rcv.sb_timeo = head->so_rcv.sb_timeo; 484 so->so_snd.sb_timeo = head->so_snd.sb_timeo; 485 so->so_rcv.sb_flags |= head->so_rcv.sb_flags & SB_AUTOSIZE; 486 so->so_snd.sb_flags |= head->so_snd.sb_flags & SB_AUTOSIZE; 487 so->so_state |= connstatus; 488 ACCEPT_LOCK(); 489 if (connstatus) { 490 TAILQ_INSERT_TAIL(&head->so_comp, so, so_list); 491 so->so_qstate |= SQ_COMP; 492 head->so_qlen++; 493 } else { 494 /* 495 * Keep removing sockets from the head until there's room for 496 * us to insert on the tail. In pre-locking revisions, this 497 * was a simple if(), but as we could be racing with other 498 * threads and soabort() requires dropping locks, we must 499 * loop waiting for the condition to be true. 500 */ 501 while (head->so_incqlen > head->so_qlimit) { 502 struct socket *sp; 503 sp = TAILQ_FIRST(&head->so_incomp); 504 TAILQ_REMOVE(&head->so_incomp, sp, so_list); 505 head->so_incqlen--; 506 sp->so_qstate &= ~SQ_INCOMP; 507 sp->so_head = NULL; 508 ACCEPT_UNLOCK(); 509 soabort(sp); 510 ACCEPT_LOCK(); 511 } 512 TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list); 513 so->so_qstate |= SQ_INCOMP; 514 head->so_incqlen++; 515 } 516 ACCEPT_UNLOCK(); 517 if (connstatus) { 518 sorwakeup(head); 519 wakeup_one(&head->so_timeo); 520 } 521 return (so); 522 } 523 524 int 525 sobind(struct socket *so, struct sockaddr *nam, struct thread *td) 526 { 527 int error; 528 529 CURVNET_SET(so->so_vnet); 530 error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td); 531 CURVNET_RESTORE(); 532 return error; 533 } 534 535 /* 536 * solisten() transitions a socket from a non-listening state to a listening 537 * state, but can also be used to update the listen queue depth on an 538 * existing listen socket. The protocol will call back into the sockets 539 * layer using solisten_proto_check() and solisten_proto() to check and set 540 * socket-layer listen state. 
Call backs are used so that the protocol can 541 * acquire both protocol and socket layer locks in whatever order is required 542 * by the protocol. 543 * 544 * Protocol implementors are advised to hold the socket lock across the 545 * socket-layer test and set to avoid races at the socket layer. 546 */ 547 int 548 solisten(struct socket *so, int backlog, struct thread *td) 549 { 550 int error; 551 552 CURVNET_SET(so->so_vnet); 553 error = (*so->so_proto->pr_usrreqs->pru_listen)(so, backlog, td); 554 CURVNET_RESTORE(); 555 return error; 556 } 557 558 int 559 solisten_proto_check(struct socket *so) 560 { 561 562 SOCK_LOCK_ASSERT(so); 563 564 if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING | 565 SS_ISDISCONNECTING)) 566 return (EINVAL); 567 return (0); 568 } 569 570 void 571 solisten_proto(struct socket *so, int backlog) 572 { 573 574 SOCK_LOCK_ASSERT(so); 575 576 if (backlog < 0 || backlog > somaxconn) 577 backlog = somaxconn; 578 so->so_qlimit = backlog; 579 so->so_options |= SO_ACCEPTCONN; 580 } 581 582 /* 583 * Evaluate the reference count and named references on a socket; if no 584 * references remain, free it. This should be called whenever a reference is 585 * released, such as in sorele(), but also when named reference flags are 586 * cleared in socket or protocol code. 587 * 588 * sofree() will free the socket if: 589 * 590 * - There are no outstanding file descriptor references or related consumers 591 * (so_count == 0). 592 * 593 * - The socket has been closed by user space, if ever open (SS_NOFDREF). 594 * 595 * - The protocol does not have an outstanding strong reference on the socket 596 * (SS_PROTOREF). 597 * 598 * - The socket is not in a completed connection queue, so a process has been 599 * notified that it is present. If it is removed, the user process may 600 * block in accept() despite select() saying the socket was ready. 
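 *
 * As a sketch of the "named reference" usage mentioned above (illustrative
 * only, not code taken from this file), a protocol that must keep the
 * socket alive across its own teardown might do:
 *
 *	SOCK_LOCK(so);
 *	so->so_state |= SS_PROTOREF;
 *	SOCK_UNLOCK(so);
 *	...
 *	ACCEPT_LOCK();
 *	SOCK_LOCK(so);
 *	so->so_state &= ~SS_PROTOREF;
 *	sofree(so);	/* frees the socket, or unlocks and returns */
 *
 * sofree() expects both the accept mutex and the socket lock to be held on
 * entry, and either frees the socket or releases both locks before
 * returning.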
 */
void
sofree(struct socket *so)
{
	struct protosw *pr = so->so_proto;
	struct socket *head;

	ACCEPT_LOCK_ASSERT();
	SOCK_LOCK_ASSERT(so);

	if ((so->so_state & SS_NOFDREF) == 0 || so->so_count != 0 ||
	    (so->so_state & SS_PROTOREF) || (so->so_qstate & SQ_COMP)) {
		SOCK_UNLOCK(so);
		ACCEPT_UNLOCK();
		return;
	}

	head = so->so_head;
	if (head != NULL) {
		KASSERT((so->so_qstate & SQ_COMP) != 0 ||
		    (so->so_qstate & SQ_INCOMP) != 0,
		    ("sofree: so_head != NULL, but neither SQ_COMP nor "
		    "SQ_INCOMP"));
		KASSERT((so->so_qstate & SQ_COMP) == 0 ||
		    (so->so_qstate & SQ_INCOMP) == 0,
		    ("sofree: so->so_qstate is SQ_COMP and also SQ_INCOMP"));
		TAILQ_REMOVE(&head->so_incomp, so, so_list);
		head->so_incqlen--;
		so->so_qstate &= ~SQ_INCOMP;
		so->so_head = NULL;
	}
	KASSERT((so->so_qstate & SQ_COMP) == 0 &&
	    (so->so_qstate & SQ_INCOMP) == 0,
	    ("sofree: so_head == NULL, but still SQ_COMP(%d) or SQ_INCOMP(%d)",
	    so->so_qstate & SQ_COMP, so->so_qstate & SQ_INCOMP));
	if (so->so_options & SO_ACCEPTCONN) {
		KASSERT((TAILQ_EMPTY(&so->so_comp)), ("sofree: so_comp populated"));
		KASSERT((TAILQ_EMPTY(&so->so_incomp)), ("sofree: so_incomp populated"));
	}
	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();

	VNET_SO_ASSERT(so);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
		(*pr->pr_domain->dom_dispose)(so->so_rcv.sb_mb);
	if (pr->pr_usrreqs->pru_detach != NULL)
		(*pr->pr_usrreqs->pru_detach)(so);

	/*
	 * From this point on, we assume that no other references to this
	 * socket exist anywhere else in the stack.  Therefore, no locks need
	 * to be acquired or held.
	 *
	 * We used to do a lot of socket buffer and socket locking here, as
	 * well as invoke sorflush() and perform wakeups.  The direct calls to
	 * dom_dispose() and sbrelease_internal() are an inlining of what was
	 * necessary from sorflush().
	 *
	 * Notice that the socket buffer and kqueue state are torn down
	 * before calling pru_detach.  This means that protocols should not
	 * assume they can perform socket wakeups, etc, in their detach code.
	 */
	sbdestroy(&so->so_snd, so);
	sbdestroy(&so->so_rcv, so);
	seldrain(&so->so_snd.sb_sel);
	seldrain(&so->so_rcv.sb_sel);
	knlist_destroy(&so->so_rcv.sb_sel.si_note);
	knlist_destroy(&so->so_snd.sb_sel.si_note);
	sodealloc(so);
}

/*
 * Close a socket on last file table reference removal.  Initiate disconnect
 * if connected.  Free socket when disconnect complete.
 *
 * This function will sorele() the socket.  Note that soclose() may be called
 * prior to the ref count reaching zero.  The actual socket structure will
 * not be freed until the ref count reaches zero.
679 */ 680 int 681 soclose(struct socket *so) 682 { 683 int error = 0; 684 685 KASSERT(!(so->so_state & SS_NOFDREF), ("soclose: SS_NOFDREF on enter")); 686 687 CURVNET_SET(so->so_vnet); 688 funsetown(&so->so_sigio); 689 if (so->so_state & SS_ISCONNECTED) { 690 if ((so->so_state & SS_ISDISCONNECTING) == 0) { 691 error = sodisconnect(so); 692 if (error) { 693 if (error == ENOTCONN) 694 error = 0; 695 goto drop; 696 } 697 } 698 if (so->so_options & SO_LINGER) { 699 if ((so->so_state & SS_ISDISCONNECTING) && 700 (so->so_state & SS_NBIO)) 701 goto drop; 702 while (so->so_state & SS_ISCONNECTED) { 703 error = tsleep(&so->so_timeo, 704 PSOCK | PCATCH, "soclos", so->so_linger * hz); 705 if (error) 706 break; 707 } 708 } 709 } 710 711 drop: 712 if (so->so_proto->pr_usrreqs->pru_close != NULL) 713 (*so->so_proto->pr_usrreqs->pru_close)(so); 714 if (so->so_options & SO_ACCEPTCONN) { 715 struct socket *sp; 716 ACCEPT_LOCK(); 717 while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) { 718 TAILQ_REMOVE(&so->so_incomp, sp, so_list); 719 so->so_incqlen--; 720 sp->so_qstate &= ~SQ_INCOMP; 721 sp->so_head = NULL; 722 ACCEPT_UNLOCK(); 723 soabort(sp); 724 ACCEPT_LOCK(); 725 } 726 while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) { 727 TAILQ_REMOVE(&so->so_comp, sp, so_list); 728 so->so_qlen--; 729 sp->so_qstate &= ~SQ_COMP; 730 sp->so_head = NULL; 731 ACCEPT_UNLOCK(); 732 soabort(sp); 733 ACCEPT_LOCK(); 734 } 735 ACCEPT_UNLOCK(); 736 } 737 ACCEPT_LOCK(); 738 SOCK_LOCK(so); 739 KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF")); 740 so->so_state |= SS_NOFDREF; 741 sorele(so); 742 CURVNET_RESTORE(); 743 return (error); 744 } 745 746 /* 747 * soabort() is used to abruptly tear down a connection, such as when a 748 * resource limit is reached (listen queue depth exceeded), or if a listen 749 * socket is closed while there are sockets waiting to be accepted. 750 * 751 * This interface is tricky, because it is called on an unreferenced socket, 752 * and must be called only by a thread that has actually removed the socket 753 * from the listen queue it was on, or races with other threads are risked. 754 * 755 * This interface will call into the protocol code, so must not be called 756 * with any socket locks held. Protocols do call it while holding their own 757 * recursible protocol mutexes, but this is something that should be subject 758 * to review in the future. 759 */ 760 void 761 soabort(struct socket *so) 762 { 763 764 /* 765 * In as much as is possible, assert that no references to this 766 * socket are held. This is not quite the same as asserting that the 767 * current thread is responsible for arranging for no references, but 768 * is as close as we can get for now. 
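 *
 * For reference, the pattern used by the callers in this file (soclose() and
 * sonewconn()) when draining an incomplete listen queue looks roughly like:
 *
 *	ACCEPT_LOCK();
 *	while ((sp = TAILQ_FIRST(&head->so_incomp)) != NULL) {
 *		TAILQ_REMOVE(&head->so_incomp, sp, so_list);
 *		head->so_incqlen--;
 *		sp->so_qstate &= ~SQ_INCOMP;
 *		sp->so_head = NULL;
 *		ACCEPT_UNLOCK();	/* no socket locks across soabort() */
 *		soabort(sp);
 *		ACCEPT_LOCK();
 *	}
 *	ACCEPT_UNLOCK();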
769 */ 770 KASSERT(so->so_count == 0, ("soabort: so_count")); 771 KASSERT((so->so_state & SS_PROTOREF) == 0, ("soabort: SS_PROTOREF")); 772 KASSERT(so->so_state & SS_NOFDREF, ("soabort: !SS_NOFDREF")); 773 KASSERT((so->so_state & SQ_COMP) == 0, ("soabort: SQ_COMP")); 774 KASSERT((so->so_state & SQ_INCOMP) == 0, ("soabort: SQ_INCOMP")); 775 VNET_SO_ASSERT(so); 776 777 if (so->so_proto->pr_usrreqs->pru_abort != NULL) 778 (*so->so_proto->pr_usrreqs->pru_abort)(so); 779 ACCEPT_LOCK(); 780 SOCK_LOCK(so); 781 sofree(so); 782 } 783 784 int 785 soaccept(struct socket *so, struct sockaddr **nam) 786 { 787 int error; 788 789 SOCK_LOCK(so); 790 KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF")); 791 so->so_state &= ~SS_NOFDREF; 792 SOCK_UNLOCK(so); 793 794 CURVNET_SET(so->so_vnet); 795 error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam); 796 CURVNET_RESTORE(); 797 return (error); 798 } 799 800 int 801 soconnect(struct socket *so, struct sockaddr *nam, struct thread *td) 802 { 803 int error; 804 805 if (so->so_options & SO_ACCEPTCONN) 806 return (EOPNOTSUPP); 807 808 CURVNET_SET(so->so_vnet); 809 /* 810 * If protocol is connection-based, can only connect once. 811 * Otherwise, if connected, try to disconnect first. This allows 812 * user to disconnect by connecting to, e.g., a null address. 813 */ 814 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) && 815 ((so->so_proto->pr_flags & PR_CONNREQUIRED) || 816 (error = sodisconnect(so)))) { 817 error = EISCONN; 818 } else { 819 /* 820 * Prevent accumulated error from previous connection from 821 * biting us. 822 */ 823 so->so_error = 0; 824 error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, td); 825 } 826 CURVNET_RESTORE(); 827 828 return (error); 829 } 830 831 int 832 soconnect2(struct socket *so1, struct socket *so2) 833 { 834 int error; 835 836 CURVNET_SET(so1->so_vnet); 837 error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2); 838 CURVNET_RESTORE(); 839 return (error); 840 } 841 842 int 843 sodisconnect(struct socket *so) 844 { 845 int error; 846 847 if ((so->so_state & SS_ISCONNECTED) == 0) 848 return (ENOTCONN); 849 if (so->so_state & SS_ISDISCONNECTING) 850 return (EALREADY); 851 VNET_SO_ASSERT(so); 852 error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so); 853 return (error); 854 } 855 856 #ifdef ZERO_COPY_SOCKETS 857 struct so_zerocopy_stats{ 858 int size_ok; 859 int align_ok; 860 int found_ifp; 861 }; 862 struct so_zerocopy_stats so_zerocp_stats = {0,0,0}; 863 #include <netinet/in.h> 864 #include <net/route.h> 865 #include <netinet/in_pcb.h> 866 #include <vm/vm.h> 867 #include <vm/vm_page.h> 868 #include <vm/vm_object.h> 869 870 /* 871 * sosend_copyin() is only used if zero copy sockets are enabled. Otherwise 872 * sosend_dgram() and sosend_generic() use m_uiotombuf(). 873 * 874 * sosend_copyin() accepts a uio and prepares an mbuf chain holding part or 875 * all of the data referenced by the uio. If desired, it uses zero-copy. 876 * *space will be updated to reflect data copied in. 877 * 878 * NB: If atomic I/O is requested, the caller must already have checked that 879 * space can hold resid bytes. 880 * 881 * NB: In the event of an error, the caller may need to free the partial 882 * chain pointed to by *mpp. The contents of both *uio and *space may be 883 * modified even in the case of an error. 
884 */ 885 static int 886 sosend_copyin(struct uio *uio, struct mbuf **retmp, int atomic, long *space, 887 int flags) 888 { 889 struct mbuf *m, **mp, *top; 890 long len; 891 ssize_t resid; 892 int error; 893 #ifdef ZERO_COPY_SOCKETS 894 int cow_send; 895 #endif 896 897 *retmp = top = NULL; 898 mp = ⊤ 899 len = 0; 900 resid = uio->uio_resid; 901 error = 0; 902 do { 903 #ifdef ZERO_COPY_SOCKETS 904 cow_send = 0; 905 #endif /* ZERO_COPY_SOCKETS */ 906 if (resid >= MINCLSIZE) { 907 #ifdef ZERO_COPY_SOCKETS 908 if (top == NULL) { 909 m = m_gethdr(M_WAITOK, MT_DATA); 910 m->m_pkthdr.len = 0; 911 m->m_pkthdr.rcvif = NULL; 912 } else 913 m = m_get(M_WAITOK, MT_DATA); 914 if (so_zero_copy_send && 915 resid>=PAGE_SIZE && 916 *space>=PAGE_SIZE && 917 uio->uio_iov->iov_len>=PAGE_SIZE) { 918 so_zerocp_stats.size_ok++; 919 so_zerocp_stats.align_ok++; 920 cow_send = socow_setup(m, uio); 921 len = cow_send; 922 } 923 if (!cow_send) { 924 m_clget(m, M_WAITOK); 925 len = min(min(MCLBYTES, resid), *space); 926 } 927 #else /* ZERO_COPY_SOCKETS */ 928 if (top == NULL) { 929 m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR); 930 m->m_pkthdr.len = 0; 931 m->m_pkthdr.rcvif = NULL; 932 } else 933 m = m_getcl(M_WAIT, MT_DATA, 0); 934 len = min(min(MCLBYTES, resid), *space); 935 #endif /* ZERO_COPY_SOCKETS */ 936 } else { 937 if (top == NULL) { 938 m = m_gethdr(M_WAIT, MT_DATA); 939 m->m_pkthdr.len = 0; 940 m->m_pkthdr.rcvif = NULL; 941 942 len = min(min(MHLEN, resid), *space); 943 /* 944 * For datagram protocols, leave room 945 * for protocol headers in first mbuf. 946 */ 947 if (atomic && m && len < MHLEN) 948 MH_ALIGN(m, len); 949 } else { 950 m = m_get(M_WAIT, MT_DATA); 951 len = min(min(MLEN, resid), *space); 952 } 953 } 954 if (m == NULL) { 955 error = ENOBUFS; 956 goto out; 957 } 958 959 *space -= len; 960 #ifdef ZERO_COPY_SOCKETS 961 if (cow_send) 962 error = 0; 963 else 964 #endif /* ZERO_COPY_SOCKETS */ 965 error = uiomove(mtod(m, void *), (int)len, uio); 966 resid = uio->uio_resid; 967 m->m_len = len; 968 *mp = m; 969 top->m_pkthdr.len += len; 970 if (error) 971 goto out; 972 mp = &m->m_next; 973 if (resid <= 0) { 974 if (flags & MSG_EOR) 975 top->m_flags |= M_EOR; 976 break; 977 } 978 } while (*space > 0 && atomic); 979 out: 980 *retmp = top; 981 return (error); 982 } 983 #endif /*ZERO_COPY_SOCKETS*/ 984 985 #define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT) 986 987 int 988 sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio, 989 struct mbuf *top, struct mbuf *control, int flags, struct thread *td) 990 { 991 long space; 992 ssize_t resid; 993 int clen = 0, error, dontroute; 994 #ifdef ZERO_COPY_SOCKETS 995 int atomic = sosendallatonce(so) || top; 996 #endif 997 998 KASSERT(so->so_type == SOCK_DGRAM, ("sosend_dgram: !SOCK_DGRAM")); 999 KASSERT(so->so_proto->pr_flags & PR_ATOMIC, 1000 ("sosend_dgram: !PR_ATOMIC")); 1001 1002 if (uio != NULL) 1003 resid = uio->uio_resid; 1004 else 1005 resid = top->m_pkthdr.len; 1006 /* 1007 * In theory resid should be unsigned. However, space must be 1008 * signed, as it might be less than 0 if we over-committed, and we 1009 * must use a signed comparison of space and resid. On the other 1010 * hand, a negative resid causes us to loop sending 0-length 1011 * segments to the protocol. 
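 *
 * As an illustration (numbers invented): if 48k of data is already queued
 * and the send buffer limit is later lowered to 32k, sbspace() can report a
 * negative amount of room; an unsigned comparison against resid would treat
 * that as an enormous positive value and let the send proceed anyway.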
1012 */ 1013 if (resid < 0) { 1014 error = EINVAL; 1015 goto out; 1016 } 1017 1018 dontroute = 1019 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0; 1020 if (td != NULL) 1021 td->td_ru.ru_msgsnd++; 1022 if (control != NULL) 1023 clen = control->m_len; 1024 1025 SOCKBUF_LOCK(&so->so_snd); 1026 if (so->so_snd.sb_state & SBS_CANTSENDMORE) { 1027 SOCKBUF_UNLOCK(&so->so_snd); 1028 error = EPIPE; 1029 goto out; 1030 } 1031 if (so->so_error) { 1032 error = so->so_error; 1033 so->so_error = 0; 1034 SOCKBUF_UNLOCK(&so->so_snd); 1035 goto out; 1036 } 1037 if ((so->so_state & SS_ISCONNECTED) == 0) { 1038 /* 1039 * `sendto' and `sendmsg' is allowed on a connection-based 1040 * socket if it supports implied connect. Return ENOTCONN if 1041 * not connected and no address is supplied. 1042 */ 1043 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) && 1044 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) { 1045 if ((so->so_state & SS_ISCONFIRMING) == 0 && 1046 !(resid == 0 && clen != 0)) { 1047 SOCKBUF_UNLOCK(&so->so_snd); 1048 error = ENOTCONN; 1049 goto out; 1050 } 1051 } else if (addr == NULL) { 1052 if (so->so_proto->pr_flags & PR_CONNREQUIRED) 1053 error = ENOTCONN; 1054 else 1055 error = EDESTADDRREQ; 1056 SOCKBUF_UNLOCK(&so->so_snd); 1057 goto out; 1058 } 1059 } 1060 1061 /* 1062 * Do we need MSG_OOB support in SOCK_DGRAM? Signs here may be a 1063 * problem and need fixing. 1064 */ 1065 space = sbspace(&so->so_snd); 1066 if (flags & MSG_OOB) 1067 space += 1024; 1068 space -= clen; 1069 SOCKBUF_UNLOCK(&so->so_snd); 1070 if (resid > space) { 1071 error = EMSGSIZE; 1072 goto out; 1073 } 1074 if (uio == NULL) { 1075 resid = 0; 1076 if (flags & MSG_EOR) 1077 top->m_flags |= M_EOR; 1078 } else { 1079 #ifdef ZERO_COPY_SOCKETS 1080 error = sosend_copyin(uio, &top, atomic, &space, flags); 1081 if (error) 1082 goto out; 1083 #else 1084 /* 1085 * Copy the data from userland into a mbuf chain. 1086 * If no data is to be copied in, a single empty mbuf 1087 * is returned. 1088 */ 1089 top = m_uiotombuf(uio, M_WAITOK, space, max_hdr, 1090 (M_PKTHDR | ((flags & MSG_EOR) ? M_EOR : 0))); 1091 if (top == NULL) { 1092 error = EFAULT; /* only possible error */ 1093 goto out; 1094 } 1095 space -= resid - uio->uio_resid; 1096 #endif 1097 resid = uio->uio_resid; 1098 } 1099 KASSERT(resid == 0, ("sosend_dgram: resid != 0")); 1100 /* 1101 * XXXRW: Frobbing SO_DONTROUTE here is even worse without sblock 1102 * than with. 1103 */ 1104 if (dontroute) { 1105 SOCK_LOCK(so); 1106 so->so_options |= SO_DONTROUTE; 1107 SOCK_UNLOCK(so); 1108 } 1109 /* 1110 * XXX all the SBS_CANTSENDMORE checks previously done could be out 1111 * of date. We could have recieved a reset packet in an interrupt or 1112 * maybe we slept while doing page faults in uiomove() etc. We could 1113 * probably recheck again inside the locking protection here, but 1114 * there are probably other places that this also happens. We must 1115 * rethink this. 1116 */ 1117 VNET_SO_ASSERT(so); 1118 error = (*so->so_proto->pr_usrreqs->pru_send)(so, 1119 (flags & MSG_OOB) ? PRUS_OOB : 1120 /* 1121 * If the user set MSG_EOF, the protocol understands this flag and 1122 * nothing left to send then use PRU_SEND_EOF instead of PRU_SEND. 1123 */ 1124 ((flags & MSG_EOF) && 1125 (so->so_proto->pr_flags & PR_IMPLOPCL) && 1126 (resid <= 0)) ? 1127 PRUS_EOF : 1128 /* If there is more to send set PRUS_MORETOCOME */ 1129 (resid > 0 && space > 0) ? 
PRUS_MORETOCOME : 0, 1130 top, addr, control, td); 1131 if (dontroute) { 1132 SOCK_LOCK(so); 1133 so->so_options &= ~SO_DONTROUTE; 1134 SOCK_UNLOCK(so); 1135 } 1136 clen = 0; 1137 control = NULL; 1138 top = NULL; 1139 out: 1140 if (top != NULL) 1141 m_freem(top); 1142 if (control != NULL) 1143 m_freem(control); 1144 return (error); 1145 } 1146 1147 /* 1148 * Send on a socket. If send must go all at once and message is larger than 1149 * send buffering, then hard error. Lock against other senders. If must go 1150 * all at once and not enough room now, then inform user that this would 1151 * block and do nothing. Otherwise, if nonblocking, send as much as 1152 * possible. The data to be sent is described by "uio" if nonzero, otherwise 1153 * by the mbuf chain "top" (which must be null if uio is not). Data provided 1154 * in mbuf chain must be small enough to send all at once. 1155 * 1156 * Returns nonzero on error, timeout or signal; callers must check for short 1157 * counts if EINTR/ERESTART are returned. Data and control buffers are freed 1158 * on return. 1159 */ 1160 int 1161 sosend_generic(struct socket *so, struct sockaddr *addr, struct uio *uio, 1162 struct mbuf *top, struct mbuf *control, int flags, struct thread *td) 1163 { 1164 long space; 1165 ssize_t resid; 1166 int clen = 0, error, dontroute; 1167 int atomic = sosendallatonce(so) || top; 1168 1169 if (uio != NULL) 1170 resid = uio->uio_resid; 1171 else 1172 resid = top->m_pkthdr.len; 1173 /* 1174 * In theory resid should be unsigned. However, space must be 1175 * signed, as it might be less than 0 if we over-committed, and we 1176 * must use a signed comparison of space and resid. On the other 1177 * hand, a negative resid causes us to loop sending 0-length 1178 * segments to the protocol. 1179 * 1180 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM 1181 * type sockets since that's an error. 1182 */ 1183 if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) { 1184 error = EINVAL; 1185 goto out; 1186 } 1187 1188 dontroute = 1189 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 && 1190 (so->so_proto->pr_flags & PR_ATOMIC); 1191 if (td != NULL) 1192 td->td_ru.ru_msgsnd++; 1193 if (control != NULL) 1194 clen = control->m_len; 1195 1196 error = sblock(&so->so_snd, SBLOCKWAIT(flags)); 1197 if (error) 1198 goto out; 1199 1200 restart: 1201 do { 1202 SOCKBUF_LOCK(&so->so_snd); 1203 if (so->so_snd.sb_state & SBS_CANTSENDMORE) { 1204 SOCKBUF_UNLOCK(&so->so_snd); 1205 error = EPIPE; 1206 goto release; 1207 } 1208 if (so->so_error) { 1209 error = so->so_error; 1210 so->so_error = 0; 1211 SOCKBUF_UNLOCK(&so->so_snd); 1212 goto release; 1213 } 1214 if ((so->so_state & SS_ISCONNECTED) == 0) { 1215 /* 1216 * `sendto' and `sendmsg' is allowed on a connection- 1217 * based socket if it supports implied connect. 1218 * Return ENOTCONN if not connected and no address is 1219 * supplied. 
1220 */ 1221 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) && 1222 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) { 1223 if ((so->so_state & SS_ISCONFIRMING) == 0 && 1224 !(resid == 0 && clen != 0)) { 1225 SOCKBUF_UNLOCK(&so->so_snd); 1226 error = ENOTCONN; 1227 goto release; 1228 } 1229 } else if (addr == NULL) { 1230 SOCKBUF_UNLOCK(&so->so_snd); 1231 if (so->so_proto->pr_flags & PR_CONNREQUIRED) 1232 error = ENOTCONN; 1233 else 1234 error = EDESTADDRREQ; 1235 goto release; 1236 } 1237 } 1238 space = sbspace(&so->so_snd); 1239 if (flags & MSG_OOB) 1240 space += 1024; 1241 if ((atomic && resid > so->so_snd.sb_hiwat) || 1242 clen > so->so_snd.sb_hiwat) { 1243 SOCKBUF_UNLOCK(&so->so_snd); 1244 error = EMSGSIZE; 1245 goto release; 1246 } 1247 if (space < resid + clen && 1248 (atomic || space < so->so_snd.sb_lowat || space < clen)) { 1249 if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO)) { 1250 SOCKBUF_UNLOCK(&so->so_snd); 1251 error = EWOULDBLOCK; 1252 goto release; 1253 } 1254 error = sbwait(&so->so_snd); 1255 SOCKBUF_UNLOCK(&so->so_snd); 1256 if (error) 1257 goto release; 1258 goto restart; 1259 } 1260 SOCKBUF_UNLOCK(&so->so_snd); 1261 space -= clen; 1262 do { 1263 if (uio == NULL) { 1264 resid = 0; 1265 if (flags & MSG_EOR) 1266 top->m_flags |= M_EOR; 1267 } else { 1268 #ifdef ZERO_COPY_SOCKETS 1269 error = sosend_copyin(uio, &top, atomic, 1270 &space, flags); 1271 if (error != 0) 1272 goto release; 1273 #else 1274 /* 1275 * Copy the data from userland into a mbuf 1276 * chain. If no data is to be copied in, 1277 * a single empty mbuf is returned. 1278 */ 1279 top = m_uiotombuf(uio, M_WAITOK, space, 1280 (atomic ? max_hdr : 0), 1281 (atomic ? M_PKTHDR : 0) | 1282 ((flags & MSG_EOR) ? M_EOR : 0)); 1283 if (top == NULL) { 1284 error = EFAULT; /* only possible error */ 1285 goto release; 1286 } 1287 space -= resid - uio->uio_resid; 1288 #endif 1289 resid = uio->uio_resid; 1290 } 1291 if (dontroute) { 1292 SOCK_LOCK(so); 1293 so->so_options |= SO_DONTROUTE; 1294 SOCK_UNLOCK(so); 1295 } 1296 /* 1297 * XXX all the SBS_CANTSENDMORE checks previously 1298 * done could be out of date. We could have recieved 1299 * a reset packet in an interrupt or maybe we slept 1300 * while doing page faults in uiomove() etc. We 1301 * could probably recheck again inside the locking 1302 * protection here, but there are probably other 1303 * places that this also happens. We must rethink 1304 * this. 1305 */ 1306 VNET_SO_ASSERT(so); 1307 error = (*so->so_proto->pr_usrreqs->pru_send)(so, 1308 (flags & MSG_OOB) ? PRUS_OOB : 1309 /* 1310 * If the user set MSG_EOF, the protocol understands 1311 * this flag and nothing left to send then use 1312 * PRU_SEND_EOF instead of PRU_SEND. 1313 */ 1314 ((flags & MSG_EOF) && 1315 (so->so_proto->pr_flags & PR_IMPLOPCL) && 1316 (resid <= 0)) ? 1317 PRUS_EOF : 1318 /* If there is more to send set PRUS_MORETOCOME. */ 1319 (resid > 0 && space > 0) ? 
PRUS_MORETOCOME : 0, 1320 top, addr, control, td); 1321 if (dontroute) { 1322 SOCK_LOCK(so); 1323 so->so_options &= ~SO_DONTROUTE; 1324 SOCK_UNLOCK(so); 1325 } 1326 clen = 0; 1327 control = NULL; 1328 top = NULL; 1329 if (error) 1330 goto release; 1331 } while (resid && space > 0); 1332 } while (resid); 1333 1334 release: 1335 sbunlock(&so->so_snd); 1336 out: 1337 if (top != NULL) 1338 m_freem(top); 1339 if (control != NULL) 1340 m_freem(control); 1341 return (error); 1342 } 1343 1344 int 1345 sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, 1346 struct mbuf *top, struct mbuf *control, int flags, struct thread *td) 1347 { 1348 int error; 1349 1350 CURVNET_SET(so->so_vnet); 1351 error = so->so_proto->pr_usrreqs->pru_sosend(so, addr, uio, top, 1352 control, flags, td); 1353 CURVNET_RESTORE(); 1354 return (error); 1355 } 1356 1357 /* 1358 * The part of soreceive() that implements reading non-inline out-of-band 1359 * data from a socket. For more complete comments, see soreceive(), from 1360 * which this code originated. 1361 * 1362 * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is 1363 * unable to return an mbuf chain to the caller. 1364 */ 1365 static int 1366 soreceive_rcvoob(struct socket *so, struct uio *uio, int flags) 1367 { 1368 struct protosw *pr = so->so_proto; 1369 struct mbuf *m; 1370 int error; 1371 1372 KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0")); 1373 VNET_SO_ASSERT(so); 1374 1375 m = m_get(M_WAIT, MT_DATA); 1376 error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK); 1377 if (error) 1378 goto bad; 1379 do { 1380 #ifdef ZERO_COPY_SOCKETS 1381 if (so_zero_copy_receive) { 1382 int disposable; 1383 1384 if ((m->m_flags & M_EXT) 1385 && (m->m_ext.ext_type == EXT_DISPOSABLE)) 1386 disposable = 1; 1387 else 1388 disposable = 0; 1389 1390 error = uiomoveco(mtod(m, void *), 1391 min(uio->uio_resid, m->m_len), 1392 uio, disposable); 1393 } else 1394 #endif /* ZERO_COPY_SOCKETS */ 1395 error = uiomove(mtod(m, void *), 1396 (int) min(uio->uio_resid, m->m_len), uio); 1397 m = m_free(m); 1398 } while (uio->uio_resid && error == 0 && m); 1399 bad: 1400 if (m != NULL) 1401 m_freem(m); 1402 return (error); 1403 } 1404 1405 /* 1406 * Following replacement or removal of the first mbuf on the first mbuf chain 1407 * of a socket buffer, push necessary state changes back into the socket 1408 * buffer so that other consumers see the values consistently. 'nextrecord' 1409 * is the callers locally stored value of the original value of 1410 * sb->sb_mb->m_nextpkt which must be restored when the lead mbuf changes. 1411 * NOTE: 'nextrecord' may be NULL. 1412 */ 1413 static __inline void 1414 sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord) 1415 { 1416 1417 SOCKBUF_LOCK_ASSERT(sb); 1418 /* 1419 * First, update for the new value of nextrecord. If necessary, make 1420 * it the first record. 1421 */ 1422 if (sb->sb_mb != NULL) 1423 sb->sb_mb->m_nextpkt = nextrecord; 1424 else 1425 sb->sb_mb = nextrecord; 1426 1427 /* 1428 * Now update any dependent socket buffer fields to reflect the new 1429 * state. This is an expanded inline of SB_EMPTY_FIXUP(), with the 1430 * addition of a second clause that takes care of the case where 1431 * sb_mb has been updated, but remains the last record. 
1432 */ 1433 if (sb->sb_mb == NULL) { 1434 sb->sb_mbtail = NULL; 1435 sb->sb_lastrecord = NULL; 1436 } else if (sb->sb_mb->m_nextpkt == NULL) 1437 sb->sb_lastrecord = sb->sb_mb; 1438 } 1439 1440 1441 /* 1442 * Implement receive operations on a socket. We depend on the way that 1443 * records are added to the sockbuf by sbappend. In particular, each record 1444 * (mbufs linked through m_next) must begin with an address if the protocol 1445 * so specifies, followed by an optional mbuf or mbufs containing ancillary 1446 * data, and then zero or more mbufs of data. In order to allow parallelism 1447 * between network receive and copying to user space, as well as avoid 1448 * sleeping with a mutex held, we release the socket buffer mutex during the 1449 * user space copy. Although the sockbuf is locked, new data may still be 1450 * appended, and thus we must maintain consistency of the sockbuf during that 1451 * time. 1452 * 1453 * The caller may receive the data as a single mbuf chain by supplying an 1454 * mbuf **mp0 for use in returning the chain. The uio is then used only for 1455 * the count in uio_resid. 1456 */ 1457 int 1458 soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio, 1459 struct mbuf **mp0, struct mbuf **controlp, int *flagsp) 1460 { 1461 struct mbuf *m, **mp; 1462 int flags, error, offset; 1463 ssize_t len; 1464 struct protosw *pr = so->so_proto; 1465 struct mbuf *nextrecord; 1466 int moff, type = 0; 1467 ssize_t orig_resid = uio->uio_resid; 1468 1469 mp = mp0; 1470 if (psa != NULL) 1471 *psa = NULL; 1472 if (controlp != NULL) 1473 *controlp = NULL; 1474 if (flagsp != NULL) 1475 flags = *flagsp &~ MSG_EOR; 1476 else 1477 flags = 0; 1478 if (flags & MSG_OOB) 1479 return (soreceive_rcvoob(so, uio, flags)); 1480 if (mp != NULL) 1481 *mp = NULL; 1482 if ((pr->pr_flags & PR_WANTRCVD) && (so->so_state & SS_ISCONFIRMING) 1483 && uio->uio_resid) { 1484 VNET_SO_ASSERT(so); 1485 (*pr->pr_usrreqs->pru_rcvd)(so, 0); 1486 } 1487 1488 error = sblock(&so->so_rcv, SBLOCKWAIT(flags)); 1489 if (error) 1490 return (error); 1491 1492 restart: 1493 SOCKBUF_LOCK(&so->so_rcv); 1494 m = so->so_rcv.sb_mb; 1495 /* 1496 * If we have less data than requested, block awaiting more (subject 1497 * to any timeout) if: 1498 * 1. the current count is less than the low water mark, or 1499 * 2. 
MSG_DONTWAIT is not set 1500 */ 1501 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 && 1502 so->so_rcv.sb_cc < uio->uio_resid) && 1503 so->so_rcv.sb_cc < so->so_rcv.sb_lowat && 1504 m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) { 1505 KASSERT(m != NULL || !so->so_rcv.sb_cc, 1506 ("receive: m == %p so->so_rcv.sb_cc == %u", 1507 m, so->so_rcv.sb_cc)); 1508 if (so->so_error) { 1509 if (m != NULL) 1510 goto dontblock; 1511 error = so->so_error; 1512 if ((flags & MSG_PEEK) == 0) 1513 so->so_error = 0; 1514 SOCKBUF_UNLOCK(&so->so_rcv); 1515 goto release; 1516 } 1517 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1518 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 1519 if (m == NULL) { 1520 SOCKBUF_UNLOCK(&so->so_rcv); 1521 goto release; 1522 } else 1523 goto dontblock; 1524 } 1525 for (; m != NULL; m = m->m_next) 1526 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) { 1527 m = so->so_rcv.sb_mb; 1528 goto dontblock; 1529 } 1530 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 1531 (so->so_proto->pr_flags & PR_CONNREQUIRED)) { 1532 SOCKBUF_UNLOCK(&so->so_rcv); 1533 error = ENOTCONN; 1534 goto release; 1535 } 1536 if (uio->uio_resid == 0) { 1537 SOCKBUF_UNLOCK(&so->so_rcv); 1538 goto release; 1539 } 1540 if ((so->so_state & SS_NBIO) || 1541 (flags & (MSG_DONTWAIT|MSG_NBIO))) { 1542 SOCKBUF_UNLOCK(&so->so_rcv); 1543 error = EWOULDBLOCK; 1544 goto release; 1545 } 1546 SBLASTRECORDCHK(&so->so_rcv); 1547 SBLASTMBUFCHK(&so->so_rcv); 1548 error = sbwait(&so->so_rcv); 1549 SOCKBUF_UNLOCK(&so->so_rcv); 1550 if (error) 1551 goto release; 1552 goto restart; 1553 } 1554 dontblock: 1555 /* 1556 * From this point onward, we maintain 'nextrecord' as a cache of the 1557 * pointer to the next record in the socket buffer. We must keep the 1558 * various socket buffer pointers and local stack versions of the 1559 * pointers in sync, pushing out modifications before dropping the 1560 * socket buffer mutex, and re-reading them when picking it up. 1561 * 1562 * Otherwise, we will race with the network stack appending new data 1563 * or records onto the socket buffer by using inconsistent/stale 1564 * versions of the field, possibly resulting in socket buffer 1565 * corruption. 1566 * 1567 * By holding the high-level sblock(), we prevent simultaneous 1568 * readers from pulling off the front of the socket buffer. 1569 */ 1570 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1571 if (uio->uio_td) 1572 uio->uio_td->td_ru.ru_msgrcv++; 1573 KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb")); 1574 SBLASTRECORDCHK(&so->so_rcv); 1575 SBLASTMBUFCHK(&so->so_rcv); 1576 nextrecord = m->m_nextpkt; 1577 if (pr->pr_flags & PR_ADDR) { 1578 KASSERT(m->m_type == MT_SONAME, 1579 ("m->m_type == %d", m->m_type)); 1580 orig_resid = 0; 1581 if (psa != NULL) 1582 *psa = sodupsockaddr(mtod(m, struct sockaddr *), 1583 M_NOWAIT); 1584 if (flags & MSG_PEEK) { 1585 m = m->m_next; 1586 } else { 1587 sbfree(&so->so_rcv, m); 1588 so->so_rcv.sb_mb = m_free(m); 1589 m = so->so_rcv.sb_mb; 1590 sockbuf_pushsync(&so->so_rcv, nextrecord); 1591 } 1592 } 1593 1594 /* 1595 * Process one or more MT_CONTROL mbufs present before any data mbufs 1596 * in the first mbuf chain on the socket buffer. If MSG_PEEK, we 1597 * just copy the data; if !MSG_PEEK, we call into the protocol to 1598 * perform externalization (or freeing if controlp == NULL). 
1599 */ 1600 if (m != NULL && m->m_type == MT_CONTROL) { 1601 struct mbuf *cm = NULL, *cmn; 1602 struct mbuf **cme = &cm; 1603 1604 do { 1605 if (flags & MSG_PEEK) { 1606 if (controlp != NULL) { 1607 *controlp = m_copy(m, 0, m->m_len); 1608 controlp = &(*controlp)->m_next; 1609 } 1610 m = m->m_next; 1611 } else { 1612 sbfree(&so->so_rcv, m); 1613 so->so_rcv.sb_mb = m->m_next; 1614 m->m_next = NULL; 1615 *cme = m; 1616 cme = &(*cme)->m_next; 1617 m = so->so_rcv.sb_mb; 1618 } 1619 } while (m != NULL && m->m_type == MT_CONTROL); 1620 if ((flags & MSG_PEEK) == 0) 1621 sockbuf_pushsync(&so->so_rcv, nextrecord); 1622 while (cm != NULL) { 1623 cmn = cm->m_next; 1624 cm->m_next = NULL; 1625 if (pr->pr_domain->dom_externalize != NULL) { 1626 SOCKBUF_UNLOCK(&so->so_rcv); 1627 VNET_SO_ASSERT(so); 1628 error = (*pr->pr_domain->dom_externalize) 1629 (cm, controlp); 1630 SOCKBUF_LOCK(&so->so_rcv); 1631 } else if (controlp != NULL) 1632 *controlp = cm; 1633 else 1634 m_freem(cm); 1635 if (controlp != NULL) { 1636 orig_resid = 0; 1637 while (*controlp != NULL) 1638 controlp = &(*controlp)->m_next; 1639 } 1640 cm = cmn; 1641 } 1642 if (m != NULL) 1643 nextrecord = so->so_rcv.sb_mb->m_nextpkt; 1644 else 1645 nextrecord = so->so_rcv.sb_mb; 1646 orig_resid = 0; 1647 } 1648 if (m != NULL) { 1649 if ((flags & MSG_PEEK) == 0) { 1650 KASSERT(m->m_nextpkt == nextrecord, 1651 ("soreceive: post-control, nextrecord !sync")); 1652 if (nextrecord == NULL) { 1653 KASSERT(so->so_rcv.sb_mb == m, 1654 ("soreceive: post-control, sb_mb!=m")); 1655 KASSERT(so->so_rcv.sb_lastrecord == m, 1656 ("soreceive: post-control, lastrecord!=m")); 1657 } 1658 } 1659 type = m->m_type; 1660 if (type == MT_OOBDATA) 1661 flags |= MSG_OOB; 1662 } else { 1663 if ((flags & MSG_PEEK) == 0) { 1664 KASSERT(so->so_rcv.sb_mb == nextrecord, 1665 ("soreceive: sb_mb != nextrecord")); 1666 if (so->so_rcv.sb_mb == NULL) { 1667 KASSERT(so->so_rcv.sb_lastrecord == NULL, 1668 ("soreceive: sb_lastercord != NULL")); 1669 } 1670 } 1671 } 1672 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1673 SBLASTRECORDCHK(&so->so_rcv); 1674 SBLASTMBUFCHK(&so->so_rcv); 1675 1676 /* 1677 * Now continue to read any data mbufs off of the head of the socket 1678 * buffer until the read request is satisfied. Note that 'type' is 1679 * used to store the type of any mbuf reads that have happened so far 1680 * such that soreceive() can stop reading if the type changes, which 1681 * causes soreceive() to return only one of regular data and inline 1682 * out-of-band data in a single socket receive operation. 1683 */ 1684 moff = 0; 1685 offset = 0; 1686 while (m != NULL && uio->uio_resid > 0 && error == 0) { 1687 /* 1688 * If the type of mbuf has changed since the last mbuf 1689 * examined ('type'), end the receive operation. 1690 */ 1691 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1692 if (m->m_type == MT_OOBDATA || m->m_type == MT_CONTROL) { 1693 if (type != m->m_type) 1694 break; 1695 } else if (type == MT_OOBDATA) 1696 break; 1697 else 1698 KASSERT(m->m_type == MT_DATA, 1699 ("m->m_type == %d", m->m_type)); 1700 so->so_rcv.sb_state &= ~SBS_RCVATMARK; 1701 len = uio->uio_resid; 1702 if (so->so_oobmark && len > so->so_oobmark - offset) 1703 len = so->so_oobmark - offset; 1704 if (len > m->m_len - moff) 1705 len = m->m_len - moff; 1706 /* 1707 * If mp is set, just pass back the mbufs. Otherwise copy 1708 * them out via the uio, then free. 
Sockbuf must be 1709 * consistent here (points to current mbuf, it points to next 1710 * record) when we drop priority; we must note any additions 1711 * to the sockbuf when we block interrupts again. 1712 */ 1713 if (mp == NULL) { 1714 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1715 SBLASTRECORDCHK(&so->so_rcv); 1716 SBLASTMBUFCHK(&so->so_rcv); 1717 SOCKBUF_UNLOCK(&so->so_rcv); 1718 #ifdef ZERO_COPY_SOCKETS 1719 if (so_zero_copy_receive) { 1720 int disposable; 1721 1722 if ((m->m_flags & M_EXT) 1723 && (m->m_ext.ext_type == EXT_DISPOSABLE)) 1724 disposable = 1; 1725 else 1726 disposable = 0; 1727 1728 error = uiomoveco(mtod(m, char *) + moff, 1729 (int)len, uio, 1730 disposable); 1731 } else 1732 #endif /* ZERO_COPY_SOCKETS */ 1733 error = uiomove(mtod(m, char *) + moff, (int)len, uio); 1734 SOCKBUF_LOCK(&so->so_rcv); 1735 if (error) { 1736 /* 1737 * The MT_SONAME mbuf has already been removed 1738 * from the record, so it is necessary to 1739 * remove the data mbufs, if any, to preserve 1740 * the invariant in the case of PR_ADDR that 1741 * requires MT_SONAME mbufs at the head of 1742 * each record. 1743 */ 1744 if (m && pr->pr_flags & PR_ATOMIC && 1745 ((flags & MSG_PEEK) == 0)) 1746 (void)sbdroprecord_locked(&so->so_rcv); 1747 SOCKBUF_UNLOCK(&so->so_rcv); 1748 goto release; 1749 } 1750 } else 1751 uio->uio_resid -= len; 1752 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1753 if (len == m->m_len - moff) { 1754 if (m->m_flags & M_EOR) 1755 flags |= MSG_EOR; 1756 if (flags & MSG_PEEK) { 1757 m = m->m_next; 1758 moff = 0; 1759 } else { 1760 nextrecord = m->m_nextpkt; 1761 sbfree(&so->so_rcv, m); 1762 if (mp != NULL) { 1763 *mp = m; 1764 mp = &m->m_next; 1765 so->so_rcv.sb_mb = m = m->m_next; 1766 *mp = NULL; 1767 } else { 1768 so->so_rcv.sb_mb = m_free(m); 1769 m = so->so_rcv.sb_mb; 1770 } 1771 sockbuf_pushsync(&so->so_rcv, nextrecord); 1772 SBLASTRECORDCHK(&so->so_rcv); 1773 SBLASTMBUFCHK(&so->so_rcv); 1774 } 1775 } else { 1776 if (flags & MSG_PEEK) 1777 moff += len; 1778 else { 1779 if (mp != NULL) { 1780 int copy_flag; 1781 1782 if (flags & MSG_DONTWAIT) 1783 copy_flag = M_DONTWAIT; 1784 else 1785 copy_flag = M_WAIT; 1786 if (copy_flag == M_WAIT) 1787 SOCKBUF_UNLOCK(&so->so_rcv); 1788 *mp = m_copym(m, 0, len, copy_flag); 1789 if (copy_flag == M_WAIT) 1790 SOCKBUF_LOCK(&so->so_rcv); 1791 if (*mp == NULL) { 1792 /* 1793 * m_copym() couldn't 1794 * allocate an mbuf. Adjust 1795 * uio_resid back (it was 1796 * adjusted down by len 1797 * bytes, which we didn't end 1798 * up "copying" over). 1799 */ 1800 uio->uio_resid += len; 1801 break; 1802 } 1803 } 1804 m->m_data += len; 1805 m->m_len -= len; 1806 so->so_rcv.sb_cc -= len; 1807 } 1808 } 1809 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1810 if (so->so_oobmark) { 1811 if ((flags & MSG_PEEK) == 0) { 1812 so->so_oobmark -= len; 1813 if (so->so_oobmark == 0) { 1814 so->so_rcv.sb_state |= SBS_RCVATMARK; 1815 break; 1816 } 1817 } else { 1818 offset += len; 1819 if (offset == so->so_oobmark) 1820 break; 1821 } 1822 } 1823 if (flags & MSG_EOR) 1824 break; 1825 /* 1826 * If the MSG_WAITALL flag is set (for non-atomic socket), we 1827 * must not quit until "uio->uio_resid == 0" or an error 1828 * termination. If a signal/timeout occurs, return with a 1829 * short count but without error. Keep sockbuf locked 1830 * against other readers. 
1831 */ 1832 while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 && 1833 !sosendallatonce(so) && nextrecord == NULL) { 1834 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1835 if (so->so_error || so->so_rcv.sb_state & SBS_CANTRCVMORE) 1836 break; 1837 /* 1838 * Notify the protocol that some data has been 1839 * drained before blocking. 1840 */ 1841 if (pr->pr_flags & PR_WANTRCVD) { 1842 SOCKBUF_UNLOCK(&so->so_rcv); 1843 VNET_SO_ASSERT(so); 1844 (*pr->pr_usrreqs->pru_rcvd)(so, flags); 1845 SOCKBUF_LOCK(&so->so_rcv); 1846 } 1847 SBLASTRECORDCHK(&so->so_rcv); 1848 SBLASTMBUFCHK(&so->so_rcv); 1849 /* 1850 * We could receive some data while was notifying 1851 * the protocol. Skip blocking in this case. 1852 */ 1853 if (so->so_rcv.sb_mb == NULL) { 1854 error = sbwait(&so->so_rcv); 1855 if (error) { 1856 SOCKBUF_UNLOCK(&so->so_rcv); 1857 goto release; 1858 } 1859 } 1860 m = so->so_rcv.sb_mb; 1861 if (m != NULL) 1862 nextrecord = m->m_nextpkt; 1863 } 1864 } 1865 1866 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1867 if (m != NULL && pr->pr_flags & PR_ATOMIC) { 1868 flags |= MSG_TRUNC; 1869 if ((flags & MSG_PEEK) == 0) 1870 (void) sbdroprecord_locked(&so->so_rcv); 1871 } 1872 if ((flags & MSG_PEEK) == 0) { 1873 if (m == NULL) { 1874 /* 1875 * First part is an inline SB_EMPTY_FIXUP(). Second 1876 * part makes sure sb_lastrecord is up-to-date if 1877 * there is still data in the socket buffer. 1878 */ 1879 so->so_rcv.sb_mb = nextrecord; 1880 if (so->so_rcv.sb_mb == NULL) { 1881 so->so_rcv.sb_mbtail = NULL; 1882 so->so_rcv.sb_lastrecord = NULL; 1883 } else if (nextrecord->m_nextpkt == NULL) 1884 so->so_rcv.sb_lastrecord = nextrecord; 1885 } 1886 SBLASTRECORDCHK(&so->so_rcv); 1887 SBLASTMBUFCHK(&so->so_rcv); 1888 /* 1889 * If soreceive() is being done from the socket callback, 1890 * then don't need to generate ACK to peer to update window, 1891 * since ACK will be generated on return to TCP. 1892 */ 1893 if (!(flags & MSG_SOCALLBCK) && 1894 (pr->pr_flags & PR_WANTRCVD)) { 1895 SOCKBUF_UNLOCK(&so->so_rcv); 1896 VNET_SO_ASSERT(so); 1897 (*pr->pr_usrreqs->pru_rcvd)(so, flags); 1898 SOCKBUF_LOCK(&so->so_rcv); 1899 } 1900 } 1901 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1902 if (orig_resid == uio->uio_resid && orig_resid && 1903 (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) { 1904 SOCKBUF_UNLOCK(&so->so_rcv); 1905 goto restart; 1906 } 1907 SOCKBUF_UNLOCK(&so->so_rcv); 1908 1909 if (flagsp != NULL) 1910 *flagsp |= flags; 1911 release: 1912 sbunlock(&so->so_rcv); 1913 return (error); 1914 } 1915 1916 /* 1917 * Optimized version of soreceive() for stream (TCP) sockets. 1918 */ 1919 int 1920 soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio, 1921 struct mbuf **mp0, struct mbuf **controlp, int *flagsp) 1922 { 1923 int len = 0, error = 0, flags, oresid; 1924 struct sockbuf *sb; 1925 struct mbuf *m, *n = NULL; 1926 1927 /* We only do stream sockets. */ 1928 if (so->so_type != SOCK_STREAM) 1929 return (EINVAL); 1930 if (psa != NULL) 1931 *psa = NULL; 1932 if (controlp != NULL) 1933 return (EINVAL); 1934 if (flagsp != NULL) 1935 flags = *flagsp &~ MSG_EOR; 1936 else 1937 flags = 0; 1938 if (flags & MSG_OOB) 1939 return (soreceive_rcvoob(so, uio, flags)); 1940 if (mp0 != NULL) 1941 *mp0 = NULL; 1942 1943 sb = &so->so_rcv; 1944 1945 /* Prevent other readers from entering the socket. */ 1946 error = sblock(sb, SBLOCKWAIT(flags)); 1947 if (error) 1948 goto out; 1949 SOCKBUF_LOCK(sb); 1950 1951 /* Easy one, no space to copyout anything. 
*/ 1952 if (uio->uio_resid == 0) { 1953 error = EINVAL; 1954 goto out; 1955 } 1956 oresid = uio->uio_resid; 1957 1958 /* We will never ever get anything unless we are or were connected. */ 1959 if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) { 1960 error = ENOTCONN; 1961 goto out; 1962 } 1963 1964 restart: 1965 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1966 1967 /* Abort if socket has reported problems. */ 1968 if (so->so_error) { 1969 if (sb->sb_cc > 0) 1970 goto deliver; 1971 if (oresid > uio->uio_resid) 1972 goto out; 1973 error = so->so_error; 1974 if (!(flags & MSG_PEEK)) 1975 so->so_error = 0; 1976 goto out; 1977 } 1978 1979 /* Door is closed. Deliver what is left, if any. */ 1980 if (sb->sb_state & SBS_CANTRCVMORE) { 1981 if (sb->sb_cc > 0) 1982 goto deliver; 1983 else 1984 goto out; 1985 } 1986 1987 /* Socket buffer is empty and we shall not block. */ 1988 if (sb->sb_cc == 0 && 1989 ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) { 1990 error = EAGAIN; 1991 goto out; 1992 } 1993 1994 /* Socket buffer got some data that we shall deliver now. */ 1995 if (sb->sb_cc > 0 && !(flags & MSG_WAITALL) && 1996 ((sb->sb_flags & SS_NBIO) || 1997 (flags & (MSG_DONTWAIT|MSG_NBIO)) || 1998 sb->sb_cc >= sb->sb_lowat || 1999 sb->sb_cc >= uio->uio_resid || 2000 sb->sb_cc >= sb->sb_hiwat) ) { 2001 goto deliver; 2002 } 2003 2004 /* On MSG_WAITALL we must wait until all data or error arrives. */ 2005 if ((flags & MSG_WAITALL) && 2006 (sb->sb_cc >= uio->uio_resid || sb->sb_cc >= sb->sb_lowat)) 2007 goto deliver; 2008 2009 /* 2010 * Wait and block until (more) data comes in. 2011 * NB: Drops the sockbuf lock during wait. 2012 */ 2013 error = sbwait(sb); 2014 if (error) 2015 goto out; 2016 goto restart; 2017 2018 deliver: 2019 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 2020 KASSERT(sb->sb_cc > 0, ("%s: sockbuf empty", __func__)); 2021 KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__)); 2022 2023 /* Statistics. */ 2024 if (uio->uio_td) 2025 uio->uio_td->td_ru.ru_msgrcv++; 2026 2027 /* Fill uio until full or current end of socket buffer is reached. */ 2028 len = min(uio->uio_resid, sb->sb_cc); 2029 if (mp0 != NULL) { 2030 /* Dequeue as many mbufs as possible. */ 2031 if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) { 2032 for (*mp0 = m = sb->sb_mb; 2033 m != NULL && m->m_len <= len; 2034 m = m->m_next) { 2035 len -= m->m_len; 2036 uio->uio_resid -= m->m_len; 2037 sbfree(sb, m); 2038 n = m; 2039 } 2040 sb->sb_mb = m; 2041 if (sb->sb_mb == NULL) 2042 SB_EMPTY_FIXUP(sb); 2043 n->m_next = NULL; 2044 } 2045 /* Copy the remainder. */ 2046 if (len > 0) { 2047 KASSERT(sb->sb_mb != NULL, 2048 ("%s: len > 0 && sb->sb_mb empty", __func__)); 2049 2050 m = m_copym(sb->sb_mb, 0, len, M_DONTWAIT); 2051 if (m == NULL) 2052 len = 0; /* Don't flush data from sockbuf. */ 2053 else 2054 uio->uio_resid -= m->m_len; 2055 if (*mp0 != NULL) 2056 n->m_next = m; 2057 else 2058 *mp0 = m; 2059 if (*mp0 == NULL) { 2060 error = ENOBUFS; 2061 goto out; 2062 } 2063 } 2064 } else { 2065 /* NB: Must unlock socket buffer as uiomove may sleep. */ 2066 SOCKBUF_UNLOCK(sb); 2067 error = m_mbuftouio(uio, sb->sb_mb, len); 2068 SOCKBUF_LOCK(sb); 2069 if (error) 2070 goto out; 2071 } 2072 SBLASTRECORDCHK(sb); 2073 SBLASTMBUFCHK(sb); 2074 2075 /* 2076 * Remove the delivered data from the socket buffer unless we 2077 * were only peeking. 2078 */ 2079 if (!(flags & MSG_PEEK)) { 2080 if (len > 0) 2081 sbdrop_locked(sb, len); 2082 2083 /* Notify protocol that we drained some data. 
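 * For PR_WANTRCVD protocols such as TCP, the pru_rcvd() call below gives
 * the protocol a chance to update the advertised receive window now that
 * socket buffer space has been freed.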
*/ 2084 if ((so->so_proto->pr_flags & PR_WANTRCVD) && 2085 (((flags & MSG_WAITALL) && uio->uio_resid > 0) || 2086 !(flags & MSG_SOCALLBCK))) { 2087 SOCKBUF_UNLOCK(sb); 2088 VNET_SO_ASSERT(so); 2089 (*so->so_proto->pr_usrreqs->pru_rcvd)(so, flags); 2090 SOCKBUF_LOCK(sb); 2091 } 2092 } 2093 2094 /* 2095 * For MSG_WAITALL we may have to loop again and wait for 2096 * more data to come in. 2097 */ 2098 if ((flags & MSG_WAITALL) && uio->uio_resid > 0) 2099 goto restart; 2100 out: 2101 SOCKBUF_LOCK_ASSERT(sb); 2102 SBLASTRECORDCHK(sb); 2103 SBLASTMBUFCHK(sb); 2104 SOCKBUF_UNLOCK(sb); 2105 sbunlock(sb); 2106 return (error); 2107 } 2108 2109 /* 2110 * Optimized version of soreceive() for simple datagram cases from userspace. 2111 * Unlike in the stream case, we're able to drop a datagram if copyout() 2112 * fails, and because we handle datagrams atomically, we don't need to use a 2113 * sleep lock to prevent I/O interlacing. 2114 */ 2115 int 2116 soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio, 2117 struct mbuf **mp0, struct mbuf **controlp, int *flagsp) 2118 { 2119 struct mbuf *m, *m2; 2120 int flags, error; 2121 ssize_t len; 2122 struct protosw *pr = so->so_proto; 2123 struct mbuf *nextrecord; 2124 2125 if (psa != NULL) 2126 *psa = NULL; 2127 if (controlp != NULL) 2128 *controlp = NULL; 2129 if (flagsp != NULL) 2130 flags = *flagsp &~ MSG_EOR; 2131 else 2132 flags = 0; 2133 2134 /* 2135 * For any complicated cases, fall back to the full 2136 * soreceive_generic(). 2137 */ 2138 if (mp0 != NULL || (flags & MSG_PEEK) || (flags & MSG_OOB)) 2139 return (soreceive_generic(so, psa, uio, mp0, controlp, 2140 flagsp)); 2141 2142 /* 2143 * Enforce restrictions on use. 2144 */ 2145 KASSERT((pr->pr_flags & PR_WANTRCVD) == 0, 2146 ("soreceive_dgram: wantrcvd")); 2147 KASSERT(pr->pr_flags & PR_ATOMIC, ("soreceive_dgram: !atomic")); 2148 KASSERT((so->so_rcv.sb_state & SBS_RCVATMARK) == 0, 2149 ("soreceive_dgram: SBS_RCVATMARK")); 2150 KASSERT((so->so_proto->pr_flags & PR_CONNREQUIRED) == 0, 2151 ("soreceive_dgram: P_CONNREQUIRED")); 2152 2153 /* 2154 * Loop blocking while waiting for a datagram. 2155 */ 2156 SOCKBUF_LOCK(&so->so_rcv); 2157 while ((m = so->so_rcv.sb_mb) == NULL) { 2158 KASSERT(so->so_rcv.sb_cc == 0, 2159 ("soreceive_dgram: sb_mb NULL but sb_cc %u", 2160 so->so_rcv.sb_cc)); 2161 if (so->so_error) { 2162 error = so->so_error; 2163 so->so_error = 0; 2164 SOCKBUF_UNLOCK(&so->so_rcv); 2165 return (error); 2166 } 2167 if (so->so_rcv.sb_state & SBS_CANTRCVMORE || 2168 uio->uio_resid == 0) { 2169 SOCKBUF_UNLOCK(&so->so_rcv); 2170 return (0); 2171 } 2172 if ((so->so_state & SS_NBIO) || 2173 (flags & (MSG_DONTWAIT|MSG_NBIO))) { 2174 SOCKBUF_UNLOCK(&so->so_rcv); 2175 return (EWOULDBLOCK); 2176 } 2177 SBLASTRECORDCHK(&so->so_rcv); 2178 SBLASTMBUFCHK(&so->so_rcv); 2179 error = sbwait(&so->so_rcv); 2180 if (error) { 2181 SOCKBUF_UNLOCK(&so->so_rcv); 2182 return (error); 2183 } 2184 } 2185 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 2186 2187 if (uio->uio_td) 2188 uio->uio_td->td_ru.ru_msgrcv++; 2189 SBLASTRECORDCHK(&so->so_rcv); 2190 SBLASTMBUFCHK(&so->so_rcv); 2191 nextrecord = m->m_nextpkt; 2192 if (nextrecord == NULL) { 2193 KASSERT(so->so_rcv.sb_lastrecord == m, 2194 ("soreceive_dgram: lastrecord != m")); 2195 } 2196 2197 KASSERT(so->so_rcv.sb_mb->m_nextpkt == nextrecord, 2198 ("soreceive_dgram: m_nextpkt != nextrecord")); 2199 2200 /* 2201 * Pull 'm' and its chain off the front of the packet queue. 
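 * sockbuf_pushsync() below then repairs sb_mb, sb_mbtail and sb_lastrecord
 * so the socket buffer remains internally consistent once this record has
 * been detached.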
2202 */ 2203 so->so_rcv.sb_mb = NULL; 2204 sockbuf_pushsync(&so->so_rcv, nextrecord); 2205 2206 /* 2207 * Walk 'm's chain and free that many bytes from the socket buffer. 2208 */ 2209 for (m2 = m; m2 != NULL; m2 = m2->m_next) 2210 sbfree(&so->so_rcv, m2); 2211 2212 /* 2213 * Do a few last checks before we let go of the lock. 2214 */ 2215 SBLASTRECORDCHK(&so->so_rcv); 2216 SBLASTMBUFCHK(&so->so_rcv); 2217 SOCKBUF_UNLOCK(&so->so_rcv); 2218 2219 if (pr->pr_flags & PR_ADDR) { 2220 KASSERT(m->m_type == MT_SONAME, 2221 ("m->m_type == %d", m->m_type)); 2222 if (psa != NULL) 2223 *psa = sodupsockaddr(mtod(m, struct sockaddr *), 2224 M_NOWAIT); 2225 m = m_free(m); 2226 } 2227 if (m == NULL) { 2228 /* XXXRW: Can this happen? */ 2229 return (0); 2230 } 2231 2232 /* 2233 * Packet to copyout() is now in 'm' and it is disconnected from the 2234 * queue. 2235 * 2236 * Process one or more MT_CONTROL mbufs present before any data mbufs 2237 * in the first mbuf chain on the socket buffer. We call into the 2238 * protocol to perform externalization (or freeing if controlp == 2239 * NULL). 2240 */ 2241 if (m->m_type == MT_CONTROL) { 2242 struct mbuf *cm = NULL, *cmn; 2243 struct mbuf **cme = &cm; 2244 2245 do { 2246 m2 = m->m_next; 2247 m->m_next = NULL; 2248 *cme = m; 2249 cme = &(*cme)->m_next; 2250 m = m2; 2251 } while (m != NULL && m->m_type == MT_CONTROL); 2252 while (cm != NULL) { 2253 cmn = cm->m_next; 2254 cm->m_next = NULL; 2255 if (pr->pr_domain->dom_externalize != NULL) { 2256 error = (*pr->pr_domain->dom_externalize) 2257 (cm, controlp); 2258 } else if (controlp != NULL) 2259 *controlp = cm; 2260 else 2261 m_freem(cm); 2262 if (controlp != NULL) { 2263 while (*controlp != NULL) 2264 controlp = &(*controlp)->m_next; 2265 } 2266 cm = cmn; 2267 } 2268 } 2269 KASSERT(m->m_type == MT_DATA, ("soreceive_dgram: !data")); 2270 2271 while (m != NULL && uio->uio_resid > 0) { 2272 len = uio->uio_resid; 2273 if (len > m->m_len) 2274 len = m->m_len; 2275 error = uiomove(mtod(m, char *), (int)len, uio); 2276 if (error) { 2277 m_freem(m); 2278 return (error); 2279 } 2280 if (len == m->m_len) 2281 m = m_free(m); 2282 else { 2283 m->m_data += len; 2284 m->m_len -= len; 2285 } 2286 } 2287 if (m != NULL) 2288 flags |= MSG_TRUNC; 2289 m_freem(m); 2290 if (flagsp != NULL) 2291 *flagsp |= flags; 2292 return (0); 2293 } 2294 2295 int 2296 soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio, 2297 struct mbuf **mp0, struct mbuf **controlp, int *flagsp) 2298 { 2299 int error; 2300 2301 CURVNET_SET(so->so_vnet); 2302 error = (so->so_proto->pr_usrreqs->pru_soreceive(so, psa, uio, mp0, 2303 controlp, flagsp)); 2304 CURVNET_RESTORE(); 2305 return (error); 2306 } 2307 2308 int 2309 soshutdown(struct socket *so, int how) 2310 { 2311 struct protosw *pr = so->so_proto; 2312 int error; 2313 2314 if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR)) 2315 return (EINVAL); 2316 2317 CURVNET_SET(so->so_vnet); 2318 if (pr->pr_usrreqs->pru_flush != NULL) { 2319 (*pr->pr_usrreqs->pru_flush)(so, how); 2320 } 2321 if (how != SHUT_WR) 2322 sorflush(so); 2323 if (how != SHUT_RD) { 2324 error = (*pr->pr_usrreqs->pru_shutdown)(so); 2325 CURVNET_RESTORE(); 2326 return (error); 2327 } 2328 CURVNET_RESTORE(); 2329 return (0); 2330 } 2331 2332 void 2333 sorflush(struct socket *so) 2334 { 2335 struct sockbuf *sb = &so->so_rcv; 2336 struct protosw *pr = so->so_proto; 2337 struct sockbuf asb; 2338 2339 VNET_SO_ASSERT(so); 2340 2341 /* 2342 * In order to avoid calling dom_dispose with the socket buffer mutex 2343 * held, and in 
order to generally avoid holding the lock for a long 2344 * time, we make a copy of the socket buffer and clear the original 2345 * (except locks, state). The new socket buffer copy won't have 2346 * initialized locks so we can only call routines that won't use or 2347 * assert those locks. 2348 * 2349 * Dislodge threads currently blocked in receive and wait to acquire 2350 * a lock against other simultaneous readers before clearing the 2351 * socket buffer. Don't let our acquire be interrupted by a signal 2352 * despite any existing socket disposition on interruptable waiting. 2353 */ 2354 socantrcvmore(so); 2355 (void) sblock(sb, SBL_WAIT | SBL_NOINTR); 2356 2357 /* 2358 * Invalidate/clear most of the sockbuf structure, but leave selinfo 2359 * and mutex data unchanged. 2360 */ 2361 SOCKBUF_LOCK(sb); 2362 bzero(&asb, offsetof(struct sockbuf, sb_startzero)); 2363 bcopy(&sb->sb_startzero, &asb.sb_startzero, 2364 sizeof(*sb) - offsetof(struct sockbuf, sb_startzero)); 2365 bzero(&sb->sb_startzero, 2366 sizeof(*sb) - offsetof(struct sockbuf, sb_startzero)); 2367 SOCKBUF_UNLOCK(sb); 2368 sbunlock(sb); 2369 2370 /* 2371 * Dispose of special rights and flush the socket buffer. Don't call 2372 * any unsafe routines (that rely on locks being initialized) on asb. 2373 */ 2374 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL) 2375 (*pr->pr_domain->dom_dispose)(asb.sb_mb); 2376 sbrelease_internal(&asb, so); 2377 } 2378 2379 /* 2380 * Perhaps this routine, and sooptcopyout(), below, ought to come in an 2381 * additional variant to handle the case where the option value needs to be 2382 * some kind of integer, but not a specific size. In addition to their use 2383 * here, these functions are also called by the protocol-level pr_ctloutput() 2384 * routines. 2385 */ 2386 int 2387 sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen) 2388 { 2389 size_t valsize; 2390 2391 /* 2392 * If the user gives us more than we wanted, we ignore it, but if we 2393 * don't get the minimum length the caller wants, we return EINVAL. 2394 * On success, sopt->sopt_valsize is set to however much we actually 2395 * retrieved. 2396 */ 2397 if ((valsize = sopt->sopt_valsize) < minlen) 2398 return EINVAL; 2399 if (valsize > len) 2400 sopt->sopt_valsize = valsize = len; 2401 2402 if (sopt->sopt_td != NULL) 2403 return (copyin(sopt->sopt_val, buf, valsize)); 2404 2405 bcopy(sopt->sopt_val, buf, valsize); 2406 return (0); 2407 } 2408 2409 /* 2410 * Kernel version of setsockopt(2). 
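 *
 * A minimal in-kernel usage sketch (the socket pointer and option value
 * shown are illustrative only):
 *
 *	int on = 1;
 *
 *	error = so_setsockopt(so, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));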
2411 * 2412 * XXX: optlen is size_t, not socklen_t 2413 */ 2414 int 2415 so_setsockopt(struct socket *so, int level, int optname, void *optval, 2416 size_t optlen) 2417 { 2418 struct sockopt sopt; 2419 2420 sopt.sopt_level = level; 2421 sopt.sopt_name = optname; 2422 sopt.sopt_dir = SOPT_SET; 2423 sopt.sopt_val = optval; 2424 sopt.sopt_valsize = optlen; 2425 sopt.sopt_td = NULL; 2426 return (sosetopt(so, &sopt)); 2427 } 2428 2429 int 2430 sosetopt(struct socket *so, struct sockopt *sopt) 2431 { 2432 int error, optval; 2433 struct linger l; 2434 struct timeval tv; 2435 u_long val; 2436 uint32_t val32; 2437 #ifdef MAC 2438 struct mac extmac; 2439 #endif 2440 2441 CURVNET_SET(so->so_vnet); 2442 error = 0; 2443 if (sopt->sopt_level != SOL_SOCKET) { 2444 if (so->so_proto->pr_ctloutput != NULL) { 2445 error = (*so->so_proto->pr_ctloutput)(so, sopt); 2446 CURVNET_RESTORE(); 2447 return (error); 2448 } 2449 error = ENOPROTOOPT; 2450 } else { 2451 switch (sopt->sopt_name) { 2452 #ifdef INET 2453 case SO_ACCEPTFILTER: 2454 error = do_setopt_accept_filter(so, sopt); 2455 if (error) 2456 goto bad; 2457 break; 2458 #endif 2459 case SO_LINGER: 2460 error = sooptcopyin(sopt, &l, sizeof l, sizeof l); 2461 if (error) 2462 goto bad; 2463 2464 SOCK_LOCK(so); 2465 so->so_linger = l.l_linger; 2466 if (l.l_onoff) 2467 so->so_options |= SO_LINGER; 2468 else 2469 so->so_options &= ~SO_LINGER; 2470 SOCK_UNLOCK(so); 2471 break; 2472 2473 case SO_DEBUG: 2474 case SO_KEEPALIVE: 2475 case SO_DONTROUTE: 2476 case SO_USELOOPBACK: 2477 case SO_BROADCAST: 2478 case SO_REUSEADDR: 2479 case SO_REUSEPORT: 2480 case SO_OOBINLINE: 2481 case SO_TIMESTAMP: 2482 case SO_BINTIME: 2483 case SO_NOSIGPIPE: 2484 case SO_NO_DDP: 2485 case SO_NO_OFFLOAD: 2486 error = sooptcopyin(sopt, &optval, sizeof optval, 2487 sizeof optval); 2488 if (error) 2489 goto bad; 2490 SOCK_LOCK(so); 2491 if (optval) 2492 so->so_options |= sopt->sopt_name; 2493 else 2494 so->so_options &= ~sopt->sopt_name; 2495 SOCK_UNLOCK(so); 2496 break; 2497 2498 case SO_SETFIB: 2499 error = sooptcopyin(sopt, &optval, sizeof optval, 2500 sizeof optval); 2501 if (error) 2502 goto bad; 2503 2504 if (optval < 0 || optval >= rt_numfibs) { 2505 error = EINVAL; 2506 goto bad; 2507 } 2508 if (((so->so_proto->pr_domain->dom_family == PF_INET) || 2509 (so->so_proto->pr_domain->dom_family == PF_INET6) || 2510 (so->so_proto->pr_domain->dom_family == PF_ROUTE))) 2511 so->so_fibnum = optval; 2512 else 2513 so->so_fibnum = 0; 2514 break; 2515 2516 case SO_USER_COOKIE: 2517 error = sooptcopyin(sopt, &val32, sizeof val32, 2518 sizeof val32); 2519 if (error) 2520 goto bad; 2521 so->so_user_cookie = val32; 2522 break; 2523 2524 case SO_SNDBUF: 2525 case SO_RCVBUF: 2526 case SO_SNDLOWAT: 2527 case SO_RCVLOWAT: 2528 error = sooptcopyin(sopt, &optval, sizeof optval, 2529 sizeof optval); 2530 if (error) 2531 goto bad; 2532 2533 /* 2534 * Values < 1 make no sense for any of these options, 2535 * so disallow them. 2536 */ 2537 if (optval < 1) { 2538 error = EINVAL; 2539 goto bad; 2540 } 2541 2542 switch (sopt->sopt_name) { 2543 case SO_SNDBUF: 2544 case SO_RCVBUF: 2545 if (sbreserve(sopt->sopt_name == SO_SNDBUF ? 2546 &so->so_snd : &so->so_rcv, (u_long)optval, 2547 so, curthread) == 0) { 2548 error = ENOBUFS; 2549 goto bad; 2550 } 2551 (sopt->sopt_name == SO_SNDBUF ? &so->so_snd : 2552 &so->so_rcv)->sb_flags &= ~SB_AUTOSIZE; 2553 break; 2554 2555 /* 2556 * Make sure the low-water is never greater than the 2557 * high-water. 
2558 */ 2559 case SO_SNDLOWAT: 2560 SOCKBUF_LOCK(&so->so_snd); 2561 so->so_snd.sb_lowat = 2562 (optval > so->so_snd.sb_hiwat) ? 2563 so->so_snd.sb_hiwat : optval; 2564 SOCKBUF_UNLOCK(&so->so_snd); 2565 break; 2566 case SO_RCVLOWAT: 2567 SOCKBUF_LOCK(&so->so_rcv); 2568 so->so_rcv.sb_lowat = 2569 (optval > so->so_rcv.sb_hiwat) ? 2570 so->so_rcv.sb_hiwat : optval; 2571 SOCKBUF_UNLOCK(&so->so_rcv); 2572 break; 2573 } 2574 break; 2575 2576 case SO_SNDTIMEO: 2577 case SO_RCVTIMEO: 2578 #ifdef COMPAT_FREEBSD32 2579 if (SV_CURPROC_FLAG(SV_ILP32)) { 2580 struct timeval32 tv32; 2581 2582 error = sooptcopyin(sopt, &tv32, sizeof tv32, 2583 sizeof tv32); 2584 CP(tv32, tv, tv_sec); 2585 CP(tv32, tv, tv_usec); 2586 } else 2587 #endif 2588 error = sooptcopyin(sopt, &tv, sizeof tv, 2589 sizeof tv); 2590 if (error) 2591 goto bad; 2592 2593 /* assert(hz > 0); */ 2594 if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz || 2595 tv.tv_usec < 0 || tv.tv_usec >= 1000000) { 2596 error = EDOM; 2597 goto bad; 2598 } 2599 /* assert(tick > 0); */ 2600 /* assert(ULONG_MAX - INT_MAX >= 1000000); */ 2601 val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick; 2602 if (val > INT_MAX) { 2603 error = EDOM; 2604 goto bad; 2605 } 2606 if (val == 0 && tv.tv_usec != 0) 2607 val = 1; 2608 2609 switch (sopt->sopt_name) { 2610 case SO_SNDTIMEO: 2611 so->so_snd.sb_timeo = val; 2612 break; 2613 case SO_RCVTIMEO: 2614 so->so_rcv.sb_timeo = val; 2615 break; 2616 } 2617 break; 2618 2619 case SO_LABEL: 2620 #ifdef MAC 2621 error = sooptcopyin(sopt, &extmac, sizeof extmac, 2622 sizeof extmac); 2623 if (error) 2624 goto bad; 2625 error = mac_setsockopt_label(sopt->sopt_td->td_ucred, 2626 so, &extmac); 2627 #else 2628 error = EOPNOTSUPP; 2629 #endif 2630 break; 2631 2632 default: 2633 error = ENOPROTOOPT; 2634 break; 2635 } 2636 if (error == 0 && so->so_proto->pr_ctloutput != NULL) 2637 (void)(*so->so_proto->pr_ctloutput)(so, sopt); 2638 } 2639 bad: 2640 CURVNET_RESTORE(); 2641 return (error); 2642 } 2643 2644 /* 2645 * Helper routine for getsockopt. 2646 */ 2647 int 2648 sooptcopyout(struct sockopt *sopt, const void *buf, size_t len) 2649 { 2650 int error; 2651 size_t valsize; 2652 2653 error = 0; 2654 2655 /* 2656 * Documented get behavior is that we always return a value, possibly 2657 * truncated to fit in the user's buffer. Traditional behavior is 2658 * that we always tell the user precisely how much we copied, rather 2659 * than something useful like the total amount we had available for 2660 * her. Note that this interface is not idempotent; the entire 2661 * answer must generated ahead of time. 
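 *
 * A typical get-side handler copies a locally prepared value back out, for
 * example (sketch; 'optval' stands for whatever integer the option reports):
 *
 *	error = sooptcopyout(sopt, &optval, sizeof(optval));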
2662 */ 2663 valsize = min(len, sopt->sopt_valsize); 2664 sopt->sopt_valsize = valsize; 2665 if (sopt->sopt_val != NULL) { 2666 if (sopt->sopt_td != NULL) 2667 error = copyout(buf, sopt->sopt_val, valsize); 2668 else 2669 bcopy(buf, sopt->sopt_val, valsize); 2670 } 2671 return (error); 2672 } 2673 2674 int 2675 sogetopt(struct socket *so, struct sockopt *sopt) 2676 { 2677 int error, optval; 2678 struct linger l; 2679 struct timeval tv; 2680 #ifdef MAC 2681 struct mac extmac; 2682 #endif 2683 2684 CURVNET_SET(so->so_vnet); 2685 error = 0; 2686 if (sopt->sopt_level != SOL_SOCKET) { 2687 if (so->so_proto->pr_ctloutput != NULL) 2688 error = (*so->so_proto->pr_ctloutput)(so, sopt); 2689 else 2690 error = ENOPROTOOPT; 2691 CURVNET_RESTORE(); 2692 return (error); 2693 } else { 2694 switch (sopt->sopt_name) { 2695 #ifdef INET 2696 case SO_ACCEPTFILTER: 2697 error = do_getopt_accept_filter(so, sopt); 2698 break; 2699 #endif 2700 case SO_LINGER: 2701 SOCK_LOCK(so); 2702 l.l_onoff = so->so_options & SO_LINGER; 2703 l.l_linger = so->so_linger; 2704 SOCK_UNLOCK(so); 2705 error = sooptcopyout(sopt, &l, sizeof l); 2706 break; 2707 2708 case SO_USELOOPBACK: 2709 case SO_DONTROUTE: 2710 case SO_DEBUG: 2711 case SO_KEEPALIVE: 2712 case SO_REUSEADDR: 2713 case SO_REUSEPORT: 2714 case SO_BROADCAST: 2715 case SO_OOBINLINE: 2716 case SO_ACCEPTCONN: 2717 case SO_TIMESTAMP: 2718 case SO_BINTIME: 2719 case SO_NOSIGPIPE: 2720 optval = so->so_options & sopt->sopt_name; 2721 integer: 2722 error = sooptcopyout(sopt, &optval, sizeof optval); 2723 break; 2724 2725 case SO_TYPE: 2726 optval = so->so_type; 2727 goto integer; 2728 2729 case SO_PROTOCOL: 2730 optval = so->so_proto->pr_protocol; 2731 goto integer; 2732 2733 case SO_ERROR: 2734 SOCK_LOCK(so); 2735 optval = so->so_error; 2736 so->so_error = 0; 2737 SOCK_UNLOCK(so); 2738 goto integer; 2739 2740 case SO_SNDBUF: 2741 optval = so->so_snd.sb_hiwat; 2742 goto integer; 2743 2744 case SO_RCVBUF: 2745 optval = so->so_rcv.sb_hiwat; 2746 goto integer; 2747 2748 case SO_SNDLOWAT: 2749 optval = so->so_snd.sb_lowat; 2750 goto integer; 2751 2752 case SO_RCVLOWAT: 2753 optval = so->so_rcv.sb_lowat; 2754 goto integer; 2755 2756 case SO_SNDTIMEO: 2757 case SO_RCVTIMEO: 2758 optval = (sopt->sopt_name == SO_SNDTIMEO ? 
2759 so->so_snd.sb_timeo : so->so_rcv.sb_timeo); 2760 2761 tv.tv_sec = optval / hz; 2762 tv.tv_usec = (optval % hz) * tick; 2763 #ifdef COMPAT_FREEBSD32 2764 if (SV_CURPROC_FLAG(SV_ILP32)) { 2765 struct timeval32 tv32; 2766 2767 CP(tv, tv32, tv_sec); 2768 CP(tv, tv32, tv_usec); 2769 error = sooptcopyout(sopt, &tv32, sizeof tv32); 2770 } else 2771 #endif 2772 error = sooptcopyout(sopt, &tv, sizeof tv); 2773 break; 2774 2775 case SO_LABEL: 2776 #ifdef MAC 2777 error = sooptcopyin(sopt, &extmac, sizeof(extmac), 2778 sizeof(extmac)); 2779 if (error) 2780 goto bad; 2781 error = mac_getsockopt_label(sopt->sopt_td->td_ucred, 2782 so, &extmac); 2783 if (error) 2784 goto bad; 2785 error = sooptcopyout(sopt, &extmac, sizeof extmac); 2786 #else 2787 error = EOPNOTSUPP; 2788 #endif 2789 break; 2790 2791 case SO_PEERLABEL: 2792 #ifdef MAC 2793 error = sooptcopyin(sopt, &extmac, sizeof(extmac), 2794 sizeof(extmac)); 2795 if (error) 2796 goto bad; 2797 error = mac_getsockopt_peerlabel( 2798 sopt->sopt_td->td_ucred, so, &extmac); 2799 if (error) 2800 goto bad; 2801 error = sooptcopyout(sopt, &extmac, sizeof extmac); 2802 #else 2803 error = EOPNOTSUPP; 2804 #endif 2805 break; 2806 2807 case SO_LISTENQLIMIT: 2808 optval = so->so_qlimit; 2809 goto integer; 2810 2811 case SO_LISTENQLEN: 2812 optval = so->so_qlen; 2813 goto integer; 2814 2815 case SO_LISTENINCQLEN: 2816 optval = so->so_incqlen; 2817 goto integer; 2818 2819 default: 2820 error = ENOPROTOOPT; 2821 break; 2822 } 2823 } 2824 #ifdef MAC 2825 bad: 2826 #endif 2827 CURVNET_RESTORE(); 2828 return (error); 2829 } 2830 2831 int 2832 soopt_getm(struct sockopt *sopt, struct mbuf **mp) 2833 { 2834 struct mbuf *m, *m_prev; 2835 int sopt_size = sopt->sopt_valsize; 2836 2837 MGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT, MT_DATA); 2838 if (m == NULL) 2839 return ENOBUFS; 2840 if (sopt_size > MLEN) { 2841 MCLGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT); 2842 if ((m->m_flags & M_EXT) == 0) { 2843 m_free(m); 2844 return ENOBUFS; 2845 } 2846 m->m_len = min(MCLBYTES, sopt_size); 2847 } else { 2848 m->m_len = min(MLEN, sopt_size); 2849 } 2850 sopt_size -= m->m_len; 2851 *mp = m; 2852 m_prev = m; 2853 2854 while (sopt_size) { 2855 MGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT, MT_DATA); 2856 if (m == NULL) { 2857 m_freem(*mp); 2858 return ENOBUFS; 2859 } 2860 if (sopt_size > MLEN) { 2861 MCLGET(m, sopt->sopt_td != NULL ? 
M_WAIT : 2862 M_DONTWAIT); 2863 if ((m->m_flags & M_EXT) == 0) { 2864 m_freem(m); 2865 m_freem(*mp); 2866 return ENOBUFS; 2867 } 2868 m->m_len = min(MCLBYTES, sopt_size); 2869 } else { 2870 m->m_len = min(MLEN, sopt_size); 2871 } 2872 sopt_size -= m->m_len; 2873 m_prev->m_next = m; 2874 m_prev = m; 2875 } 2876 return (0); 2877 } 2878 2879 int 2880 soopt_mcopyin(struct sockopt *sopt, struct mbuf *m) 2881 { 2882 struct mbuf *m0 = m; 2883 2884 if (sopt->sopt_val == NULL) 2885 return (0); 2886 while (m != NULL && sopt->sopt_valsize >= m->m_len) { 2887 if (sopt->sopt_td != NULL) { 2888 int error; 2889 2890 error = copyin(sopt->sopt_val, mtod(m, char *), 2891 m->m_len); 2892 if (error != 0) { 2893 m_freem(m0); 2894 return(error); 2895 } 2896 } else 2897 bcopy(sopt->sopt_val, mtod(m, char *), m->m_len); 2898 sopt->sopt_valsize -= m->m_len; 2899 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len; 2900 m = m->m_next; 2901 } 2902 if (m != NULL) /* should be allocated enoughly at ip6_sooptmcopyin() */ 2903 panic("ip6_sooptmcopyin"); 2904 return (0); 2905 } 2906 2907 int 2908 soopt_mcopyout(struct sockopt *sopt, struct mbuf *m) 2909 { 2910 struct mbuf *m0 = m; 2911 size_t valsize = 0; 2912 2913 if (sopt->sopt_val == NULL) 2914 return (0); 2915 while (m != NULL && sopt->sopt_valsize >= m->m_len) { 2916 if (sopt->sopt_td != NULL) { 2917 int error; 2918 2919 error = copyout(mtod(m, char *), sopt->sopt_val, 2920 m->m_len); 2921 if (error != 0) { 2922 m_freem(m0); 2923 return(error); 2924 } 2925 } else 2926 bcopy(mtod(m, char *), sopt->sopt_val, m->m_len); 2927 sopt->sopt_valsize -= m->m_len; 2928 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len; 2929 valsize += m->m_len; 2930 m = m->m_next; 2931 } 2932 if (m != NULL) { 2933 /* enough soopt buffer should be given from user-land */ 2934 m_freem(m0); 2935 return(EINVAL); 2936 } 2937 sopt->sopt_valsize = valsize; 2938 return (0); 2939 } 2940 2941 /* 2942 * sohasoutofband(): protocol notifies socket layer of the arrival of new 2943 * out-of-band data, which will then notify socket consumers. 2944 */ 2945 void 2946 sohasoutofband(struct socket *so) 2947 { 2948 2949 if (so->so_sigio != NULL) 2950 pgsigio(&so->so_sigio, SIGURG, 0); 2951 selwakeuppri(&so->so_rcv.sb_sel, PSOCK); 2952 } 2953 2954 int 2955 sopoll(struct socket *so, int events, struct ucred *active_cred, 2956 struct thread *td) 2957 { 2958 2959 /* 2960 * We do not need to set or assert curvnet as long as everyone uses 2961 * sopoll_generic(). 
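 * A protocol that installed its own pru_sopoll method would have to
 * establish the vnet context itself.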
2962 */ 2963 return (so->so_proto->pr_usrreqs->pru_sopoll(so, events, active_cred, 2964 td)); 2965 } 2966 2967 int 2968 sopoll_generic(struct socket *so, int events, struct ucred *active_cred, 2969 struct thread *td) 2970 { 2971 int revents = 0; 2972 2973 SOCKBUF_LOCK(&so->so_snd); 2974 SOCKBUF_LOCK(&so->so_rcv); 2975 if (events & (POLLIN | POLLRDNORM)) 2976 if (soreadabledata(so)) 2977 revents |= events & (POLLIN | POLLRDNORM); 2978 2979 if (events & (POLLOUT | POLLWRNORM)) 2980 if (sowriteable(so)) 2981 revents |= events & (POLLOUT | POLLWRNORM); 2982 2983 if (events & (POLLPRI | POLLRDBAND)) 2984 if (so->so_oobmark || (so->so_rcv.sb_state & SBS_RCVATMARK)) 2985 revents |= events & (POLLPRI | POLLRDBAND); 2986 2987 if ((events & POLLINIGNEOF) == 0) { 2988 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 2989 revents |= events & (POLLIN | POLLRDNORM); 2990 if (so->so_snd.sb_state & SBS_CANTSENDMORE) 2991 revents |= POLLHUP; 2992 } 2993 } 2994 2995 if (revents == 0) { 2996 if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) { 2997 selrecord(td, &so->so_rcv.sb_sel); 2998 so->so_rcv.sb_flags |= SB_SEL; 2999 } 3000 3001 if (events & (POLLOUT | POLLWRNORM)) { 3002 selrecord(td, &so->so_snd.sb_sel); 3003 so->so_snd.sb_flags |= SB_SEL; 3004 } 3005 } 3006 3007 SOCKBUF_UNLOCK(&so->so_rcv); 3008 SOCKBUF_UNLOCK(&so->so_snd); 3009 return (revents); 3010 } 3011 3012 int 3013 soo_kqfilter(struct file *fp, struct knote *kn) 3014 { 3015 struct socket *so = kn->kn_fp->f_data; 3016 struct sockbuf *sb; 3017 3018 switch (kn->kn_filter) { 3019 case EVFILT_READ: 3020 if (so->so_options & SO_ACCEPTCONN) 3021 kn->kn_fop = &solisten_filtops; 3022 else 3023 kn->kn_fop = &soread_filtops; 3024 sb = &so->so_rcv; 3025 break; 3026 case EVFILT_WRITE: 3027 kn->kn_fop = &sowrite_filtops; 3028 sb = &so->so_snd; 3029 break; 3030 default: 3031 return (EINVAL); 3032 } 3033 3034 SOCKBUF_LOCK(sb); 3035 knlist_add(&sb->sb_sel.si_note, kn, 1); 3036 sb->sb_flags |= SB_KNOTE; 3037 SOCKBUF_UNLOCK(sb); 3038 return (0); 3039 } 3040 3041 /* 3042 * Some routines that return EOPNOTSUPP for entry points that are not 3043 * supported by a protocol. Fill in as needed. 
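 *
 * A protocol that does not implement a given operation points the matching
 * pr_usrreqs member at one of these stubs, for example (sketch; the
 * structure name is hypothetical and a real table has many more members):
 *
 *	struct pr_usrreqs foo_usrreqs = {
 *		.pru_accept =		pru_accept_notsupp,
 *		.pru_connect2 =		pru_connect2_notsupp,
 *		.pru_rcvoob =		pru_rcvoob_notsupp,
 *	};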
3044 */ 3045 int 3046 pru_accept_notsupp(struct socket *so, struct sockaddr **nam) 3047 { 3048 3049 return EOPNOTSUPP; 3050 } 3051 3052 int 3053 pru_attach_notsupp(struct socket *so, int proto, struct thread *td) 3054 { 3055 3056 return EOPNOTSUPP; 3057 } 3058 3059 int 3060 pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td) 3061 { 3062 3063 return EOPNOTSUPP; 3064 } 3065 3066 int 3067 pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td) 3068 { 3069 3070 return EOPNOTSUPP; 3071 } 3072 3073 int 3074 pru_connect2_notsupp(struct socket *so1, struct socket *so2) 3075 { 3076 3077 return EOPNOTSUPP; 3078 } 3079 3080 int 3081 pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data, 3082 struct ifnet *ifp, struct thread *td) 3083 { 3084 3085 return EOPNOTSUPP; 3086 } 3087 3088 int 3089 pru_disconnect_notsupp(struct socket *so) 3090 { 3091 3092 return EOPNOTSUPP; 3093 } 3094 3095 int 3096 pru_listen_notsupp(struct socket *so, int backlog, struct thread *td) 3097 { 3098 3099 return EOPNOTSUPP; 3100 } 3101 3102 int 3103 pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam) 3104 { 3105 3106 return EOPNOTSUPP; 3107 } 3108 3109 int 3110 pru_rcvd_notsupp(struct socket *so, int flags) 3111 { 3112 3113 return EOPNOTSUPP; 3114 } 3115 3116 int 3117 pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags) 3118 { 3119 3120 return EOPNOTSUPP; 3121 } 3122 3123 int 3124 pru_send_notsupp(struct socket *so, int flags, struct mbuf *m, 3125 struct sockaddr *addr, struct mbuf *control, struct thread *td) 3126 { 3127 3128 return EOPNOTSUPP; 3129 } 3130 3131 /* 3132 * This isn't really a ``null'' operation, but it's the default one and 3133 * doesn't do anything destructive. 3134 */ 3135 int 3136 pru_sense_null(struct socket *so, struct stat *sb) 3137 { 3138 3139 sb->st_blksize = so->so_snd.sb_hiwat; 3140 return 0; 3141 } 3142 3143 int 3144 pru_shutdown_notsupp(struct socket *so) 3145 { 3146 3147 return EOPNOTSUPP; 3148 } 3149 3150 int 3151 pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam) 3152 { 3153 3154 return EOPNOTSUPP; 3155 } 3156 3157 int 3158 pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio, 3159 struct mbuf *top, struct mbuf *control, int flags, struct thread *td) 3160 { 3161 3162 return EOPNOTSUPP; 3163 } 3164 3165 int 3166 pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr, 3167 struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp) 3168 { 3169 3170 return EOPNOTSUPP; 3171 } 3172 3173 int 3174 pru_sopoll_notsupp(struct socket *so, int events, struct ucred *cred, 3175 struct thread *td) 3176 { 3177 3178 return EOPNOTSUPP; 3179 } 3180 3181 static void 3182 filt_sordetach(struct knote *kn) 3183 { 3184 struct socket *so = kn->kn_fp->f_data; 3185 3186 SOCKBUF_LOCK(&so->so_rcv); 3187 knlist_remove(&so->so_rcv.sb_sel.si_note, kn, 1); 3188 if (knlist_empty(&so->so_rcv.sb_sel.si_note)) 3189 so->so_rcv.sb_flags &= ~SB_KNOTE; 3190 SOCKBUF_UNLOCK(&so->so_rcv); 3191 } 3192 3193 /*ARGSUSED*/ 3194 static int 3195 filt_soread(struct knote *kn, long hint) 3196 { 3197 struct socket *so; 3198 3199 so = kn->kn_fp->f_data; 3200 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 3201 3202 kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl; 3203 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 3204 kn->kn_flags |= EV_EOF; 3205 kn->kn_fflags = so->so_error; 3206 return (1); 3207 } else if (so->so_error) /* temporary udp error */ 3208 return (1); 3209 else if (kn->kn_sfflags & NOTE_LOWAT) 3210 return 
(kn->kn_data >= kn->kn_sdata); 3211 else 3212 return (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat); 3213 } 3214 3215 static void 3216 filt_sowdetach(struct knote *kn) 3217 { 3218 struct socket *so = kn->kn_fp->f_data; 3219 3220 SOCKBUF_LOCK(&so->so_snd); 3221 knlist_remove(&so->so_snd.sb_sel.si_note, kn, 1); 3222 if (knlist_empty(&so->so_snd.sb_sel.si_note)) 3223 so->so_snd.sb_flags &= ~SB_KNOTE; 3224 SOCKBUF_UNLOCK(&so->so_snd); 3225 } 3226 3227 /*ARGSUSED*/ 3228 static int 3229 filt_sowrite(struct knote *kn, long hint) 3230 { 3231 struct socket *so; 3232 3233 so = kn->kn_fp->f_data; 3234 SOCKBUF_LOCK_ASSERT(&so->so_snd); 3235 kn->kn_data = sbspace(&so->so_snd); 3236 if (so->so_snd.sb_state & SBS_CANTSENDMORE) { 3237 kn->kn_flags |= EV_EOF; 3238 kn->kn_fflags = so->so_error; 3239 return (1); 3240 } else if (so->so_error) /* temporary udp error */ 3241 return (1); 3242 else if (((so->so_state & SS_ISCONNECTED) == 0) && 3243 (so->so_proto->pr_flags & PR_CONNREQUIRED)) 3244 return (0); 3245 else if (kn->kn_sfflags & NOTE_LOWAT) 3246 return (kn->kn_data >= kn->kn_sdata); 3247 else 3248 return (kn->kn_data >= so->so_snd.sb_lowat); 3249 } 3250 3251 /*ARGSUSED*/ 3252 static int 3253 filt_solisten(struct knote *kn, long hint) 3254 { 3255 struct socket *so = kn->kn_fp->f_data; 3256 3257 kn->kn_data = so->so_qlen; 3258 return (! TAILQ_EMPTY(&so->so_comp)); 3259 } 3260 3261 int 3262 socheckuid(struct socket *so, uid_t uid) 3263 { 3264 3265 if (so == NULL) 3266 return (EPERM); 3267 if (so->so_cred->cr_uid != uid) 3268 return (EPERM); 3269 return (0); 3270 } 3271 3272 static int 3273 sysctl_somaxconn(SYSCTL_HANDLER_ARGS) 3274 { 3275 int error; 3276 int val; 3277 3278 val = somaxconn; 3279 error = sysctl_handle_int(oidp, &val, 0, req); 3280 if (error || !req->newptr ) 3281 return (error); 3282 3283 if (val < 1 || val > USHRT_MAX) 3284 return (EINVAL); 3285 3286 somaxconn = val; 3287 return (0); 3288 } 3289 3290 /* 3291 * These functions are used by protocols to notify the socket layer (and its 3292 * consumers) of state changes in the sockets driven by protocol-side events. 3293 */ 3294 3295 /* 3296 * Procedures to manipulate state flags of socket and do appropriate wakeups. 3297 * 3298 * Normal sequence from the active (originating) side is that 3299 * soisconnecting() is called during processing of connect() call, resulting 3300 * in an eventual call to soisconnected() if/when the connection is 3301 * established. When the connection is torn down soisdisconnecting() is 3302 * called during processing of disconnect() call, and soisdisconnected() is 3303 * called when the connection to the peer is totally severed. The semantics 3304 * of these routines are such that connectionless protocols can call 3305 * soisconnected() and soisdisconnected() only, bypassing the in-progress 3306 * calls when setting up a ``connection'' takes no time. 3307 * 3308 * From the passive side, a socket is created with two queues of sockets: 3309 * so_incomp for connections in progress and so_comp for connections already 3310 * made and awaiting user acceptance. As a protocol is preparing incoming 3311 * connections, it creates a socket structure queued on so_incomp by calling 3312 * sonewconn(). When the connection is established, soisconnected() is 3313 * called, and transfers the socket structure to so_comp, making it available 3314 * to accept(). 3315 * 3316 * If a socket is closed with sockets on either so_incomp or so_comp, these 3317 * sockets are dropped. 
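 *
 * The normal active-open sequence described above is thus (sketch):
 *
 *	soisconnecting(so);		connect(2) processing has begun
 *	soisconnected(so);		the connection is established
 *	soisdisconnecting(so);		disconnect(2) processing has begun
 *	soisdisconnected(so);		the connection is totally severed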
3318 * 3319 * If higher-level protocols are implemented in the kernel, the wakeups done 3320 * here will sometimes cause software-interrupt process scheduling. 3321 */ 3322 void 3323 soisconnecting(struct socket *so) 3324 { 3325 3326 SOCK_LOCK(so); 3327 so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING); 3328 so->so_state |= SS_ISCONNECTING; 3329 SOCK_UNLOCK(so); 3330 } 3331 3332 void 3333 soisconnected(struct socket *so) 3334 { 3335 struct socket *head; 3336 int ret; 3337 3338 restart: 3339 ACCEPT_LOCK(); 3340 SOCK_LOCK(so); 3341 so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING); 3342 so->so_state |= SS_ISCONNECTED; 3343 head = so->so_head; 3344 if (head != NULL && (so->so_qstate & SQ_INCOMP)) { 3345 if ((so->so_options & SO_ACCEPTFILTER) == 0) { 3346 SOCK_UNLOCK(so); 3347 TAILQ_REMOVE(&head->so_incomp, so, so_list); 3348 head->so_incqlen--; 3349 so->so_qstate &= ~SQ_INCOMP; 3350 TAILQ_INSERT_TAIL(&head->so_comp, so, so_list); 3351 head->so_qlen++; 3352 so->so_qstate |= SQ_COMP; 3353 ACCEPT_UNLOCK(); 3354 sorwakeup(head); 3355 wakeup_one(&head->so_timeo); 3356 } else { 3357 ACCEPT_UNLOCK(); 3358 soupcall_set(so, SO_RCV, 3359 head->so_accf->so_accept_filter->accf_callback, 3360 head->so_accf->so_accept_filter_arg); 3361 so->so_options &= ~SO_ACCEPTFILTER; 3362 ret = head->so_accf->so_accept_filter->accf_callback(so, 3363 head->so_accf->so_accept_filter_arg, M_DONTWAIT); 3364 if (ret == SU_ISCONNECTED) 3365 soupcall_clear(so, SO_RCV); 3366 SOCK_UNLOCK(so); 3367 if (ret == SU_ISCONNECTED) 3368 goto restart; 3369 } 3370 return; 3371 } 3372 SOCK_UNLOCK(so); 3373 ACCEPT_UNLOCK(); 3374 wakeup(&so->so_timeo); 3375 sorwakeup(so); 3376 sowwakeup(so); 3377 } 3378 3379 void 3380 soisdisconnecting(struct socket *so) 3381 { 3382 3383 /* 3384 * Note: This code assumes that SOCK_LOCK(so) and 3385 * SOCKBUF_LOCK(&so->so_rcv) are the same. 3386 */ 3387 SOCKBUF_LOCK(&so->so_rcv); 3388 so->so_state &= ~SS_ISCONNECTING; 3389 so->so_state |= SS_ISDISCONNECTING; 3390 so->so_rcv.sb_state |= SBS_CANTRCVMORE; 3391 sorwakeup_locked(so); 3392 SOCKBUF_LOCK(&so->so_snd); 3393 so->so_snd.sb_state |= SBS_CANTSENDMORE; 3394 sowwakeup_locked(so); 3395 wakeup(&so->so_timeo); 3396 } 3397 3398 void 3399 soisdisconnected(struct socket *so) 3400 { 3401 3402 /* 3403 * Note: This code assumes that SOCK_LOCK(so) and 3404 * SOCKBUF_LOCK(&so->so_rcv) are the same. 3405 */ 3406 SOCKBUF_LOCK(&so->so_rcv); 3407 so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING); 3408 so->so_state |= SS_ISDISCONNECTED; 3409 so->so_rcv.sb_state |= SBS_CANTRCVMORE; 3410 sorwakeup_locked(so); 3411 SOCKBUF_LOCK(&so->so_snd); 3412 so->so_snd.sb_state |= SBS_CANTSENDMORE; 3413 sbdrop_locked(&so->so_snd, so->so_snd.sb_cc); 3414 sowwakeup_locked(so); 3415 wakeup(&so->so_timeo); 3416 } 3417 3418 /* 3419 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME. 3420 */ 3421 struct sockaddr * 3422 sodupsockaddr(const struct sockaddr *sa, int mflags) 3423 { 3424 struct sockaddr *sa2; 3425 3426 sa2 = malloc(sa->sa_len, M_SONAME, mflags); 3427 if (sa2) 3428 bcopy(sa, sa2, sa->sa_len); 3429 return sa2; 3430 } 3431 3432 /* 3433 * Register per-socket buffer upcalls. 
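 *
 * The caller must hold the corresponding socket buffer lock; a usage sketch
 * (foo_rcv_upcall is a hypothetical callback matching the prototype below):
 *
 *	SOCKBUF_LOCK(&so->so_rcv);
 *	soupcall_set(so, SO_RCV, foo_rcv_upcall, arg);
 *	SOCKBUF_UNLOCK(&so->so_rcv);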
3434 */ 3435 void 3436 soupcall_set(struct socket *so, int which, 3437 int (*func)(struct socket *, void *, int), void *arg) 3438 { 3439 struct sockbuf *sb; 3440 3441 switch (which) { 3442 case SO_RCV: 3443 sb = &so->so_rcv; 3444 break; 3445 case SO_SND: 3446 sb = &so->so_snd; 3447 break; 3448 default: 3449 panic("soupcall_set: bad which"); 3450 } 3451 SOCKBUF_LOCK_ASSERT(sb); 3452 #if 0 3453 /* XXX: accf_http actually wants to do this on purpose. */ 3454 KASSERT(sb->sb_upcall == NULL, ("soupcall_set: overwriting upcall")); 3455 #endif 3456 sb->sb_upcall = func; 3457 sb->sb_upcallarg = arg; 3458 sb->sb_flags |= SB_UPCALL; 3459 } 3460 3461 void 3462 soupcall_clear(struct socket *so, int which) 3463 { 3464 struct sockbuf *sb; 3465 3466 switch (which) { 3467 case SO_RCV: 3468 sb = &so->so_rcv; 3469 break; 3470 case SO_SND: 3471 sb = &so->so_snd; 3472 break; 3473 default: 3474 panic("soupcall_clear: bad which"); 3475 } 3476 SOCKBUF_LOCK_ASSERT(sb); 3477 KASSERT(sb->sb_upcall != NULL, ("soupcall_clear: no upcall to clear")); 3478 sb->sb_upcall = NULL; 3479 sb->sb_upcallarg = NULL; 3480 sb->sb_flags &= ~SB_UPCALL; 3481 } 3482 3483 /* 3484 * Create an external-format (``xsocket'') structure using the information in 3485 * the kernel-format socket structure pointed to by so. This is done to 3486 * reduce the spew of irrelevant information over this interface, to isolate 3487 * user code from changes in the kernel structure, and potentially to provide 3488 * information-hiding if we decide that some of this information should be 3489 * hidden from users. 3490 */ 3491 void 3492 sotoxsocket(struct socket *so, struct xsocket *xso) 3493 { 3494 3495 xso->xso_len = sizeof *xso; 3496 xso->xso_so = so; 3497 xso->so_type = so->so_type; 3498 xso->so_options = so->so_options; 3499 xso->so_linger = so->so_linger; 3500 xso->so_state = so->so_state; 3501 xso->so_pcb = so->so_pcb; 3502 xso->xso_protocol = so->so_proto->pr_protocol; 3503 xso->xso_family = so->so_proto->pr_domain->dom_family; 3504 xso->so_qlen = so->so_qlen; 3505 xso->so_incqlen = so->so_incqlen; 3506 xso->so_qlimit = so->so_qlimit; 3507 xso->so_timeo = so->so_timeo; 3508 xso->so_error = so->so_error; 3509 xso->so_pgid = so->so_sigio ? 
so->so_sigio->sio_pgid : 0; 3510 xso->so_oobmark = so->so_oobmark; 3511 sbtoxsockbuf(&so->so_snd, &xso->so_snd); 3512 sbtoxsockbuf(&so->so_rcv, &xso->so_rcv); 3513 xso->so_uid = so->so_cred->cr_uid; 3514 } 3515 3516 3517 /* 3518 * Socket accessor functions to provide external consumers with 3519 * a safe interface to socket state 3520 * 3521 */ 3522 3523 void 3524 so_listeners_apply_all(struct socket *so, void (*func)(struct socket *, void *), void *arg) 3525 { 3526 3527 TAILQ_FOREACH(so, &so->so_comp, so_list) 3528 func(so, arg); 3529 } 3530 3531 struct sockbuf * 3532 so_sockbuf_rcv(struct socket *so) 3533 { 3534 3535 return (&so->so_rcv); 3536 } 3537 3538 struct sockbuf * 3539 so_sockbuf_snd(struct socket *so) 3540 { 3541 3542 return (&so->so_snd); 3543 } 3544 3545 int 3546 so_state_get(const struct socket *so) 3547 { 3548 3549 return (so->so_state); 3550 } 3551 3552 void 3553 so_state_set(struct socket *so, int val) 3554 { 3555 3556 so->so_state = val; 3557 } 3558 3559 int 3560 so_options_get(const struct socket *so) 3561 { 3562 3563 return (so->so_options); 3564 } 3565 3566 void 3567 so_options_set(struct socket *so, int val) 3568 { 3569 3570 so->so_options = val; 3571 } 3572 3573 int 3574 so_error_get(const struct socket *so) 3575 { 3576 3577 return (so->so_error); 3578 } 3579 3580 void 3581 so_error_set(struct socket *so, int val) 3582 { 3583 3584 so->so_error = val; 3585 } 3586 3587 int 3588 so_linger_get(const struct socket *so) 3589 { 3590 3591 return (so->so_linger); 3592 } 3593 3594 void 3595 so_linger_set(struct socket *so, int val) 3596 { 3597 3598 so->so_linger = val; 3599 } 3600 3601 struct protosw * 3602 so_protosw_get(const struct socket *so) 3603 { 3604 3605 return (so->so_proto); 3606 } 3607 3608 void 3609 so_protosw_set(struct socket *so, struct protosw *val) 3610 { 3611 3612 so->so_proto = val; 3613 } 3614 3615 void 3616 so_sorwakeup(struct socket *so) 3617 { 3618 3619 sorwakeup(so); 3620 } 3621 3622 void 3623 so_sowwakeup(struct socket *so) 3624 { 3625 3626 sowwakeup(so); 3627 } 3628 3629 void 3630 so_sorwakeup_locked(struct socket *so) 3631 { 3632 3633 sorwakeup_locked(so); 3634 } 3635 3636 void 3637 so_sowwakeup_locked(struct socket *so) 3638 { 3639 3640 sowwakeup_locked(so); 3641 } 3642 3643 void 3644 so_lock(struct socket *so) 3645 { 3646 SOCK_LOCK(so); 3647 } 3648 3649 void 3650 so_unlock(struct socket *so) 3651 { 3652 SOCK_UNLOCK(so); 3653 } 3654
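/*
 * Example (sketch): an external consumer, such as an offload driver, would
 * use the accessor functions above rather than dereferencing struct socket
 * directly, e.g.:
 *
 *	if (so_state_get(so) & SS_ISCONNECTED)
 *		so_sorwakeup(so);
 */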