/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2004 The FreeBSD Foundation
 * Copyright (c) 2004-2008 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 */

/*
 * Comments on the socket life cycle:
 *
 * soalloc() sets up socket layer state for a socket, called only by
 * socreate() and sonewconn().  Socket layer private.
 *
 * sodealloc() tears down socket layer state for a socket, called only by
 * sofree() and sonewconn().  Socket layer private.
 *
 * pru_attach() associates protocol layer state with an allocated socket;
 * called only once, may fail, aborting socket allocation.  This is called
 * from socreate() and sonewconn().  Socket layer private.
 *
 * pru_detach() disassociates protocol layer state from an attached socket,
 * and will be called exactly once for sockets in which pru_attach() has
 * been successfully called.  If pru_attach() returned an error,
 * pru_detach() will not be called.  Socket layer private.
 *
 * pru_abort() and pru_close() notify the protocol layer that the last
 * consumer of a socket is starting to tear down the socket, and that the
 * protocol should terminate the connection.  Historically, pru_abort() also
 * detached protocol state from the socket state, but this is no longer the
 * case.
 *
 * socreate() creates a socket and attaches protocol state.  This is a public
 * interface that may be used by socket layer consumers to create new
 * sockets.
 *
 * sonewconn() creates a socket and attaches protocol state.  This is a
 * public interface that may be used by protocols to create new sockets when
 * a new connection is received and will be available for accept() on a
 * listen socket.
 *
 * soclose() destroys a socket after possibly waiting for it to disconnect.
 * This is a public interface that socket consumers should use to close and
 * release a socket when done with it.
 *
 * soabort() destroys a socket without waiting for it to disconnect (used
 * only for incoming connections that are already partially or fully
 * connected).  This is used internally by the socket layer when clearing
 * listen socket queues (due to overflow or close on the listen socket), but
 * is also a public interface protocols may use to abort connections in
 * their incomplete listen queues should they no longer be required.  Sockets
 * placed in completed connection listen queues should not be aborted for
 * reasons described in the comment above the soclose() implementation.  This
 * is not a general purpose close routine, and except in the specific
 * circumstances described here, should not be used.
 *
 * sofree() will free a socket and its protocol state if all references on
 * the socket have been released, and is the public interface to attempt to
 * free a socket when a reference is removed.  This is a socket layer private
 * interface.
 *
 * NOTE: In addition to socreate() and soclose(), which provide a single
 * socket reference to the consumer to be managed as required, there are two
 * calls to explicitly manage socket references, soref(), and sorele().
 * Currently, these are generally required only when transitioning a socket
 * from a listen queue to a file descriptor, in order to prevent garbage
 * collection of the socket at an untimely moment.  For a number of reasons,
 * these interfaces are not preferred, and should be avoided.
 *
 * NOTE: With regard to VNETs the general rule is that callers do not set
 * curvnet.  Exceptions to this rule include soabort(), sodisconnect(),
 * sofree() (and with that sorele(), sotryfree()), as well as sonewconn()
 * and sorflush(), which are usually called from a pre-set VNET context.
 * sopoll() currently does not need a VNET context to be set.
 */
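
/*
 * Illustrative sketch (not compiled): the life cycle above as seen by an
 * in-kernel consumer.  Everything other than the socket-layer calls
 * themselves is a placeholder.
 */
#if 0
static int
example_socket_lifecycle(struct thread *td)
{
	struct socket *so;
	int error;

	/* socreate() attaches protocol state and returns a referenced socket. */
	error = socreate(AF_INET, &so, SOCK_STREAM, 0, td->td_ucred, td);
	if (error != 0)
		return (error);

	/* ... sobind()/soconnect()/sosend()/soreceive() as required ... */

	/*
	 * soclose() drops the consumer's reference; sofree() tears down
	 * protocol and socket-layer state once the last reference is gone.
	 */
	return (soclose(so));
}
#endif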

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_zero.h"
#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <net/route.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>

#include <net/vnet.h>

#include <security/mac/mac_framework.h>

#include <vm/uma.h>

#ifdef COMPAT_FREEBSD32
#include <sys/mount.h>
#include <sys/sysent.h>
#include <compat/freebsd32/freebsd32.h>
#endif

static int	soreceive_rcvoob(struct socket *so, struct uio *uio,
		    int flags);

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static struct filterops solisten_filtops = {
	.f_isfd = 1,
	.f_detach = filt_sordetach,
	.f_event = filt_solisten,
};
static struct filterops soread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_sordetach,
	.f_event = filt_soread,
};
static struct filterops sowrite_filtops = {
	.f_isfd = 1,
	.f_detach = filt_sowdetach,
	.f_event = filt_sowrite,
};

uma_zone_t socket_zone;
so_gen_t so_gencnt;	/* generation count for sockets */

int	maxsockets;

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

#define	VNET_SO_ASSERT(so)						\
	VNET_ASSERT(curvnet != NULL,					\
	    ("%s:%d curvnet is NULL, so=%p", __func__, __LINE__, (so)));

static int somaxconn = SOMAXCONN;
static int sysctl_somaxconn(SYSCTL_HANDLER_ARGS);
/* XXX: we don't have SYSCTL_USHORT */
SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLTYPE_UINT | CTLFLAG_RW,
    0, sizeof(int), sysctl_somaxconn, "I", "Maximum pending socket connection "
    "queue size");
static int numopensockets;
SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
    &numopensockets, 0, "Number of open sockets");
#ifdef ZERO_COPY_SOCKETS
/* These aren't static because they're used in other files. */
int so_zero_copy_send = 1;
int so_zero_copy_receive = 1;
SYSCTL_NODE(_kern_ipc, OID_AUTO, zero_copy, CTLFLAG_RD, 0,
    "Zero copy controls");
SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, receive, CTLFLAG_RW,
    &so_zero_copy_receive, 0, "Enable zero copy receive");
SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, send, CTLFLAG_RW,
    &so_zero_copy_send, 0, "Enable zero copy send");
#endif /* ZERO_COPY_SOCKETS */

/*
 * accept_mtx locks down per-socket fields relating to accept queues.  See
 * socketvar.h for an annotation of the protected fields of struct socket.
 */
struct mtx accept_mtx;
MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);

/*
 * so_global_mtx protects so_gencnt, numopensockets, and the per-socket
 * so_gencnt field.
 */
static struct mtx so_global_mtx;
MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_glabel", MTX_DEF);

/*
 * General IPC sysctl name space, used by sockets and a variety of other IPC
 * types.
 */
SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");

/*
 * Sysctl to get and set the maximum global sockets limit.  Notify protocols
 * of the change so that they can update their dependent limits as required.
 */
static int
sysctl_maxsockets(SYSCTL_HANDLER_ARGS)
{
	int error, newmaxsockets;

	newmaxsockets = maxsockets;
	error = sysctl_handle_int(oidp, &newmaxsockets, 0, req);
	if (error == 0 && req->newptr) {
		if (newmaxsockets > maxsockets) {
			maxsockets = newmaxsockets;
			if (maxsockets > ((maxfiles / 4) * 3)) {
				maxfiles = (maxsockets * 5) / 4;
				maxfilesperproc = (maxfiles * 9) / 10;
			}
			EVENTHANDLER_INVOKE(maxsockets_change);
		} else
			error = EINVAL;
	}
	return (error);
}

SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets, CTLTYPE_INT|CTLFLAG_RW,
    &maxsockets, 0, sysctl_maxsockets, "IU",
    "Maximum number of sockets available");

/*
 * Initialise maxsockets.  This SYSINIT must be run after
 * tunable_mbinit().
 */
static void
init_maxsockets(void *ignored)
{

	TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
	maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters));
}
SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);
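
/*
 * Illustrative sketch (not compiled): protocols that derive limits from
 * maxsockets typically subscribe to the maxsockets_change eventhandler
 * invoked above, roughly as follows; the zone and function names here are
 * placeholders.
 */
#if 0
static void
example_maxsockets_change(void *arg __unused)
{

	/* Re-derive any limits that were computed from maxsockets. */
	uma_zone_set_max(example_pcb_zone, maxsockets);
}

static void
example_proto_init(void)
{

	EVENTHANDLER_REGISTER(maxsockets_change, example_maxsockets_change,
	    NULL, EVENTHANDLER_PRI_ANY);
}
#endif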

/*
 * Socket operation routines.  These routines are called by the routines in
 * sys_socket.c or from a system process, and implement the semantics of
 * socket operations by switching out to the protocol specific routines.
 */

/*
 * Get a socket structure from our zone, and initialize it.  Note that it
 * would probably be better to allocate socket and PCB at the same time, but
 * I'm not convinced that all the protocols can be easily modified to do
 * this.
 *
 * soalloc() returns a socket with a ref count of 0.
 */
static struct socket *
soalloc(struct vnet *vnet)
{
	struct socket *so;

	so = uma_zalloc(socket_zone, M_NOWAIT | M_ZERO);
	if (so == NULL)
		return (NULL);
#ifdef MAC
	if (mac_socket_init(so, M_NOWAIT) != 0) {
		uma_zfree(socket_zone, so);
		return (NULL);
	}
#endif
	SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
	SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
	sx_init(&so->so_snd.sb_sx, "so_snd_sx");
	sx_init(&so->so_rcv.sb_sx, "so_rcv_sx");
	TAILQ_INIT(&so->so_aiojobq);
	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	++numopensockets;
#ifdef VIMAGE
	VNET_ASSERT(vnet != NULL, ("%s:%d vnet is NULL, so=%p",
	    __func__, __LINE__, so));
	vnet->vnet_sockcnt++;
	so->so_vnet = vnet;
#endif
	mtx_unlock(&so_global_mtx);
	return (so);
}

/*
 * Free the storage associated with a socket at the socket layer, tear down
 * locks, labels, etc.  All protocol state is assumed already to have been
 * torn down (and possibly never set up) by the caller.
 */
static void
sodealloc(struct socket *so)
{

	KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
	KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL"));

	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	--numopensockets;	/* Could be below, but faster here. */
#ifdef VIMAGE
	VNET_ASSERT(so->so_vnet != NULL, ("%s:%d so_vnet is NULL, so=%p",
	    __func__, __LINE__, so));
	so->so_vnet->vnet_sockcnt--;
#endif
	mtx_unlock(&so_global_mtx);
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
	/* remove accept filter if one is present. */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
#endif
#ifdef MAC
	mac_socket_destroy(so);
#endif
	crfree(so->so_cred);
	sx_destroy(&so->so_snd.sb_sx);
	sx_destroy(&so->so_rcv.sb_sx);
	SOCKBUF_LOCK_DESTROY(&so->so_snd);
	SOCKBUF_LOCK_DESTROY(&so->so_rcv);
	uma_zfree(socket_zone, so);
}

/*
 * socreate returns a socket with a ref count of 1.  The socket should be
 * closed with soclose().
 */
int
socreate(int dom, struct socket **aso, int type, int proto,
    struct ucred *cred, struct thread *td)
{
	struct protosw *prp;
	struct socket *so;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL || prp->pr_usrreqs->pru_attach == NULL ||
	    prp->pr_usrreqs->pru_attach == pru_attach_notsupp)
		return (EPROTONOSUPPORT);

	if (prison_check_af(cred, prp->pr_domain->dom_family) != 0)
		return (EPROTONOSUPPORT);

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(CRED_TO_VNET(cred));
	if (so == NULL)
		return (ENOBUFS);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(cred);
	if ((prp->pr_domain->dom_family == PF_INET) ||
	    (prp->pr_domain->dom_family == PF_ROUTE))
		so->so_fibnum = td->td_proc->p_fibnum;
	else
		so->so_fibnum = 0;
	so->so_proto = prp;
#ifdef MAC
	mac_socket_create(cred, so);
#endif
	knlist_init_mtx(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv));
	knlist_init_mtx(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd));
	so->so_count = 1;
	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
	 */
	CURVNET_SET(so->so_vnet);
	error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
	CURVNET_RESTORE();
	if (error) {
		KASSERT(so->so_count == 1, ("socreate: so_count %d",
		    so->so_count));
		so->so_count = 0;
		sodealloc(so);
		return (error);
	}
	*aso = so;
	return (0);
}

#ifdef REGRESSION
static int regression_sonewconn_earlytest = 1;
SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW,
    &regression_sonewconn_earlytest, 0, "Perform early sonewconn limit test");
#endif

/*
 * When an attempt at a new connection is noted on a socket which accepts
 * connections, sonewconn is called.  If the connection is possible (subject
 * to space constraints, etc.) then we allocate a new structure, properly
 * linked into the data structure of the original socket, and return this.
 * Connstatus may be 0, or SO_ISCONFIRMING, or SO_ISCONNECTED.
 *
 * Note: the ref count on the socket is 0 on return.
 */
struct socket *
sonewconn(struct socket *head, int connstatus)
{
	struct socket *so;
	int over;

	ACCEPT_LOCK();
	over = (head->so_qlen > 3 * head->so_qlimit / 2);
	ACCEPT_UNLOCK();
#ifdef REGRESSION
	if (regression_sonewconn_earlytest && over)
#else
	if (over)
#endif
		return (NULL);
	VNET_ASSERT(head->so_vnet != NULL, ("%s:%d so_vnet is NULL, head=%p",
	    __func__, __LINE__, head));
	so = soalloc(head->so_vnet);
	if (so == NULL)
		return (NULL);
	if ((head->so_options & SO_ACCEPTFILTER) != 0)
		connstatus = 0;
	so->so_head = head;
	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;
	so->so_state = head->so_state | SS_NOFDREF;
	so->so_fibnum = head->so_fibnum;
	so->so_proto = head->so_proto;
	so->so_cred = crhold(head->so_cred);
#ifdef MAC
	mac_socket_newconn(head, so);
#endif
	knlist_init_mtx(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv));
	knlist_init_mtx(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd));
	VNET_SO_ASSERT(head);
	if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat) ||
	    (*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
		sodealloc(so);
		return (NULL);
	}
	so->so_rcv.sb_lowat = head->so_rcv.sb_lowat;
	so->so_snd.sb_lowat = head->so_snd.sb_lowat;
	so->so_rcv.sb_timeo = head->so_rcv.sb_timeo;
	so->so_snd.sb_timeo = head->so_snd.sb_timeo;
	so->so_rcv.sb_flags |= head->so_rcv.sb_flags & SB_AUTOSIZE;
	so->so_snd.sb_flags |= head->so_snd.sb_flags & SB_AUTOSIZE;
	so->so_state |= connstatus;
	ACCEPT_LOCK();
	if (connstatus) {
		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
		so->so_qstate |= SQ_COMP;
		head->so_qlen++;
	} else {
		/*
		 * Keep removing sockets from the head until there's room for
		 * us to insert on the tail.  In pre-locking revisions, this
		 * was a simple if(), but as we could be racing with other
		 * threads and soabort() requires dropping locks, we must
		 * loop waiting for the condition to be true.
		 */
		while (head->so_incqlen > head->so_qlimit) {
			struct socket *sp;
			sp = TAILQ_FIRST(&head->so_incomp);
			TAILQ_REMOVE(&head->so_incomp, sp, so_list);
			head->so_incqlen--;
			sp->so_qstate &= ~SQ_INCOMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			soabort(sp);
			ACCEPT_LOCK();
		}
		TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
		so->so_qstate |= SQ_INCOMP;
		head->so_incqlen++;
	}
	ACCEPT_UNLOCK();
	if (connstatus) {
		sorwakeup(head);
		wakeup_one(&head->so_timeo);
	}
	return (so);
}
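
/*
 * Illustrative sketch (not compiled): a protocol's input path typically
 * calls sonewconn() when a connection request arrives on a listening
 * socket, and completes the hand-off later with soisconnected().  The
 * surrounding code is a placeholder.
 */
#if 0
	struct socket *so;

	/* Queue a new, unreferenced socket on the listen socket's queues. */
	so = sonewconn(head, 0);
	if (so == NULL) {
		/* Listen queue overflow; drop the connection request. */
		return;
	}
	/* ... set up protocol state, then soisconnected(so) when ready ... */
#endif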

int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	CURVNET_SET(so->so_vnet);
	error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td);
	CURVNET_RESTORE();
	return (error);
}

/*
 * solisten() transitions a socket from a non-listening state to a listening
 * state, but can also be used to update the listen queue depth on an
 * existing listen socket.  The protocol will call back into the sockets
 * layer using solisten_proto_check() and solisten_proto() to check and set
 * socket-layer listen state.  Callbacks are used so that the protocol can
 * acquire both protocol and socket layer locks in whatever order is required
 * by the protocol.
 *
 * Protocol implementors are advised to hold the socket lock across the
 * socket-layer test and set to avoid races at the socket layer.
 */
int
solisten(struct socket *so, int backlog, struct thread *td)
{
	int error;

	CURVNET_SET(so->so_vnet);
	error = (*so->so_proto->pr_usrreqs->pru_listen)(so, backlog, td);
	CURVNET_RESTORE();
	return (error);
}

int
solisten_proto_check(struct socket *so)
{

	SOCK_LOCK_ASSERT(so);

	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
	    SS_ISDISCONNECTING))
		return (EINVAL);
	return (0);
}

void
solisten_proto(struct socket *so, int backlog)
{

	SOCK_LOCK_ASSERT(so);

	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	so->so_options |= SO_ACCEPTCONN;
}
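
/*
 * Illustrative sketch (not compiled): a protocol's pru_listen method
 * typically takes its own lock and then uses the two helpers above under
 * the socket lock, as described in the comment before solisten().  The
 * protocol lock macros here are placeholders.
 */
#if 0
static int
example_pru_listen(struct socket *so, int backlog, struct thread *td)
{
	int error;

	EXAMPLE_PROTO_LOCK();
	SOCK_LOCK(so);
	error = solisten_proto_check(so);
	if (error == 0)
		solisten_proto(so, backlog);
	SOCK_UNLOCK(so);
	EXAMPLE_PROTO_UNLOCK();
	return (error);
}
#endif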

/*
 * Evaluate the reference count and named references on a socket; if no
 * references remain, free it.  This should be called whenever a reference is
 * released, such as in sorele(), but also when named reference flags are
 * cleared in socket or protocol code.
 *
 * sofree() will free the socket if:
 *
 * - There are no outstanding file descriptor references or related consumers
 *   (so_count == 0).
 *
 * - The socket has been closed by user space, if ever open (SS_NOFDREF).
 *
 * - The protocol does not have an outstanding strong reference on the socket
 *   (SS_PROTOREF).
 *
 * - The socket is not in a completed connection queue, so a process has been
 *   notified that it is present.  If it is removed, the user process may
 *   block in accept() despite select() saying the socket was ready.
 */
void
sofree(struct socket *so)
{
	struct protosw *pr = so->so_proto;
	struct socket *head;

	ACCEPT_LOCK_ASSERT();
	SOCK_LOCK_ASSERT(so);

	if ((so->so_state & SS_NOFDREF) == 0 || so->so_count != 0 ||
	    (so->so_state & SS_PROTOREF) || (so->so_qstate & SQ_COMP)) {
		SOCK_UNLOCK(so);
		ACCEPT_UNLOCK();
		return;
	}

	head = so->so_head;
	if (head != NULL) {
		KASSERT((so->so_qstate & SQ_COMP) != 0 ||
		    (so->so_qstate & SQ_INCOMP) != 0,
		    ("sofree: so_head != NULL, but neither SQ_COMP nor "
		    "SQ_INCOMP"));
		KASSERT((so->so_qstate & SQ_COMP) == 0 ||
		    (so->so_qstate & SQ_INCOMP) == 0,
		    ("sofree: so->so_qstate is SQ_COMP and also SQ_INCOMP"));
		TAILQ_REMOVE(&head->so_incomp, so, so_list);
		head->so_incqlen--;
		so->so_qstate &= ~SQ_INCOMP;
		so->so_head = NULL;
	}
	KASSERT((so->so_qstate & SQ_COMP) == 0 &&
	    (so->so_qstate & SQ_INCOMP) == 0,
	    ("sofree: so_head == NULL, but still SQ_COMP(%d) or SQ_INCOMP(%d)",
	    so->so_qstate & SQ_COMP, so->so_qstate & SQ_INCOMP));
	if (so->so_options & SO_ACCEPTCONN) {
		KASSERT((TAILQ_EMPTY(&so->so_comp)), ("sofree: so_comp populated"));
		KASSERT((TAILQ_EMPTY(&so->so_incomp)), ("sofree: so_incomp populated"));
	}
	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();

	VNET_SO_ASSERT(so);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
		(*pr->pr_domain->dom_dispose)(so->so_rcv.sb_mb);
	if (pr->pr_usrreqs->pru_detach != NULL)
		(*pr->pr_usrreqs->pru_detach)(so);

	/*
	 * From this point on, we assume that no other references to this
	 * socket exist anywhere else in the stack.  Therefore, no locks need
	 * to be acquired or held.
	 *
	 * We used to do a lot of socket buffer and socket locking here, as
	 * well as invoke sorflush() and perform wakeups.  The direct call to
	 * dom_dispose() and sbrelease_internal() are an inlining of what was
	 * necessary from sorflush().
	 *
	 * Notice that the socket buffer and kqueue state are torn down
	 * before calling pru_detach.  This means that protocols should not
	 * assume they can perform socket wakeups, etc, in their detach code.
	 */
	sbdestroy(&so->so_snd, so);
	sbdestroy(&so->so_rcv, so);
	seldrain(&so->so_snd.sb_sel);
	seldrain(&so->so_rcv.sb_sel);
	knlist_destroy(&so->so_rcv.sb_sel.si_note);
	knlist_destroy(&so->so_snd.sb_sel.si_note);
	sodealloc(so);
}

/*
 * Close a socket on last file table reference removal.  Initiate disconnect
 * if connected.  Free socket when disconnect complete.
 *
 * This function will sorele() the socket.  Note that soclose() may be called
 * prior to the ref count reaching zero.  The actual socket structure will
 * not be freed until the ref count reaches zero.
 */
int
soclose(struct socket *so)
{
	int error = 0;

	KASSERT(!(so->so_state & SS_NOFDREF), ("soclose: SS_NOFDREF on enter"));

	CURVNET_SET(so->so_vnet);
	funsetown(&so->so_sigio);
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error) {
				if (error == ENOTCONN)
					error = 0;
				goto drop;
			}
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo,
				    PSOCK | PCATCH, "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}

drop:
	if (so->so_proto->pr_usrreqs->pru_close != NULL)
		(*so->so_proto->pr_usrreqs->pru_close)(so);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;
		ACCEPT_LOCK();
		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			so->so_incqlen--;
			sp->so_qstate &= ~SQ_INCOMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			soabort(sp);
			ACCEPT_LOCK();
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			sp->so_qstate &= ~SQ_COMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			soabort(sp);
			ACCEPT_LOCK();
		}
		ACCEPT_UNLOCK();
	}
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF"));
	so->so_state |= SS_NOFDREF;
	sorele(so);
	CURVNET_RESTORE();
	return (error);
}

/*
 * soabort() is used to abruptly tear down a connection, such as when a
 * resource limit is reached (listen queue depth exceeded), or if a listen
 * socket is closed while there are sockets waiting to be accepted.
 *
 * This interface is tricky, because it is called on an unreferenced socket,
 * and must be called only by a thread that has actually removed the socket
 * from the listen queue it was on, or races with other threads are risked.
 *
 * This interface will call into the protocol code, so must not be called
 * with any socket locks held.  Protocols do call it while holding their own
 * recursible protocol mutexes, but this is something that should be subject
 * to review in the future.
758 */ 759 void 760 soabort(struct socket *so) 761 { 762 763 /* 764 * In as much as is possible, assert that no references to this 765 * socket are held. This is not quite the same as asserting that the 766 * current thread is responsible for arranging for no references, but 767 * is as close as we can get for now. 768 */ 769 KASSERT(so->so_count == 0, ("soabort: so_count")); 770 KASSERT((so->so_state & SS_PROTOREF) == 0, ("soabort: SS_PROTOREF")); 771 KASSERT(so->so_state & SS_NOFDREF, ("soabort: !SS_NOFDREF")); 772 KASSERT((so->so_state & SQ_COMP) == 0, ("soabort: SQ_COMP")); 773 KASSERT((so->so_state & SQ_INCOMP) == 0, ("soabort: SQ_INCOMP")); 774 VNET_SO_ASSERT(so); 775 776 if (so->so_proto->pr_usrreqs->pru_abort != NULL) 777 (*so->so_proto->pr_usrreqs->pru_abort)(so); 778 ACCEPT_LOCK(); 779 SOCK_LOCK(so); 780 sofree(so); 781 } 782 783 int 784 soaccept(struct socket *so, struct sockaddr **nam) 785 { 786 int error; 787 788 SOCK_LOCK(so); 789 KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF")); 790 so->so_state &= ~SS_NOFDREF; 791 SOCK_UNLOCK(so); 792 793 CURVNET_SET(so->so_vnet); 794 error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam); 795 CURVNET_RESTORE(); 796 return (error); 797 } 798 799 int 800 soconnect(struct socket *so, struct sockaddr *nam, struct thread *td) 801 { 802 int error; 803 804 if (so->so_options & SO_ACCEPTCONN) 805 return (EOPNOTSUPP); 806 807 CURVNET_SET(so->so_vnet); 808 /* 809 * If protocol is connection-based, can only connect once. 810 * Otherwise, if connected, try to disconnect first. This allows 811 * user to disconnect by connecting to, e.g., a null address. 812 */ 813 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) && 814 ((so->so_proto->pr_flags & PR_CONNREQUIRED) || 815 (error = sodisconnect(so)))) { 816 error = EISCONN; 817 } else { 818 /* 819 * Prevent accumulated error from previous connection from 820 * biting us. 821 */ 822 so->so_error = 0; 823 error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, td); 824 } 825 CURVNET_RESTORE(); 826 827 return (error); 828 } 829 830 int 831 soconnect2(struct socket *so1, struct socket *so2) 832 { 833 int error; 834 835 CURVNET_SET(so1->so_vnet); 836 error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2); 837 CURVNET_RESTORE(); 838 return (error); 839 } 840 841 int 842 sodisconnect(struct socket *so) 843 { 844 int error; 845 846 if ((so->so_state & SS_ISCONNECTED) == 0) 847 return (ENOTCONN); 848 if (so->so_state & SS_ISDISCONNECTING) 849 return (EALREADY); 850 VNET_SO_ASSERT(so); 851 error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so); 852 return (error); 853 } 854 855 #ifdef ZERO_COPY_SOCKETS 856 struct so_zerocopy_stats{ 857 int size_ok; 858 int align_ok; 859 int found_ifp; 860 }; 861 struct so_zerocopy_stats so_zerocp_stats = {0,0,0}; 862 #include <netinet/in.h> 863 #include <net/route.h> 864 #include <netinet/in_pcb.h> 865 #include <vm/vm.h> 866 #include <vm/vm_page.h> 867 #include <vm/vm_object.h> 868 869 /* 870 * sosend_copyin() is only used if zero copy sockets are enabled. Otherwise 871 * sosend_dgram() and sosend_generic() use m_uiotombuf(). 872 * 873 * sosend_copyin() accepts a uio and prepares an mbuf chain holding part or 874 * all of the data referenced by the uio. If desired, it uses zero-copy. 875 * *space will be updated to reflect data copied in. 876 * 877 * NB: If atomic I/O is requested, the caller must already have checked that 878 * space can hold resid bytes. 
 *
 * NB: In the event of an error, the caller may need to free the partial
 * chain pointed to by *mpp.  The contents of both *uio and *space may be
 * modified even in the case of an error.
 */
static int
sosend_copyin(struct uio *uio, struct mbuf **retmp, int atomic, long *space,
    int flags)
{
	struct mbuf *m, **mp, *top;
	long len, resid;
	int error;
#ifdef ZERO_COPY_SOCKETS
	int cow_send;
#endif

	*retmp = top = NULL;
	mp = &top;
	len = 0;
	resid = uio->uio_resid;
	error = 0;
	do {
#ifdef ZERO_COPY_SOCKETS
		cow_send = 0;
#endif /* ZERO_COPY_SOCKETS */
		if (resid >= MINCLSIZE) {
#ifdef ZERO_COPY_SOCKETS
			if (top == NULL) {
				m = m_gethdr(M_WAITOK, MT_DATA);
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;
			} else
				m = m_get(M_WAITOK, MT_DATA);
			if (so_zero_copy_send &&
			    resid>=PAGE_SIZE &&
			    *space>=PAGE_SIZE &&
			    uio->uio_iov->iov_len>=PAGE_SIZE) {
				so_zerocp_stats.size_ok++;
				so_zerocp_stats.align_ok++;
				cow_send = socow_setup(m, uio);
				len = cow_send;
			}
			if (!cow_send) {
				m_clget(m, M_WAITOK);
				len = min(min(MCLBYTES, resid), *space);
			}
#else /* ZERO_COPY_SOCKETS */
			if (top == NULL) {
				m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;
			} else
				m = m_getcl(M_WAIT, MT_DATA, 0);
			len = min(min(MCLBYTES, resid), *space);
#endif /* ZERO_COPY_SOCKETS */
		} else {
			if (top == NULL) {
				m = m_gethdr(M_WAIT, MT_DATA);
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;

				len = min(min(MHLEN, resid), *space);
				/*
				 * For datagram protocols, leave room
				 * for protocol headers in first mbuf.
				 */
				if (atomic && m && len < MHLEN)
					MH_ALIGN(m, len);
			} else {
				m = m_get(M_WAIT, MT_DATA);
				len = min(min(MLEN, resid), *space);
			}
		}
		if (m == NULL) {
			error = ENOBUFS;
			goto out;
		}

		*space -= len;
#ifdef ZERO_COPY_SOCKETS
		if (cow_send)
			error = 0;
		else
#endif /* ZERO_COPY_SOCKETS */
			error = uiomove(mtod(m, void *), (int)len, uio);
		resid = uio->uio_resid;
		m->m_len = len;
		*mp = m;
		top->m_pkthdr.len += len;
		if (error)
			goto out;
		mp = &m->m_next;
		if (resid <= 0) {
			if (flags & MSG_EOR)
				top->m_flags |= M_EOR;
			break;
		}
	} while (*space > 0 && atomic);
out:
	*retmp = top;
	return (error);
}
#endif /*ZERO_COPY_SOCKETS*/

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)

int
sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	long space, resid;
	int clen = 0, error, dontroute;
#ifdef ZERO_COPY_SOCKETS
	int atomic = sosendallatonce(so) || top;
#endif

	KASSERT(so->so_type == SOCK_DGRAM, ("sodgram_send: !SOCK_DGRAM"));
	KASSERT(so->so_proto->pr_flags & PR_ATOMIC,
	    ("sodgram_send: !PR_ATOMIC"));

	if (uio != NULL)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.  However, space must be
	 * signed, as it might be less than 0 if we over-committed, and we
	 * must use a signed comparison of space and resid.  On the other
	 * hand, a negative resid causes us to loop sending 0-length
	 * segments to the protocol.
1009 */ 1010 if (resid < 0) { 1011 error = EINVAL; 1012 goto out; 1013 } 1014 1015 dontroute = 1016 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0; 1017 if (td != NULL) 1018 td->td_ru.ru_msgsnd++; 1019 if (control != NULL) 1020 clen = control->m_len; 1021 1022 SOCKBUF_LOCK(&so->so_snd); 1023 if (so->so_snd.sb_state & SBS_CANTSENDMORE) { 1024 SOCKBUF_UNLOCK(&so->so_snd); 1025 error = EPIPE; 1026 goto out; 1027 } 1028 if (so->so_error) { 1029 error = so->so_error; 1030 so->so_error = 0; 1031 SOCKBUF_UNLOCK(&so->so_snd); 1032 goto out; 1033 } 1034 if ((so->so_state & SS_ISCONNECTED) == 0) { 1035 /* 1036 * `sendto' and `sendmsg' is allowed on a connection-based 1037 * socket if it supports implied connect. Return ENOTCONN if 1038 * not connected and no address is supplied. 1039 */ 1040 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) && 1041 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) { 1042 if ((so->so_state & SS_ISCONFIRMING) == 0 && 1043 !(resid == 0 && clen != 0)) { 1044 SOCKBUF_UNLOCK(&so->so_snd); 1045 error = ENOTCONN; 1046 goto out; 1047 } 1048 } else if (addr == NULL) { 1049 if (so->so_proto->pr_flags & PR_CONNREQUIRED) 1050 error = ENOTCONN; 1051 else 1052 error = EDESTADDRREQ; 1053 SOCKBUF_UNLOCK(&so->so_snd); 1054 goto out; 1055 } 1056 } 1057 1058 /* 1059 * Do we need MSG_OOB support in SOCK_DGRAM? Signs here may be a 1060 * problem and need fixing. 1061 */ 1062 space = sbspace(&so->so_snd); 1063 if (flags & MSG_OOB) 1064 space += 1024; 1065 space -= clen; 1066 SOCKBUF_UNLOCK(&so->so_snd); 1067 if (resid > space) { 1068 error = EMSGSIZE; 1069 goto out; 1070 } 1071 if (uio == NULL) { 1072 resid = 0; 1073 if (flags & MSG_EOR) 1074 top->m_flags |= M_EOR; 1075 } else { 1076 #ifdef ZERO_COPY_SOCKETS 1077 error = sosend_copyin(uio, &top, atomic, &space, flags); 1078 if (error) 1079 goto out; 1080 #else 1081 /* 1082 * Copy the data from userland into a mbuf chain. 1083 * If no data is to be copied in, a single empty mbuf 1084 * is returned. 1085 */ 1086 top = m_uiotombuf(uio, M_WAITOK, space, max_hdr, 1087 (M_PKTHDR | ((flags & MSG_EOR) ? M_EOR : 0))); 1088 if (top == NULL) { 1089 error = EFAULT; /* only possible error */ 1090 goto out; 1091 } 1092 space -= resid - uio->uio_resid; 1093 #endif 1094 resid = uio->uio_resid; 1095 } 1096 KASSERT(resid == 0, ("sosend_dgram: resid != 0")); 1097 /* 1098 * XXXRW: Frobbing SO_DONTROUTE here is even worse without sblock 1099 * than with. 1100 */ 1101 if (dontroute) { 1102 SOCK_LOCK(so); 1103 so->so_options |= SO_DONTROUTE; 1104 SOCK_UNLOCK(so); 1105 } 1106 /* 1107 * XXX all the SBS_CANTSENDMORE checks previously done could be out 1108 * of date. We could have recieved a reset packet in an interrupt or 1109 * maybe we slept while doing page faults in uiomove() etc. We could 1110 * probably recheck again inside the locking protection here, but 1111 * there are probably other places that this also happens. We must 1112 * rethink this. 1113 */ 1114 VNET_SO_ASSERT(so); 1115 error = (*so->so_proto->pr_usrreqs->pru_send)(so, 1116 (flags & MSG_OOB) ? PRUS_OOB : 1117 /* 1118 * If the user set MSG_EOF, the protocol understands this flag and 1119 * nothing left to send then use PRU_SEND_EOF instead of PRU_SEND. 1120 */ 1121 ((flags & MSG_EOF) && 1122 (so->so_proto->pr_flags & PR_IMPLOPCL) && 1123 (resid <= 0)) ? 1124 PRUS_EOF : 1125 /* If there is more to send set PRUS_MORETOCOME */ 1126 (resid > 0 && space > 0) ? 
PRUS_MORETOCOME : 0, 1127 top, addr, control, td); 1128 if (dontroute) { 1129 SOCK_LOCK(so); 1130 so->so_options &= ~SO_DONTROUTE; 1131 SOCK_UNLOCK(so); 1132 } 1133 clen = 0; 1134 control = NULL; 1135 top = NULL; 1136 out: 1137 if (top != NULL) 1138 m_freem(top); 1139 if (control != NULL) 1140 m_freem(control); 1141 return (error); 1142 } 1143 1144 /* 1145 * Send on a socket. If send must go all at once and message is larger than 1146 * send buffering, then hard error. Lock against other senders. If must go 1147 * all at once and not enough room now, then inform user that this would 1148 * block and do nothing. Otherwise, if nonblocking, send as much as 1149 * possible. The data to be sent is described by "uio" if nonzero, otherwise 1150 * by the mbuf chain "top" (which must be null if uio is not). Data provided 1151 * in mbuf chain must be small enough to send all at once. 1152 * 1153 * Returns nonzero on error, timeout or signal; callers must check for short 1154 * counts if EINTR/ERESTART are returned. Data and control buffers are freed 1155 * on return. 1156 */ 1157 int 1158 sosend_generic(struct socket *so, struct sockaddr *addr, struct uio *uio, 1159 struct mbuf *top, struct mbuf *control, int flags, struct thread *td) 1160 { 1161 long space, resid; 1162 int clen = 0, error, dontroute; 1163 int atomic = sosendallatonce(so) || top; 1164 1165 if (uio != NULL) 1166 resid = uio->uio_resid; 1167 else 1168 resid = top->m_pkthdr.len; 1169 /* 1170 * In theory resid should be unsigned. However, space must be 1171 * signed, as it might be less than 0 if we over-committed, and we 1172 * must use a signed comparison of space and resid. On the other 1173 * hand, a negative resid causes us to loop sending 0-length 1174 * segments to the protocol. 1175 * 1176 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM 1177 * type sockets since that's an error. 1178 */ 1179 if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) { 1180 error = EINVAL; 1181 goto out; 1182 } 1183 1184 dontroute = 1185 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 && 1186 (so->so_proto->pr_flags & PR_ATOMIC); 1187 if (td != NULL) 1188 td->td_ru.ru_msgsnd++; 1189 if (control != NULL) 1190 clen = control->m_len; 1191 1192 error = sblock(&so->so_snd, SBLOCKWAIT(flags)); 1193 if (error) 1194 goto out; 1195 1196 restart: 1197 do { 1198 SOCKBUF_LOCK(&so->so_snd); 1199 if (so->so_snd.sb_state & SBS_CANTSENDMORE) { 1200 SOCKBUF_UNLOCK(&so->so_snd); 1201 error = EPIPE; 1202 goto release; 1203 } 1204 if (so->so_error) { 1205 error = so->so_error; 1206 so->so_error = 0; 1207 SOCKBUF_UNLOCK(&so->so_snd); 1208 goto release; 1209 } 1210 if ((so->so_state & SS_ISCONNECTED) == 0) { 1211 /* 1212 * `sendto' and `sendmsg' is allowed on a connection- 1213 * based socket if it supports implied connect. 1214 * Return ENOTCONN if not connected and no address is 1215 * supplied. 
1216 */ 1217 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) && 1218 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) { 1219 if ((so->so_state & SS_ISCONFIRMING) == 0 && 1220 !(resid == 0 && clen != 0)) { 1221 SOCKBUF_UNLOCK(&so->so_snd); 1222 error = ENOTCONN; 1223 goto release; 1224 } 1225 } else if (addr == NULL) { 1226 SOCKBUF_UNLOCK(&so->so_snd); 1227 if (so->so_proto->pr_flags & PR_CONNREQUIRED) 1228 error = ENOTCONN; 1229 else 1230 error = EDESTADDRREQ; 1231 goto release; 1232 } 1233 } 1234 space = sbspace(&so->so_snd); 1235 if (flags & MSG_OOB) 1236 space += 1024; 1237 if ((atomic && resid > so->so_snd.sb_hiwat) || 1238 clen > so->so_snd.sb_hiwat) { 1239 SOCKBUF_UNLOCK(&so->so_snd); 1240 error = EMSGSIZE; 1241 goto release; 1242 } 1243 if (space < resid + clen && 1244 (atomic || space < so->so_snd.sb_lowat || space < clen)) { 1245 if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO)) { 1246 SOCKBUF_UNLOCK(&so->so_snd); 1247 error = EWOULDBLOCK; 1248 goto release; 1249 } 1250 error = sbwait(&so->so_snd); 1251 SOCKBUF_UNLOCK(&so->so_snd); 1252 if (error) 1253 goto release; 1254 goto restart; 1255 } 1256 SOCKBUF_UNLOCK(&so->so_snd); 1257 space -= clen; 1258 do { 1259 if (uio == NULL) { 1260 resid = 0; 1261 if (flags & MSG_EOR) 1262 top->m_flags |= M_EOR; 1263 } else { 1264 #ifdef ZERO_COPY_SOCKETS 1265 error = sosend_copyin(uio, &top, atomic, 1266 &space, flags); 1267 if (error != 0) 1268 goto release; 1269 #else 1270 /* 1271 * Copy the data from userland into a mbuf 1272 * chain. If no data is to be copied in, 1273 * a single empty mbuf is returned. 1274 */ 1275 top = m_uiotombuf(uio, M_WAITOK, space, 1276 (atomic ? max_hdr : 0), 1277 (atomic ? M_PKTHDR : 0) | 1278 ((flags & MSG_EOR) ? M_EOR : 0)); 1279 if (top == NULL) { 1280 error = EFAULT; /* only possible error */ 1281 goto release; 1282 } 1283 space -= resid - uio->uio_resid; 1284 #endif 1285 resid = uio->uio_resid; 1286 } 1287 if (dontroute) { 1288 SOCK_LOCK(so); 1289 so->so_options |= SO_DONTROUTE; 1290 SOCK_UNLOCK(so); 1291 } 1292 /* 1293 * XXX all the SBS_CANTSENDMORE checks previously 1294 * done could be out of date. We could have recieved 1295 * a reset packet in an interrupt or maybe we slept 1296 * while doing page faults in uiomove() etc. We 1297 * could probably recheck again inside the locking 1298 * protection here, but there are probably other 1299 * places that this also happens. We must rethink 1300 * this. 1301 */ 1302 VNET_SO_ASSERT(so); 1303 error = (*so->so_proto->pr_usrreqs->pru_send)(so, 1304 (flags & MSG_OOB) ? PRUS_OOB : 1305 /* 1306 * If the user set MSG_EOF, the protocol understands 1307 * this flag and nothing left to send then use 1308 * PRU_SEND_EOF instead of PRU_SEND. 1309 */ 1310 ((flags & MSG_EOF) && 1311 (so->so_proto->pr_flags & PR_IMPLOPCL) && 1312 (resid <= 0)) ? 1313 PRUS_EOF : 1314 /* If there is more to send set PRUS_MORETOCOME. */ 1315 (resid > 0 && space > 0) ? 
PRUS_MORETOCOME : 0, 1316 top, addr, control, td); 1317 if (dontroute) { 1318 SOCK_LOCK(so); 1319 so->so_options &= ~SO_DONTROUTE; 1320 SOCK_UNLOCK(so); 1321 } 1322 clen = 0; 1323 control = NULL; 1324 top = NULL; 1325 if (error) 1326 goto release; 1327 } while (resid && space > 0); 1328 } while (resid); 1329 1330 release: 1331 sbunlock(&so->so_snd); 1332 out: 1333 if (top != NULL) 1334 m_freem(top); 1335 if (control != NULL) 1336 m_freem(control); 1337 return (error); 1338 } 1339 1340 int 1341 sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, 1342 struct mbuf *top, struct mbuf *control, int flags, struct thread *td) 1343 { 1344 int error; 1345 1346 CURVNET_SET(so->so_vnet); 1347 error = so->so_proto->pr_usrreqs->pru_sosend(so, addr, uio, top, 1348 control, flags, td); 1349 CURVNET_RESTORE(); 1350 return (error); 1351 } 1352 1353 /* 1354 * The part of soreceive() that implements reading non-inline out-of-band 1355 * data from a socket. For more complete comments, see soreceive(), from 1356 * which this code originated. 1357 * 1358 * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is 1359 * unable to return an mbuf chain to the caller. 1360 */ 1361 static int 1362 soreceive_rcvoob(struct socket *so, struct uio *uio, int flags) 1363 { 1364 struct protosw *pr = so->so_proto; 1365 struct mbuf *m; 1366 int error; 1367 1368 KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0")); 1369 VNET_SO_ASSERT(so); 1370 1371 m = m_get(M_WAIT, MT_DATA); 1372 error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK); 1373 if (error) 1374 goto bad; 1375 do { 1376 #ifdef ZERO_COPY_SOCKETS 1377 if (so_zero_copy_receive) { 1378 int disposable; 1379 1380 if ((m->m_flags & M_EXT) 1381 && (m->m_ext.ext_type == EXT_DISPOSABLE)) 1382 disposable = 1; 1383 else 1384 disposable = 0; 1385 1386 error = uiomoveco(mtod(m, void *), 1387 min(uio->uio_resid, m->m_len), 1388 uio, disposable); 1389 } else 1390 #endif /* ZERO_COPY_SOCKETS */ 1391 error = uiomove(mtod(m, void *), 1392 (int) min(uio->uio_resid, m->m_len), uio); 1393 m = m_free(m); 1394 } while (uio->uio_resid && error == 0 && m); 1395 bad: 1396 if (m != NULL) 1397 m_freem(m); 1398 return (error); 1399 } 1400 1401 /* 1402 * Following replacement or removal of the first mbuf on the first mbuf chain 1403 * of a socket buffer, push necessary state changes back into the socket 1404 * buffer so that other consumers see the values consistently. 'nextrecord' 1405 * is the callers locally stored value of the original value of 1406 * sb->sb_mb->m_nextpkt which must be restored when the lead mbuf changes. 1407 * NOTE: 'nextrecord' may be NULL. 1408 */ 1409 static __inline void 1410 sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord) 1411 { 1412 1413 SOCKBUF_LOCK_ASSERT(sb); 1414 /* 1415 * First, update for the new value of nextrecord. If necessary, make 1416 * it the first record. 1417 */ 1418 if (sb->sb_mb != NULL) 1419 sb->sb_mb->m_nextpkt = nextrecord; 1420 else 1421 sb->sb_mb = nextrecord; 1422 1423 /* 1424 * Now update any dependent socket buffer fields to reflect the new 1425 * state. This is an expanded inline of SB_EMPTY_FIXUP(), with the 1426 * addition of a second clause that takes care of the case where 1427 * sb_mb has been updated, but remains the last record. 
1428 */ 1429 if (sb->sb_mb == NULL) { 1430 sb->sb_mbtail = NULL; 1431 sb->sb_lastrecord = NULL; 1432 } else if (sb->sb_mb->m_nextpkt == NULL) 1433 sb->sb_lastrecord = sb->sb_mb; 1434 } 1435 1436 1437 /* 1438 * Implement receive operations on a socket. We depend on the way that 1439 * records are added to the sockbuf by sbappend. In particular, each record 1440 * (mbufs linked through m_next) must begin with an address if the protocol 1441 * so specifies, followed by an optional mbuf or mbufs containing ancillary 1442 * data, and then zero or more mbufs of data. In order to allow parallelism 1443 * between network receive and copying to user space, as well as avoid 1444 * sleeping with a mutex held, we release the socket buffer mutex during the 1445 * user space copy. Although the sockbuf is locked, new data may still be 1446 * appended, and thus we must maintain consistency of the sockbuf during that 1447 * time. 1448 * 1449 * The caller may receive the data as a single mbuf chain by supplying an 1450 * mbuf **mp0 for use in returning the chain. The uio is then used only for 1451 * the count in uio_resid. 1452 */ 1453 int 1454 soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio, 1455 struct mbuf **mp0, struct mbuf **controlp, int *flagsp) 1456 { 1457 struct mbuf *m, **mp; 1458 int flags, len, error, offset; 1459 struct protosw *pr = so->so_proto; 1460 struct mbuf *nextrecord; 1461 int moff, type = 0; 1462 int orig_resid = uio->uio_resid; 1463 1464 mp = mp0; 1465 if (psa != NULL) 1466 *psa = NULL; 1467 if (controlp != NULL) 1468 *controlp = NULL; 1469 if (flagsp != NULL) 1470 flags = *flagsp &~ MSG_EOR; 1471 else 1472 flags = 0; 1473 if (flags & MSG_OOB) 1474 return (soreceive_rcvoob(so, uio, flags)); 1475 if (mp != NULL) 1476 *mp = NULL; 1477 if ((pr->pr_flags & PR_WANTRCVD) && (so->so_state & SS_ISCONFIRMING) 1478 && uio->uio_resid) { 1479 VNET_SO_ASSERT(so); 1480 (*pr->pr_usrreqs->pru_rcvd)(so, 0); 1481 } 1482 1483 error = sblock(&so->so_rcv, SBLOCKWAIT(flags)); 1484 if (error) 1485 return (error); 1486 1487 restart: 1488 SOCKBUF_LOCK(&so->so_rcv); 1489 m = so->so_rcv.sb_mb; 1490 /* 1491 * If we have less data than requested, block awaiting more (subject 1492 * to any timeout) if: 1493 * 1. the current count is less than the low water mark, or 1494 * 2. MSG_WAITALL is set, and it is possible to do the entire 1495 * receive operation at once if we block (resid <= hiwat). 1496 * 3. MSG_DONTWAIT is not set 1497 * If MSG_WAITALL is set but resid is larger than the receive buffer, 1498 * we have to do the receive in sections, and thus risk returning a 1499 * short count if a timeout or signal occurs after we start. 
1500 */ 1501 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 && 1502 so->so_rcv.sb_cc < uio->uio_resid) && 1503 (so->so_rcv.sb_cc < so->so_rcv.sb_lowat || 1504 ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) && 1505 m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) { 1506 KASSERT(m != NULL || !so->so_rcv.sb_cc, 1507 ("receive: m == %p so->so_rcv.sb_cc == %u", 1508 m, so->so_rcv.sb_cc)); 1509 if (so->so_error) { 1510 if (m != NULL) 1511 goto dontblock; 1512 error = so->so_error; 1513 if ((flags & MSG_PEEK) == 0) 1514 so->so_error = 0; 1515 SOCKBUF_UNLOCK(&so->so_rcv); 1516 goto release; 1517 } 1518 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1519 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 1520 if (m == NULL) { 1521 SOCKBUF_UNLOCK(&so->so_rcv); 1522 goto release; 1523 } else 1524 goto dontblock; 1525 } 1526 for (; m != NULL; m = m->m_next) 1527 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) { 1528 m = so->so_rcv.sb_mb; 1529 goto dontblock; 1530 } 1531 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 1532 (so->so_proto->pr_flags & PR_CONNREQUIRED)) { 1533 SOCKBUF_UNLOCK(&so->so_rcv); 1534 error = ENOTCONN; 1535 goto release; 1536 } 1537 if (uio->uio_resid == 0) { 1538 SOCKBUF_UNLOCK(&so->so_rcv); 1539 goto release; 1540 } 1541 if ((so->so_state & SS_NBIO) || 1542 (flags & (MSG_DONTWAIT|MSG_NBIO))) { 1543 SOCKBUF_UNLOCK(&so->so_rcv); 1544 error = EWOULDBLOCK; 1545 goto release; 1546 } 1547 SBLASTRECORDCHK(&so->so_rcv); 1548 SBLASTMBUFCHK(&so->so_rcv); 1549 error = sbwait(&so->so_rcv); 1550 SOCKBUF_UNLOCK(&so->so_rcv); 1551 if (error) 1552 goto release; 1553 goto restart; 1554 } 1555 dontblock: 1556 /* 1557 * From this point onward, we maintain 'nextrecord' as a cache of the 1558 * pointer to the next record in the socket buffer. We must keep the 1559 * various socket buffer pointers and local stack versions of the 1560 * pointers in sync, pushing out modifications before dropping the 1561 * socket buffer mutex, and re-reading them when picking it up. 1562 * 1563 * Otherwise, we will race with the network stack appending new data 1564 * or records onto the socket buffer by using inconsistent/stale 1565 * versions of the field, possibly resulting in socket buffer 1566 * corruption. 1567 * 1568 * By holding the high-level sblock(), we prevent simultaneous 1569 * readers from pulling off the front of the socket buffer. 1570 */ 1571 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1572 if (uio->uio_td) 1573 uio->uio_td->td_ru.ru_msgrcv++; 1574 KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb")); 1575 SBLASTRECORDCHK(&so->so_rcv); 1576 SBLASTMBUFCHK(&so->so_rcv); 1577 nextrecord = m->m_nextpkt; 1578 if (pr->pr_flags & PR_ADDR) { 1579 KASSERT(m->m_type == MT_SONAME, 1580 ("m->m_type == %d", m->m_type)); 1581 orig_resid = 0; 1582 if (psa != NULL) 1583 *psa = sodupsockaddr(mtod(m, struct sockaddr *), 1584 M_NOWAIT); 1585 if (flags & MSG_PEEK) { 1586 m = m->m_next; 1587 } else { 1588 sbfree(&so->so_rcv, m); 1589 so->so_rcv.sb_mb = m_free(m); 1590 m = so->so_rcv.sb_mb; 1591 sockbuf_pushsync(&so->so_rcv, nextrecord); 1592 } 1593 } 1594 1595 /* 1596 * Process one or more MT_CONTROL mbufs present before any data mbufs 1597 * in the first mbuf chain on the socket buffer. If MSG_PEEK, we 1598 * just copy the data; if !MSG_PEEK, we call into the protocol to 1599 * perform externalization (or freeing if controlp == NULL). 
1600 */ 1601 if (m != NULL && m->m_type == MT_CONTROL) { 1602 struct mbuf *cm = NULL, *cmn; 1603 struct mbuf **cme = &cm; 1604 1605 do { 1606 if (flags & MSG_PEEK) { 1607 if (controlp != NULL) { 1608 *controlp = m_copy(m, 0, m->m_len); 1609 controlp = &(*controlp)->m_next; 1610 } 1611 m = m->m_next; 1612 } else { 1613 sbfree(&so->so_rcv, m); 1614 so->so_rcv.sb_mb = m->m_next; 1615 m->m_next = NULL; 1616 *cme = m; 1617 cme = &(*cme)->m_next; 1618 m = so->so_rcv.sb_mb; 1619 } 1620 } while (m != NULL && m->m_type == MT_CONTROL); 1621 if ((flags & MSG_PEEK) == 0) 1622 sockbuf_pushsync(&so->so_rcv, nextrecord); 1623 while (cm != NULL) { 1624 cmn = cm->m_next; 1625 cm->m_next = NULL; 1626 if (pr->pr_domain->dom_externalize != NULL) { 1627 SOCKBUF_UNLOCK(&so->so_rcv); 1628 VNET_SO_ASSERT(so); 1629 error = (*pr->pr_domain->dom_externalize) 1630 (cm, controlp); 1631 SOCKBUF_LOCK(&so->so_rcv); 1632 } else if (controlp != NULL) 1633 *controlp = cm; 1634 else 1635 m_freem(cm); 1636 if (controlp != NULL) { 1637 orig_resid = 0; 1638 while (*controlp != NULL) 1639 controlp = &(*controlp)->m_next; 1640 } 1641 cm = cmn; 1642 } 1643 if (m != NULL) 1644 nextrecord = so->so_rcv.sb_mb->m_nextpkt; 1645 else 1646 nextrecord = so->so_rcv.sb_mb; 1647 orig_resid = 0; 1648 } 1649 if (m != NULL) { 1650 if ((flags & MSG_PEEK) == 0) { 1651 KASSERT(m->m_nextpkt == nextrecord, 1652 ("soreceive: post-control, nextrecord !sync")); 1653 if (nextrecord == NULL) { 1654 KASSERT(so->so_rcv.sb_mb == m, 1655 ("soreceive: post-control, sb_mb!=m")); 1656 KASSERT(so->so_rcv.sb_lastrecord == m, 1657 ("soreceive: post-control, lastrecord!=m")); 1658 } 1659 } 1660 type = m->m_type; 1661 if (type == MT_OOBDATA) 1662 flags |= MSG_OOB; 1663 } else { 1664 if ((flags & MSG_PEEK) == 0) { 1665 KASSERT(so->so_rcv.sb_mb == nextrecord, 1666 ("soreceive: sb_mb != nextrecord")); 1667 if (so->so_rcv.sb_mb == NULL) { 1668 KASSERT(so->so_rcv.sb_lastrecord == NULL, 1669 ("soreceive: sb_lastercord != NULL")); 1670 } 1671 } 1672 } 1673 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1674 SBLASTRECORDCHK(&so->so_rcv); 1675 SBLASTMBUFCHK(&so->so_rcv); 1676 1677 /* 1678 * Now continue to read any data mbufs off of the head of the socket 1679 * buffer until the read request is satisfied. Note that 'type' is 1680 * used to store the type of any mbuf reads that have happened so far 1681 * such that soreceive() can stop reading if the type changes, which 1682 * causes soreceive() to return only one of regular data and inline 1683 * out-of-band data in a single socket receive operation. 1684 */ 1685 moff = 0; 1686 offset = 0; 1687 while (m != NULL && uio->uio_resid > 0 && error == 0) { 1688 /* 1689 * If the type of mbuf has changed since the last mbuf 1690 * examined ('type'), end the receive operation. 1691 */ 1692 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1693 if (m->m_type == MT_OOBDATA) { 1694 if (type != MT_OOBDATA) 1695 break; 1696 } else if (type == MT_OOBDATA) 1697 break; 1698 else 1699 KASSERT(m->m_type == MT_DATA, 1700 ("m->m_type == %d", m->m_type)); 1701 so->so_rcv.sb_state &= ~SBS_RCVATMARK; 1702 len = uio->uio_resid; 1703 if (so->so_oobmark && len > so->so_oobmark - offset) 1704 len = so->so_oobmark - offset; 1705 if (len > m->m_len - moff) 1706 len = m->m_len - moff; 1707 /* 1708 * If mp is set, just pass back the mbufs. Otherwise copy 1709 * them out via the uio, then free. 
Sockbuf must be 1710 * consistent here (points to current mbuf, it points to next 1711 * record) when we drop priority; we must note any additions 1712 * to the sockbuf when we block interrupts again. 1713 */ 1714 if (mp == NULL) { 1715 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1716 SBLASTRECORDCHK(&so->so_rcv); 1717 SBLASTMBUFCHK(&so->so_rcv); 1718 SOCKBUF_UNLOCK(&so->so_rcv); 1719 #ifdef ZERO_COPY_SOCKETS 1720 if (so_zero_copy_receive) { 1721 int disposable; 1722 1723 if ((m->m_flags & M_EXT) 1724 && (m->m_ext.ext_type == EXT_DISPOSABLE)) 1725 disposable = 1; 1726 else 1727 disposable = 0; 1728 1729 error = uiomoveco(mtod(m, char *) + moff, 1730 (int)len, uio, 1731 disposable); 1732 } else 1733 #endif /* ZERO_COPY_SOCKETS */ 1734 error = uiomove(mtod(m, char *) + moff, (int)len, uio); 1735 SOCKBUF_LOCK(&so->so_rcv); 1736 if (error) { 1737 /* 1738 * The MT_SONAME mbuf has already been removed 1739 * from the record, so it is necessary to 1740 * remove the data mbufs, if any, to preserve 1741 * the invariant in the case of PR_ADDR that 1742 * requires MT_SONAME mbufs at the head of 1743 * each record. 1744 */ 1745 if (m && pr->pr_flags & PR_ATOMIC && 1746 ((flags & MSG_PEEK) == 0)) 1747 (void)sbdroprecord_locked(&so->so_rcv); 1748 SOCKBUF_UNLOCK(&so->so_rcv); 1749 goto release; 1750 } 1751 } else 1752 uio->uio_resid -= len; 1753 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1754 if (len == m->m_len - moff) { 1755 if (m->m_flags & M_EOR) 1756 flags |= MSG_EOR; 1757 if (flags & MSG_PEEK) { 1758 m = m->m_next; 1759 moff = 0; 1760 } else { 1761 nextrecord = m->m_nextpkt; 1762 sbfree(&so->so_rcv, m); 1763 if (mp != NULL) { 1764 *mp = m; 1765 mp = &m->m_next; 1766 so->so_rcv.sb_mb = m = m->m_next; 1767 *mp = NULL; 1768 } else { 1769 so->so_rcv.sb_mb = m_free(m); 1770 m = so->so_rcv.sb_mb; 1771 } 1772 sockbuf_pushsync(&so->so_rcv, nextrecord); 1773 SBLASTRECORDCHK(&so->so_rcv); 1774 SBLASTMBUFCHK(&so->so_rcv); 1775 } 1776 } else { 1777 if (flags & MSG_PEEK) 1778 moff += len; 1779 else { 1780 if (mp != NULL) { 1781 int copy_flag; 1782 1783 if (flags & MSG_DONTWAIT) 1784 copy_flag = M_DONTWAIT; 1785 else 1786 copy_flag = M_WAIT; 1787 if (copy_flag == M_WAIT) 1788 SOCKBUF_UNLOCK(&so->so_rcv); 1789 *mp = m_copym(m, 0, len, copy_flag); 1790 if (copy_flag == M_WAIT) 1791 SOCKBUF_LOCK(&so->so_rcv); 1792 if (*mp == NULL) { 1793 /* 1794 * m_copym() couldn't 1795 * allocate an mbuf. Adjust 1796 * uio_resid back (it was 1797 * adjusted down by len 1798 * bytes, which we didn't end 1799 * up "copying" over). 1800 */ 1801 uio->uio_resid += len; 1802 break; 1803 } 1804 } 1805 m->m_data += len; 1806 m->m_len -= len; 1807 so->so_rcv.sb_cc -= len; 1808 } 1809 } 1810 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1811 if (so->so_oobmark) { 1812 if ((flags & MSG_PEEK) == 0) { 1813 so->so_oobmark -= len; 1814 if (so->so_oobmark == 0) { 1815 so->so_rcv.sb_state |= SBS_RCVATMARK; 1816 break; 1817 } 1818 } else { 1819 offset += len; 1820 if (offset == so->so_oobmark) 1821 break; 1822 } 1823 } 1824 if (flags & MSG_EOR) 1825 break; 1826 /* 1827 * If the MSG_WAITALL flag is set (for non-atomic socket), we 1828 * must not quit until "uio->uio_resid == 0" or an error 1829 * termination. If a signal/timeout occurs, return with a 1830 * short count but without error. Keep sockbuf locked 1831 * against other readers. 
1832 */ 1833 while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 && 1834 !sosendallatonce(so) && nextrecord == NULL) { 1835 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1836 if (so->so_error || so->so_rcv.sb_state & SBS_CANTRCVMORE) 1837 break; 1838 /* 1839 * Notify the protocol that some data has been 1840 * drained before blocking. 1841 */ 1842 if (pr->pr_flags & PR_WANTRCVD) { 1843 SOCKBUF_UNLOCK(&so->so_rcv); 1844 VNET_SO_ASSERT(so); 1845 (*pr->pr_usrreqs->pru_rcvd)(so, flags); 1846 SOCKBUF_LOCK(&so->so_rcv); 1847 } 1848 SBLASTRECORDCHK(&so->so_rcv); 1849 SBLASTMBUFCHK(&so->so_rcv); 1850 /* 1851 * We could receive some data while was notifying 1852 * the protocol. Skip blocking in this case. 1853 */ 1854 if (so->so_rcv.sb_mb == NULL) { 1855 error = sbwait(&so->so_rcv); 1856 if (error) { 1857 SOCKBUF_UNLOCK(&so->so_rcv); 1858 goto release; 1859 } 1860 } 1861 m = so->so_rcv.sb_mb; 1862 if (m != NULL) 1863 nextrecord = m->m_nextpkt; 1864 } 1865 } 1866 1867 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1868 if (m != NULL && pr->pr_flags & PR_ATOMIC) { 1869 flags |= MSG_TRUNC; 1870 if ((flags & MSG_PEEK) == 0) 1871 (void) sbdroprecord_locked(&so->so_rcv); 1872 } 1873 if ((flags & MSG_PEEK) == 0) { 1874 if (m == NULL) { 1875 /* 1876 * First part is an inline SB_EMPTY_FIXUP(). Second 1877 * part makes sure sb_lastrecord is up-to-date if 1878 * there is still data in the socket buffer. 1879 */ 1880 so->so_rcv.sb_mb = nextrecord; 1881 if (so->so_rcv.sb_mb == NULL) { 1882 so->so_rcv.sb_mbtail = NULL; 1883 so->so_rcv.sb_lastrecord = NULL; 1884 } else if (nextrecord->m_nextpkt == NULL) 1885 so->so_rcv.sb_lastrecord = nextrecord; 1886 } 1887 SBLASTRECORDCHK(&so->so_rcv); 1888 SBLASTMBUFCHK(&so->so_rcv); 1889 /* 1890 * If soreceive() is being done from the socket callback, 1891 * then don't need to generate ACK to peer to update window, 1892 * since ACK will be generated on return to TCP. 1893 */ 1894 if (!(flags & MSG_SOCALLBCK) && 1895 (pr->pr_flags & PR_WANTRCVD)) { 1896 SOCKBUF_UNLOCK(&so->so_rcv); 1897 VNET_SO_ASSERT(so); 1898 (*pr->pr_usrreqs->pru_rcvd)(so, flags); 1899 SOCKBUF_LOCK(&so->so_rcv); 1900 } 1901 } 1902 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1903 if (orig_resid == uio->uio_resid && orig_resid && 1904 (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) { 1905 SOCKBUF_UNLOCK(&so->so_rcv); 1906 goto restart; 1907 } 1908 SOCKBUF_UNLOCK(&so->so_rcv); 1909 1910 if (flagsp != NULL) 1911 *flagsp |= flags; 1912 release: 1913 sbunlock(&so->so_rcv); 1914 return (error); 1915 } 1916 1917 /* 1918 * Optimized version of soreceive() for stream (TCP) sockets. 1919 */ 1920 int 1921 soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio, 1922 struct mbuf **mp0, struct mbuf **controlp, int *flagsp) 1923 { 1924 int len = 0, error = 0, flags, oresid; 1925 struct sockbuf *sb; 1926 struct mbuf *m, *n = NULL; 1927 1928 /* We only do stream sockets. */ 1929 if (so->so_type != SOCK_STREAM) 1930 return (EINVAL); 1931 if (psa != NULL) 1932 *psa = NULL; 1933 if (controlp != NULL) 1934 return (EINVAL); 1935 if (flagsp != NULL) 1936 flags = *flagsp &~ MSG_EOR; 1937 else 1938 flags = 0; 1939 if (flags & MSG_OOB) 1940 return (soreceive_rcvoob(so, uio, flags)); 1941 if (mp0 != NULL) 1942 *mp0 = NULL; 1943 1944 sb = &so->so_rcv; 1945 1946 /* Prevent other readers from entering the socket. */ 1947 error = sblock(sb, SBLOCKWAIT(flags)); 1948 if (error) 1949 goto out; 1950 SOCKBUF_LOCK(sb); 1951 1952 /* Easy one, no space to copyout anything. 
*/ 1953 if (uio->uio_resid == 0) { 1954 error = EINVAL; 1955 goto out; 1956 } 1957 oresid = uio->uio_resid; 1958 1959 /* We will never ever get anything unless we are or were connected. */ 1960 if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) { 1961 error = ENOTCONN; 1962 goto out; 1963 } 1964 1965 restart: 1966 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1967 1968 /* Abort if socket has reported problems. */ 1969 if (so->so_error) { 1970 if (sb->sb_cc > 0) 1971 goto deliver; 1972 if (oresid > uio->uio_resid) 1973 goto out; 1974 error = so->so_error; 1975 if (!(flags & MSG_PEEK)) 1976 so->so_error = 0; 1977 goto out; 1978 } 1979 1980 /* Door is closed. Deliver what is left, if any. */ 1981 if (sb->sb_state & SBS_CANTRCVMORE) { 1982 if (sb->sb_cc > 0) 1983 goto deliver; 1984 else 1985 goto out; 1986 } 1987 1988 /* Socket buffer is empty and we shall not block. */ 1989 if (sb->sb_cc == 0 && 1990 ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) { 1991 error = EAGAIN; 1992 goto out; 1993 } 1994 1995 /* Socket buffer got some data that we shall deliver now. */ 1996 if (sb->sb_cc > 0 && !(flags & MSG_WAITALL) && 1997 ((sb->sb_flags & SS_NBIO) || 1998 (flags & (MSG_DONTWAIT|MSG_NBIO)) || 1999 sb->sb_cc >= sb->sb_lowat || 2000 sb->sb_cc >= uio->uio_resid || 2001 sb->sb_cc >= sb->sb_hiwat) ) { 2002 goto deliver; 2003 } 2004 2005 /* On MSG_WAITALL we must wait until all data or error arrives. */ 2006 if ((flags & MSG_WAITALL) && 2007 (sb->sb_cc >= uio->uio_resid || sb->sb_cc >= sb->sb_lowat)) 2008 goto deliver; 2009 2010 /* 2011 * Wait and block until (more) data comes in. 2012 * NB: Drops the sockbuf lock during wait. 2013 */ 2014 error = sbwait(sb); 2015 if (error) 2016 goto out; 2017 goto restart; 2018 2019 deliver: 2020 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 2021 KASSERT(sb->sb_cc > 0, ("%s: sockbuf empty", __func__)); 2022 KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__)); 2023 2024 /* Statistics. */ 2025 if (uio->uio_td) 2026 uio->uio_td->td_ru.ru_msgrcv++; 2027 2028 /* Fill uio until full or current end of socket buffer is reached. */ 2029 len = min(uio->uio_resid, sb->sb_cc); 2030 if (mp0 != NULL) { 2031 /* Dequeue as many mbufs as possible. */ 2032 if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) { 2033 for (*mp0 = m = sb->sb_mb; 2034 m != NULL && m->m_len <= len; 2035 m = m->m_next) { 2036 len -= m->m_len; 2037 uio->uio_resid -= m->m_len; 2038 sbfree(sb, m); 2039 n = m; 2040 } 2041 sb->sb_mb = m; 2042 if (sb->sb_mb == NULL) 2043 SB_EMPTY_FIXUP(sb); 2044 n->m_next = NULL; 2045 } 2046 /* Copy the remainder. */ 2047 if (len > 0) { 2048 KASSERT(sb->sb_mb != NULL, 2049 ("%s: len > 0 && sb->sb_mb empty", __func__)); 2050 2051 m = m_copym(sb->sb_mb, 0, len, M_DONTWAIT); 2052 if (m == NULL) 2053 len = 0; /* Don't flush data from sockbuf. */ 2054 else 2055 uio->uio_resid -= m->m_len; 2056 if (*mp0 != NULL) 2057 n->m_next = m; 2058 else 2059 *mp0 = m; 2060 if (*mp0 == NULL) { 2061 error = ENOBUFS; 2062 goto out; 2063 } 2064 } 2065 } else { 2066 /* NB: Must unlock socket buffer as uiomove may sleep. */ 2067 SOCKBUF_UNLOCK(sb); 2068 error = m_mbuftouio(uio, sb->sb_mb, len); 2069 SOCKBUF_LOCK(sb); 2070 if (error) 2071 goto out; 2072 } 2073 SBLASTRECORDCHK(sb); 2074 SBLASTMBUFCHK(sb); 2075 2076 /* 2077 * Remove the delivered data from the socket buffer unless we 2078 * were only peeking. 2079 */ 2080 if (!(flags & MSG_PEEK)) { 2081 if (len > 0) 2082 sbdrop_locked(sb, len); 2083 2084 /* Notify protocol that we drained some data. 
*/ 2085 if ((so->so_proto->pr_flags & PR_WANTRCVD) && 2086 (((flags & MSG_WAITALL) && uio->uio_resid > 0) || 2087 !(flags & MSG_SOCALLBCK))) { 2088 SOCKBUF_UNLOCK(sb); 2089 VNET_SO_ASSERT(so); 2090 (*so->so_proto->pr_usrreqs->pru_rcvd)(so, flags); 2091 SOCKBUF_LOCK(sb); 2092 } 2093 } 2094 2095 /* 2096 * For MSG_WAITALL we may have to loop again and wait for 2097 * more data to come in. 2098 */ 2099 if ((flags & MSG_WAITALL) && uio->uio_resid > 0) 2100 goto restart; 2101 out: 2102 SOCKBUF_LOCK_ASSERT(sb); 2103 SBLASTRECORDCHK(sb); 2104 SBLASTMBUFCHK(sb); 2105 SOCKBUF_UNLOCK(sb); 2106 sbunlock(sb); 2107 return (error); 2108 } 2109 2110 /* 2111 * Optimized version of soreceive() for simple datagram cases from userspace. 2112 * Unlike in the stream case, we're able to drop a datagram if copyout() 2113 * fails, and because we handle datagrams atomically, we don't need to use a 2114 * sleep lock to prevent I/O interlacing. 2115 */ 2116 int 2117 soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio, 2118 struct mbuf **mp0, struct mbuf **controlp, int *flagsp) 2119 { 2120 struct mbuf *m, *m2; 2121 int flags, len, error; 2122 struct protosw *pr = so->so_proto; 2123 struct mbuf *nextrecord; 2124 2125 if (psa != NULL) 2126 *psa = NULL; 2127 if (controlp != NULL) 2128 *controlp = NULL; 2129 if (flagsp != NULL) 2130 flags = *flagsp &~ MSG_EOR; 2131 else 2132 flags = 0; 2133 2134 /* 2135 * For any complicated cases, fall back to the full 2136 * soreceive_generic(). 2137 */ 2138 if (mp0 != NULL || (flags & MSG_PEEK) || (flags & MSG_OOB)) 2139 return (soreceive_generic(so, psa, uio, mp0, controlp, 2140 flagsp)); 2141 2142 /* 2143 * Enforce restrictions on use. 2144 */ 2145 KASSERT((pr->pr_flags & PR_WANTRCVD) == 0, 2146 ("soreceive_dgram: wantrcvd")); 2147 KASSERT(pr->pr_flags & PR_ATOMIC, ("soreceive_dgram: !atomic")); 2148 KASSERT((so->so_rcv.sb_state & SBS_RCVATMARK) == 0, 2149 ("soreceive_dgram: SBS_RCVATMARK")); 2150 KASSERT((so->so_proto->pr_flags & PR_CONNREQUIRED) == 0, 2151 ("soreceive_dgram: P_CONNREQUIRED")); 2152 2153 /* 2154 * Loop blocking while waiting for a datagram. 2155 */ 2156 SOCKBUF_LOCK(&so->so_rcv); 2157 while ((m = so->so_rcv.sb_mb) == NULL) { 2158 KASSERT(so->so_rcv.sb_cc == 0, 2159 ("soreceive_dgram: sb_mb NULL but sb_cc %u", 2160 so->so_rcv.sb_cc)); 2161 if (so->so_error) { 2162 error = so->so_error; 2163 so->so_error = 0; 2164 SOCKBUF_UNLOCK(&so->so_rcv); 2165 return (error); 2166 } 2167 if (so->so_rcv.sb_state & SBS_CANTRCVMORE || 2168 uio->uio_resid == 0) { 2169 SOCKBUF_UNLOCK(&so->so_rcv); 2170 return (0); 2171 } 2172 if ((so->so_state & SS_NBIO) || 2173 (flags & (MSG_DONTWAIT|MSG_NBIO))) { 2174 SOCKBUF_UNLOCK(&so->so_rcv); 2175 return (EWOULDBLOCK); 2176 } 2177 SBLASTRECORDCHK(&so->so_rcv); 2178 SBLASTMBUFCHK(&so->so_rcv); 2179 error = sbwait(&so->so_rcv); 2180 if (error) { 2181 SOCKBUF_UNLOCK(&so->so_rcv); 2182 return (error); 2183 } 2184 } 2185 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 2186 2187 if (uio->uio_td) 2188 uio->uio_td->td_ru.ru_msgrcv++; 2189 SBLASTRECORDCHK(&so->so_rcv); 2190 SBLASTMBUFCHK(&so->so_rcv); 2191 nextrecord = m->m_nextpkt; 2192 if (nextrecord == NULL) { 2193 KASSERT(so->so_rcv.sb_lastrecord == m, 2194 ("soreceive_dgram: lastrecord != m")); 2195 } 2196 2197 KASSERT(so->so_rcv.sb_mb->m_nextpkt == nextrecord, 2198 ("soreceive_dgram: m_nextpkt != nextrecord")); 2199 2200 /* 2201 * Pull 'm' and its chain off the front of the packet queue. 
2202 */ 2203 so->so_rcv.sb_mb = NULL; 2204 sockbuf_pushsync(&so->so_rcv, nextrecord); 2205 2206 /* 2207 * Walk 'm's chain and free that many bytes from the socket buffer. 2208 */ 2209 for (m2 = m; m2 != NULL; m2 = m2->m_next) 2210 sbfree(&so->so_rcv, m2); 2211 2212 /* 2213 * Do a few last checks before we let go of the lock. 2214 */ 2215 SBLASTRECORDCHK(&so->so_rcv); 2216 SBLASTMBUFCHK(&so->so_rcv); 2217 SOCKBUF_UNLOCK(&so->so_rcv); 2218 2219 if (pr->pr_flags & PR_ADDR) { 2220 KASSERT(m->m_type == MT_SONAME, 2221 ("m->m_type == %d", m->m_type)); 2222 if (psa != NULL) 2223 *psa = sodupsockaddr(mtod(m, struct sockaddr *), 2224 M_NOWAIT); 2225 m = m_free(m); 2226 } 2227 if (m == NULL) { 2228 /* XXXRW: Can this happen? */ 2229 return (0); 2230 } 2231 2232 /* 2233 * Packet to copyout() is now in 'm' and it is disconnected from the 2234 * queue. 2235 * 2236 * Process one or more MT_CONTROL mbufs present before any data mbufs 2237 * in the first mbuf chain on the socket buffer. We call into the 2238 * protocol to perform externalization (or freeing if controlp == 2239 * NULL). 2240 */ 2241 if (m->m_type == MT_CONTROL) { 2242 struct mbuf *cm = NULL, *cmn; 2243 struct mbuf **cme = &cm; 2244 2245 do { 2246 m2 = m->m_next; 2247 m->m_next = NULL; 2248 *cme = m; 2249 cme = &(*cme)->m_next; 2250 m = m2; 2251 } while (m != NULL && m->m_type == MT_CONTROL); 2252 while (cm != NULL) { 2253 cmn = cm->m_next; 2254 cm->m_next = NULL; 2255 if (pr->pr_domain->dom_externalize != NULL) { 2256 error = (*pr->pr_domain->dom_externalize) 2257 (cm, controlp); 2258 } else if (controlp != NULL) 2259 *controlp = cm; 2260 else 2261 m_freem(cm); 2262 if (controlp != NULL) { 2263 while (*controlp != NULL) 2264 controlp = &(*controlp)->m_next; 2265 } 2266 cm = cmn; 2267 } 2268 } 2269 KASSERT(m->m_type == MT_DATA, ("soreceive_dgram: !data")); 2270 2271 while (m != NULL && uio->uio_resid > 0) { 2272 len = uio->uio_resid; 2273 if (len > m->m_len) 2274 len = m->m_len; 2275 error = uiomove(mtod(m, char *), (int)len, uio); 2276 if (error) { 2277 m_freem(m); 2278 return (error); 2279 } 2280 if (len == m->m_len) 2281 m = m_free(m); 2282 else { 2283 m->m_data += len; 2284 m->m_len -= len; 2285 } 2286 } 2287 if (m != NULL) 2288 flags |= MSG_TRUNC; 2289 m_freem(m); 2290 if (flagsp != NULL) 2291 *flagsp |= flags; 2292 return (0); 2293 } 2294 2295 int 2296 soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio, 2297 struct mbuf **mp0, struct mbuf **controlp, int *flagsp) 2298 { 2299 int error; 2300 2301 CURVNET_SET(so->so_vnet); 2302 error = (so->so_proto->pr_usrreqs->pru_soreceive(so, psa, uio, mp0, 2303 controlp, flagsp)); 2304 CURVNET_RESTORE(); 2305 return (error); 2306 } 2307 2308 int 2309 soshutdown(struct socket *so, int how) 2310 { 2311 struct protosw *pr = so->so_proto; 2312 int error; 2313 2314 if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR)) 2315 return (EINVAL); 2316 2317 CURVNET_SET(so->so_vnet); 2318 if (pr->pr_usrreqs->pru_flush != NULL) { 2319 (*pr->pr_usrreqs->pru_flush)(so, how); 2320 } 2321 if (how != SHUT_WR) 2322 sorflush(so); 2323 if (how != SHUT_RD) { 2324 error = (*pr->pr_usrreqs->pru_shutdown)(so); 2325 CURVNET_RESTORE(); 2326 return (error); 2327 } 2328 CURVNET_RESTORE(); 2329 return (0); 2330 } 2331 2332 void 2333 sorflush(struct socket *so) 2334 { 2335 struct sockbuf *sb = &so->so_rcv; 2336 struct protosw *pr = so->so_proto; 2337 struct sockbuf asb; 2338 2339 VNET_SO_ASSERT(so); 2340 2341 /* 2342 * In order to avoid calling dom_dispose with the socket buffer mutex 2343 * held, and in 
order to generally avoid holding the lock for a long
	 * time, we make a copy of the socket buffer and clear the original
	 * (except locks, state).  The new socket buffer copy won't have
	 * initialized locks so we can only call routines that won't use or
	 * assert those locks.
	 *
	 * Dislodge threads currently blocked in receive and wait to acquire
	 * a lock against other simultaneous readers before clearing the
	 * socket buffer.  Don't let our acquire be interrupted by a signal
	 * despite any existing socket disposition on interruptible waiting.
	 */
	socantrcvmore(so);
	(void) sblock(sb, SBL_WAIT | SBL_NOINTR);

	/*
	 * Invalidate/clear most of the sockbuf structure, but leave selinfo
	 * and mutex data unchanged.
	 */
	SOCKBUF_LOCK(sb);
	bzero(&asb, offsetof(struct sockbuf, sb_startzero));
	bcopy(&sb->sb_startzero, &asb.sb_startzero,
	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
	bzero(&sb->sb_startzero,
	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
	SOCKBUF_UNLOCK(sb);
	sbunlock(sb);

	/*
	 * Dispose of special rights and flush the socket buffer.  Don't call
	 * any unsafe routines (that rely on locks being initialized) on asb.
	 */
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease_internal(&asb, so);
}

/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in an
 * additional variant to handle the case where the option value needs to be
 * some kind of integer, but not a specific size.  In addition to their use
 * here, these functions are also called by the protocol-level pr_ctloutput()
 * routines.
 */
int
sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	size_t valsize;

	/*
	 * If the user gives us more than we wanted, we ignore it, but if we
	 * don't get the minimum length the caller wants, we return EINVAL.
	 * On success, sopt->sopt_valsize is set to however much we actually
	 * retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return EINVAL;
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	if (sopt->sopt_td != NULL)
		return (copyin(sopt->sopt_val, buf, valsize));

	bcopy(sopt->sopt_val, buf, valsize);
	return (0);
}

/*
 * Kernel version of setsockopt(2).
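 *
 * Illustrative sketch (not part of the original sources): a kernel consumer
 * that already holds a socket reference might enable a boolean option
 * roughly as follows; everything except so_setsockopt() itself and the
 * constants is hypothetical glue.
 *
 *	struct socket *so;	(already created, e.g. via socreate())
 *	int on = 1;
 *	int error;
 *
 *	error = so_setsockopt(so, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 *	if (error != 0)
 *		handle the failure;
 *
 * Because sopt_td is left NULL, sosetopt() treats the option value as a
 * kernel address and copies it with bcopy() rather than copyin().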
2411 * 2412 * XXX: optlen is size_t, not socklen_t 2413 */ 2414 int 2415 so_setsockopt(struct socket *so, int level, int optname, void *optval, 2416 size_t optlen) 2417 { 2418 struct sockopt sopt; 2419 2420 sopt.sopt_level = level; 2421 sopt.sopt_name = optname; 2422 sopt.sopt_dir = SOPT_SET; 2423 sopt.sopt_val = optval; 2424 sopt.sopt_valsize = optlen; 2425 sopt.sopt_td = NULL; 2426 return (sosetopt(so, &sopt)); 2427 } 2428 2429 int 2430 sosetopt(struct socket *so, struct sockopt *sopt) 2431 { 2432 int error, optval; 2433 struct linger l; 2434 struct timeval tv; 2435 u_long val; 2436 uint32_t val32; 2437 #ifdef MAC 2438 struct mac extmac; 2439 #endif 2440 2441 CURVNET_SET(so->so_vnet); 2442 error = 0; 2443 if (sopt->sopt_level != SOL_SOCKET) { 2444 if (so->so_proto && so->so_proto->pr_ctloutput) { 2445 error = (*so->so_proto->pr_ctloutput)(so, sopt); 2446 CURVNET_RESTORE(); 2447 return (error); 2448 } 2449 error = ENOPROTOOPT; 2450 } else { 2451 switch (sopt->sopt_name) { 2452 #ifdef INET 2453 case SO_ACCEPTFILTER: 2454 error = do_setopt_accept_filter(so, sopt); 2455 if (error) 2456 goto bad; 2457 break; 2458 #endif 2459 case SO_LINGER: 2460 error = sooptcopyin(sopt, &l, sizeof l, sizeof l); 2461 if (error) 2462 goto bad; 2463 2464 SOCK_LOCK(so); 2465 so->so_linger = l.l_linger; 2466 if (l.l_onoff) 2467 so->so_options |= SO_LINGER; 2468 else 2469 so->so_options &= ~SO_LINGER; 2470 SOCK_UNLOCK(so); 2471 break; 2472 2473 case SO_DEBUG: 2474 case SO_KEEPALIVE: 2475 case SO_DONTROUTE: 2476 case SO_USELOOPBACK: 2477 case SO_BROADCAST: 2478 case SO_REUSEADDR: 2479 case SO_REUSEPORT: 2480 case SO_OOBINLINE: 2481 case SO_TIMESTAMP: 2482 case SO_BINTIME: 2483 case SO_NOSIGPIPE: 2484 case SO_NO_DDP: 2485 case SO_NO_OFFLOAD: 2486 error = sooptcopyin(sopt, &optval, sizeof optval, 2487 sizeof optval); 2488 if (error) 2489 goto bad; 2490 SOCK_LOCK(so); 2491 if (optval) 2492 so->so_options |= sopt->sopt_name; 2493 else 2494 so->so_options &= ~sopt->sopt_name; 2495 SOCK_UNLOCK(so); 2496 break; 2497 2498 case SO_SETFIB: 2499 error = sooptcopyin(sopt, &optval, sizeof optval, 2500 sizeof optval); 2501 if (optval < 0 || optval > rt_numfibs) { 2502 error = EINVAL; 2503 goto bad; 2504 } 2505 if (so->so_proto != NULL && 2506 ((so->so_proto->pr_domain->dom_family == PF_INET) || 2507 (so->so_proto->pr_domain->dom_family == PF_ROUTE))) { 2508 so->so_fibnum = optval; 2509 /* Note: ignore error */ 2510 if (so->so_proto->pr_ctloutput) 2511 (*so->so_proto->pr_ctloutput)(so, sopt); 2512 } else { 2513 so->so_fibnum = 0; 2514 } 2515 break; 2516 2517 case SO_USER_COOKIE: 2518 error = sooptcopyin(sopt, &val32, sizeof val32, 2519 sizeof val32); 2520 if (error) 2521 goto bad; 2522 so->so_user_cookie = val32; 2523 break; 2524 2525 case SO_SNDBUF: 2526 case SO_RCVBUF: 2527 case SO_SNDLOWAT: 2528 case SO_RCVLOWAT: 2529 error = sooptcopyin(sopt, &optval, sizeof optval, 2530 sizeof optval); 2531 if (error) 2532 goto bad; 2533 2534 /* 2535 * Values < 1 make no sense for any of these options, 2536 * so disallow them. 2537 */ 2538 if (optval < 1) { 2539 error = EINVAL; 2540 goto bad; 2541 } 2542 2543 switch (sopt->sopt_name) { 2544 case SO_SNDBUF: 2545 case SO_RCVBUF: 2546 if (sbreserve(sopt->sopt_name == SO_SNDBUF ? 2547 &so->so_snd : &so->so_rcv, (u_long)optval, 2548 so, curthread) == 0) { 2549 error = ENOBUFS; 2550 goto bad; 2551 } 2552 (sopt->sopt_name == SO_SNDBUF ? &so->so_snd : 2553 &so->so_rcv)->sb_flags &= ~SB_AUTOSIZE; 2554 break; 2555 2556 /* 2557 * Make sure the low-water is never greater than the 2558 * high-water. 
			 */
			case SO_SNDLOWAT:
				SOCKBUF_LOCK(&so->so_snd);
				so->so_snd.sb_lowat =
				    (optval > so->so_snd.sb_hiwat) ?
				    so->so_snd.sb_hiwat : optval;
				SOCKBUF_UNLOCK(&so->so_snd);
				break;
			case SO_RCVLOWAT:
				SOCKBUF_LOCK(&so->so_rcv);
				so->so_rcv.sb_lowat =
				    (optval > so->so_rcv.sb_hiwat) ?
				    so->so_rcv.sb_hiwat : optval;
				SOCKBUF_UNLOCK(&so->so_rcv);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
#ifdef COMPAT_FREEBSD32
			if (SV_CURPROC_FLAG(SV_ILP32)) {
				struct timeval32 tv32;

				error = sooptcopyin(sopt, &tv32, sizeof tv32,
				    sizeof tv32);
				CP(tv32, tv, tv_sec);
				CP(tv32, tv, tv_usec);
			} else
#endif
				error = sooptcopyin(sopt, &tv, sizeof tv,
				    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
			if (val > INT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = val;
				break;
			}
			break;

		case SO_LABEL:
#ifdef MAC
			error = sooptcopyin(sopt, &extmac, sizeof extmac,
			    sizeof extmac);
			if (error)
				goto bad;
			error = mac_setsockopt_label(sopt->sopt_td->td_ucred,
			    so, &extmac);
#else
			error = EOPNOTSUPP;
#endif
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto != NULL &&
		    so->so_proto->pr_ctloutput != NULL) {
			(void) ((*so->so_proto->pr_ctloutput)
			    (so, sopt));
		}
	}
bad:
	CURVNET_RESTORE();
	return (error);
}

/*
 * Helper routine for getsockopt.
 */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	int error;
	size_t valsize;

	error = 0;

	/*
	 * Documented get behavior is that we always return a value, possibly
	 * truncated to fit in the user's buffer.  Traditional behavior is
	 * that we always tell the user precisely how much we copied, rather
	 * than something useful like the total amount we had available for
	 * her.  Note that this interface is not idempotent; the entire
	 * answer must be generated ahead of time.
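	 *
	 * Illustrative sketch (not part of the original sources): a
	 * protocol's pr_ctloutput() handler typically pairs sooptcopyin()
	 * and sooptcopyout() like this for an integer-valued option; the
	 * pcb and some_flag names below are hypothetical.
	 *
	 *	case SOPT_GET:
	 *		optval = pcb->some_flag;
	 *		error = sooptcopyout(sopt, &optval, sizeof(optval));
	 *		break;
	 *	case SOPT_SET:
	 *		error = sooptcopyin(sopt, &optval, sizeof(optval),
	 *		    sizeof(optval));
	 *		if (error == 0)
	 *			pcb->some_flag = optval;
	 *		break;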
2666 */ 2667 valsize = min(len, sopt->sopt_valsize); 2668 sopt->sopt_valsize = valsize; 2669 if (sopt->sopt_val != NULL) { 2670 if (sopt->sopt_td != NULL) 2671 error = copyout(buf, sopt->sopt_val, valsize); 2672 else 2673 bcopy(buf, sopt->sopt_val, valsize); 2674 } 2675 return (error); 2676 } 2677 2678 int 2679 sogetopt(struct socket *so, struct sockopt *sopt) 2680 { 2681 int error, optval; 2682 struct linger l; 2683 struct timeval tv; 2684 #ifdef MAC 2685 struct mac extmac; 2686 #endif 2687 2688 CURVNET_SET(so->so_vnet); 2689 error = 0; 2690 if (sopt->sopt_level != SOL_SOCKET) { 2691 if (so->so_proto && so->so_proto->pr_ctloutput) 2692 error = (*so->so_proto->pr_ctloutput)(so, sopt); 2693 else 2694 error = ENOPROTOOPT; 2695 CURVNET_RESTORE(); 2696 return (error); 2697 } else { 2698 switch (sopt->sopt_name) { 2699 #ifdef INET 2700 case SO_ACCEPTFILTER: 2701 error = do_getopt_accept_filter(so, sopt); 2702 break; 2703 #endif 2704 case SO_LINGER: 2705 SOCK_LOCK(so); 2706 l.l_onoff = so->so_options & SO_LINGER; 2707 l.l_linger = so->so_linger; 2708 SOCK_UNLOCK(so); 2709 error = sooptcopyout(sopt, &l, sizeof l); 2710 break; 2711 2712 case SO_USELOOPBACK: 2713 case SO_DONTROUTE: 2714 case SO_DEBUG: 2715 case SO_KEEPALIVE: 2716 case SO_REUSEADDR: 2717 case SO_REUSEPORT: 2718 case SO_BROADCAST: 2719 case SO_OOBINLINE: 2720 case SO_ACCEPTCONN: 2721 case SO_TIMESTAMP: 2722 case SO_BINTIME: 2723 case SO_NOSIGPIPE: 2724 optval = so->so_options & sopt->sopt_name; 2725 integer: 2726 error = sooptcopyout(sopt, &optval, sizeof optval); 2727 break; 2728 2729 case SO_TYPE: 2730 optval = so->so_type; 2731 goto integer; 2732 2733 case SO_ERROR: 2734 SOCK_LOCK(so); 2735 optval = so->so_error; 2736 so->so_error = 0; 2737 SOCK_UNLOCK(so); 2738 goto integer; 2739 2740 case SO_SNDBUF: 2741 optval = so->so_snd.sb_hiwat; 2742 goto integer; 2743 2744 case SO_RCVBUF: 2745 optval = so->so_rcv.sb_hiwat; 2746 goto integer; 2747 2748 case SO_SNDLOWAT: 2749 optval = so->so_snd.sb_lowat; 2750 goto integer; 2751 2752 case SO_RCVLOWAT: 2753 optval = so->so_rcv.sb_lowat; 2754 goto integer; 2755 2756 case SO_SNDTIMEO: 2757 case SO_RCVTIMEO: 2758 optval = (sopt->sopt_name == SO_SNDTIMEO ? 
2759 so->so_snd.sb_timeo : so->so_rcv.sb_timeo); 2760 2761 tv.tv_sec = optval / hz; 2762 tv.tv_usec = (optval % hz) * tick; 2763 #ifdef COMPAT_FREEBSD32 2764 if (SV_CURPROC_FLAG(SV_ILP32)) { 2765 struct timeval32 tv32; 2766 2767 CP(tv, tv32, tv_sec); 2768 CP(tv, tv32, tv_usec); 2769 error = sooptcopyout(sopt, &tv32, sizeof tv32); 2770 } else 2771 #endif 2772 error = sooptcopyout(sopt, &tv, sizeof tv); 2773 break; 2774 2775 case SO_LABEL: 2776 #ifdef MAC 2777 error = sooptcopyin(sopt, &extmac, sizeof(extmac), 2778 sizeof(extmac)); 2779 if (error) 2780 goto bad; 2781 error = mac_getsockopt_label(sopt->sopt_td->td_ucred, 2782 so, &extmac); 2783 if (error) 2784 goto bad; 2785 error = sooptcopyout(sopt, &extmac, sizeof extmac); 2786 #else 2787 error = EOPNOTSUPP; 2788 #endif 2789 break; 2790 2791 case SO_PEERLABEL: 2792 #ifdef MAC 2793 error = sooptcopyin(sopt, &extmac, sizeof(extmac), 2794 sizeof(extmac)); 2795 if (error) 2796 goto bad; 2797 error = mac_getsockopt_peerlabel( 2798 sopt->sopt_td->td_ucred, so, &extmac); 2799 if (error) 2800 goto bad; 2801 error = sooptcopyout(sopt, &extmac, sizeof extmac); 2802 #else 2803 error = EOPNOTSUPP; 2804 #endif 2805 break; 2806 2807 case SO_LISTENQLIMIT: 2808 optval = so->so_qlimit; 2809 goto integer; 2810 2811 case SO_LISTENQLEN: 2812 optval = so->so_qlen; 2813 goto integer; 2814 2815 case SO_LISTENINCQLEN: 2816 optval = so->so_incqlen; 2817 goto integer; 2818 2819 default: 2820 error = ENOPROTOOPT; 2821 break; 2822 } 2823 } 2824 #ifdef MAC 2825 bad: 2826 #endif 2827 CURVNET_RESTORE(); 2828 return (error); 2829 } 2830 2831 /* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */ 2832 int 2833 soopt_getm(struct sockopt *sopt, struct mbuf **mp) 2834 { 2835 struct mbuf *m, *m_prev; 2836 int sopt_size = sopt->sopt_valsize; 2837 2838 MGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT, MT_DATA); 2839 if (m == NULL) 2840 return ENOBUFS; 2841 if (sopt_size > MLEN) { 2842 MCLGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT); 2843 if ((m->m_flags & M_EXT) == 0) { 2844 m_free(m); 2845 return ENOBUFS; 2846 } 2847 m->m_len = min(MCLBYTES, sopt_size); 2848 } else { 2849 m->m_len = min(MLEN, sopt_size); 2850 } 2851 sopt_size -= m->m_len; 2852 *mp = m; 2853 m_prev = m; 2854 2855 while (sopt_size) { 2856 MGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT, MT_DATA); 2857 if (m == NULL) { 2858 m_freem(*mp); 2859 return ENOBUFS; 2860 } 2861 if (sopt_size > MLEN) { 2862 MCLGET(m, sopt->sopt_td != NULL ? M_WAIT : 2863 M_DONTWAIT); 2864 if ((m->m_flags & M_EXT) == 0) { 2865 m_freem(m); 2866 m_freem(*mp); 2867 return ENOBUFS; 2868 } 2869 m->m_len = min(MCLBYTES, sopt_size); 2870 } else { 2871 m->m_len = min(MLEN, sopt_size); 2872 } 2873 sopt_size -= m->m_len; 2874 m_prev->m_next = m; 2875 m_prev = m; 2876 } 2877 return (0); 2878 } 2879 2880 /* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. 
 */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;

	if (sopt->sopt_val == NULL)
		return (0);
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_td != NULL) {
			int error;

			error = copyin(sopt->sopt_val, mtod(m, char *),
			    m->m_len);
			if (error != 0) {
				m_freem(m0);
				return(error);
			}
		} else
			bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
		sopt->sopt_valsize -= m->m_len;
		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
		m = m->m_next;
	}
	if (m != NULL) /* should have been allocated large enough at ip6_sooptmcopyin() */
		panic("ip6_sooptmcopyin");
	return (0);
}

/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;

	if (sopt->sopt_val == NULL)
		return (0);
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_td != NULL) {
			int error;

			error = copyout(mtod(m, char *), sopt->sopt_val,
			    m->m_len);
			if (error != 0) {
				m_freem(m0);
				return(error);
			}
		} else
			bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
		sopt->sopt_valsize -= m->m_len;
		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* enough soopt buffer should have been given from user-land */
		m_freem(m0);
		return(EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return (0);
}

/*
 * sohasoutofband(): protocol notifies socket layer of the arrival of new
 * out-of-band data, which will then notify socket consumers.
 */
void
sohasoutofband(struct socket *so)
{

	if (so->so_sigio != NULL)
		pgsigio(&so->so_sigio, SIGURG, 0);
	selwakeuppri(&so->so_rcv.sb_sel, PSOCK);
}

int
sopoll(struct socket *so, int events, struct ucred *active_cred,
    struct thread *td)
{

	/*
	 * We do not need to set or assert curvnet as long as everyone uses
	 * sopoll_generic().
2965 */ 2966 return (so->so_proto->pr_usrreqs->pru_sopoll(so, events, active_cred, 2967 td)); 2968 } 2969 2970 int 2971 sopoll_generic(struct socket *so, int events, struct ucred *active_cred, 2972 struct thread *td) 2973 { 2974 int revents = 0; 2975 2976 SOCKBUF_LOCK(&so->so_snd); 2977 SOCKBUF_LOCK(&so->so_rcv); 2978 if (events & (POLLIN | POLLRDNORM)) 2979 if (soreadabledata(so)) 2980 revents |= events & (POLLIN | POLLRDNORM); 2981 2982 if (events & (POLLOUT | POLLWRNORM)) 2983 if (sowriteable(so)) 2984 revents |= events & (POLLOUT | POLLWRNORM); 2985 2986 if (events & (POLLPRI | POLLRDBAND)) 2987 if (so->so_oobmark || (so->so_rcv.sb_state & SBS_RCVATMARK)) 2988 revents |= events & (POLLPRI | POLLRDBAND); 2989 2990 if ((events & POLLINIGNEOF) == 0) { 2991 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 2992 revents |= events & (POLLIN | POLLRDNORM); 2993 if (so->so_snd.sb_state & SBS_CANTSENDMORE) 2994 revents |= POLLHUP; 2995 } 2996 } 2997 2998 if (revents == 0) { 2999 if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) { 3000 selrecord(td, &so->so_rcv.sb_sel); 3001 so->so_rcv.sb_flags |= SB_SEL; 3002 } 3003 3004 if (events & (POLLOUT | POLLWRNORM)) { 3005 selrecord(td, &so->so_snd.sb_sel); 3006 so->so_snd.sb_flags |= SB_SEL; 3007 } 3008 } 3009 3010 SOCKBUF_UNLOCK(&so->so_rcv); 3011 SOCKBUF_UNLOCK(&so->so_snd); 3012 return (revents); 3013 } 3014 3015 int 3016 soo_kqfilter(struct file *fp, struct knote *kn) 3017 { 3018 struct socket *so = kn->kn_fp->f_data; 3019 struct sockbuf *sb; 3020 3021 switch (kn->kn_filter) { 3022 case EVFILT_READ: 3023 if (so->so_options & SO_ACCEPTCONN) 3024 kn->kn_fop = &solisten_filtops; 3025 else 3026 kn->kn_fop = &soread_filtops; 3027 sb = &so->so_rcv; 3028 break; 3029 case EVFILT_WRITE: 3030 kn->kn_fop = &sowrite_filtops; 3031 sb = &so->so_snd; 3032 break; 3033 default: 3034 return (EINVAL); 3035 } 3036 3037 SOCKBUF_LOCK(sb); 3038 knlist_add(&sb->sb_sel.si_note, kn, 1); 3039 sb->sb_flags |= SB_KNOTE; 3040 SOCKBUF_UNLOCK(sb); 3041 return (0); 3042 } 3043 3044 /* 3045 * Some routines that return EOPNOTSUPP for entry points that are not 3046 * supported by a protocol. Fill in as needed. 
3047 */ 3048 int 3049 pru_accept_notsupp(struct socket *so, struct sockaddr **nam) 3050 { 3051 3052 return EOPNOTSUPP; 3053 } 3054 3055 int 3056 pru_attach_notsupp(struct socket *so, int proto, struct thread *td) 3057 { 3058 3059 return EOPNOTSUPP; 3060 } 3061 3062 int 3063 pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td) 3064 { 3065 3066 return EOPNOTSUPP; 3067 } 3068 3069 int 3070 pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td) 3071 { 3072 3073 return EOPNOTSUPP; 3074 } 3075 3076 int 3077 pru_connect2_notsupp(struct socket *so1, struct socket *so2) 3078 { 3079 3080 return EOPNOTSUPP; 3081 } 3082 3083 int 3084 pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data, 3085 struct ifnet *ifp, struct thread *td) 3086 { 3087 3088 return EOPNOTSUPP; 3089 } 3090 3091 int 3092 pru_disconnect_notsupp(struct socket *so) 3093 { 3094 3095 return EOPNOTSUPP; 3096 } 3097 3098 int 3099 pru_listen_notsupp(struct socket *so, int backlog, struct thread *td) 3100 { 3101 3102 return EOPNOTSUPP; 3103 } 3104 3105 int 3106 pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam) 3107 { 3108 3109 return EOPNOTSUPP; 3110 } 3111 3112 int 3113 pru_rcvd_notsupp(struct socket *so, int flags) 3114 { 3115 3116 return EOPNOTSUPP; 3117 } 3118 3119 int 3120 pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags) 3121 { 3122 3123 return EOPNOTSUPP; 3124 } 3125 3126 int 3127 pru_send_notsupp(struct socket *so, int flags, struct mbuf *m, 3128 struct sockaddr *addr, struct mbuf *control, struct thread *td) 3129 { 3130 3131 return EOPNOTSUPP; 3132 } 3133 3134 /* 3135 * This isn't really a ``null'' operation, but it's the default one and 3136 * doesn't do anything destructive. 3137 */ 3138 int 3139 pru_sense_null(struct socket *so, struct stat *sb) 3140 { 3141 3142 sb->st_blksize = so->so_snd.sb_hiwat; 3143 return 0; 3144 } 3145 3146 int 3147 pru_shutdown_notsupp(struct socket *so) 3148 { 3149 3150 return EOPNOTSUPP; 3151 } 3152 3153 int 3154 pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam) 3155 { 3156 3157 return EOPNOTSUPP; 3158 } 3159 3160 int 3161 pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio, 3162 struct mbuf *top, struct mbuf *control, int flags, struct thread *td) 3163 { 3164 3165 return EOPNOTSUPP; 3166 } 3167 3168 int 3169 pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr, 3170 struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp) 3171 { 3172 3173 return EOPNOTSUPP; 3174 } 3175 3176 int 3177 pru_sopoll_notsupp(struct socket *so, int events, struct ucred *cred, 3178 struct thread *td) 3179 { 3180 3181 return EOPNOTSUPP; 3182 } 3183 3184 static void 3185 filt_sordetach(struct knote *kn) 3186 { 3187 struct socket *so = kn->kn_fp->f_data; 3188 3189 SOCKBUF_LOCK(&so->so_rcv); 3190 knlist_remove(&so->so_rcv.sb_sel.si_note, kn, 1); 3191 if (knlist_empty(&so->so_rcv.sb_sel.si_note)) 3192 so->so_rcv.sb_flags &= ~SB_KNOTE; 3193 SOCKBUF_UNLOCK(&so->so_rcv); 3194 } 3195 3196 /*ARGSUSED*/ 3197 static int 3198 filt_soread(struct knote *kn, long hint) 3199 { 3200 struct socket *so; 3201 3202 so = kn->kn_fp->f_data; 3203 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 3204 3205 kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl; 3206 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 3207 kn->kn_flags |= EV_EOF; 3208 kn->kn_fflags = so->so_error; 3209 return (1); 3210 } else if (so->so_error) /* temporary udp error */ 3211 return (1); 3212 else if (kn->kn_sfflags & NOTE_LOWAT) 3213 return 
(kn->kn_data >= kn->kn_sdata); 3214 else 3215 return (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat); 3216 } 3217 3218 static void 3219 filt_sowdetach(struct knote *kn) 3220 { 3221 struct socket *so = kn->kn_fp->f_data; 3222 3223 SOCKBUF_LOCK(&so->so_snd); 3224 knlist_remove(&so->so_snd.sb_sel.si_note, kn, 1); 3225 if (knlist_empty(&so->so_snd.sb_sel.si_note)) 3226 so->so_snd.sb_flags &= ~SB_KNOTE; 3227 SOCKBUF_UNLOCK(&so->so_snd); 3228 } 3229 3230 /*ARGSUSED*/ 3231 static int 3232 filt_sowrite(struct knote *kn, long hint) 3233 { 3234 struct socket *so; 3235 3236 so = kn->kn_fp->f_data; 3237 SOCKBUF_LOCK_ASSERT(&so->so_snd); 3238 kn->kn_data = sbspace(&so->so_snd); 3239 if (so->so_snd.sb_state & SBS_CANTSENDMORE) { 3240 kn->kn_flags |= EV_EOF; 3241 kn->kn_fflags = so->so_error; 3242 return (1); 3243 } else if (so->so_error) /* temporary udp error */ 3244 return (1); 3245 else if (((so->so_state & SS_ISCONNECTED) == 0) && 3246 (so->so_proto->pr_flags & PR_CONNREQUIRED)) 3247 return (0); 3248 else if (kn->kn_sfflags & NOTE_LOWAT) 3249 return (kn->kn_data >= kn->kn_sdata); 3250 else 3251 return (kn->kn_data >= so->so_snd.sb_lowat); 3252 } 3253 3254 /*ARGSUSED*/ 3255 static int 3256 filt_solisten(struct knote *kn, long hint) 3257 { 3258 struct socket *so = kn->kn_fp->f_data; 3259 3260 kn->kn_data = so->so_qlen; 3261 return (! TAILQ_EMPTY(&so->so_comp)); 3262 } 3263 3264 int 3265 socheckuid(struct socket *so, uid_t uid) 3266 { 3267 3268 if (so == NULL) 3269 return (EPERM); 3270 if (so->so_cred->cr_uid != uid) 3271 return (EPERM); 3272 return (0); 3273 } 3274 3275 static int 3276 sysctl_somaxconn(SYSCTL_HANDLER_ARGS) 3277 { 3278 int error; 3279 int val; 3280 3281 val = somaxconn; 3282 error = sysctl_handle_int(oidp, &val, 0, req); 3283 if (error || !req->newptr ) 3284 return (error); 3285 3286 if (val < 1 || val > USHRT_MAX) 3287 return (EINVAL); 3288 3289 somaxconn = val; 3290 return (0); 3291 } 3292 3293 /* 3294 * These functions are used by protocols to notify the socket layer (and its 3295 * consumers) of state changes in the sockets driven by protocol-side events. 3296 */ 3297 3298 /* 3299 * Procedures to manipulate state flags of socket and do appropriate wakeups. 3300 * 3301 * Normal sequence from the active (originating) side is that 3302 * soisconnecting() is called during processing of connect() call, resulting 3303 * in an eventual call to soisconnected() if/when the connection is 3304 * established. When the connection is torn down soisdisconnecting() is 3305 * called during processing of disconnect() call, and soisdisconnected() is 3306 * called when the connection to the peer is totally severed. The semantics 3307 * of these routines are such that connectionless protocols can call 3308 * soisconnected() and soisdisconnected() only, bypassing the in-progress 3309 * calls when setting up a ``connection'' takes no time. 3310 * 3311 * From the passive side, a socket is created with two queues of sockets: 3312 * so_incomp for connections in progress and so_comp for connections already 3313 * made and awaiting user acceptance. As a protocol is preparing incoming 3314 * connections, it creates a socket structure queued on so_incomp by calling 3315 * sonewconn(). When the connection is established, soisconnected() is 3316 * called, and transfers the socket structure to so_comp, making it available 3317 * to accept(). 3318 * 3319 * If a socket is closed with sockets on either so_incomp or so_comp, these 3320 * sockets are dropped. 
3321 * 3322 * If higher-level protocols are implemented in the kernel, the wakeups done 3323 * here will sometimes cause software-interrupt process scheduling. 3324 */ 3325 void 3326 soisconnecting(struct socket *so) 3327 { 3328 3329 SOCK_LOCK(so); 3330 so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING); 3331 so->so_state |= SS_ISCONNECTING; 3332 SOCK_UNLOCK(so); 3333 } 3334 3335 void 3336 soisconnected(struct socket *so) 3337 { 3338 struct socket *head; 3339 int ret; 3340 3341 restart: 3342 ACCEPT_LOCK(); 3343 SOCK_LOCK(so); 3344 so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING); 3345 so->so_state |= SS_ISCONNECTED; 3346 head = so->so_head; 3347 if (head != NULL && (so->so_qstate & SQ_INCOMP)) { 3348 if ((so->so_options & SO_ACCEPTFILTER) == 0) { 3349 SOCK_UNLOCK(so); 3350 TAILQ_REMOVE(&head->so_incomp, so, so_list); 3351 head->so_incqlen--; 3352 so->so_qstate &= ~SQ_INCOMP; 3353 TAILQ_INSERT_TAIL(&head->so_comp, so, so_list); 3354 head->so_qlen++; 3355 so->so_qstate |= SQ_COMP; 3356 ACCEPT_UNLOCK(); 3357 sorwakeup(head); 3358 wakeup_one(&head->so_timeo); 3359 } else { 3360 ACCEPT_UNLOCK(); 3361 soupcall_set(so, SO_RCV, 3362 head->so_accf->so_accept_filter->accf_callback, 3363 head->so_accf->so_accept_filter_arg); 3364 so->so_options &= ~SO_ACCEPTFILTER; 3365 ret = head->so_accf->so_accept_filter->accf_callback(so, 3366 head->so_accf->so_accept_filter_arg, M_DONTWAIT); 3367 if (ret == SU_ISCONNECTED) 3368 soupcall_clear(so, SO_RCV); 3369 SOCK_UNLOCK(so); 3370 if (ret == SU_ISCONNECTED) 3371 goto restart; 3372 } 3373 return; 3374 } 3375 SOCK_UNLOCK(so); 3376 ACCEPT_UNLOCK(); 3377 wakeup(&so->so_timeo); 3378 sorwakeup(so); 3379 sowwakeup(so); 3380 } 3381 3382 void 3383 soisdisconnecting(struct socket *so) 3384 { 3385 3386 /* 3387 * Note: This code assumes that SOCK_LOCK(so) and 3388 * SOCKBUF_LOCK(&so->so_rcv) are the same. 3389 */ 3390 SOCKBUF_LOCK(&so->so_rcv); 3391 so->so_state &= ~SS_ISCONNECTING; 3392 so->so_state |= SS_ISDISCONNECTING; 3393 so->so_rcv.sb_state |= SBS_CANTRCVMORE; 3394 sorwakeup_locked(so); 3395 SOCKBUF_LOCK(&so->so_snd); 3396 so->so_snd.sb_state |= SBS_CANTSENDMORE; 3397 sowwakeup_locked(so); 3398 wakeup(&so->so_timeo); 3399 } 3400 3401 void 3402 soisdisconnected(struct socket *so) 3403 { 3404 3405 /* 3406 * Note: This code assumes that SOCK_LOCK(so) and 3407 * SOCKBUF_LOCK(&so->so_rcv) are the same. 3408 */ 3409 SOCKBUF_LOCK(&so->so_rcv); 3410 so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING); 3411 so->so_state |= SS_ISDISCONNECTED; 3412 so->so_rcv.sb_state |= SBS_CANTRCVMORE; 3413 sorwakeup_locked(so); 3414 SOCKBUF_LOCK(&so->so_snd); 3415 so->so_snd.sb_state |= SBS_CANTSENDMORE; 3416 sbdrop_locked(&so->so_snd, so->so_snd.sb_cc); 3417 sowwakeup_locked(so); 3418 wakeup(&so->so_timeo); 3419 } 3420 3421 /* 3422 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME. 3423 */ 3424 struct sockaddr * 3425 sodupsockaddr(const struct sockaddr *sa, int mflags) 3426 { 3427 struct sockaddr *sa2; 3428 3429 sa2 = malloc(sa->sa_len, M_SONAME, mflags); 3430 if (sa2) 3431 bcopy(sa, sa2, sa->sa_len); 3432 return sa2; 3433 } 3434 3435 /* 3436 * Register per-socket buffer upcalls. 
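 *
 * Illustrative sketch (not part of the original sources): a consumer that
 * wants to be notified when data arrives can hook the receive buffer; the
 * foo_* names are hypothetical.  soupcall_set() and soupcall_clear() assert
 * that the sockbuf lock is held, and the upcall is normally invoked with
 * that lock still held, so it should only do lightweight work such as a
 * wakeup() or queuing a task.
 *
 *	static int
 *	foo_rcv_upcall(struct socket *so, void *arg, int waitflag)
 *	{
 *
 *		wakeup(arg);
 *		return (SU_OK);
 *	}
 *
 *	SOCKBUF_LOCK(&so->so_rcv);
 *	soupcall_set(so, SO_RCV, foo_rcv_upcall, &foo_softc);
 *	SOCKBUF_UNLOCK(&so->so_rcv);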
3437 */ 3438 void 3439 soupcall_set(struct socket *so, int which, 3440 int (*func)(struct socket *, void *, int), void *arg) 3441 { 3442 struct sockbuf *sb; 3443 3444 switch (which) { 3445 case SO_RCV: 3446 sb = &so->so_rcv; 3447 break; 3448 case SO_SND: 3449 sb = &so->so_snd; 3450 break; 3451 default: 3452 panic("soupcall_set: bad which"); 3453 } 3454 SOCKBUF_LOCK_ASSERT(sb); 3455 #if 0 3456 /* XXX: accf_http actually wants to do this on purpose. */ 3457 KASSERT(sb->sb_upcall == NULL, ("soupcall_set: overwriting upcall")); 3458 #endif 3459 sb->sb_upcall = func; 3460 sb->sb_upcallarg = arg; 3461 sb->sb_flags |= SB_UPCALL; 3462 } 3463 3464 void 3465 soupcall_clear(struct socket *so, int which) 3466 { 3467 struct sockbuf *sb; 3468 3469 switch (which) { 3470 case SO_RCV: 3471 sb = &so->so_rcv; 3472 break; 3473 case SO_SND: 3474 sb = &so->so_snd; 3475 break; 3476 default: 3477 panic("soupcall_clear: bad which"); 3478 } 3479 SOCKBUF_LOCK_ASSERT(sb); 3480 KASSERT(sb->sb_upcall != NULL, ("soupcall_clear: no upcall to clear")); 3481 sb->sb_upcall = NULL; 3482 sb->sb_upcallarg = NULL; 3483 sb->sb_flags &= ~SB_UPCALL; 3484 } 3485 3486 /* 3487 * Create an external-format (``xsocket'') structure using the information in 3488 * the kernel-format socket structure pointed to by so. This is done to 3489 * reduce the spew of irrelevant information over this interface, to isolate 3490 * user code from changes in the kernel structure, and potentially to provide 3491 * information-hiding if we decide that some of this information should be 3492 * hidden from users. 3493 */ 3494 void 3495 sotoxsocket(struct socket *so, struct xsocket *xso) 3496 { 3497 3498 xso->xso_len = sizeof *xso; 3499 xso->xso_so = so; 3500 xso->so_type = so->so_type; 3501 xso->so_options = so->so_options; 3502 xso->so_linger = so->so_linger; 3503 xso->so_state = so->so_state; 3504 xso->so_pcb = so->so_pcb; 3505 xso->xso_protocol = so->so_proto->pr_protocol; 3506 xso->xso_family = so->so_proto->pr_domain->dom_family; 3507 xso->so_qlen = so->so_qlen; 3508 xso->so_incqlen = so->so_incqlen; 3509 xso->so_qlimit = so->so_qlimit; 3510 xso->so_timeo = so->so_timeo; 3511 xso->so_error = so->so_error; 3512 xso->so_pgid = so->so_sigio ? 
so->so_sigio->sio_pgid : 0; 3513 xso->so_oobmark = so->so_oobmark; 3514 sbtoxsockbuf(&so->so_snd, &xso->so_snd); 3515 sbtoxsockbuf(&so->so_rcv, &xso->so_rcv); 3516 xso->so_uid = so->so_cred->cr_uid; 3517 } 3518 3519 3520 /* 3521 * Socket accessor functions to provide external consumers with 3522 * a safe interface to socket state 3523 * 3524 */ 3525 3526 void 3527 so_listeners_apply_all(struct socket *so, void (*func)(struct socket *, void *), void *arg) 3528 { 3529 3530 TAILQ_FOREACH(so, &so->so_comp, so_list) 3531 func(so, arg); 3532 } 3533 3534 struct sockbuf * 3535 so_sockbuf_rcv(struct socket *so) 3536 { 3537 3538 return (&so->so_rcv); 3539 } 3540 3541 struct sockbuf * 3542 so_sockbuf_snd(struct socket *so) 3543 { 3544 3545 return (&so->so_snd); 3546 } 3547 3548 int 3549 so_state_get(const struct socket *so) 3550 { 3551 3552 return (so->so_state); 3553 } 3554 3555 void 3556 so_state_set(struct socket *so, int val) 3557 { 3558 3559 so->so_state = val; 3560 } 3561 3562 int 3563 so_options_get(const struct socket *so) 3564 { 3565 3566 return (so->so_options); 3567 } 3568 3569 void 3570 so_options_set(struct socket *so, int val) 3571 { 3572 3573 so->so_options = val; 3574 } 3575 3576 int 3577 so_error_get(const struct socket *so) 3578 { 3579 3580 return (so->so_error); 3581 } 3582 3583 void 3584 so_error_set(struct socket *so, int val) 3585 { 3586 3587 so->so_error = val; 3588 } 3589 3590 int 3591 so_linger_get(const struct socket *so) 3592 { 3593 3594 return (so->so_linger); 3595 } 3596 3597 void 3598 so_linger_set(struct socket *so, int val) 3599 { 3600 3601 so->so_linger = val; 3602 } 3603 3604 struct protosw * 3605 so_protosw_get(const struct socket *so) 3606 { 3607 3608 return (so->so_proto); 3609 } 3610 3611 void 3612 so_protosw_set(struct socket *so, struct protosw *val) 3613 { 3614 3615 so->so_proto = val; 3616 } 3617 3618 void 3619 so_sorwakeup(struct socket *so) 3620 { 3621 3622 sorwakeup(so); 3623 } 3624 3625 void 3626 so_sowwakeup(struct socket *so) 3627 { 3628 3629 sowwakeup(so); 3630 } 3631 3632 void 3633 so_sorwakeup_locked(struct socket *so) 3634 { 3635 3636 sorwakeup_locked(so); 3637 } 3638 3639 void 3640 so_sowwakeup_locked(struct socket *so) 3641 { 3642 3643 sowwakeup_locked(so); 3644 } 3645 3646 void 3647 so_lock(struct socket *so) 3648 { 3649 SOCK_LOCK(so); 3650 } 3651 3652 void 3653 so_unlock(struct socket *so) 3654 { 3655 SOCK_UNLOCK(so); 3656 } 3657
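
/*
 * Illustrative sketch (not part of the original sources): an external
 * consumer built on the accessor functions above, one that must not reach
 * into struct socket directly, might queue data and wake readers roughly as
 * follows.  foo_deliver() is hypothetical, and the use of sbappend() here is
 * an assumption about how the caller chooses to fill the receive buffer.
 *
 *	static void
 *	foo_deliver(struct socket *so, struct mbuf *m)
 *	{
 *
 *		if (so_state_get(so) & SS_ISCONNECTED) {
 *			sbappend(so_sockbuf_rcv(so), m);
 *			so_sorwakeup(so);
 *		} else
 *			m_freem(m);
 *	}
 */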