1 /*- 2 * Copyright (c) 1982, 1986, 1988, 1990, 1993 3 * The Regents of the University of California. All rights reserved. 4 * Copyright (c) 2004 The FreeBSD Foundation 5 * Copyright (c) 2004-2006 Robert N. M. Watson 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 4. Neither the name of the University nor the names of its contributors 16 * may be used to endorse or promote products derived from this software 17 * without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 * 31 * @(#)uipc_socket.c 8.3 (Berkeley) 4/15/94 32 */ 33 34 /* 35 * Comments on the socket life cycle: 36 * 37 * soalloc() sets of socket layer state for a socket, called only by 38 * socreate() and sonewconn(). Socket layer private. 39 * 40 * sodealloc() tears down socket layer state for a socket, called only by 41 * sofree() and sonewconn(). Socket layer private. 42 * 43 * pru_attach() associates protocol layer state with an allocated socket; 44 * called only once, may fail, aborting socket allocation. This is called 45 * from socreate() and sonewconn(). Socket layer private. 46 * 47 * pru_detach() disassociates protocol layer state from an attached socket, 48 * and will be called exactly once for sockets in which pru_attach() has 49 * been successfully called. If pru_attach() returned an error, 50 * pru_detach() will not be called. Socket layer private. 51 * 52 * pru_abort() and pru_close() notify the protocol layer that the last 53 * consumer of a socket is starting to tear down the socket, and that the 54 * protocol should terminate the connection. Historically, pru_abort() also 55 * detached protocol state from the socket state, but this is no longer the 56 * case. 57 * 58 * socreate() creates a socket and attaches protocol state. This is a public 59 * interface that may be used by socket layer consumers to create new 60 * sockets. 61 * 62 * sonewconn() creates a socket and attaches protocol state. This is a 63 * public interface that may be used by protocols to create new sockets when 64 * a new connection is received and will be available for accept() on a 65 * listen socket. 66 * 67 * soclose() destroys a socket after possibly waiting for it to disconnect. 68 * This is a public interface that socket consumers should use to close and 69 * release a socket when done with it. 
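 *
 * As an illustrative sketch only (the AF_INET/SOCK_DGRAM/IPPROTO_UDP
 * parameters and the thread pointer td below are placeholders, not taken
 * from this file), an in-kernel consumer would typically pair these calls:
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_DGRAM, IPPROTO_UDP,
 *	    td->td_ucred, td);
 *	if (error != 0)
 *		return (error);
 *	... use the socket via sosend()/soreceive() ...
 *	error = soclose(so);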
70 * 71 * soabort() destroys a socket without waiting for it to disconnect (used 72 * only for incoming connections that are already partially or fully 73 * connected). This is used internally by the socket layer when clearing 74 * listen socket queues (due to overflow or close on the listen socket), but 75 * is also a public interface protocols may use to abort connections in 76 * their incomplete listen queues should they no longer be required. Sockets 77 * placed in completed connection listen queues should not be aborted for 78 * reasons described in the comment above the soclose() implementation. This 79 * is not a general purpose close routine, and except in the specific 80 * circumstances described here, should not be used. 81 * 82 * sofree() will free a socket and its protocol state if all references on 83 * the socket have been released, and is the public interface to attempt to 84 * free a socket when a reference is removed. This is a socket layer private 85 * interface. 86 * 87 * NOTE: In addition to socreate() and soclose(), which provide a single 88 * socket reference to the consumer to be managed as required, there are two 89 * calls to explicitly manage socket references, soref(), and sorele(). 90 * Currently, these are generally required only when transitioning a socket 91 * from a listen queue to a file descriptor, in order to prevent garbage 92 * collection of the socket at an untimely moment. For a number of reasons, 93 * these interfaces are not preferred, and should be avoided. 94 */ 95 96 #include <sys/cdefs.h> 97 __FBSDID("$FreeBSD$"); 98 99 #include "opt_inet.h" 100 #include "opt_mac.h" 101 #include "opt_zero.h" 102 #include "opt_compat.h" 103 104 #include <sys/param.h> 105 #include <sys/systm.h> 106 #include <sys/fcntl.h> 107 #include <sys/limits.h> 108 #include <sys/lock.h> 109 #include <sys/mac.h> 110 #include <sys/malloc.h> 111 #include <sys/mbuf.h> 112 #include <sys/mutex.h> 113 #include <sys/domain.h> 114 #include <sys/file.h> /* for struct knote */ 115 #include <sys/kernel.h> 116 #include <sys/event.h> 117 #include <sys/eventhandler.h> 118 #include <sys/poll.h> 119 #include <sys/proc.h> 120 #include <sys/protosw.h> 121 #include <sys/socket.h> 122 #include <sys/socketvar.h> 123 #include <sys/resourcevar.h> 124 #include <sys/signalvar.h> 125 #include <sys/sysctl.h> 126 #include <sys/uio.h> 127 #include <sys/jail.h> 128 129 #include <vm/uma.h> 130 131 #ifdef COMPAT_IA32 132 #include <sys/mount.h> 133 #include <compat/freebsd32/freebsd32.h> 134 135 extern struct sysentvec ia32_freebsd_sysvec; 136 #endif 137 138 static int soreceive_rcvoob(struct socket *so, struct uio *uio, 139 int flags); 140 141 static void filt_sordetach(struct knote *kn); 142 static int filt_soread(struct knote *kn, long hint); 143 static void filt_sowdetach(struct knote *kn); 144 static int filt_sowrite(struct knote *kn, long hint); 145 static int filt_solisten(struct knote *kn, long hint); 146 147 static struct filterops solisten_filtops = 148 { 1, NULL, filt_sordetach, filt_solisten }; 149 static struct filterops soread_filtops = 150 { 1, NULL, filt_sordetach, filt_soread }; 151 static struct filterops sowrite_filtops = 152 { 1, NULL, filt_sowdetach, filt_sowrite }; 153 154 uma_zone_t socket_zone; 155 so_gen_t so_gencnt; /* generation count for sockets */ 156 157 int maxsockets; 158 159 MALLOC_DEFINE(M_SONAME, "soname", "socket name"); 160 MALLOC_DEFINE(M_PCB, "pcb", "protocol control block"); 161 162 static int somaxconn = SOMAXCONN; 163 static int somaxconn_sysctl(SYSCTL_HANDLER_ARGS); 
/* XXX: we don't have SYSCTL_USHORT */
SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLTYPE_UINT | CTLFLAG_RW,
    0, sizeof(int), somaxconn_sysctl, "I", "Maximum pending socket connection "
    "queue size");
static int numopensockets;
SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
    &numopensockets, 0, "Number of open sockets");
#ifdef ZERO_COPY_SOCKETS
/* These aren't static because they're used in other files. */
int so_zero_copy_send = 1;
int so_zero_copy_receive = 1;
SYSCTL_NODE(_kern_ipc, OID_AUTO, zero_copy, CTLFLAG_RD, 0,
    "Zero copy controls");
SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, receive, CTLFLAG_RW,
    &so_zero_copy_receive, 0, "Enable zero copy receive");
SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, send, CTLFLAG_RW,
    &so_zero_copy_send, 0, "Enable zero copy send");
#endif /* ZERO_COPY_SOCKETS */

/*
 * accept_mtx locks down per-socket fields relating to accept queues.  See
 * socketvar.h for an annotation of the protected fields of struct socket.
 */
struct mtx accept_mtx;
MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);

/*
 * so_global_mtx protects so_gencnt, numopensockets, and the per-socket
 * so_gencnt field.
 */
static struct mtx so_global_mtx;
MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_global", MTX_DEF);

/*
 * General IPC sysctl name space, used by sockets and a variety of other IPC
 * types.
 */
SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");

/*
 * Sysctl to get and set the maximum global sockets limit.  Notify protocols
 * of the change so that they can update their dependent limits as required.
 */
static int
sysctl_maxsockets(SYSCTL_HANDLER_ARGS)
{
    int error, newmaxsockets;

    newmaxsockets = maxsockets;
    error = sysctl_handle_int(oidp, &newmaxsockets, sizeof(int), req);
    if (error == 0 && req->newptr) {
        if (newmaxsockets > maxsockets) {
            maxsockets = newmaxsockets;
            if (maxsockets > ((maxfiles / 4) * 3)) {
                maxfiles = (maxsockets * 5) / 4;
                maxfilesperproc = (maxfiles * 9) / 10;
            }
            EVENTHANDLER_INVOKE(maxsockets_change);
        } else
            error = EINVAL;
    }
    return (error);
}

SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets, CTLTYPE_INT|CTLFLAG_RW,
    &maxsockets, 0, sysctl_maxsockets, "IU",
    "Maximum number of sockets available");

/*
 * Initialise maxsockets.
 */
static void
init_maxsockets(void *ignored)
{

    TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
    maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters));
}
SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);

/*
 * Socket operation routines.  These routines are called by the routines in
 * sys_socket.c or from a system process, and implement the semantics of
 * socket operations by switching out to the protocol specific routines.
 */

/*
 * Get a socket structure from our zone, and initialize it.  Note that it
 * would probably be better to allocate socket and PCB at the same time, but
 * I'm not convinced that all the protocols can be easily modified to do
 * this.
 *
 * soalloc() returns a socket with a ref count of 0.
 */
static struct socket *
soalloc(int mflags)
{
    struct socket *so;

    so = uma_zalloc(socket_zone, mflags | M_ZERO);
    if (so == NULL)
        return (NULL);
#ifdef MAC
    if (mac_init_socket(so, mflags) != 0) {
        uma_zfree(socket_zone, so);
        return (NULL);
    }
#endif
    SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
    SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
    TAILQ_INIT(&so->so_aiojobq);
    mtx_lock(&so_global_mtx);
    so->so_gencnt = ++so_gencnt;
    ++numopensockets;
    mtx_unlock(&so_global_mtx);
    return (so);
}

/*
 * Free the storage associated with a socket at the socket layer, tear down
 * locks, labels, etc.  All protocol state is assumed already to have been
 * torn down (and possibly never set up) by the caller.
 */
static void
sodealloc(struct socket *so)
{

    KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
    KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL"));

    mtx_lock(&so_global_mtx);
    so->so_gencnt = ++so_gencnt;
    --numopensockets;    /* Could be below, but faster here. */
    mtx_unlock(&so_global_mtx);
    if (so->so_rcv.sb_hiwat)
        (void)chgsbsize(so->so_cred->cr_uidinfo,
            &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
    if (so->so_snd.sb_hiwat)
        (void)chgsbsize(so->so_cred->cr_uidinfo,
            &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
    /* Remove accept filter if one is present. */
    if (so->so_accf != NULL)
        do_setopt_accept_filter(so, NULL);
#endif
#ifdef MAC
    mac_destroy_socket(so);
#endif
    crfree(so->so_cred);
    SOCKBUF_LOCK_DESTROY(&so->so_snd);
    SOCKBUF_LOCK_DESTROY(&so->so_rcv);
    uma_zfree(socket_zone, so);
}

/*
 * socreate returns a socket with a ref count of 1.  The socket should be
 * closed with soclose().
 */
int
socreate(dom, aso, type, proto, cred, td)
    int dom;
    struct socket **aso;
    int type;
    int proto;
    struct ucred *cred;
    struct thread *td;
{
    struct protosw *prp;
    struct socket *so;
    int error;

    if (proto)
        prp = pffindproto(dom, proto, type);
    else
        prp = pffindtype(dom, type);

    if (prp == NULL || prp->pr_usrreqs->pru_attach == NULL ||
        prp->pr_usrreqs->pru_attach == pru_attach_notsupp)
        return (EPROTONOSUPPORT);

    if (jailed(cred) && jail_socket_unixiproute_only &&
        prp->pr_domain->dom_family != PF_LOCAL &&
        prp->pr_domain->dom_family != PF_INET &&
        prp->pr_domain->dom_family != PF_ROUTE) {
        return (EPROTONOSUPPORT);
    }

    if (prp->pr_type != type)
        return (EPROTOTYPE);
    so = soalloc(M_WAITOK);
    if (so == NULL)
        return (ENOBUFS);

    TAILQ_INIT(&so->so_incomp);
    TAILQ_INIT(&so->so_comp);
    so->so_type = type;
    so->so_cred = crhold(cred);
    so->so_proto = prp;
#ifdef MAC
    mac_create_socket(cred, so);
#endif
    knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv),
        NULL, NULL, NULL);
    knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd),
        NULL, NULL, NULL);
    so->so_count = 1;
    error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
    if (error) {
        KASSERT(so->so_count == 1, ("socreate: so_count %d",
            so->so_count));
        so->so_count = 0;
        sodealloc(so);
        return (error);
    }
    *aso = so;
    return (0);
}

#ifdef REGRESSION
static int regression_sonewconn_earlytest = 1;
SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW,
    &regression_sonewconn_earlytest, 0, "Perform early sonewconn limit test");
#endif

/*
 * When an attempt at a new connection is noted on a socket which accepts
 * connections, sonewconn is called.  If the connection is possible (subject
 * to space constraints, etc.) then we allocate a new structure, properly
 * linked into the data structure of the original socket, and return this.
 * Connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED.
 *
 * Note: the ref count on the socket is 0 on return.
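 *
 * As a hedged sketch of the usual protocol-side pattern (names and error
 * handling abbreviated; not code from this file), a protocol accepting a
 * new connection might do:
 *
 *	so = sonewconn(head, 0);
 *	if (so == NULL)
 *		... drop the incoming connection request ...
 *	... set up protocol state for so, complete the handshake ...
 *	soisconnected(so);
 *
 * passing a connstatus of 0 so that the new socket starts on the incomplete
 * queue and is later moved to the complete (accept) queue by
 * soisconnected().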
394 */ 395 struct socket * 396 sonewconn(head, connstatus) 397 register struct socket *head; 398 int connstatus; 399 { 400 register struct socket *so; 401 int over; 402 403 ACCEPT_LOCK(); 404 over = (head->so_qlen > 3 * head->so_qlimit / 2); 405 ACCEPT_UNLOCK(); 406 #ifdef REGRESSION 407 if (regression_sonewconn_earlytest && over) 408 #else 409 if (over) 410 #endif 411 return (NULL); 412 so = soalloc(M_NOWAIT); 413 if (so == NULL) 414 return (NULL); 415 if ((head->so_options & SO_ACCEPTFILTER) != 0) 416 connstatus = 0; 417 so->so_head = head; 418 so->so_type = head->so_type; 419 so->so_options = head->so_options &~ SO_ACCEPTCONN; 420 so->so_linger = head->so_linger; 421 so->so_state = head->so_state | SS_NOFDREF; 422 so->so_proto = head->so_proto; 423 so->so_cred = crhold(head->so_cred); 424 #ifdef MAC 425 SOCK_LOCK(head); 426 mac_create_socket_from_socket(head, so); 427 SOCK_UNLOCK(head); 428 #endif 429 knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv), 430 NULL, NULL, NULL); 431 knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd), 432 NULL, NULL, NULL); 433 if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat) || 434 (*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) { 435 sodealloc(so); 436 return (NULL); 437 } 438 so->so_rcv.sb_lowat = head->so_rcv.sb_lowat; 439 so->so_snd.sb_lowat = head->so_snd.sb_lowat; 440 so->so_rcv.sb_timeo = head->so_rcv.sb_timeo; 441 so->so_snd.sb_timeo = head->so_snd.sb_timeo; 442 so->so_state |= connstatus; 443 ACCEPT_LOCK(); 444 if (connstatus) { 445 TAILQ_INSERT_TAIL(&head->so_comp, so, so_list); 446 so->so_qstate |= SQ_COMP; 447 head->so_qlen++; 448 } else { 449 /* 450 * Keep removing sockets from the head until there's room for 451 * us to insert on the tail. In pre-locking revisions, this 452 * was a simple if(), but as we could be racing with other 453 * threads and soabort() requires dropping locks, we must 454 * loop waiting for the condition to be true. 455 */ 456 while (head->so_incqlen > head->so_qlimit) { 457 struct socket *sp; 458 sp = TAILQ_FIRST(&head->so_incomp); 459 TAILQ_REMOVE(&head->so_incomp, sp, so_list); 460 head->so_incqlen--; 461 sp->so_qstate &= ~SQ_INCOMP; 462 sp->so_head = NULL; 463 ACCEPT_UNLOCK(); 464 soabort(sp); 465 ACCEPT_LOCK(); 466 } 467 TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list); 468 so->so_qstate |= SQ_INCOMP; 469 head->so_incqlen++; 470 } 471 ACCEPT_UNLOCK(); 472 if (connstatus) { 473 sorwakeup(head); 474 wakeup_one(&head->so_timeo); 475 } 476 return (so); 477 } 478 479 int 480 sobind(so, nam, td) 481 struct socket *so; 482 struct sockaddr *nam; 483 struct thread *td; 484 { 485 486 return ((*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td)); 487 } 488 489 /* 490 * solisten() transitions a socket from a non-listening state to a listening 491 * state, but can also be used to update the listen queue depth on an 492 * existing listen socket. The protocol will call back into the sockets 493 * layer using solisten_proto_check() and solisten_proto() to check and set 494 * socket-layer listen state. Call backs are used so that the protocol can 495 * acquire both protocol and socket layer locks in whatever order is required 496 * by the protocol. 497 * 498 * Protocol implementors are advised to hold the socket lock across the 499 * socket-layer test and set to avoid races at the socket layer. 
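 *
 * A sketch of the resulting call sequence, assuming a protocol that follows
 * the advice above (PROTO_LOCK()/PROTO_UNLOCK() are illustrative stand-ins
 * for whatever protocol-layer locking is required, not real macros):
 *
 *	PROTO_LOCK();
 *	SOCK_LOCK(so);
 *	error = solisten_proto_check(so);
 *	if (error == 0)
 *		solisten_proto(so, backlog);
 *	SOCK_UNLOCK(so);
 *	PROTO_UNLOCK();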
 */
int
solisten(so, backlog, td)
    struct socket *so;
    int backlog;
    struct thread *td;
{

    return ((*so->so_proto->pr_usrreqs->pru_listen)(so, backlog, td));
}

int
solisten_proto_check(so)
    struct socket *so;
{

    SOCK_LOCK_ASSERT(so);

    if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
        SS_ISDISCONNECTING))
        return (EINVAL);
    return (0);
}

void
solisten_proto(so, backlog)
    struct socket *so;
    int backlog;
{

    SOCK_LOCK_ASSERT(so);

    if (backlog < 0 || backlog > somaxconn)
        backlog = somaxconn;
    so->so_qlimit = backlog;
    so->so_options |= SO_ACCEPTCONN;
}

/*
 * Attempt to free a socket.  This should really be sotryfree().
 *
 * sofree() will succeed if:
 *
 * - There are no outstanding file descriptor references or related consumers
 *   (so_count == 0).
 *
 * - The socket has been closed by user space, if ever open (SS_NOFDREF).
 *
 * - The protocol does not have an outstanding strong reference on the socket
 *   (SS_PROTOREF).
 *
 * - The socket is not in a completed connection queue, where a process has
 *   been notified that it is present; if it were removed, the user process
 *   could block in accept() despite select() saying the socket was ready.
 *
 * Otherwise, it will quietly abort so that a future call to sofree(), when
 * conditions are right, can succeed.
 */
void
sofree(so)
    struct socket *so;
{
    struct protosw *pr = so->so_proto;
    struct socket *head;

    ACCEPT_LOCK_ASSERT();
    SOCK_LOCK_ASSERT(so);

    if ((so->so_state & SS_NOFDREF) == 0 || so->so_count != 0 ||
        (so->so_state & SS_PROTOREF) || (so->so_qstate & SQ_COMP)) {
        SOCK_UNLOCK(so);
        ACCEPT_UNLOCK();
        return;
    }

    head = so->so_head;
    if (head != NULL) {
        KASSERT((so->so_qstate & SQ_COMP) != 0 ||
            (so->so_qstate & SQ_INCOMP) != 0,
            ("sofree: so_head != NULL, but neither SQ_COMP nor "
            "SQ_INCOMP"));
        KASSERT((so->so_qstate & SQ_COMP) == 0 ||
            (so->so_qstate & SQ_INCOMP) == 0,
            ("sofree: so->so_qstate is SQ_COMP and also SQ_INCOMP"));
        TAILQ_REMOVE(&head->so_incomp, so, so_list);
        head->so_incqlen--;
        so->so_qstate &= ~SQ_INCOMP;
        so->so_head = NULL;
    }
    KASSERT((so->so_qstate & SQ_COMP) == 0 &&
        (so->so_qstate & SQ_INCOMP) == 0,
        ("sofree: so_head == NULL, but still SQ_COMP(%d) or SQ_INCOMP(%d)",
        so->so_qstate & SQ_COMP, so->so_qstate & SQ_INCOMP));
    SOCK_UNLOCK(so);
    ACCEPT_UNLOCK();

    /*
     * From this point on, we assume that no other references to this
     * socket exist anywhere else in the stack.  Therefore, no locks need
     * to be acquired or held.
     *
     * We used to do a lot of socket buffer and socket locking here, as
     * well as invoke sorflush() and perform wakeups.  The direct call to
     * dom_dispose() and sbrelease_internal() are an inlining of what was
     * necessary from sorflush().
     *
     * Notice that the socket buffer and kqueue state are torn down
     * before calling pru_detach.  This means that protocols should not
     * assume they can perform socket wakeups, etc, in their detach
     * code.
610 */ 611 KASSERT((so->so_snd.sb_flags & SB_LOCK) == 0, ("sofree: snd sblock")); 612 KASSERT((so->so_rcv.sb_flags & SB_LOCK) == 0, ("sofree: rcv sblock")); 613 sbdestroy(&so->so_snd, so); 614 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL) 615 (*pr->pr_domain->dom_dispose)(so->so_rcv.sb_mb); 616 sbdestroy(&so->so_rcv, so); 617 if (pr->pr_usrreqs->pru_detach != NULL) 618 (*pr->pr_usrreqs->pru_detach)(so); 619 knlist_destroy(&so->so_rcv.sb_sel.si_note); 620 knlist_destroy(&so->so_snd.sb_sel.si_note); 621 sodealloc(so); 622 } 623 624 /* 625 * Close a socket on last file table reference removal. Initiate disconnect 626 * if connected. Free socket when disconnect complete. 627 * 628 * This function will sorele() the socket. Note that soclose() may be called 629 * prior to the ref count reaching zero. The actual socket structure will 630 * not be freed until the ref count reaches zero. 631 */ 632 int 633 soclose(so) 634 struct socket *so; 635 { 636 int error = 0; 637 638 KASSERT(!(so->so_state & SS_NOFDREF), ("soclose: SS_NOFDREF on enter")); 639 640 funsetown(&so->so_sigio); 641 if (so->so_options & SO_ACCEPTCONN) { 642 struct socket *sp; 643 ACCEPT_LOCK(); 644 while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) { 645 TAILQ_REMOVE(&so->so_incomp, sp, so_list); 646 so->so_incqlen--; 647 sp->so_qstate &= ~SQ_INCOMP; 648 sp->so_head = NULL; 649 ACCEPT_UNLOCK(); 650 soabort(sp); 651 ACCEPT_LOCK(); 652 } 653 while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) { 654 TAILQ_REMOVE(&so->so_comp, sp, so_list); 655 so->so_qlen--; 656 sp->so_qstate &= ~SQ_COMP; 657 sp->so_head = NULL; 658 ACCEPT_UNLOCK(); 659 soabort(sp); 660 ACCEPT_LOCK(); 661 } 662 ACCEPT_UNLOCK(); 663 } 664 if (so->so_state & SS_ISCONNECTED) { 665 if ((so->so_state & SS_ISDISCONNECTING) == 0) { 666 error = sodisconnect(so); 667 if (error) 668 goto drop; 669 } 670 if (so->so_options & SO_LINGER) { 671 if ((so->so_state & SS_ISDISCONNECTING) && 672 (so->so_state & SS_NBIO)) 673 goto drop; 674 while (so->so_state & SS_ISCONNECTED) { 675 error = tsleep(&so->so_timeo, 676 PSOCK | PCATCH, "soclos", so->so_linger * hz); 677 if (error) 678 break; 679 } 680 } 681 } 682 683 drop: 684 if (so->so_proto->pr_usrreqs->pru_close != NULL) 685 (*so->so_proto->pr_usrreqs->pru_close)(so); 686 ACCEPT_LOCK(); 687 SOCK_LOCK(so); 688 KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF")); 689 so->so_state |= SS_NOFDREF; 690 sorele(so); 691 return (error); 692 } 693 694 /* 695 * soabort() is used to abruptly tear down a connection, such as when a 696 * resource limit is reached (listen queue depth exceeded), or if a listen 697 * socket is closed while there are sockets waiting to be accepted. 698 * 699 * This interface is tricky, because it is called on an unreferenced socket, 700 * and must be called only by a thread that has actually removed the socket 701 * from the listen queue it was on, or races with other threads are risked. 702 * 703 * This interface will call into the protocol code, so must not be called 704 * with any socket locks held. Protocols do call it while holding their own 705 * recursible protocol mutexes, but this is something that should be subject 706 * to review in the future. 707 */ 708 void 709 soabort(so) 710 struct socket *so; 711 { 712 713 /* 714 * In as much as is possible, assert that no references to this 715 * socket are held. This is not quite the same as asserting that the 716 * current thread is responsible for arranging for no references, but 717 * is as close as we can get for now. 
     */
    KASSERT(so->so_count == 0, ("soabort: so_count"));
    KASSERT((so->so_state & SS_PROTOREF) == 0, ("soabort: SS_PROTOREF"));
    KASSERT(so->so_state & SS_NOFDREF, ("soabort: !SS_NOFDREF"));
    KASSERT((so->so_qstate & SQ_COMP) == 0, ("soabort: SQ_COMP"));
    KASSERT((so->so_qstate & SQ_INCOMP) == 0, ("soabort: SQ_INCOMP"));

    if (so->so_proto->pr_usrreqs->pru_abort != NULL)
        (*so->so_proto->pr_usrreqs->pru_abort)(so);
    ACCEPT_LOCK();
    SOCK_LOCK(so);
    sofree(so);
}

int
soaccept(so, nam)
    struct socket *so;
    struct sockaddr **nam;
{
    int error;

    SOCK_LOCK(so);
    KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF"));
    so->so_state &= ~SS_NOFDREF;
    SOCK_UNLOCK(so);
    error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
    return (error);
}

int
soconnect(so, nam, td)
    struct socket *so;
    struct sockaddr *nam;
    struct thread *td;
{
    int error;

    if (so->so_options & SO_ACCEPTCONN)
        return (EOPNOTSUPP);
    /*
     * If protocol is connection-based, can only connect once.
     * Otherwise, if connected, try to disconnect first.  This allows
     * user to disconnect by connecting to, e.g., a null address.
     */
    if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
        ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
        (error = sodisconnect(so)))) {
        error = EISCONN;
    } else {
        /*
         * Prevent accumulated error from previous connection from
         * biting us.
         */
        so->so_error = 0;
        error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, td);
    }

    return (error);
}

int
soconnect2(so1, so2)
    struct socket *so1;
    struct socket *so2;
{

    return ((*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2));
}

int
sodisconnect(so)
    struct socket *so;
{
    int error;

    if ((so->so_state & SS_ISCONNECTED) == 0)
        return (ENOTCONN);
    if (so->so_state & SS_ISDISCONNECTING)
        return (EALREADY);
    error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
    return (error);
}

#ifdef ZERO_COPY_SOCKETS
struct so_zerocopy_stats {
    int size_ok;
    int align_ok;
    int found_ifp;
};
struct so_zerocopy_stats so_zerocp_stats = {0, 0, 0};
#include <netinet/in.h>
#include <net/route.h>
#include <netinet/in_pcb.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#endif /* ZERO_COPY_SOCKETS */

/*
 * sosend_copyin() accepts a uio and prepares an mbuf chain holding part or
 * all of the data referenced by the uio.  If desired, it uses zero-copy.
 * *space will be updated to reflect data copied in.
 *
 * NB: If atomic I/O is requested, the caller must already have checked that
 * space can hold resid bytes.
 *
 * NB: In the event of an error, the caller may need to free the partial
 * chain pointed to by *retmp.  The contents of both *uio and *space may be
 * modified even in the case of an error.
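 *
 * A simplified caller sketch (error handling and the MSG_OOB space
 * adjustment omitted), mirroring how sosend_dgram() and sosend_generic()
 * below use this helper:
 *
 *	space = sbspace(&so->so_snd);
 *	error = sosend_copyin(uio, &top, atomic, &space, flags);
 *	if (error != 0)
 *		... free any partial chain in top and return ...
 *	resid = uio->uio_resid;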
 */
static int
sosend_copyin(struct uio *uio, struct mbuf **retmp, int atomic, long *space,
    int flags)
{
    struct mbuf *m, **mp, *top;
    long len, resid;
    int error;
#ifdef ZERO_COPY_SOCKETS
    int cow_send;
#endif

    *retmp = top = NULL;
    mp = &top;
    len = 0;
    resid = uio->uio_resid;
    error = 0;
    do {
#ifdef ZERO_COPY_SOCKETS
        cow_send = 0;
#endif /* ZERO_COPY_SOCKETS */
        if (resid >= MINCLSIZE) {
#ifdef ZERO_COPY_SOCKETS
            if (top == NULL) {
                MGETHDR(m, M_TRYWAIT, MT_DATA);
                if (m == NULL) {
                    error = ENOBUFS;
                    goto out;
                }
                m->m_pkthdr.len = 0;
                m->m_pkthdr.rcvif = NULL;
            } else {
                MGET(m, M_TRYWAIT, MT_DATA);
                if (m == NULL) {
                    error = ENOBUFS;
                    goto out;
                }
            }
            if (so_zero_copy_send &&
                resid >= PAGE_SIZE &&
                *space >= PAGE_SIZE &&
                uio->uio_iov->iov_len >= PAGE_SIZE) {
                so_zerocp_stats.size_ok++;
                so_zerocp_stats.align_ok++;
                cow_send = socow_setup(m, uio);
                len = cow_send;
            }
            if (!cow_send) {
                MCLGET(m, M_TRYWAIT);
                if ((m->m_flags & M_EXT) == 0) {
                    m_free(m);
                    m = NULL;
                } else {
                    len = min(min(MCLBYTES, resid),
                        *space);
                }
            }
#else /* ZERO_COPY_SOCKETS */
            if (top == NULL) {
                m = m_getcl(M_TRYWAIT, MT_DATA, M_PKTHDR);
                m->m_pkthdr.len = 0;
                m->m_pkthdr.rcvif = NULL;
            } else
                m = m_getcl(M_TRYWAIT, MT_DATA, 0);
            len = min(min(MCLBYTES, resid), *space);
#endif /* ZERO_COPY_SOCKETS */
        } else {
            if (top == NULL) {
                m = m_gethdr(M_TRYWAIT, MT_DATA);
                m->m_pkthdr.len = 0;
                m->m_pkthdr.rcvif = NULL;

                len = min(min(MHLEN, resid), *space);
                /*
                 * For datagram protocols, leave room
                 * for protocol headers in first mbuf.
                 */
                if (atomic && m && len < MHLEN)
                    MH_ALIGN(m, len);
            } else {
                m = m_get(M_TRYWAIT, MT_DATA);
                len = min(min(MLEN, resid), *space);
            }
        }
        if (m == NULL) {
            error = ENOBUFS;
            goto out;
        }

        *space -= len;
#ifdef ZERO_COPY_SOCKETS
        if (cow_send)
            error = 0;
        else
#endif /* ZERO_COPY_SOCKETS */
        error = uiomove(mtod(m, void *), (int)len, uio);
        resid = uio->uio_resid;
        m->m_len = len;
        *mp = m;
        top->m_pkthdr.len += len;
        if (error)
            goto out;
        mp = &m->m_next;
        if (resid <= 0) {
            if (flags & MSG_EOR)
                top->m_flags |= M_EOR;
            break;
        }
    } while (*space > 0 && atomic);
out:
    *retmp = top;
    return (error);
}

#define SBLOCKWAIT(f)    (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)

int
sosend_dgram(so, addr, uio, top, control, flags, td)
    struct socket *so;
    struct sockaddr *addr;
    struct uio *uio;
    struct mbuf *top;
    struct mbuf *control;
    int flags;
    struct thread *td;
{
    long space, resid;
    int clen = 0, error, dontroute;
    int atomic = sosendallatonce(so) || top;

    KASSERT(so->so_type == SOCK_DGRAM, ("sodgram_send: !SOCK_DGRAM"));
    KASSERT(so->so_proto->pr_flags & PR_ATOMIC,
        ("sodgram_send: !PR_ATOMIC"));

    if (uio != NULL)
        resid = uio->uio_resid;
    else
        resid = top->m_pkthdr.len;
    /*
     * In theory resid should be unsigned.  However, space must be
     * signed, as it might be less than 0 if we over-committed, and we
     * must use a signed comparison of space and resid.  On the other
     * hand, a negative resid causes us to loop sending 0-length
     * segments to the protocol.
971 * 972 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM 973 * type sockets since that's an error. 974 */ 975 if (resid < 0) { 976 error = EINVAL; 977 goto out; 978 } 979 980 dontroute = 981 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0; 982 if (td != NULL) 983 td->td_proc->p_stats->p_ru.ru_msgsnd++; 984 if (control != NULL) 985 clen = control->m_len; 986 987 SOCKBUF_LOCK(&so->so_snd); 988 if (so->so_snd.sb_state & SBS_CANTSENDMORE) { 989 SOCKBUF_UNLOCK(&so->so_snd); 990 error = EPIPE; 991 goto out; 992 } 993 if (so->so_error) { 994 error = so->so_error; 995 so->so_error = 0; 996 SOCKBUF_UNLOCK(&so->so_snd); 997 goto out; 998 } 999 if ((so->so_state & SS_ISCONNECTED) == 0) { 1000 /* 1001 * `sendto' and `sendmsg' is allowed on a connection-based 1002 * socket if it supports implied connect. Return ENOTCONN if 1003 * not connected and no address is supplied. 1004 */ 1005 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) && 1006 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) { 1007 if ((so->so_state & SS_ISCONFIRMING) == 0 && 1008 !(resid == 0 && clen != 0)) { 1009 SOCKBUF_UNLOCK(&so->so_snd); 1010 error = ENOTCONN; 1011 goto out; 1012 } 1013 } else if (addr == NULL) { 1014 if (so->so_proto->pr_flags & PR_CONNREQUIRED) 1015 error = ENOTCONN; 1016 else 1017 error = EDESTADDRREQ; 1018 SOCKBUF_UNLOCK(&so->so_snd); 1019 goto out; 1020 } 1021 } 1022 1023 /* 1024 * Do we need MSG_OOB support in SOCK_DGRAM? Signs here may be a 1025 * problem and need fixing. 1026 */ 1027 space = sbspace(&so->so_snd); 1028 if (flags & MSG_OOB) 1029 space += 1024; 1030 space -= clen; 1031 SOCKBUF_UNLOCK(&so->so_snd); 1032 if (resid > space) { 1033 error = EMSGSIZE; 1034 goto out; 1035 } 1036 if (uio == NULL) { 1037 resid = 0; 1038 if (flags & MSG_EOR) 1039 top->m_flags |= M_EOR; 1040 } else { 1041 error = sosend_copyin(uio, &top, atomic, &space, flags); 1042 if (error) 1043 goto out; 1044 resid = uio->uio_resid; 1045 } 1046 KASSERT(resid == 0, ("sosend_dgram: resid != 0")); 1047 /* 1048 * XXXRW: Frobbing SO_DONTROUTE here is even worse without sblock 1049 * than with. 1050 */ 1051 if (dontroute) { 1052 SOCK_LOCK(so); 1053 so->so_options |= SO_DONTROUTE; 1054 SOCK_UNLOCK(so); 1055 } 1056 /* 1057 * XXX all the SBS_CANTSENDMORE checks previously done could be out 1058 * of date. We could have recieved a reset packet in an interrupt or 1059 * maybe we slept while doing page faults in uiomove() etc. We could 1060 * probably recheck again inside the locking protection here, but 1061 * there are probably other places that this also happens. We must 1062 * rethink this. 1063 */ 1064 error = (*so->so_proto->pr_usrreqs->pru_send)(so, 1065 (flags & MSG_OOB) ? PRUS_OOB : 1066 /* 1067 * If the user set MSG_EOF, the protocol understands this flag and 1068 * nothing left to send then use PRU_SEND_EOF instead of PRU_SEND. 1069 */ 1070 ((flags & MSG_EOF) && 1071 (so->so_proto->pr_flags & PR_IMPLOPCL) && 1072 (resid <= 0)) ? 1073 PRUS_EOF : 1074 /* If there is more to send set PRUS_MORETOCOME */ 1075 (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0, 1076 top, addr, control, td); 1077 if (dontroute) { 1078 SOCK_LOCK(so); 1079 so->so_options &= ~SO_DONTROUTE; 1080 SOCK_UNLOCK(so); 1081 } 1082 clen = 0; 1083 control = NULL; 1084 top = NULL; 1085 out: 1086 if (top != NULL) 1087 m_freem(top); 1088 if (control != NULL) 1089 m_freem(control); 1090 return (error); 1091 } 1092 1093 /* 1094 * Send on a socket. If send must go all at once and message is larger than 1095 * send buffering, then hard error. 
Lock against other senders. If must go 1096 * all at once and not enough room now, then inform user that this would 1097 * block and do nothing. Otherwise, if nonblocking, send as much as 1098 * possible. The data to be sent is described by "uio" if nonzero, otherwise 1099 * by the mbuf chain "top" (which must be null if uio is not). Data provided 1100 * in mbuf chain must be small enough to send all at once. 1101 * 1102 * Returns nonzero on error, timeout or signal; callers must check for short 1103 * counts if EINTR/ERESTART are returned. Data and control buffers are freed 1104 * on return. 1105 */ 1106 #define snderr(errno) { error = (errno); goto release; } 1107 int 1108 sosend_generic(so, addr, uio, top, control, flags, td) 1109 struct socket *so; 1110 struct sockaddr *addr; 1111 struct uio *uio; 1112 struct mbuf *top; 1113 struct mbuf *control; 1114 int flags; 1115 struct thread *td; 1116 { 1117 long space, resid; 1118 int clen = 0, error, dontroute; 1119 int atomic = sosendallatonce(so) || top; 1120 1121 if (uio != NULL) 1122 resid = uio->uio_resid; 1123 else 1124 resid = top->m_pkthdr.len; 1125 /* 1126 * In theory resid should be unsigned. However, space must be 1127 * signed, as it might be less than 0 if we over-committed, and we 1128 * must use a signed comparison of space and resid. On the other 1129 * hand, a negative resid causes us to loop sending 0-length 1130 * segments to the protocol. 1131 * 1132 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM 1133 * type sockets since that's an error. 1134 */ 1135 if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) { 1136 error = EINVAL; 1137 goto out; 1138 } 1139 1140 dontroute = 1141 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 && 1142 (so->so_proto->pr_flags & PR_ATOMIC); 1143 if (td != NULL) 1144 td->td_proc->p_stats->p_ru.ru_msgsnd++; 1145 if (control != NULL) 1146 clen = control->m_len; 1147 1148 SOCKBUF_LOCK(&so->so_snd); 1149 restart: 1150 SOCKBUF_LOCK_ASSERT(&so->so_snd); 1151 error = sblock(&so->so_snd, SBLOCKWAIT(flags)); 1152 if (error) 1153 goto out_locked; 1154 do { 1155 SOCKBUF_LOCK_ASSERT(&so->so_snd); 1156 if (so->so_snd.sb_state & SBS_CANTSENDMORE) 1157 snderr(EPIPE); 1158 if (so->so_error) { 1159 error = so->so_error; 1160 so->so_error = 0; 1161 goto release; 1162 } 1163 if ((so->so_state & SS_ISCONNECTED) == 0) { 1164 /* 1165 * `sendto' and `sendmsg' is allowed on a connection- 1166 * based socket if it supports implied connect. 1167 * Return ENOTCONN if not connected and no address is 1168 * supplied. 1169 */ 1170 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) && 1171 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) { 1172 if ((so->so_state & SS_ISCONFIRMING) == 0 && 1173 !(resid == 0 && clen != 0)) 1174 snderr(ENOTCONN); 1175 } else if (addr == NULL) 1176 snderr(so->so_proto->pr_flags & PR_CONNREQUIRED ? 
1177 ENOTCONN : EDESTADDRREQ); 1178 } 1179 space = sbspace(&so->so_snd); 1180 if (flags & MSG_OOB) 1181 space += 1024; 1182 if ((atomic && resid > so->so_snd.sb_hiwat) || 1183 clen > so->so_snd.sb_hiwat) 1184 snderr(EMSGSIZE); 1185 if (space < resid + clen && 1186 (atomic || space < so->so_snd.sb_lowat || space < clen)) { 1187 if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO)) 1188 snderr(EWOULDBLOCK); 1189 sbunlock(&so->so_snd); 1190 error = sbwait(&so->so_snd); 1191 if (error) 1192 goto out_locked; 1193 goto restart; 1194 } 1195 SOCKBUF_UNLOCK(&so->so_snd); 1196 space -= clen; 1197 do { 1198 if (uio == NULL) { 1199 resid = 0; 1200 if (flags & MSG_EOR) 1201 top->m_flags |= M_EOR; 1202 } else { 1203 error = sosend_copyin(uio, &top, atomic, 1204 &space, flags); 1205 if (error != 0) { 1206 SOCKBUF_LOCK(&so->so_snd); 1207 goto release; 1208 } 1209 resid = uio->uio_resid; 1210 } 1211 if (dontroute) { 1212 SOCK_LOCK(so); 1213 so->so_options |= SO_DONTROUTE; 1214 SOCK_UNLOCK(so); 1215 } 1216 /* 1217 * XXX all the SBS_CANTSENDMORE checks previously 1218 * done could be out of date. We could have recieved 1219 * a reset packet in an interrupt or maybe we slept 1220 * while doing page faults in uiomove() etc. We 1221 * could probably recheck again inside the locking 1222 * protection here, but there are probably other 1223 * places that this also happens. We must rethink 1224 * this. 1225 */ 1226 error = (*so->so_proto->pr_usrreqs->pru_send)(so, 1227 (flags & MSG_OOB) ? PRUS_OOB : 1228 /* 1229 * If the user set MSG_EOF, the protocol understands 1230 * this flag and nothing left to send then use 1231 * PRU_SEND_EOF instead of PRU_SEND. 1232 */ 1233 ((flags & MSG_EOF) && 1234 (so->so_proto->pr_flags & PR_IMPLOPCL) && 1235 (resid <= 0)) ? 1236 PRUS_EOF : 1237 /* If there is more to send set PRUS_MORETOCOME. */ 1238 (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0, 1239 top, addr, control, td); 1240 if (dontroute) { 1241 SOCK_LOCK(so); 1242 so->so_options &= ~SO_DONTROUTE; 1243 SOCK_UNLOCK(so); 1244 } 1245 clen = 0; 1246 control = NULL; 1247 top = NULL; 1248 if (error) { 1249 SOCKBUF_LOCK(&so->so_snd); 1250 goto release; 1251 } 1252 } while (resid && space > 0); 1253 SOCKBUF_LOCK(&so->so_snd); 1254 } while (resid); 1255 1256 release: 1257 SOCKBUF_LOCK_ASSERT(&so->so_snd); 1258 sbunlock(&so->so_snd); 1259 out_locked: 1260 SOCKBUF_LOCK_ASSERT(&so->so_snd); 1261 SOCKBUF_UNLOCK(&so->so_snd); 1262 out: 1263 if (top != NULL) 1264 m_freem(top); 1265 if (control != NULL) 1266 m_freem(control); 1267 return (error); 1268 } 1269 #undef snderr 1270 1271 int 1272 sosend(so, addr, uio, top, control, flags, td) 1273 struct socket *so; 1274 struct sockaddr *addr; 1275 struct uio *uio; 1276 struct mbuf *top; 1277 struct mbuf *control; 1278 int flags; 1279 struct thread *td; 1280 { 1281 1282 /* XXXRW: Temporary debugging. */ 1283 KASSERT(so->so_proto->pr_usrreqs->pru_sosend != sosend, 1284 ("sosend: protocol calls sosend")); 1285 1286 return (so->so_proto->pr_usrreqs->pru_sosend(so, addr, uio, top, 1287 control, flags, td)); 1288 } 1289 1290 /* 1291 * The part of soreceive() that implements reading non-inline out-of-band 1292 * data from a socket. For more complete comments, see soreceive(), from 1293 * which this code originated. 1294 * 1295 * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is 1296 * unable to return an mbuf chain to the caller. 
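 *
 * For illustration only (a userland sketch, not code from this file), this
 * path is typically reached when SO_OOBINLINE is clear and a process
 * issues:
 *
 *	char mark;
 *	(void)recv(s, &mark, 1, MSG_OOB);
 *
 * which arrives here via soreceive() with MSG_OOB set in flags.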
1297 */ 1298 static int 1299 soreceive_rcvoob(so, uio, flags) 1300 struct socket *so; 1301 struct uio *uio; 1302 int flags; 1303 { 1304 struct protosw *pr = so->so_proto; 1305 struct mbuf *m; 1306 int error; 1307 1308 KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0")); 1309 1310 m = m_get(M_TRYWAIT, MT_DATA); 1311 if (m == NULL) 1312 return (ENOBUFS); 1313 error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK); 1314 if (error) 1315 goto bad; 1316 do { 1317 #ifdef ZERO_COPY_SOCKETS 1318 if (so_zero_copy_receive) { 1319 int disposable; 1320 1321 if ((m->m_flags & M_EXT) 1322 && (m->m_ext.ext_type == EXT_DISPOSABLE)) 1323 disposable = 1; 1324 else 1325 disposable = 0; 1326 1327 error = uiomoveco(mtod(m, void *), 1328 min(uio->uio_resid, m->m_len), 1329 uio, disposable); 1330 } else 1331 #endif /* ZERO_COPY_SOCKETS */ 1332 error = uiomove(mtod(m, void *), 1333 (int) min(uio->uio_resid, m->m_len), uio); 1334 m = m_free(m); 1335 } while (uio->uio_resid && error == 0 && m); 1336 bad: 1337 if (m != NULL) 1338 m_freem(m); 1339 return (error); 1340 } 1341 1342 /* 1343 * Following replacement or removal of the first mbuf on the first mbuf chain 1344 * of a socket buffer, push necessary state changes back into the socket 1345 * buffer so that other consumers see the values consistently. 'nextrecord' 1346 * is the callers locally stored value of the original value of 1347 * sb->sb_mb->m_nextpkt which must be restored when the lead mbuf changes. 1348 * NOTE: 'nextrecord' may be NULL. 1349 */ 1350 static __inline void 1351 sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord) 1352 { 1353 1354 SOCKBUF_LOCK_ASSERT(sb); 1355 /* 1356 * First, update for the new value of nextrecord. If necessary, make 1357 * it the first record. 1358 */ 1359 if (sb->sb_mb != NULL) 1360 sb->sb_mb->m_nextpkt = nextrecord; 1361 else 1362 sb->sb_mb = nextrecord; 1363 1364 /* 1365 * Now update any dependent socket buffer fields to reflect the new 1366 * state. This is an expanded inline of SB_EMPTY_FIXUP(), with the 1367 * addition of a second clause that takes care of the case where 1368 * sb_mb has been updated, but remains the last record. 1369 */ 1370 if (sb->sb_mb == NULL) { 1371 sb->sb_mbtail = NULL; 1372 sb->sb_lastrecord = NULL; 1373 } else if (sb->sb_mb->m_nextpkt == NULL) 1374 sb->sb_lastrecord = sb->sb_mb; 1375 } 1376 1377 1378 /* 1379 * Implement receive operations on a socket. We depend on the way that 1380 * records are added to the sockbuf by sbappend. In particular, each record 1381 * (mbufs linked through m_next) must begin with an address if the protocol 1382 * so specifies, followed by an optional mbuf or mbufs containing ancillary 1383 * data, and then zero or more mbufs of data. In order to allow parallelism 1384 * between network receive and copying to user space, as well as avoid 1385 * sleeping with a mutex held, we release the socket buffer mutex during the 1386 * user space copy. Although the sockbuf is locked, new data may still be 1387 * appended, and thus we must maintain consistency of the sockbuf during that 1388 * time. 1389 * 1390 * The caller may receive the data as a single mbuf chain by supplying an 1391 * mbuf **mp0 for use in returning the chain. The uio is then used only for 1392 * the count in uio_resid. 
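 *
 * A hedged caller sketch of the mp0 mode (patterned after in-kernel
 * consumers such as NFS; auio, len, and flags are assumed to be set up by
 * the caller):
 *
 *	struct mbuf *m0 = NULL;
 *
 *	auio.uio_resid = len;	/* only the count is used with mp0 */
 *	error = soreceive(so, NULL, &auio, &m0, NULL, &flags);
 *
 * in which case the received data is returned as the mbuf chain m0 rather
 * than being copied out through the uio.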
1393 */ 1394 int 1395 soreceive_generic(so, psa, uio, mp0, controlp, flagsp) 1396 struct socket *so; 1397 struct sockaddr **psa; 1398 struct uio *uio; 1399 struct mbuf **mp0; 1400 struct mbuf **controlp; 1401 int *flagsp; 1402 { 1403 struct mbuf *m, **mp; 1404 int flags, len, error, offset; 1405 struct protosw *pr = so->so_proto; 1406 struct mbuf *nextrecord; 1407 int moff, type = 0; 1408 int mbuf_removed = 0; 1409 int orig_resid = uio->uio_resid; 1410 1411 mp = mp0; 1412 if (psa != NULL) 1413 *psa = NULL; 1414 if (controlp != NULL) 1415 *controlp = NULL; 1416 if (flagsp != NULL) 1417 flags = *flagsp &~ MSG_EOR; 1418 else 1419 flags = 0; 1420 if (flags & MSG_OOB) 1421 return (soreceive_rcvoob(so, uio, flags)); 1422 if (mp != NULL) 1423 *mp = NULL; 1424 if ((pr->pr_flags & PR_WANTRCVD) && (so->so_state & SS_ISCONFIRMING) 1425 && uio->uio_resid) 1426 (*pr->pr_usrreqs->pru_rcvd)(so, 0); 1427 1428 SOCKBUF_LOCK(&so->so_rcv); 1429 restart: 1430 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1431 error = sblock(&so->so_rcv, SBLOCKWAIT(flags)); 1432 if (error) 1433 goto out; 1434 1435 m = so->so_rcv.sb_mb; 1436 /* 1437 * If we have less data than requested, block awaiting more (subject 1438 * to any timeout) if: 1439 * 1. the current count is less than the low water mark, or 1440 * 2. MSG_WAITALL is set, and it is possible to do the entire 1441 * receive operation at once if we block (resid <= hiwat). 1442 * 3. MSG_DONTWAIT is not set 1443 * If MSG_WAITALL is set but resid is larger than the receive buffer, 1444 * we have to do the receive in sections, and thus risk returning a 1445 * short count if a timeout or signal occurs after we start. 1446 */ 1447 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 && 1448 so->so_rcv.sb_cc < uio->uio_resid) && 1449 (so->so_rcv.sb_cc < so->so_rcv.sb_lowat || 1450 ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) && 1451 m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) { 1452 KASSERT(m != NULL || !so->so_rcv.sb_cc, 1453 ("receive: m == %p so->so_rcv.sb_cc == %u", 1454 m, so->so_rcv.sb_cc)); 1455 if (so->so_error) { 1456 if (m != NULL) 1457 goto dontblock; 1458 error = so->so_error; 1459 if ((flags & MSG_PEEK) == 0) 1460 so->so_error = 0; 1461 goto release; 1462 } 1463 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1464 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 1465 if (m) 1466 goto dontblock; 1467 else 1468 goto release; 1469 } 1470 for (; m != NULL; m = m->m_next) 1471 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) { 1472 m = so->so_rcv.sb_mb; 1473 goto dontblock; 1474 } 1475 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 1476 (so->so_proto->pr_flags & PR_CONNREQUIRED)) { 1477 error = ENOTCONN; 1478 goto release; 1479 } 1480 if (uio->uio_resid == 0) 1481 goto release; 1482 if ((so->so_state & SS_NBIO) || 1483 (flags & (MSG_DONTWAIT|MSG_NBIO))) { 1484 error = EWOULDBLOCK; 1485 goto release; 1486 } 1487 SBLASTRECORDCHK(&so->so_rcv); 1488 SBLASTMBUFCHK(&so->so_rcv); 1489 sbunlock(&so->so_rcv); 1490 error = sbwait(&so->so_rcv); 1491 if (error) 1492 goto out; 1493 goto restart; 1494 } 1495 dontblock: 1496 /* 1497 * From this point onward, we maintain 'nextrecord' as a cache of the 1498 * pointer to the next record in the socket buffer. We must keep the 1499 * various socket buffer pointers and local stack versions of the 1500 * pointers in sync, pushing out modifications before dropping the 1501 * socket buffer mutex, and re-reading them when picking it up. 
1502 * 1503 * Otherwise, we will race with the network stack appending new data 1504 * or records onto the socket buffer by using inconsistent/stale 1505 * versions of the field, possibly resulting in socket buffer 1506 * corruption. 1507 * 1508 * By holding the high-level sblock(), we prevent simultaneous 1509 * readers from pulling off the front of the socket buffer. 1510 */ 1511 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1512 if (uio->uio_td) 1513 uio->uio_td->td_proc->p_stats->p_ru.ru_msgrcv++; 1514 KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb")); 1515 SBLASTRECORDCHK(&so->so_rcv); 1516 SBLASTMBUFCHK(&so->so_rcv); 1517 nextrecord = m->m_nextpkt; 1518 if (pr->pr_flags & PR_ADDR) { 1519 KASSERT(m->m_type == MT_SONAME, 1520 ("m->m_type == %d", m->m_type)); 1521 orig_resid = 0; 1522 if (psa != NULL) 1523 *psa = sodupsockaddr(mtod(m, struct sockaddr *), 1524 M_NOWAIT); 1525 if (flags & MSG_PEEK) { 1526 m = m->m_next; 1527 } else { 1528 sbfree(&so->so_rcv, m); 1529 mbuf_removed = 1; 1530 so->so_rcv.sb_mb = m_free(m); 1531 m = so->so_rcv.sb_mb; 1532 sockbuf_pushsync(&so->so_rcv, nextrecord); 1533 } 1534 } 1535 1536 /* 1537 * Process one or more MT_CONTROL mbufs present before any data mbufs 1538 * in the first mbuf chain on the socket buffer. If MSG_PEEK, we 1539 * just copy the data; if !MSG_PEEK, we call into the protocol to 1540 * perform externalization (or freeing if controlp == NULL). 1541 */ 1542 if (m != NULL && m->m_type == MT_CONTROL) { 1543 struct mbuf *cm = NULL, *cmn; 1544 struct mbuf **cme = &cm; 1545 1546 do { 1547 if (flags & MSG_PEEK) { 1548 if (controlp != NULL) { 1549 *controlp = m_copy(m, 0, m->m_len); 1550 controlp = &(*controlp)->m_next; 1551 } 1552 m = m->m_next; 1553 } else { 1554 sbfree(&so->so_rcv, m); 1555 mbuf_removed = 1; 1556 so->so_rcv.sb_mb = m->m_next; 1557 m->m_next = NULL; 1558 *cme = m; 1559 cme = &(*cme)->m_next; 1560 m = so->so_rcv.sb_mb; 1561 } 1562 } while (m != NULL && m->m_type == MT_CONTROL); 1563 if ((flags & MSG_PEEK) == 0) 1564 sockbuf_pushsync(&so->so_rcv, nextrecord); 1565 while (cm != NULL) { 1566 cmn = cm->m_next; 1567 cm->m_next = NULL; 1568 if (pr->pr_domain->dom_externalize != NULL) { 1569 SOCKBUF_UNLOCK(&so->so_rcv); 1570 error = (*pr->pr_domain->dom_externalize) 1571 (cm, controlp); 1572 SOCKBUF_LOCK(&so->so_rcv); 1573 } else if (controlp != NULL) 1574 *controlp = cm; 1575 else 1576 m_freem(cm); 1577 if (controlp != NULL) { 1578 orig_resid = 0; 1579 while (*controlp != NULL) 1580 controlp = &(*controlp)->m_next; 1581 } 1582 cm = cmn; 1583 } 1584 if (m != NULL) 1585 nextrecord = so->so_rcv.sb_mb->m_nextpkt; 1586 else 1587 nextrecord = so->so_rcv.sb_mb; 1588 orig_resid = 0; 1589 } 1590 if (m != NULL) { 1591 if ((flags & MSG_PEEK) == 0) { 1592 KASSERT(m->m_nextpkt == nextrecord, 1593 ("soreceive: post-control, nextrecord !sync")); 1594 if (nextrecord == NULL) { 1595 KASSERT(so->so_rcv.sb_mb == m, 1596 ("soreceive: post-control, sb_mb!=m")); 1597 KASSERT(so->so_rcv.sb_lastrecord == m, 1598 ("soreceive: post-control, lastrecord!=m")); 1599 } 1600 } 1601 type = m->m_type; 1602 if (type == MT_OOBDATA) 1603 flags |= MSG_OOB; 1604 } else { 1605 if ((flags & MSG_PEEK) == 0) { 1606 KASSERT(so->so_rcv.sb_mb == nextrecord, 1607 ("soreceive: sb_mb != nextrecord")); 1608 if (so->so_rcv.sb_mb == NULL) { 1609 KASSERT(so->so_rcv.sb_lastrecord == NULL, 1610 ("soreceive: sb_lastercord != NULL")); 1611 } 1612 } 1613 } 1614 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1615 SBLASTRECORDCHK(&so->so_rcv); 1616 SBLASTMBUFCHK(&so->so_rcv); 1617 1618 /* 1619 * Now 
continue to read any data mbufs off of the head of the socket 1620 * buffer until the read request is satisfied. Note that 'type' is 1621 * used to store the type of any mbuf reads that have happened so far 1622 * such that soreceive() can stop reading if the type changes, which 1623 * causes soreceive() to return only one of regular data and inline 1624 * out-of-band data in a single socket receive operation. 1625 */ 1626 moff = 0; 1627 offset = 0; 1628 while (m != NULL && uio->uio_resid > 0 && error == 0) { 1629 /* 1630 * If the type of mbuf has changed since the last mbuf 1631 * examined ('type'), end the receive operation. 1632 */ 1633 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1634 if (m->m_type == MT_OOBDATA) { 1635 if (type != MT_OOBDATA) 1636 break; 1637 } else if (type == MT_OOBDATA) 1638 break; 1639 else 1640 KASSERT(m->m_type == MT_DATA, 1641 ("m->m_type == %d", m->m_type)); 1642 so->so_rcv.sb_state &= ~SBS_RCVATMARK; 1643 len = uio->uio_resid; 1644 if (so->so_oobmark && len > so->so_oobmark - offset) 1645 len = so->so_oobmark - offset; 1646 if (len > m->m_len - moff) 1647 len = m->m_len - moff; 1648 /* 1649 * If mp is set, just pass back the mbufs. Otherwise copy 1650 * them out via the uio, then free. Sockbuf must be 1651 * consistent here (points to current mbuf, it points to next 1652 * record) when we drop priority; we must note any additions 1653 * to the sockbuf when we block interrupts again. 1654 */ 1655 if (mp == NULL) { 1656 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1657 SBLASTRECORDCHK(&so->so_rcv); 1658 SBLASTMBUFCHK(&so->so_rcv); 1659 SOCKBUF_UNLOCK(&so->so_rcv); 1660 #ifdef ZERO_COPY_SOCKETS 1661 if (so_zero_copy_receive) { 1662 int disposable; 1663 1664 if ((m->m_flags & M_EXT) 1665 && (m->m_ext.ext_type == EXT_DISPOSABLE)) 1666 disposable = 1; 1667 else 1668 disposable = 0; 1669 1670 error = uiomoveco(mtod(m, char *) + moff, 1671 (int)len, uio, 1672 disposable); 1673 } else 1674 #endif /* ZERO_COPY_SOCKETS */ 1675 error = uiomove(mtod(m, char *) + moff, (int)len, uio); 1676 SOCKBUF_LOCK(&so->so_rcv); 1677 if (error) { 1678 /* 1679 * If any part of the record has been removed 1680 * (such as the MT_SONAME mbuf, which will 1681 * happen when PR_ADDR, and thus also 1682 * PR_ATOMIC, is set), then drop the entire 1683 * record to maintain the atomicity of the 1684 * receive operation. 
1685 */ 1686 if (m && mbuf_removed && 1687 (pr->pr_flags & PR_ATOMIC)) 1688 (void)sbdroprecord_locked(&so->so_rcv); 1689 goto release; 1690 } 1691 } else 1692 uio->uio_resid -= len; 1693 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1694 if (len == m->m_len - moff) { 1695 if (m->m_flags & M_EOR) 1696 flags |= MSG_EOR; 1697 if (flags & MSG_PEEK) { 1698 m = m->m_next; 1699 moff = 0; 1700 } else { 1701 nextrecord = m->m_nextpkt; 1702 sbfree(&so->so_rcv, m); 1703 if (mp != NULL) { 1704 *mp = m; 1705 mp = &m->m_next; 1706 so->so_rcv.sb_mb = m = m->m_next; 1707 *mp = NULL; 1708 } else { 1709 so->so_rcv.sb_mb = m_free(m); 1710 m = so->so_rcv.sb_mb; 1711 } 1712 sockbuf_pushsync(&so->so_rcv, nextrecord); 1713 SBLASTRECORDCHK(&so->so_rcv); 1714 SBLASTMBUFCHK(&so->so_rcv); 1715 } 1716 } else { 1717 if (flags & MSG_PEEK) 1718 moff += len; 1719 else { 1720 if (mp != NULL) { 1721 int copy_flag; 1722 1723 if (flags & MSG_DONTWAIT) 1724 copy_flag = M_DONTWAIT; 1725 else 1726 copy_flag = M_TRYWAIT; 1727 if (copy_flag == M_TRYWAIT) 1728 SOCKBUF_UNLOCK(&so->so_rcv); 1729 *mp = m_copym(m, 0, len, copy_flag); 1730 if (copy_flag == M_TRYWAIT) 1731 SOCKBUF_LOCK(&so->so_rcv); 1732 if (*mp == NULL) { 1733 /* 1734 * m_copym() couldn't 1735 * allocate an mbuf. Adjust 1736 * uio_resid back (it was 1737 * adjusted down by len 1738 * bytes, which we didn't end 1739 * up "copying" over). 1740 */ 1741 uio->uio_resid += len; 1742 break; 1743 } 1744 } 1745 m->m_data += len; 1746 m->m_len -= len; 1747 so->so_rcv.sb_cc -= len; 1748 } 1749 } 1750 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1751 if (so->so_oobmark) { 1752 if ((flags & MSG_PEEK) == 0) { 1753 so->so_oobmark -= len; 1754 if (so->so_oobmark == 0) { 1755 so->so_rcv.sb_state |= SBS_RCVATMARK; 1756 break; 1757 } 1758 } else { 1759 offset += len; 1760 if (offset == so->so_oobmark) 1761 break; 1762 } 1763 } 1764 if (flags & MSG_EOR) 1765 break; 1766 /* 1767 * If the MSG_WAITALL flag is set (for non-atomic socket), we 1768 * must not quit until "uio->uio_resid == 0" or an error 1769 * termination. If a signal/timeout occurs, return with a 1770 * short count but without error. Keep sockbuf locked 1771 * against other readers. 1772 */ 1773 while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 && 1774 !sosendallatonce(so) && nextrecord == NULL) { 1775 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1776 if (so->so_error || so->so_rcv.sb_state & SBS_CANTRCVMORE) 1777 break; 1778 /* 1779 * Notify the protocol that some data has been 1780 * drained before blocking. 1781 */ 1782 if (pr->pr_flags & PR_WANTRCVD) { 1783 SOCKBUF_UNLOCK(&so->so_rcv); 1784 (*pr->pr_usrreqs->pru_rcvd)(so, flags); 1785 SOCKBUF_LOCK(&so->so_rcv); 1786 } 1787 SBLASTRECORDCHK(&so->so_rcv); 1788 SBLASTMBUFCHK(&so->so_rcv); 1789 error = sbwait(&so->so_rcv); 1790 if (error) 1791 goto release; 1792 m = so->so_rcv.sb_mb; 1793 if (m != NULL) 1794 nextrecord = m->m_nextpkt; 1795 } 1796 } 1797 1798 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1799 if (m != NULL && pr->pr_flags & PR_ATOMIC) { 1800 flags |= MSG_TRUNC; 1801 if ((flags & MSG_PEEK) == 0) 1802 (void) sbdroprecord_locked(&so->so_rcv); 1803 } 1804 if ((flags & MSG_PEEK) == 0) { 1805 if (m == NULL) { 1806 /* 1807 * First part is an inline SB_EMPTY_FIXUP(). Second 1808 * part makes sure sb_lastrecord is up-to-date if 1809 * there is still data in the socket buffer. 
1810 */ 1811 so->so_rcv.sb_mb = nextrecord; 1812 if (so->so_rcv.sb_mb == NULL) { 1813 so->so_rcv.sb_mbtail = NULL; 1814 so->so_rcv.sb_lastrecord = NULL; 1815 } else if (nextrecord->m_nextpkt == NULL) 1816 so->so_rcv.sb_lastrecord = nextrecord; 1817 } 1818 SBLASTRECORDCHK(&so->so_rcv); 1819 SBLASTMBUFCHK(&so->so_rcv); 1820 /* 1821 * If soreceive() is being done from the socket callback, 1822 * then don't need to generate ACK to peer to update window, 1823 * since ACK will be generated on return to TCP. 1824 */ 1825 if (!(flags & MSG_SOCALLBCK) && 1826 (pr->pr_flags & PR_WANTRCVD)) { 1827 SOCKBUF_UNLOCK(&so->so_rcv); 1828 (*pr->pr_usrreqs->pru_rcvd)(so, flags); 1829 SOCKBUF_LOCK(&so->so_rcv); 1830 } 1831 } 1832 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1833 if (orig_resid == uio->uio_resid && orig_resid && 1834 (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) { 1835 sbunlock(&so->so_rcv); 1836 goto restart; 1837 } 1838 1839 if (flagsp != NULL) 1840 *flagsp |= flags; 1841 release: 1842 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1843 sbunlock(&so->so_rcv); 1844 out: 1845 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1846 SOCKBUF_UNLOCK(&so->so_rcv); 1847 return (error); 1848 } 1849 1850 int 1851 soreceive(so, psa, uio, mp0, controlp, flagsp) 1852 struct socket *so; 1853 struct sockaddr **psa; 1854 struct uio *uio; 1855 struct mbuf **mp0; 1856 struct mbuf **controlp; 1857 int *flagsp; 1858 { 1859 1860 /* XXXRW: Temporary debugging. */ 1861 KASSERT(so->so_proto->pr_usrreqs->pru_soreceive != soreceive, 1862 ("soreceive: protocol calls soreceive")); 1863 1864 return (so->so_proto->pr_usrreqs->pru_soreceive(so, psa, uio, mp0, 1865 controlp, flagsp)); 1866 } 1867 1868 int 1869 soshutdown(so, how) 1870 struct socket *so; 1871 int how; 1872 { 1873 struct protosw *pr = so->so_proto; 1874 1875 if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR)) 1876 return (EINVAL); 1877 1878 if (how != SHUT_WR) 1879 sorflush(so); 1880 if (how != SHUT_RD) 1881 return ((*pr->pr_usrreqs->pru_shutdown)(so)); 1882 return (0); 1883 } 1884 1885 void 1886 sorflush(so) 1887 struct socket *so; 1888 { 1889 struct sockbuf *sb = &so->so_rcv; 1890 struct protosw *pr = so->so_proto; 1891 struct sockbuf asb; 1892 1893 /* 1894 * XXXRW: This is quite ugly. Previously, this code made a copy of 1895 * the socket buffer, then zero'd the original to clear the buffer 1896 * fields. However, with mutexes in the socket buffer, this causes 1897 * problems. We only clear the zeroable bits of the original; 1898 * however, we have to initialize and destroy the mutex in the copy 1899 * so that dom_dispose() and sbrelease() can lock it as needed. 1900 */ 1901 SOCKBUF_LOCK(sb); 1902 sb->sb_flags |= SB_NOINTR; 1903 (void) sblock(sb, M_WAITOK); 1904 /* 1905 * socantrcvmore_locked() drops the socket buffer mutex so that it 1906 * can safely perform wakeups. Re-acquire the mutex before 1907 * continuing. 1908 */ 1909 socantrcvmore_locked(so); 1910 SOCKBUF_LOCK(sb); 1911 sbunlock(sb); 1912 /* 1913 * Invalidate/clear most of the sockbuf structure, but leave selinfo 1914 * and mutex data unchanged.
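 * Everything from sb_startzero onward is first copied into the local
 * 'asb' and then zeroed in the original, so the mbuf chain can be
 * disposed of and released below without holding the socket buffer lock;
 * the fields in front of sb_startzero in the original (its mutex and
 * selinfo) are left alone.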
1915 */ 1916 bzero(&asb, offsetof(struct sockbuf, sb_startzero)); 1917 bcopy(&sb->sb_startzero, &asb.sb_startzero, 1918 sizeof(*sb) - offsetof(struct sockbuf, sb_startzero)); 1919 bzero(&sb->sb_startzero, 1920 sizeof(*sb) - offsetof(struct sockbuf, sb_startzero)); 1921 SOCKBUF_UNLOCK(sb); 1922 1923 SOCKBUF_LOCK_INIT(&asb, "so_rcv"); 1924 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL) 1925 (*pr->pr_domain->dom_dispose)(asb.sb_mb); 1926 sbrelease(&asb, so); 1927 SOCKBUF_LOCK_DESTROY(&asb); 1928 } 1929 1930 /* 1931 * Perhaps this routine, and sooptcopyout(), below, ought to come in an 1932 * additional variant to handle the case where the option value needs to be 1933 * some kind of integer, but not a specific size. In addition to their use 1934 * here, these functions are also called by the protocol-level pr_ctloutput() 1935 * routines. 1936 */ 1937 int 1938 sooptcopyin(sopt, buf, len, minlen) 1939 struct sockopt *sopt; 1940 void *buf; 1941 size_t len; 1942 size_t minlen; 1943 { 1944 size_t valsize; 1945 1946 /* 1947 * If the user gives us more than we wanted, we ignore it, but if we 1948 * don't get the minimum length the caller wants, we return EINVAL. 1949 * On success, sopt->sopt_valsize is set to however much we actually 1950 * retrieved. 1951 */ 1952 if ((valsize = sopt->sopt_valsize) < minlen) 1953 return EINVAL; 1954 if (valsize > len) 1955 sopt->sopt_valsize = valsize = len; 1956 1957 if (sopt->sopt_td != NULL) 1958 return (copyin(sopt->sopt_val, buf, valsize)); 1959 1960 bcopy(sopt->sopt_val, buf, valsize); 1961 return (0); 1962 } 1963 1964 /* 1965 * Kernel version of setsockopt(2). 1966 * 1967 * XXX: optlen is size_t, not socklen_t 1968 */ 1969 int 1970 so_setsockopt(struct socket *so, int level, int optname, void *optval, 1971 size_t optlen) 1972 { 1973 struct sockopt sopt; 1974 1975 sopt.sopt_level = level; 1976 sopt.sopt_name = optname; 1977 sopt.sopt_dir = SOPT_SET; 1978 sopt.sopt_val = optval; 1979 sopt.sopt_valsize = optlen; 1980 sopt.sopt_td = NULL; 1981 return (sosetopt(so, &sopt)); 1982 } 1983 1984 int 1985 sosetopt(so, sopt) 1986 struct socket *so; 1987 struct sockopt *sopt; 1988 { 1989 int error, optval; 1990 struct linger l; 1991 struct timeval tv; 1992 u_long val; 1993 #ifdef MAC 1994 struct mac extmac; 1995 #endif 1996 1997 error = 0; 1998 if (sopt->sopt_level != SOL_SOCKET) { 1999 if (so->so_proto && so->so_proto->pr_ctloutput) 2000 return ((*so->so_proto->pr_ctloutput) 2001 (so, sopt)); 2002 error = ENOPROTOOPT; 2003 } else { 2004 switch (sopt->sopt_name) { 2005 #ifdef INET 2006 case SO_ACCEPTFILTER: 2007 error = do_setopt_accept_filter(so, sopt); 2008 if (error) 2009 goto bad; 2010 break; 2011 #endif 2012 case SO_LINGER: 2013 error = sooptcopyin(sopt, &l, sizeof l, sizeof l); 2014 if (error) 2015 goto bad; 2016 2017 SOCK_LOCK(so); 2018 so->so_linger = l.l_linger; 2019 if (l.l_onoff) 2020 so->so_options |= SO_LINGER; 2021 else 2022 so->so_options &= ~SO_LINGER; 2023 SOCK_UNLOCK(so); 2024 break; 2025 2026 case SO_DEBUG: 2027 case SO_KEEPALIVE: 2028 case SO_DONTROUTE: 2029 case SO_USELOOPBACK: 2030 case SO_BROADCAST: 2031 case SO_REUSEADDR: 2032 case SO_REUSEPORT: 2033 case SO_OOBINLINE: 2034 case SO_TIMESTAMP: 2035 case SO_BINTIME: 2036 case SO_NOSIGPIPE: 2037 error = sooptcopyin(sopt, &optval, sizeof optval, 2038 sizeof optval); 2039 if (error) 2040 goto bad; 2041 SOCK_LOCK(so); 2042 if (optval) 2043 so->so_options |= sopt->sopt_name; 2044 else 2045 so->so_options &= ~sopt->sopt_name; 2046 SOCK_UNLOCK(so); 2047 break; 2048 2049 case SO_SNDBUF: 
2050 case SO_RCVBUF: 2051 case SO_SNDLOWAT: 2052 case SO_RCVLOWAT: 2053 error = sooptcopyin(sopt, &optval, sizeof optval, 2054 sizeof optval); 2055 if (error) 2056 goto bad; 2057 2058 /* 2059 * Values < 1 make no sense for any of these options, 2060 * so disallow them. 2061 */ 2062 if (optval < 1) { 2063 error = EINVAL; 2064 goto bad; 2065 } 2066 2067 switch (sopt->sopt_name) { 2068 case SO_SNDBUF: 2069 case SO_RCVBUF: 2070 if (sbreserve(sopt->sopt_name == SO_SNDBUF ? 2071 &so->so_snd : &so->so_rcv, (u_long)optval, 2072 so, curthread) == 0) { 2073 error = ENOBUFS; 2074 goto bad; 2075 } 2076 break; 2077 2078 /* 2079 * Make sure the low-water is never greater than the 2080 * high-water. 2081 */ 2082 case SO_SNDLOWAT: 2083 SOCKBUF_LOCK(&so->so_snd); 2084 so->so_snd.sb_lowat = 2085 (optval > so->so_snd.sb_hiwat) ? 2086 so->so_snd.sb_hiwat : optval; 2087 SOCKBUF_UNLOCK(&so->so_snd); 2088 break; 2089 case SO_RCVLOWAT: 2090 SOCKBUF_LOCK(&so->so_rcv); 2091 so->so_rcv.sb_lowat = 2092 (optval > so->so_rcv.sb_hiwat) ? 2093 so->so_rcv.sb_hiwat : optval; 2094 SOCKBUF_UNLOCK(&so->so_rcv); 2095 break; 2096 } 2097 break; 2098 2099 case SO_SNDTIMEO: 2100 case SO_RCVTIMEO: 2101 #ifdef COMPAT_IA32 2102 if (curthread->td_proc->p_sysent == &ia32_freebsd_sysvec) { 2103 struct timeval32 tv32; 2104 2105 error = sooptcopyin(sopt, &tv32, sizeof tv32, 2106 sizeof tv32); 2107 CP(tv32, tv, tv_sec); 2108 CP(tv32, tv, tv_usec); 2109 } else 2110 #endif 2111 error = sooptcopyin(sopt, &tv, sizeof tv, 2112 sizeof tv); 2113 if (error) 2114 goto bad; 2115 2116 /* assert(hz > 0); */ 2117 if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz || 2118 tv.tv_usec < 0 || tv.tv_usec >= 1000000) { 2119 error = EDOM; 2120 goto bad; 2121 } 2122 /* assert(tick > 0); */ 2123 /* assert(ULONG_MAX - INT_MAX >= 1000000); */ 2124 val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick; 2125 if (val > INT_MAX) { 2126 error = EDOM; 2127 goto bad; 2128 } 2129 if (val == 0 && tv.tv_usec != 0) 2130 val = 1; 2131 2132 switch (sopt->sopt_name) { 2133 case SO_SNDTIMEO: 2134 so->so_snd.sb_timeo = val; 2135 break; 2136 case SO_RCVTIMEO: 2137 so->so_rcv.sb_timeo = val; 2138 break; 2139 } 2140 break; 2141 2142 case SO_LABEL: 2143 #ifdef MAC 2144 error = sooptcopyin(sopt, &extmac, sizeof extmac, 2145 sizeof extmac); 2146 if (error) 2147 goto bad; 2148 error = mac_setsockopt_label(sopt->sopt_td->td_ucred, 2149 so, &extmac); 2150 #else 2151 error = EOPNOTSUPP; 2152 #endif 2153 break; 2154 2155 default: 2156 error = ENOPROTOOPT; 2157 break; 2158 } 2159 if (error == 0 && so->so_proto != NULL && 2160 so->so_proto->pr_ctloutput != NULL) { 2161 (void) ((*so->so_proto->pr_ctloutput) 2162 (so, sopt)); 2163 } 2164 } 2165 bad: 2166 return (error); 2167 } 2168 2169 /* 2170 * Helper routine for getsockopt. 2171 */ 2172 int 2173 sooptcopyout(struct sockopt *sopt, const void *buf, size_t len) 2174 { 2175 int error; 2176 size_t valsize; 2177 2178 error = 0; 2179 2180 /* 2181 * Documented get behavior is that we always return a value, possibly 2182 * truncated to fit in the user's buffer. Traditional behavior is 2183 * that we always tell the user precisely how much we copied, rather 2184 * than something useful like the total amount we had available for 2185 * her. Note that this interface is not idempotent; the entire 2186 * answer must generated ahead of time. 
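 * Callers therefore assemble the complete option value in a local buffer
 * and hand it to sooptcopyout() in a single call, as the cases in
 * sogetopt() below do.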
2187 */ 2188 valsize = min(len, sopt->sopt_valsize); 2189 sopt->sopt_valsize = valsize; 2190 if (sopt->sopt_val != NULL) { 2191 if (sopt->sopt_td != NULL) 2192 error = copyout(buf, sopt->sopt_val, valsize); 2193 else 2194 bcopy(buf, sopt->sopt_val, valsize); 2195 } 2196 return (error); 2197 } 2198 2199 int 2200 sogetopt(so, sopt) 2201 struct socket *so; 2202 struct sockopt *sopt; 2203 { 2204 int error, optval; 2205 struct linger l; 2206 struct timeval tv; 2207 #ifdef MAC 2208 struct mac extmac; 2209 #endif 2210 2211 error = 0; 2212 if (sopt->sopt_level != SOL_SOCKET) { 2213 if (so->so_proto && so->so_proto->pr_ctloutput) { 2214 return ((*so->so_proto->pr_ctloutput) 2215 (so, sopt)); 2216 } else 2217 return (ENOPROTOOPT); 2218 } else { 2219 switch (sopt->sopt_name) { 2220 #ifdef INET 2221 case SO_ACCEPTFILTER: 2222 error = do_getopt_accept_filter(so, sopt); 2223 break; 2224 #endif 2225 case SO_LINGER: 2226 SOCK_LOCK(so); 2227 l.l_onoff = so->so_options & SO_LINGER; 2228 l.l_linger = so->so_linger; 2229 SOCK_UNLOCK(so); 2230 error = sooptcopyout(sopt, &l, sizeof l); 2231 break; 2232 2233 case SO_USELOOPBACK: 2234 case SO_DONTROUTE: 2235 case SO_DEBUG: 2236 case SO_KEEPALIVE: 2237 case SO_REUSEADDR: 2238 case SO_REUSEPORT: 2239 case SO_BROADCAST: 2240 case SO_OOBINLINE: 2241 case SO_ACCEPTCONN: 2242 case SO_TIMESTAMP: 2243 case SO_BINTIME: 2244 case SO_NOSIGPIPE: 2245 optval = so->so_options & sopt->sopt_name; 2246 integer: 2247 error = sooptcopyout(sopt, &optval, sizeof optval); 2248 break; 2249 2250 case SO_TYPE: 2251 optval = so->so_type; 2252 goto integer; 2253 2254 case SO_ERROR: 2255 SOCK_LOCK(so); 2256 optval = so->so_error; 2257 so->so_error = 0; 2258 SOCK_UNLOCK(so); 2259 goto integer; 2260 2261 case SO_SNDBUF: 2262 optval = so->so_snd.sb_hiwat; 2263 goto integer; 2264 2265 case SO_RCVBUF: 2266 optval = so->so_rcv.sb_hiwat; 2267 goto integer; 2268 2269 case SO_SNDLOWAT: 2270 optval = so->so_snd.sb_lowat; 2271 goto integer; 2272 2273 case SO_RCVLOWAT: 2274 optval = so->so_rcv.sb_lowat; 2275 goto integer; 2276 2277 case SO_SNDTIMEO: 2278 case SO_RCVTIMEO: 2279 optval = (sopt->sopt_name == SO_SNDTIMEO ? 
2280 so->so_snd.sb_timeo : so->so_rcv.sb_timeo); 2281 2282 tv.tv_sec = optval / hz; 2283 tv.tv_usec = (optval % hz) * tick; 2284 #ifdef COMPAT_IA32 2285 if (curthread->td_proc->p_sysent == &ia32_freebsd_sysvec) { 2286 struct timeval32 tv32; 2287 2288 CP(tv, tv32, tv_sec); 2289 CP(tv, tv32, tv_usec); 2290 error = sooptcopyout(sopt, &tv32, sizeof tv32); 2291 } else 2292 #endif 2293 error = sooptcopyout(sopt, &tv, sizeof tv); 2294 break; 2295 2296 case SO_LABEL: 2297 #ifdef MAC 2298 error = sooptcopyin(sopt, &extmac, sizeof(extmac), 2299 sizeof(extmac)); 2300 if (error) 2301 return (error); 2302 error = mac_getsockopt_label(sopt->sopt_td->td_ucred, 2303 so, &extmac); 2304 if (error) 2305 return (error); 2306 error = sooptcopyout(sopt, &extmac, sizeof extmac); 2307 #else 2308 error = EOPNOTSUPP; 2309 #endif 2310 break; 2311 2312 case SO_PEERLABEL: 2313 #ifdef MAC 2314 error = sooptcopyin(sopt, &extmac, sizeof(extmac), 2315 sizeof(extmac)); 2316 if (error) 2317 return (error); 2318 error = mac_getsockopt_peerlabel( 2319 sopt->sopt_td->td_ucred, so, &extmac); 2320 if (error) 2321 return (error); 2322 error = sooptcopyout(sopt, &extmac, sizeof extmac); 2323 #else 2324 error = EOPNOTSUPP; 2325 #endif 2326 break; 2327 2328 case SO_LISTENQLIMIT: 2329 optval = so->so_qlimit; 2330 goto integer; 2331 2332 case SO_LISTENQLEN: 2333 optval = so->so_qlen; 2334 goto integer; 2335 2336 case SO_LISTENINCQLEN: 2337 optval = so->so_incqlen; 2338 goto integer; 2339 2340 default: 2341 error = ENOPROTOOPT; 2342 break; 2343 } 2344 return (error); 2345 } 2346 } 2347 2348 /* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */ 2349 int 2350 soopt_getm(struct sockopt *sopt, struct mbuf **mp) 2351 { 2352 struct mbuf *m, *m_prev; 2353 int sopt_size = sopt->sopt_valsize; 2354 2355 MGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT, MT_DATA); 2356 if (m == NULL) 2357 return ENOBUFS; 2358 if (sopt_size > MLEN) { 2359 MCLGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT); 2360 if ((m->m_flags & M_EXT) == 0) { 2361 m_free(m); 2362 return ENOBUFS; 2363 } 2364 m->m_len = min(MCLBYTES, sopt_size); 2365 } else { 2366 m->m_len = min(MLEN, sopt_size); 2367 } 2368 sopt_size -= m->m_len; 2369 *mp = m; 2370 m_prev = m; 2371 2372 while (sopt_size) { 2373 MGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT, MT_DATA); 2374 if (m == NULL) { 2375 m_freem(*mp); 2376 return ENOBUFS; 2377 } 2378 if (sopt_size > MLEN) { 2379 MCLGET(m, sopt->sopt_td != NULL ? M_TRYWAIT : 2380 M_DONTWAIT); 2381 if ((m->m_flags & M_EXT) == 0) { 2382 m_freem(m); 2383 m_freem(*mp); 2384 return ENOBUFS; 2385 } 2386 m->m_len = min(MCLBYTES, sopt_size); 2387 } else { 2388 m->m_len = min(MLEN, sopt_size); 2389 } 2390 sopt_size -= m->m_len; 2391 m_prev->m_next = m; 2392 m_prev = m; 2393 } 2394 return (0); 2395 } 2396 2397 /* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. 
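The chain is expected to have been sized from sopt->sopt_valsize by soopt_getm() above, so the copy below simply fills it mbuf by mbuf.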
*/ 2398 int 2399 soopt_mcopyin(struct sockopt *sopt, struct mbuf *m) 2400 { 2401 struct mbuf *m0 = m; 2402 2403 if (sopt->sopt_val == NULL) 2404 return (0); 2405 while (m != NULL && sopt->sopt_valsize >= m->m_len) { 2406 if (sopt->sopt_td != NULL) { 2407 int error; 2408 2409 error = copyin(sopt->sopt_val, mtod(m, char *), 2410 m->m_len); 2411 if (error != 0) { 2412 m_freem(m0); 2413 return(error); 2414 } 2415 } else 2416 bcopy(sopt->sopt_val, mtod(m, char *), m->m_len); 2417 sopt->sopt_valsize -= m->m_len; 2418 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len; 2419 m = m->m_next; 2420 } 2421 if (m != NULL) /* should have been allocated with enough space at ip6_sooptmcopyin() */ 2422 panic("ip6_sooptmcopyin"); 2423 return (0); 2424 } 2425 2426 /* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */ 2427 int 2428 soopt_mcopyout(struct sockopt *sopt, struct mbuf *m) 2429 { 2430 struct mbuf *m0 = m; 2431 size_t valsize = 0; 2432 2433 if (sopt->sopt_val == NULL) 2434 return (0); 2435 while (m != NULL && sopt->sopt_valsize >= m->m_len) { 2436 if (sopt->sopt_td != NULL) { 2437 int error; 2438 2439 error = copyout(mtod(m, char *), sopt->sopt_val, 2440 m->m_len); 2441 if (error != 0) { 2442 m_freem(m0); 2443 return(error); 2444 } 2445 } else 2446 bcopy(mtod(m, char *), sopt->sopt_val, m->m_len); 2447 sopt->sopt_valsize -= m->m_len; 2448 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len; 2449 valsize += m->m_len; 2450 m = m->m_next; 2451 } 2452 if (m != NULL) { 2453 /* a large enough option buffer should have been provided by userland */ 2454 m_freem(m0); 2455 return(EINVAL); 2456 } 2457 sopt->sopt_valsize = valsize; 2458 return (0); 2459 } 2460 2461 /* 2462 * sohasoutofband(): protocol notifies socket layer of the arrival of new 2463 * out-of-band data, which will then notify socket consumers. 2464 */ 2465 void 2466 sohasoutofband(so) 2467 struct socket *so; 2468 { 2469 if (so->so_sigio != NULL) 2470 pgsigio(&so->so_sigio, SIGURG, 0); 2471 selwakeuppri(&so->so_rcv.sb_sel, PSOCK); 2472 } 2473 2474 int 2475 sopoll(struct socket *so, int events, struct ucred *active_cred, 2476 struct thread *td) 2477 { 2478 2479 /* XXXRW: Temporary debugging.
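The assertion below catches a protocol whose pru_sopoll entry erroneously points back at sopoll() itself, which would otherwise recurse forever.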
*/ 2480 KASSERT(so->so_proto->pr_usrreqs->pru_sopoll != sopoll, 2481 ("sopoll: protocol calls sopoll")); 2482 2483 return (so->so_proto->pr_usrreqs->pru_sopoll(so, events, active_cred, 2484 td)); 2485 } 2486 2487 int 2488 sopoll_generic(struct socket *so, int events, struct ucred *active_cred, 2489 struct thread *td) 2490 { 2491 int revents = 0; 2492 2493 SOCKBUF_LOCK(&so->so_snd); 2494 SOCKBUF_LOCK(&so->so_rcv); 2495 if (events & (POLLIN | POLLRDNORM)) 2496 if (soreadable(so)) 2497 revents |= events & (POLLIN | POLLRDNORM); 2498 2499 if (events & POLLINIGNEOF) 2500 if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat || 2501 !TAILQ_EMPTY(&so->so_comp) || so->so_error) 2502 revents |= POLLINIGNEOF; 2503 2504 if (events & (POLLOUT | POLLWRNORM)) 2505 if (sowriteable(so)) 2506 revents |= events & (POLLOUT | POLLWRNORM); 2507 2508 if (events & (POLLPRI | POLLRDBAND)) 2509 if (so->so_oobmark || (so->so_rcv.sb_state & SBS_RCVATMARK)) 2510 revents |= events & (POLLPRI | POLLRDBAND); 2511 2512 if (revents == 0) { 2513 if (events & 2514 (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM | 2515 POLLRDBAND)) { 2516 selrecord(td, &so->so_rcv.sb_sel); 2517 so->so_rcv.sb_flags |= SB_SEL; 2518 } 2519 2520 if (events & (POLLOUT | POLLWRNORM)) { 2521 selrecord(td, &so->so_snd.sb_sel); 2522 so->so_snd.sb_flags |= SB_SEL; 2523 } 2524 } 2525 2526 SOCKBUF_UNLOCK(&so->so_rcv); 2527 SOCKBUF_UNLOCK(&so->so_snd); 2528 return (revents); 2529 } 2530 2531 int 2532 soo_kqfilter(struct file *fp, struct knote *kn) 2533 { 2534 struct socket *so = kn->kn_fp->f_data; 2535 struct sockbuf *sb; 2536 2537 switch (kn->kn_filter) { 2538 case EVFILT_READ: 2539 if (so->so_options & SO_ACCEPTCONN) 2540 kn->kn_fop = &solisten_filtops; 2541 else 2542 kn->kn_fop = &soread_filtops; 2543 sb = &so->so_rcv; 2544 break; 2545 case EVFILT_WRITE: 2546 kn->kn_fop = &sowrite_filtops; 2547 sb = &so->so_snd; 2548 break; 2549 default: 2550 return (EINVAL); 2551 } 2552 2553 SOCKBUF_LOCK(sb); 2554 knlist_add(&sb->sb_sel.si_note, kn, 1); 2555 sb->sb_flags |= SB_KNOTE; 2556 SOCKBUF_UNLOCK(sb); 2557 return (0); 2558 } 2559 2560 static void 2561 filt_sordetach(struct knote *kn) 2562 { 2563 struct socket *so = kn->kn_fp->f_data; 2564 2565 SOCKBUF_LOCK(&so->so_rcv); 2566 knlist_remove(&so->so_rcv.sb_sel.si_note, kn, 1); 2567 if (knlist_empty(&so->so_rcv.sb_sel.si_note)) 2568 so->so_rcv.sb_flags &= ~SB_KNOTE; 2569 SOCKBUF_UNLOCK(&so->so_rcv); 2570 } 2571 2572 /*ARGSUSED*/ 2573 static int 2574 filt_soread(struct knote *kn, long hint) 2575 { 2576 struct socket *so; 2577 2578 so = kn->kn_fp->f_data; 2579 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 2580 2581 kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl; 2582 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 2583 kn->kn_flags |= EV_EOF; 2584 kn->kn_fflags = so->so_error; 2585 return (1); 2586 } else if (so->so_error) /* temporary udp error */ 2587 return (1); 2588 else if (kn->kn_sfflags & NOTE_LOWAT) 2589 return (kn->kn_data >= kn->kn_sdata); 2590 else 2591 return (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat); 2592 } 2593 2594 static void 2595 filt_sowdetach(struct knote *kn) 2596 { 2597 struct socket *so = kn->kn_fp->f_data; 2598 2599 SOCKBUF_LOCK(&so->so_snd); 2600 knlist_remove(&so->so_snd.sb_sel.si_note, kn, 1); 2601 if (knlist_empty(&so->so_snd.sb_sel.si_note)) 2602 so->so_snd.sb_flags &= ~SB_KNOTE; 2603 SOCKBUF_UNLOCK(&so->so_snd); 2604 } 2605 2606 /*ARGSUSED*/ 2607 static int 2608 filt_sowrite(struct knote *kn, long hint) 2609 { 2610 struct socket *so; 2611 2612 so = kn->kn_fp->f_data; 2613 SOCKBUF_LOCK_ASSERT(&so->so_snd); 2614 
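	/*
	 * Writability is reported as the free space in the send buffer: the
	 * knote fires when that space reaches sb_lowat (or the NOTE_LOWAT
	 * threshold supplied by the consumer), is reported with EV_EOF once
	 * sending has been shut down, and is suppressed while a
	 * connection-oriented socket is not yet connected.
	 */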
kn->kn_data = sbspace(&so->so_snd); 2615 if (so->so_snd.sb_state & SBS_CANTSENDMORE) { 2616 kn->kn_flags |= EV_EOF; 2617 kn->kn_fflags = so->so_error; 2618 return (1); 2619 } else if (so->so_error) /* temporary udp error */ 2620 return (1); 2621 else if (((so->so_state & SS_ISCONNECTED) == 0) && 2622 (so->so_proto->pr_flags & PR_CONNREQUIRED)) 2623 return (0); 2624 else if (kn->kn_sfflags & NOTE_LOWAT) 2625 return (kn->kn_data >= kn->kn_sdata); 2626 else 2627 return (kn->kn_data >= so->so_snd.sb_lowat); 2628 } 2629 2630 /*ARGSUSED*/ 2631 static int 2632 filt_solisten(struct knote *kn, long hint) 2633 { 2634 struct socket *so = kn->kn_fp->f_data; 2635 2636 kn->kn_data = so->so_qlen; 2637 return (! TAILQ_EMPTY(&so->so_comp)); 2638 } 2639 2640 int 2641 socheckuid(struct socket *so, uid_t uid) 2642 { 2643 2644 if (so == NULL) 2645 return (EPERM); 2646 if (so->so_cred->cr_uid != uid) 2647 return (EPERM); 2648 return (0); 2649 } 2650 2651 static int 2652 somaxconn_sysctl(SYSCTL_HANDLER_ARGS) 2653 { 2654 int error; 2655 int val; 2656 2657 val = somaxconn; 2658 error = sysctl_handle_int(oidp, &val, sizeof(int), req); 2659 if (error || !req->newptr ) 2660 return (error); 2661 2662 if (val < 1 || val > USHRT_MAX) 2663 return (EINVAL); 2664 2665 somaxconn = val; 2666 return (0); 2667 } 2668
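/*
 * Illustrative sketch, not part of the original file and compiled out: a
 * hypothetical in-kernel consumer of the interfaces above.  It creates a
 * stream socket with socreate(), enables SO_REUSEADDR through the
 * kernel-level so_setsockopt() wrapper, and releases the socket with
 * soclose().  The function name and its caller are assumptions made for
 * illustration only.
 */
#if 0
static int
example_so_setsockopt_usage(struct thread *td)
{
	struct socket *so;
	int error, on;

	/* Create a default stream socket using the caller's credential. */
	error = socreate(AF_INET, &so, SOCK_STREAM, 0, td->td_ucred, td);
	if (error != 0)
		return (error);

	/*
	 * Kernel callers pass a kernel buffer; so_setsockopt() sets
	 * sopt_td to NULL so sooptcopyin() uses bcopy() rather than
	 * copyin().
	 */
	on = 1;
	error = so_setsockopt(so, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));

	/* Release the single reference returned by socreate(). */
	(void)soclose(so);
	return (error);
}
#endif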