/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2004 The FreeBSD Foundation
 * Copyright (c) 2004-2006 Robert N. M. Watson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 */

/*
 * Comments on the socket life cycle:
 *
 * soalloc() sets up socket layer state for a socket, called only by
 * socreate() and sonewconn().  Socket layer private.
 *
 * sodealloc() tears down socket layer state for a socket, called only by
 * sofree() and sonewconn().  Socket layer private.
 *
 * pru_attach() associates protocol layer state with an allocated socket;
 * called only once, may fail, aborting socket allocation.  This is called
 * from socreate() and sonewconn().  Socket layer private.
 *
 * pru_detach() disassociates protocol layer state from an attached socket,
 * and will be called exactly once for sockets in which pru_attach() has
 * been successfully called.  If pru_attach() returned an error,
 * pru_detach() will not be called.  Socket layer private.
 *
 * socreate() creates a socket and attaches protocol state.  This is a public
 * interface that may be used by socket layer consumers to create new
 * sockets.
 *
 * sonewconn() creates a socket and attaches protocol state.  This is a
 * public interface that may be used by protocols to create new sockets when
 * a new connection is received and will be available for accept() on a
 * listen socket.
 *
 * soclose() destroys a socket after possibly waiting for it to disconnect.
 * This is a public interface that socket consumers should use to close and
 * release a socket when done with it.
 *
 * soabort() destroys a socket without waiting for it to disconnect (used
 * only for incoming connections that are already partially or fully
 * connected).
 * This is used internally by the socket layer when clearing
 * listen socket queues (due to overflow or close on the listen socket), but
 * is also a public interface protocols may use to abort connections in
 * their incomplete listen queues should they no longer be required.  Sockets
 * placed in completed connection listen queues should not be aborted.
 *
 * sofree() will free a socket and its protocol state if all references on
 * the socket have been released, and is the interface called to attempt to
 * free a socket when a reference is removed.  This is a socket layer private
 * interface.
 *
 * NOTE: In addition to socreate() and soclose(), which provide a single
 * socket reference to the consumer to be managed as required, there are two
 * calls to explicitly manage socket references, soref() and sorele().
 * Currently, these are generally required only when transitioning a socket
 * from a listen queue to a file descriptor, in order to prevent garbage
 * collection of the socket at an untimely moment.  For a number of reasons,
 * these interfaces are not preferred, and should be avoided.
 *
 * XXXRW: The behavior of sockets after soclose() but before the last
 * sorele() is poorly defined.  We can probably entirely eliminate them with
 * a little work, since consumers are managing references anyway.
 */
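
/*
 * Example (an illustrative sketch, not part of this file): a kernel
 * consumer would typically pair socreate() with soclose(), e.g. to open
 * and then release a TCP socket.  The thread/credential arguments shown
 * are assumptions about the caller's context.
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
 *	    td->td_ucred, td);
 *	if (error == 0)
 *		error = soclose(so);
 */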

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_mac.h"
#include "opt_zero.h"
#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>

#include <vm/uma.h>

#ifdef COMPAT_IA32
#include <sys/mount.h>
#include <compat/freebsd32/freebsd32.h>

extern struct sysentvec ia32_freebsd_sysvec;
#endif

static int	soreceive_rcvoob(struct socket *so, struct uio *uio,
		    int flags);

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static struct filterops solisten_filtops =
	{ 1, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ 1, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ 1, NULL, filt_sowdetach, filt_sowrite };

uma_zone_t socket_zone;
so_gen_t so_gencnt;	/* generation count for sockets */

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

SYSCTL_DECL(_kern_ipc);

static int somaxconn = SOMAXCONN;
static int somaxconn_sysctl(SYSCTL_HANDLER_ARGS);
/* XXX: we don't have SYSCTL_USHORT */
SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLTYPE_UINT | CTLFLAG_RW,
    0, sizeof(int), somaxconn_sysctl, "I", "Maximum pending socket connection "
    "queue size");
static int numopensockets;
SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
    &numopensockets, 0, "Number of open sockets");
#ifdef ZERO_COPY_SOCKETS
/* These aren't static because they're used in other files. */
int so_zero_copy_send = 1;
int so_zero_copy_receive = 1;
SYSCTL_NODE(_kern_ipc, OID_AUTO, zero_copy, CTLFLAG_RD, 0,
    "Zero copy controls");
SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, receive, CTLFLAG_RW,
    &so_zero_copy_receive, 0, "Enable zero copy receive");
SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, send, CTLFLAG_RW,
    &so_zero_copy_send, 0, "Enable zero copy send");
#endif /* ZERO_COPY_SOCKETS */
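
/*
 * Example (an assumption about typical usage, not part of this file): the
 * zero-copy knobs above appear under kern.ipc.zero_copy when the kernel is
 * built with ZERO_COPY_SOCKETS, so an administrator could toggle them from
 * userland with sysctl(8):
 *
 *	sysctl kern.ipc.zero_copy.send=1
 *	sysctl kern.ipc.zero_copy.receive=0
 */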

/*
 * accept_mtx locks down per-socket fields relating to accept queues.  See
 * socketvar.h for an annotation of the protected fields of struct socket.
 */
struct mtx accept_mtx;
MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);

/*
 * so_global_mtx protects so_gencnt, numopensockets, and the per-socket
 * so_gencnt field.
 */
static struct mtx so_global_mtx;
MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_glabel", MTX_DEF);

/*
 * Socket operation routines.  These routines are called by the routines in
 * sys_socket.c or from a system process, and implement the semantics of
 * socket operations by switching out to the protocol specific routines.
 */

/*
 * Get a socket structure from our zone, and initialize it.  Note that it
 * would probably be better to allocate socket and PCB at the same time, but
 * I'm not convinced that all the protocols can be easily modified to do
 * this.
 *
 * soalloc() returns a socket with a ref count of 0.
 */
struct socket *
soalloc(int mflags)
{
	struct socket *so;

	so = uma_zalloc(socket_zone, mflags | M_ZERO);
	if (so == NULL)
		return (NULL);
#ifdef MAC
	if (mac_init_socket(so, mflags) != 0) {
		uma_zfree(socket_zone, so);
		return (NULL);
	}
#endif
	SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
	SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
	TAILQ_INIT(&so->so_aiojobq);
	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	++numopensockets;
	mtx_unlock(&so_global_mtx);
	return (so);
}

/*
 * socreate returns a socket with a ref count of 1.  The socket should be
 * closed with soclose().
 */
int
socreate(dom, aso, type, proto, cred, td)
	int dom;
	struct socket **aso;
	int type;
	int proto;
	struct ucred *cred;
	struct thread *td;
{
	struct protosw *prp;
	struct socket *so;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL || prp->pr_usrreqs->pru_attach == NULL ||
	    prp->pr_usrreqs->pru_attach == pru_attach_notsupp)
		return (EPROTONOSUPPORT);

	if (jailed(cred) && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(M_WAITOK);
	if (so == NULL)
		return (ENOBUFS);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(cred);
	so->so_proto = prp;
#ifdef MAC
	mac_create_socket(cred, so);
#endif
	knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv),
	    NULL, NULL, NULL);
	knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd),
	    NULL, NULL, NULL);
	so->so_count = 1;
	error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
	if (error) {
		ACCEPT_LOCK();
		SOCK_LOCK(so);
		so->so_state |= SS_NOFDREF;
		sorele(so);
		return (error);
	}
	*aso = so;
	return (0);
}

int
sobind(so, nam, td)
	struct socket *so;
	struct sockaddr *nam;
	struct thread *td;
{

	return ((*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td));
}

void
sodealloc(struct socket *so)
{

	KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
	KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL"));

	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	mtx_unlock(&so_global_mtx);
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
	/* remove accept filter if one is present. */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
#endif
#ifdef MAC
	mac_destroy_socket(so);
#endif
	crfree(so->so_cred);
	SOCKBUF_LOCK_DESTROY(&so->so_snd);
	SOCKBUF_LOCK_DESTROY(&so->so_rcv);
	uma_zfree(socket_zone, so);
	mtx_lock(&so_global_mtx);
	--numopensockets;
	mtx_unlock(&so_global_mtx);
}

/*
 * solisten() transitions a socket from a non-listening state to a listening
 * state, but can also be used to update the listen queue depth on an
 * existing listen socket.  The protocol will call back into the sockets
 * layer using solisten_proto_check() and solisten_proto() to check and set
 * socket-layer listen state.  Call backs are used so that the protocol can
 * acquire both protocol and socket layer locks in whatever order is required
 * by the protocol.
 *
 * Protocol implementors are advised to hold the socket lock across the
 * socket-layer test and set to avoid races at the socket layer.
 */
int
solisten(so, backlog, td)
	struct socket *so;
	int backlog;
	struct thread *td;
{

	return ((*so->so_proto->pr_usrreqs->pru_listen)(so, backlog, td));
}

int
solisten_proto_check(so)
	struct socket *so;
{

	SOCK_LOCK_ASSERT(so);

	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
	    SS_ISDISCONNECTING))
		return (EINVAL);
	return (0);
}

void
solisten_proto(so, backlog)
	struct socket *so;
	int backlog;
{

	SOCK_LOCK_ASSERT(so);

	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	so->so_options |= SO_ACCEPTCONN;
}
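
/*
 * Example (a minimal sketch under assumptions, not a real protocol): a
 * protocol's pru_listen method would bracket the socket-layer check and
 * set with the socket lock, acquiring its own locks first as required.
 * The name foo_listen is hypothetical.
 *
 *	static int
 *	foo_listen(struct socket *so, int backlog, struct thread *td)
 *	{
 *		int error;
 *
 *		SOCK_LOCK(so);
 *		error = solisten_proto_check(so);
 *		if (error == 0)
 *			solisten_proto(so, backlog);
 *		SOCK_UNLOCK(so);
 *		return (error);
 *	}
 */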

/*
 * Attempt to free a socket.  This should really be sotryfree().
 *
 * sofree() will succeed if:
 *
 * - There are no outstanding file descriptor references or related consumers
 *   (so_count == 0).
 *
 * - The socket has been closed by user space, if ever open (SS_NOFDREF).
 *
 * - The protocol does not have an outstanding strong reference on the socket
 *   (SS_PROTOREF).
 *
 * - The socket is not in a completed connection queue, where a process may
 *   have been notified that it is present.  If it were removed, the user
 *   process could block in accept() despite select() saying the socket was
 *   ready.
 *
 * Otherwise, it will quietly abort so that a future call to sofree(), when
 * conditions are right, can succeed.
 */
void
sofree(so)
	struct socket *so;
{
	struct socket *head;

	ACCEPT_LOCK_ASSERT();
	SOCK_LOCK_ASSERT(so);

	if ((so->so_state & SS_NOFDREF) == 0 || so->so_count != 0 ||
	    (so->so_state & SS_PROTOREF) || (so->so_qstate & SQ_COMP)) {
		SOCK_UNLOCK(so);
		ACCEPT_UNLOCK();
		return;
	}

	head = so->so_head;
	if (head != NULL) {
		KASSERT((so->so_qstate & SQ_COMP) != 0 ||
		    (so->so_qstate & SQ_INCOMP) != 0,
		    ("sofree: so_head != NULL, but neither SQ_COMP nor "
		    "SQ_INCOMP"));
		KASSERT((so->so_qstate & SQ_COMP) == 0 ||
		    (so->so_qstate & SQ_INCOMP) == 0,
		    ("sofree: so->so_qstate is SQ_COMP and also SQ_INCOMP"));
		TAILQ_REMOVE(&head->so_incomp, so, so_list);
		head->so_incqlen--;
		so->so_qstate &= ~SQ_INCOMP;
		so->so_head = NULL;
	}
	KASSERT((so->so_qstate & SQ_COMP) == 0 &&
	    (so->so_qstate & SQ_INCOMP) == 0,
	    ("sofree: so_head == NULL, but still SQ_COMP(%d) or SQ_INCOMP(%d)",
	    so->so_qstate & SQ_COMP, so->so_qstate & SQ_INCOMP));
	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();

	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_flags |= SB_NOINTR;
	(void)sblock(&so->so_snd, M_WAITOK);
	/*
	 * socantsendmore_locked() drops the socket buffer mutex so that it
	 * can safely perform wakeups.  Re-acquire the mutex before
	 * continuing.
	 */
	socantsendmore_locked(so);
	SOCKBUF_LOCK(&so->so_snd);
	sbunlock(&so->so_snd);
	sbrelease_locked(&so->so_snd, so);
	SOCKBUF_UNLOCK(&so->so_snd);
	sorflush(so);
	knlist_destroy(&so->so_rcv.sb_sel.si_note);
	knlist_destroy(&so->so_snd.sb_sel.si_note);
	sodealloc(so);
}

/*
 * Close a socket on last file table reference removal.  Initiate disconnect
 * if connected.  Free socket when disconnect complete.
 *
 * This function will sorele() the socket.  Note that soclose() may be
 * called prior to the ref count reaching zero.  The actual socket
 * structure will not be freed until the ref count reaches zero.
 */
int
soclose(so)
	struct socket *so;
{
	int error = 0;

	KASSERT(!(so->so_state & SS_NOFDREF), ("soclose: SS_NOFDREF on enter"));

	funsetown(&so->so_sigio);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;
		ACCEPT_LOCK();
		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			so->so_incqlen--;
			sp->so_qstate &= ~SQ_INCOMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			soabort(sp);
			ACCEPT_LOCK();
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			sp->so_qstate &= ~SQ_COMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			soabort(sp);
			ACCEPT_LOCK();
		}
		ACCEPT_UNLOCK();
	}
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo,
				    PSOCK | PCATCH, "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}

drop:
	(*so->so_proto->pr_usrreqs->pru_detach)(so);
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF"));
	so->so_state |= SS_NOFDREF;
	sorele(so);
	return (error);
}

/*
 * soabort() allows the socket code or protocol code to detach a socket that
 * has been in an incomplete or completed listen queue, but has not yet been
 * accepted.
 *
 * This interface is tricky, because it is called on an unreferenced socket,
 * and must be called only by a thread that has actually removed the socket
 * from the listen queue it was on, or races with other threads are risked.
 *
 * This interface will call into the protocol code, so must not be called
 * with any socket locks held.  Protocols do call it while holding their own
 * recursible protocol mutexes, but this is something that should be subject
 * to review in the future.
 *
 * XXXRW: Why do we maintain a distinction between pru_abort() and
 * pru_detach()?
 */
void
soabort(so)
	struct socket *so;
{

	/*
	 * In as much as is possible, assert that no references to this
	 * socket are held.  This is not quite the same as asserting that the
	 * current thread is responsible for arranging for no references, but
	 * is as close as we can get for now.
	 */
	KASSERT(so->so_count == 0, ("soabort: so_count"));
	KASSERT((so->so_state & SS_PROTOREF) == 0, ("soabort: SS_PROTOREF"));
	KASSERT(so->so_state & SS_NOFDREF, ("soabort: !SS_NOFDREF"));
	KASSERT((so->so_qstate & SQ_COMP) == 0, ("soabort: SQ_COMP"));
	KASSERT((so->so_qstate & SQ_INCOMP) == 0, ("soabort: SQ_INCOMP"));

	(*so->so_proto->pr_usrreqs->pru_abort)(so);
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	sofree(so);
}

int
soaccept(so, nam)
	struct socket *so;
	struct sockaddr **nam;
{
	int error;

	SOCK_LOCK(so);
	KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF"));
	so->so_state &= ~SS_NOFDREF;
	SOCK_UNLOCK(so);
	error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
	return (error);
}

int
soconnect(so, nam, td)
	struct socket *so;
	struct sockaddr *nam;
	struct thread *td;
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.  This allows
	 * user to disconnect by connecting to, e.g., a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from previous connection from
		 * biting us.
		 */
		so->so_error = 0;
		error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, td);
	}

	return (error);
}

int
soconnect2(so1, so2)
	struct socket *so1;
	struct socket *so2;
{

	return ((*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2));
}

int
sodisconnect(so)
	struct socket *so;
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);
	if (so->so_state & SS_ISDISCONNECTING)
		return (EALREADY);
	error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
	return (error);
}

#ifdef ZERO_COPY_SOCKETS
struct so_zerocopy_stats{
	int size_ok;
	int align_ok;
	int found_ifp;
};
struct so_zerocopy_stats so_zerocp_stats = {0,0,0};
#include <netinet/in.h>
#include <net/route.h>
#include <netinet/in_pcb.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#endif /*ZERO_COPY_SOCKETS*/

/*
 * sosend_copyin() accepts a uio and prepares an mbuf chain holding part or
 * all of the data referenced by the uio.  If desired, it uses zero-copy.
 * *space will be updated to reflect data copied in.
 *
 * NB: If atomic I/O is requested, the caller must already have checked that
 * space can hold resid bytes.
 *
 * NB: In the event of an error, the caller may need to free the partial
 * chain pointed to by *retmp.  The contents of both *uio and *space may be
 * modified even in the case of an error.
 */
static int
sosend_copyin(struct uio *uio, struct mbuf **retmp, int atomic, long *space,
    int flags)
{
	struct mbuf *m, **mp, *top;
	long len, resid;
	int error;
#ifdef ZERO_COPY_SOCKETS
	int cow_send;
#endif

	*retmp = top = NULL;
	mp = &top;
	len = 0;
	resid = uio->uio_resid;
	error = 0;
	do {
#ifdef ZERO_COPY_SOCKETS
		cow_send = 0;
#endif /* ZERO_COPY_SOCKETS */
		if (resid >= MINCLSIZE) {
#ifdef ZERO_COPY_SOCKETS
			if (top == NULL) {
				MGETHDR(m, M_TRYWAIT, MT_DATA);
				if (m == NULL) {
					error = ENOBUFS;
					goto out;
				}
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;
			} else {
				MGET(m, M_TRYWAIT, MT_DATA);
				if (m == NULL) {
					error = ENOBUFS;
					goto out;
				}
			}
			if (so_zero_copy_send &&
			    resid >= PAGE_SIZE &&
			    *space >= PAGE_SIZE &&
			    uio->uio_iov->iov_len >= PAGE_SIZE) {
				so_zerocp_stats.size_ok++;
				so_zerocp_stats.align_ok++;
				cow_send = socow_setup(m, uio);
				len = cow_send;
			}
			if (!cow_send) {
				MCLGET(m, M_TRYWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					m_free(m);
					m = NULL;
				} else {
					len = min(min(MCLBYTES, resid),
					    *space);
				}
			}
#else /* ZERO_COPY_SOCKETS */
			if (top == NULL) {
				m = m_getcl(M_TRYWAIT, MT_DATA, M_PKTHDR);
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;
			} else
				m = m_getcl(M_TRYWAIT, MT_DATA, 0);
			len = min(min(MCLBYTES, resid), *space);
#endif /* ZERO_COPY_SOCKETS */
		} else {
			if (top == NULL) {
				m = m_gethdr(M_TRYWAIT, MT_DATA);
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;

				len = min(min(MHLEN, resid), *space);
				/*
				 * For datagram protocols, leave room
				 * for protocol headers in first mbuf.
				 */
				if (atomic && m && len < MHLEN)
					MH_ALIGN(m, len);
			} else {
				m = m_get(M_TRYWAIT, MT_DATA);
				len = min(min(MLEN, resid), *space);
			}
		}
		if (m == NULL) {
			error = ENOBUFS;
			goto out;
		}

		*space -= len;
#ifdef ZERO_COPY_SOCKETS
		if (cow_send)
			error = 0;
		else
#endif /* ZERO_COPY_SOCKETS */
		error = uiomove(mtod(m, void *), (int)len, uio);
		resid = uio->uio_resid;
		m->m_len = len;
		*mp = m;
		top->m_pkthdr.len += len;
		if (error)
			goto out;
		mp = &m->m_next;
		if (resid <= 0) {
			if (flags & MSG_EOR)
				top->m_flags |= M_EOR;
			break;
		}
	} while (*space > 0 && atomic);
out:
	*retmp = top;
	return (error);
}
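
/*
 * Illustrative note (an assumption about caller behavior, mirroring the
 * second NB above): a caller of sosend_copyin() is expected to free any
 * partial chain on error, roughly:
 *
 *	error = sosend_copyin(uio, &top, atomic, &space, flags);
 *	if (error != 0 && top != NULL)
 *		m_freem(top);
 */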

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)

int
sosend_dgram(so, addr, uio, top, control, flags, td)
	struct socket *so;
	struct sockaddr *addr;
	struct uio *uio;
	struct mbuf *top;
	struct mbuf *control;
	int flags;
	struct thread *td;
{
	long space, resid;
	int clen = 0, error, dontroute;
	int atomic = sosendallatonce(so) || top;

	KASSERT(so->so_type == SOCK_DGRAM, ("sodgram_send: !SOCK_DGRAM"));
	KASSERT(so->so_proto->pr_flags & PR_ATOMIC,
	    ("sodgram_send: !PR_ATOMIC"));

	if (uio != NULL)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.  However, space must be
	 * signed, as it might be less than 0 if we over-committed, and we
	 * must use a signed comparison of space and resid.  On the other
	 * hand, a negative resid causes us to loop sending 0-length
	 * segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0;
	if (td != NULL)
		td->td_proc->p_stats->p_ru.ru_msgsnd++;
	if (control != NULL)
		clen = control->m_len;

	SOCKBUF_LOCK(&so->so_snd);
	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		SOCKBUF_UNLOCK(&so->so_snd);
		error = EPIPE;
		goto out;
	}
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		SOCKBUF_UNLOCK(&so->so_snd);
		goto out;
	}
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		/*
		 * `sendto' and `sendmsg' is allowed on a connection-based
		 * socket if it supports implied connect.  Return ENOTCONN if
		 * not connected and no address is supplied.
		 */
		if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
		    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
			if ((so->so_state & SS_ISCONFIRMING) == 0 &&
			    !(resid == 0 && clen != 0)) {
				SOCKBUF_UNLOCK(&so->so_snd);
				error = ENOTCONN;
				goto out;
			}
		} else if (addr == NULL) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED)
				error = ENOTCONN;
			else
				error = EDESTADDRREQ;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto out;
		}
	}

	/*
	 * Do we need MSG_OOB support in SOCK_DGRAM?  Signs here may be a
	 * problem and need fixing.
	 */
	space = sbspace(&so->so_snd);
	if (flags & MSG_OOB)
		space += 1024;
	space -= clen;
	if (resid > space) {
		error = EMSGSIZE;
		goto out;
	}
	SOCKBUF_UNLOCK(&so->so_snd);
	if (uio == NULL) {
		resid = 0;
		if (flags & MSG_EOR)
			top->m_flags |= M_EOR;
	} else {
		error = sosend_copyin(uio, &top, atomic, &space, flags);
		if (error)
			goto out;
		resid = uio->uio_resid;
	}
	KASSERT(resid == 0, ("sosend_dgram: resid != 0"));
	/*
	 * XXXRW: Frobbing SO_DONTROUTE here is even worse without sblock
	 * than with.
	 */
	if (dontroute) {
		SOCK_LOCK(so);
		so->so_options |= SO_DONTROUTE;
		SOCK_UNLOCK(so);
	}
	/*
	 * XXX all the SBS_CANTSENDMORE checks previously done could be out
	 * of date.  We could have received a reset packet in an interrupt
	 * or maybe we slept while doing page faults in uiomove() etc.  We
	 * could probably recheck again inside the locking protection here,
	 * but there are probably other places that this also happens.  We
	 * must rethink this.
	 */
	error = (*so->so_proto->pr_usrreqs->pru_send)(so,
	    (flags & MSG_OOB) ? PRUS_OOB :
	/*
	 * If the user set MSG_EOF, the protocol understands this flag and
	 * nothing left to send then use PRU_SEND_EOF instead of PRU_SEND.
	 */
	    ((flags & MSG_EOF) &&
	     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
	     (resid <= 0)) ?
		PRUS_EOF :
		/* If there is more to send set PRUS_MORETOCOME */
		(resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
	    top, addr, control, td);
	if (dontroute) {
		SOCK_LOCK(so);
		so->so_options &= ~SO_DONTROUTE;
		SOCK_UNLOCK(so);
	}
	clen = 0;
	control = NULL;
	top = NULL;
out:
	if (top != NULL)
		m_freem(top);
	if (control != NULL)
		m_freem(control);
	return (error);
}

/*
 * Send on a socket.  If send must go all at once and message is larger than
 * send buffering, then hard error.  Lock against other senders.  If must go
 * all at once and not enough room now, then inform user that this would
 * block and do nothing.  Otherwise, if nonblocking, send as much as
 * possible.  The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null if uio is not).
 * Data provided in mbuf chain must be small enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers must check for short
 * counts if EINTR/ERESTART are returned.  Data and control buffers are
 * freed on return.
 */
#define	snderr(errno)	{ error = (errno); goto release; }
int
sosend(so, addr, uio, top, control, flags, td)
	struct socket *so;
	struct sockaddr *addr;
	struct uio *uio;
	struct mbuf *top;
	struct mbuf *control;
	int flags;
	struct thread *td;
{
	long space, resid;
	int clen = 0, error, dontroute;
	int atomic = sosendallatonce(so) || top;

	if (uio != NULL)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.  However, space must be
	 * signed, as it might be less than 0 if we over-committed, and we
	 * must use a signed comparison of space and resid.  On the other
	 * hand, a negative resid causes us to loop sending 0-length
	 * segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td != NULL)
		td->td_proc->p_stats->p_ru.ru_msgsnd++;
	if (control != NULL)
		clen = control->m_len;

	SOCKBUF_LOCK(&so->so_snd);
restart:
	SOCKBUF_LOCK_ASSERT(&so->so_snd);
	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out_locked;
	do {
		SOCKBUF_LOCK_ASSERT(&so->so_snd);
		if (so->so_snd.sb_state & SBS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' is allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					snderr(ENOTCONN);
			} else if (addr == NULL)
				snderr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
				    ENOTCONN : EDESTADDRREQ);
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat)
			snderr(EMSGSIZE);
		if (space < resid + clen &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO))
				snderr(EWOULDBLOCK);
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			if (error)
				goto out_locked;
			goto restart;
		}
		SOCKBUF_UNLOCK(&so->so_snd);
		space -= clen;
		do {
			if (uio == NULL) {
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else {
				error = sosend_copyin(uio, &top, atomic,
				    &space, flags);
				if (error != 0) {
					SOCKBUF_LOCK(&so->so_snd);
					goto release;
				}
				resid = uio->uio_resid;
			}
			if (dontroute) {
				SOCK_LOCK(so);
				so->so_options |= SO_DONTROUTE;
				SOCK_UNLOCK(so);
			}
			/*
			 * XXX all the SBS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We
			 * could probably recheck again inside the locking
			 * protection here, but there are probably other
			 * places that this also happens.  We must rethink
			 * this.
			 */
			error = (*so->so_proto->pr_usrreqs->pru_send)(so,
			    (flags & MSG_OOB) ? PRUS_OOB :
			/*
			 * If the user set MSG_EOF, the protocol understands
			 * this flag and nothing left to send then use
			 * PRU_SEND_EOF instead of PRU_SEND.
			 */
			    ((flags & MSG_EOF) &&
			     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			     (resid <= 0)) ?
				PRUS_EOF :
			/* If there is more to send set PRUS_MORETOCOME */
			    (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
			    top, addr, control, td);
			if (dontroute) {
				SOCK_LOCK(so);
				so->so_options &= ~SO_DONTROUTE;
				SOCK_UNLOCK(so);
			}
			clen = 0;
			control = NULL;
			top = NULL;
			if (error) {
				SOCKBUF_LOCK(&so->so_snd);
				goto release;
			}
		} while (resid && space > 0);
		SOCKBUF_LOCK(&so->so_snd);
	} while (resid);

release:
	SOCKBUF_LOCK_ASSERT(&so->so_snd);
	sbunlock(&so->so_snd);
out_locked:
	SOCKBUF_LOCK_ASSERT(&so->so_snd);
	SOCKBUF_UNLOCK(&so->so_snd);
out:
	if (top != NULL)
		m_freem(top);
	if (control != NULL)
		m_freem(control);
	return (error);
}
#undef snderr
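
/*
 * Example (a sketch under assumptions, not part of this file): a kernel
 * consumer sending a prebuilt mbuf chain on a connected socket would call
 * sosend() with no uio and no address; sosend() takes ownership of the
 * chain and frees it on error.
 *
 *	error = sosend(so, NULL, NULL, m, NULL, 0, curthread);
 */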

/*
 * The part of soreceive() that implements reading non-inline out-of-band
 * data from a socket.  For more complete comments, see soreceive(), from
 * which this code originated.
 *
 * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is
 * unable to return an mbuf chain to the caller.
 */
static int
soreceive_rcvoob(so, uio, flags)
	struct socket *so;
	struct uio *uio;
	int flags;
{
	struct protosw *pr = so->so_proto;
	struct mbuf *m;
	int error;

	KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0"));

	m = m_get(M_TRYWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
	if (error)
		goto bad;
	do {
#ifdef ZERO_COPY_SOCKETS
		if (so_zero_copy_receive) {
			int disposable;

			if ((m->m_flags & M_EXT)
			    && (m->m_ext.ext_type == EXT_DISPOSABLE))
				disposable = 1;
			else
				disposable = 0;

			error = uiomoveco(mtod(m, void *),
			    min(uio->uio_resid, m->m_len),
			    uio, disposable);
		} else
#endif /* ZERO_COPY_SOCKETS */
		error = uiomove(mtod(m, void *),
		    (int) min(uio->uio_resid, m->m_len), uio);
		m = m_free(m);
	} while (uio->uio_resid && error == 0 && m);
bad:
	if (m != NULL)
		m_freem(m);
	return (error);
}

/*
 * Following replacement or removal of the first mbuf on the first mbuf
 * chain of a socket buffer, push necessary state changes back into the
 * socket buffer so that other consumers see the values consistently.
 * 'nextrecord' is the caller's locally stored value of the original value
 * of sb->sb_mb->m_nextpkt which must be restored when the lead mbuf
 * changes.  NOTE: 'nextrecord' may be NULL.
 */
static __inline void
sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord)
{

	SOCKBUF_LOCK_ASSERT(sb);
	/*
	 * First, update for the new value of nextrecord.  If necessary, make
	 * it the first record.
	 */
	if (sb->sb_mb != NULL)
		sb->sb_mb->m_nextpkt = nextrecord;
	else
		sb->sb_mb = nextrecord;

	/*
	 * Now update any dependent socket buffer fields to reflect the new
	 * state.  This is an expanded inline of SB_EMPTY_FIXUP(), with the
	 * addition of a second clause that takes care of the case where
	 * sb_mb has been updated, but remains the last record.
	 */
	if (sb->sb_mb == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (sb->sb_mb->m_nextpkt == NULL)
		sb->sb_lastrecord = sb->sb_mb;
}
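
/*
 * Illustrative walk-through (an assumed example, not code from this file):
 * if the first record consisted of a single mbuf that the caller just
 * freed, the caller saved nextrecord = sb->sb_mb->m_nextpkt beforehand and
 * then calls sockbuf_pushsync(sb, nextrecord), which promotes nextrecord
 * to sb->sb_mb and refreshes sb_mbtail/sb_lastrecord to match.
 */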

/*
 * Implement receive operations on a socket.  We depend on the way that
 * records are added to the sockbuf by sbappend*.  In particular, each
 * record (mbufs linked through m_next) must begin with an address if the
 * protocol so specifies, followed by an optional mbuf or mbufs containing
 * ancillary data, and then zero or more mbufs of data.  In order to avoid
 * blocking network interrupts for the entire time here, we splx() while
 * doing the actual copy to user space.  Although the sockbuf is locked,
 * new data may still be appended, and thus we must maintain consistency
 * of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying an
 * mbuf **mp0 for use in returning the chain.  The uio is then used only
 * for the count in uio_resid.
 */
int
soreceive(so, psa, uio, mp0, controlp, flagsp)
	struct socket *so;
	struct sockaddr **psa;
	struct uio *uio;
	struct mbuf **mp0;
	struct mbuf **controlp;
	int *flagsp;
{
	struct mbuf *m, **mp;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff, type = 0;
	int orig_resid = uio->uio_resid;

	mp = mp0;
	if (psa != NULL)
		*psa = NULL;
	if (controlp != NULL)
		*controlp = NULL;
	if (flagsp != NULL)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB)
		return (soreceive_rcvoob(so, uio, flags));
	if (mp != NULL)
		*mp = NULL;
	if ((pr->pr_flags & PR_WANTRCVD) && (so->so_state & SS_ISCONFIRMING)
	    && uio->uio_resid)
		(*pr->pr_usrreqs->pru_rcvd)(so, 0);

	SOCKBUF_LOCK(&so->so_rcv);
restart:
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto out;

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more (subject
	 * to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), or
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning a
	 * short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.sb_cc,
		    ("receive: m == %p so->so_rcv.sb_cc == %u",
		    m, so->so_rcv.sb_cc));
		if (so->so_error) {
			if (m != NULL)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m != NULL; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0)
			goto release;
		if ((so->so_state & SS_NBIO) ||
		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
			error = EWOULDBLOCK;
			goto release;
		}
		SBLASTRECORDCHK(&so->so_rcv);
		SBLASTMBUFCHK(&so->so_rcv);
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		if (error)
			goto out;
		goto restart;
	}
dontblock:
	/*
	 * From this point onward, we maintain 'nextrecord' as a cache of the
	 * pointer to the next record in the socket buffer.  We must keep the
	 * various socket buffer pointers and local stack versions of the
	 * pointers in sync, pushing out modifications before dropping the
	 * socket buffer mutex, and re-reading them when picking it up.
	 *
	 * Otherwise, we will race with the network stack appending new data
	 * or records onto the socket buffer by using inconsistent/stale
	 * versions of the field, possibly resulting in socket buffer
	 * corruption.
	 *
	 * By holding the high-level sblock(), we prevent simultaneous
	 * readers from pulling off the front of the socket buffer.
	 */
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (uio->uio_td)
		uio->uio_td->td_proc->p_stats->p_ru.ru_msgrcv++;
	KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME,
		    ("m->m_type == %d", m->m_type));
		orig_resid = 0;
		if (psa != NULL)
			*psa = sodupsockaddr(mtod(m, struct sockaddr *),
			    M_NOWAIT);
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			so->so_rcv.sb_mb = m_free(m);
			m = so->so_rcv.sb_mb;
			sockbuf_pushsync(&so->so_rcv, nextrecord);
		}
	}

	/*
	 * Process one or more MT_CONTROL mbufs present before any data mbufs
	 * in the first mbuf chain on the socket buffer.  If MSG_PEEK, we
	 * just copy the data; if !MSG_PEEK, we call into the protocol to
	 * perform externalization (or freeing if controlp == NULL).
	 */
	if (m != NULL && m->m_type == MT_CONTROL) {
		struct mbuf *cm = NULL, *cmn;
		struct mbuf **cme = &cm;

		do {
			if (flags & MSG_PEEK) {
				if (controlp != NULL) {
					*controlp = m_copy(m, 0, m->m_len);
					controlp = &(*controlp)->m_next;
				}
				m = m->m_next;
			} else {
				sbfree(&so->so_rcv, m);
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = NULL;
				*cme = m;
				cme = &(*cme)->m_next;
				m = so->so_rcv.sb_mb;
			}
		} while (m != NULL && m->m_type == MT_CONTROL);
		if ((flags & MSG_PEEK) == 0)
			sockbuf_pushsync(&so->so_rcv, nextrecord);
		while (cm != NULL) {
			cmn = cm->m_next;
			cm->m_next = NULL;
			if (pr->pr_domain->dom_externalize != NULL) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				error = (*pr->pr_domain->dom_externalize)
				    (cm, controlp);
				SOCKBUF_LOCK(&so->so_rcv);
			} else if (controlp != NULL)
				*controlp = cm;
			else
				m_freem(cm);
			if (controlp != NULL) {
				orig_resid = 0;
				while (*controlp != NULL)
					controlp = &(*controlp)->m_next;
			}
			cm = cmn;
		}
		if (so->so_rcv.sb_mb)
			nextrecord = so->so_rcv.sb_mb->m_nextpkt;
		else
			nextrecord = NULL;
		orig_resid = 0;
	}
	if (m != NULL) {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(m->m_nextpkt == nextrecord,
			    ("soreceive: post-control, nextrecord !sync"));
			if (nextrecord == NULL) {
				KASSERT(so->so_rcv.sb_mb == m,
				    ("soreceive: post-control, sb_mb!=m"));
				KASSERT(so->so_rcv.sb_lastrecord == m,
				    ("soreceive: post-control, lastrecord!=m"));
			}
		}
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	} else {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(so->so_rcv.sb_mb == nextrecord,
			    ("soreceive: sb_mb != nextrecord"));
			if (so->so_rcv.sb_mb == NULL) {
				KASSERT(so->so_rcv.sb_lastrecord == NULL,
				    ("soreceive: sb_lastrecord != NULL"));
			}
		}
	}
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);

	/*
	 * Now continue to read any data mbufs off of the head of the socket
	 * buffer until the read request is satisfied.  Note that 'type' is
	 * used to store the type of any mbuf reads that have happened so far
	 * such that soreceive() can stop reading if the type changes, which
	 * causes soreceive() to return only one of regular data and inline
	 * out-of-band data in a single socket receive operation.
	 */
	moff = 0;
	offset = 0;
	while (m != NULL && uio->uio_resid > 0 && error == 0) {
		/*
		 * If the type of mbuf has changed since the last mbuf
		 * examined ('type'), end the receive operation.
		 */
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
		else
			KASSERT(m->m_type == MT_DATA,
			    ("m->m_type == %d", m->m_type));
		so->so_rcv.sb_state &= ~SBS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.  Otherwise copy
		 * them out via the uio, then free.  Sockbuf must be
		 * consistent here (points to current mbuf, it points to next
		 * record) when we drop priority; we must note any additions
		 * to the sockbuf when we block interrupts again.
		 */
		if (mp == NULL) {
			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
			SBLASTRECORDCHK(&so->so_rcv);
			SBLASTMBUFCHK(&so->so_rcv);
			SOCKBUF_UNLOCK(&so->so_rcv);
#ifdef ZERO_COPY_SOCKETS
			if (so_zero_copy_receive) {
				int disposable;

				if ((m->m_flags & M_EXT)
				    && (m->m_ext.ext_type == EXT_DISPOSABLE))
					disposable = 1;
				else
					disposable = 0;

				error = uiomoveco(mtod(m, char *) + moff,
				    (int)len, uio,
				    disposable);
			} else
#endif /* ZERO_COPY_SOCKETS */
			error = uiomove(mtod(m, char *) + moff, (int)len, uio);
			SOCKBUF_LOCK(&so->so_rcv);
			if (error)
				goto release;
		} else
			uio->uio_resid -= len;
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp != NULL) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					so->so_rcv.sb_mb = m_free(m);
					m = so->so_rcv.sb_mb;
				}
				sockbuf_pushsync(&so->so_rcv, nextrecord);
				SBLASTRECORDCHK(&so->so_rcv);
				SBLASTMBUFCHK(&so->so_rcv);
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp != NULL) {
					int copy_flag;

					if (flags & MSG_DONTWAIT)
						copy_flag = M_DONTWAIT;
					else
						copy_flag = M_TRYWAIT;
					if (copy_flag == M_TRYWAIT)
						SOCKBUF_UNLOCK(&so->so_rcv);
					*mp = m_copym(m, 0, len, copy_flag);
					if (copy_flag == M_TRYWAIT)
						SOCKBUF_LOCK(&so->so_rcv);
					if (*mp == NULL) {
						/*
						 * m_copym() couldn't
						 * allocate an mbuf.  Adjust
						 * uio_resid back (it was
						 * adjusted down by len
						 * bytes, which we didn't end
						 * up "copying" over).
						 */
						uio->uio_resid += len;
						break;
					}
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_rcv.sb_state |= SBS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket), we
		 * must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return with a
		 * short count but without error.  Keep sockbuf locked
		 * against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && nextrecord == NULL) {
			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
			if (so->so_error || so->so_rcv.sb_state & SBS_CANTRCVMORE)
				break;
			/*
			 * Notify the protocol that some data has been
			 * drained before blocking.
			 */
			if (pr->pr_flags & PR_WANTRCVD) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				(*pr->pr_usrreqs->pru_rcvd)(so, flags);
				SOCKBUF_LOCK(&so->so_rcv);
			}
			SBLASTRECORDCHK(&so->so_rcv);
			SBLASTMBUFCHK(&so->so_rcv);
			error = sbwait(&so->so_rcv);
			if (error)
				goto release;
			m = so->so_rcv.sb_mb;
			if (m != NULL)
				nextrecord = m->m_nextpkt;
		}
	}

	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (m != NULL && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord_locked(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == NULL) {
			/*
			 * First part is an inline SB_EMPTY_FIXUP().  Second
			 * part makes sure sb_lastrecord is up-to-date if
			 * there is still data in the socket buffer.
			 */
			so->so_rcv.sb_mb = nextrecord;
			if (so->so_rcv.sb_mb == NULL) {
				so->so_rcv.sb_mbtail = NULL;
				so->so_rcv.sb_lastrecord = NULL;
			} else if (nextrecord->m_nextpkt == NULL)
				so->so_rcv.sb_lastrecord = nextrecord;
		}
		SBLASTRECORDCHK(&so->so_rcv);
		SBLASTMBUFCHK(&so->so_rcv);
		/*
		 * If soreceive() is being done from the socket callback,
		 * then don't need to generate ACK to peer to update window,
		 * since ACK will be generated on return to TCP.
		 */
		if (!(flags & MSG_SOCALLBCK) &&
		    (pr->pr_flags & PR_WANTRCVD)) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			(*pr->pr_usrreqs->pru_rcvd)(so, flags);
			SOCKBUF_LOCK(&so->so_rcv);
		}
	}
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp != NULL)
		*flagsp |= flags;
release:
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	sbunlock(&so->so_rcv);
out:
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	SOCKBUF_UNLOCK(&so->so_rcv);
	return (error);
}
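
/*
 * Example (a sketch under assumptions, not part of this file): a kernel
 * caller draining a socket into a local buffer would set up a uio and
 * invoke soreceive(); the buf/len names below are hypothetical.
 *
 *	struct uio auio;
 *	struct iovec aiov;
 *	int flags = 0;
 *
 *	aiov.iov_base = buf;
 *	aiov.iov_len = len;
 *	auio.uio_iov = &aiov;
 *	auio.uio_iovcnt = 1;
 *	auio.uio_offset = 0;
 *	auio.uio_resid = len;
 *	auio.uio_segflg = UIO_SYSSPACE;
 *	auio.uio_rw = UIO_READ;
 *	auio.uio_td = curthread;
 *	error = soreceive(so, NULL, &auio, NULL, NULL, &flags);
 */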

int
soshutdown(so, how)
	struct socket *so;
	int how;
{
	struct protosw *pr = so->so_proto;

	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how != SHUT_WR)
		sorflush(so);
	if (how != SHUT_RD)
		return ((*pr->pr_usrreqs->pru_shutdown)(so));
	return (0);
}

void
sorflush(so)
	struct socket *so;
{
	struct sockbuf *sb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	struct sockbuf asb;

	/*
	 * XXXRW: This is quite ugly.  Previously, this code made a copy of
	 * the socket buffer, then zero'd the original to clear the buffer
	 * fields.  However, with mutexes in the socket buffer, this causes
	 * problems.  We only clear the zeroable bits of the original;
	 * however, we have to initialize and destroy the mutex in the copy
	 * so that dom_dispose() and sbrelease() can lock it as needed.
	 */
	SOCKBUF_LOCK(sb);
	sb->sb_flags |= SB_NOINTR;
	(void) sblock(sb, M_WAITOK);
	/*
	 * socantrcvmore_locked() drops the socket buffer mutex so that it
	 * can safely perform wakeups.  Re-acquire the mutex before
	 * continuing.
	 */
	socantrcvmore_locked(so);
	SOCKBUF_LOCK(sb);
	sbunlock(sb);
	/*
	 * Invalidate/clear most of the sockbuf structure, but leave selinfo
	 * and mutex data unchanged.
	 */
	bzero(&asb, offsetof(struct sockbuf, sb_startzero));
	bcopy(&sb->sb_startzero, &asb.sb_startzero,
	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
	bzero(&sb->sb_startzero,
	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
	SOCKBUF_UNLOCK(sb);

	SOCKBUF_LOCK_INIT(&asb, "so_rcv");
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb, so);
	SOCKBUF_LOCK_DESTROY(&asb);
}

/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in an
 * additional variant to handle the case where the option value needs to be
 * some kind of integer, but not a specific size.  In addition to their use
 * here, these functions are also called by the protocol-level pr_ctloutput()
 * routines.
 */
int
sooptcopyin(sopt, buf, len, minlen)
	struct sockopt *sopt;
	void *buf;
	size_t len;
	size_t minlen;
{
	size_t valsize;

	/*
	 * If the user gives us more than we wanted, we ignore it, but if we
	 * don't get the minimum length the caller wants, we return EINVAL.
	 * On success, sopt->sopt_valsize is set to however much we actually
	 * retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return EINVAL;
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	if (sopt->sopt_td != NULL)
		return (copyin(sopt->sopt_val, buf, valsize));

	bcopy(sopt->sopt_val, buf, valsize);
	return (0);
}
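
/*
 * Example (a minimal sketch, not a real protocol): a protocol-level
 * pr_ctloutput() handler would typically use sooptcopyin() to fetch a
 * fixed-size integer option value; foo_ctloutput and FOO_OPTION are
 * hypothetical names.
 *
 *	static int
 *	foo_ctloutput(struct socket *so, struct sockopt *sopt)
 *	{
 *		int error, optval;
 *
 *		if (sopt->sopt_dir == SOPT_SET &&
 *		    sopt->sopt_name == FOO_OPTION) {
 *			error = sooptcopyin(sopt, &optval, sizeof optval,
 *			    sizeof optval);
 *			if (error)
 *				return (error);
 *			... act on optval ...
 *		}
 *		return (ENOPROTOOPT);
 *	}
 */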

/*
 * Kernel version of setsockopt(2).
 *
 * XXX: optlen is size_t, not socklen_t
 */
int
so_setsockopt(struct socket *so, int level, int optname, void *optval,
    size_t optlen)
{
	struct sockopt sopt;

	sopt.sopt_level = level;
	sopt.sopt_name = optname;
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_val = optval;
	sopt.sopt_valsize = optlen;
	sopt.sopt_td = NULL;
	return (sosetopt(so, &sopt));
}
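
/*
 * Example (assumed usage, not from this file): a kernel consumer can set
 * a socket option without constructing a struct sockopt by hand, e.g. to
 * enable SO_REUSEADDR:
 *
 *	int one = 1;
 *
 *	error = so_setsockopt(so, SOL_SOCKET, SO_REUSEADDR, &one,
 *	    sizeof(one));
 */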

int
sosetopt(so, sopt)
	struct socket *so;
	struct sockopt *sopt;
{
	int error, optval;
	struct linger l;
	struct timeval tv;
	u_long val;
#ifdef MAC
	struct mac extmac;
#endif

	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
			    (so, sopt));
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;
#endif
		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			SOCK_LOCK(so);
			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			SOCK_UNLOCK(so);
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_BINTIME:
		case SO_NOSIGPIPE:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				goto bad;
			SOCK_LOCK(so);
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			SOCK_UNLOCK(so);
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so, curthread) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				SOCKBUF_LOCK(&so->so_snd);
				so->so_snd.sb_lowat =
				    (optval > so->so_snd.sb_hiwat) ?
				    so->so_snd.sb_hiwat : optval;
				SOCKBUF_UNLOCK(&so->so_snd);
				break;
			case SO_RCVLOWAT:
				SOCKBUF_LOCK(&so->so_rcv);
				so->so_rcv.sb_lowat =
				    (optval > so->so_rcv.sb_hiwat) ?
				    so->so_rcv.sb_hiwat : optval;
				SOCKBUF_UNLOCK(&so->so_rcv);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
#ifdef COMPAT_IA32
			if (curthread->td_proc->p_sysent == &ia32_freebsd_sysvec) {
				struct timeval32 tv32;

				error = sooptcopyin(sopt, &tv32, sizeof tv32,
				    sizeof tv32);
				CP(tv32, tv, tv_sec);
				CP(tv32, tv, tv_usec);
			} else
#endif
				error = sooptcopyin(sopt, &tv, sizeof tv,
				    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
			if (val > INT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = val;
				break;
			}
			break;

		case SO_LABEL:
#ifdef MAC
			error = sooptcopyin(sopt, &extmac, sizeof extmac,
			    sizeof extmac);
			if (error)
				goto bad;
			error = mac_setsockopt_label(sopt->sopt_td->td_ucred,
			    so, &extmac);
#else
			error = EOPNOTSUPP;
#endif
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto != NULL &&
		    so->so_proto->pr_ctloutput != NULL) {
			(void) ((*so->so_proto->pr_ctloutput)
			    (so, sopt));
		}
	}
bad:
	return (error);
}
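
/*
 * Worked example of the SO_SNDTIMEO/SO_RCVTIMEO conversion above,
 * assuming hz = 1000 (so tick = 1000000 / hz = 1000 microseconds per
 * tick): a timeout of { tv_sec = 2, tv_usec = 500000 } becomes
 * val = 2 * 1000 + 500000 / 1000 = 2500 ticks, while a sub-tick
 * timeout such as { tv_sec = 0, tv_usec = 100 } would truncate to 0
 * and is rounded up to 1 tick, since a value of 0 means "no timeout".
 */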

/* Helper routine for getsockopt. */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	int error;
	size_t valsize;

	error = 0;

	/*
	 * Documented get behavior is that we always return a value, possibly
	 * truncated to fit in the user's buffer.  Traditional behavior is
	 * that we always tell the user precisely how much we copied, rather
	 * than something useful like the total amount we had available for
	 * her.  Note that this interface is not idempotent; the entire
	 * answer must be generated ahead of time.
	 */
	valsize = min(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != NULL) {
		if (sopt->sopt_td != NULL)
			error = copyout(buf, sopt->sopt_val, valsize);
		else
			bcopy(buf, sopt->sopt_val, valsize);
	}
	return (error);
}
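
/*
 * Illustrative sketch (not compiled): a protocol-level pr_ctloutput()
 * routine would answer a get request with sooptcopyout(), as in this
 * hypothetical handler for an integer-valued option.  Truncation to
 * the user's buffer size is handled inside sooptcopyout().
 */
#if 0
static int
example_ctloutput_get(struct socket *so, struct sockopt *sopt)
{
	int optval;

	if (sopt->sopt_dir != SOPT_GET)
		return (EINVAL);
	optval = 0;		/* hypothetical protocol state */
	return (sooptcopyout(sopt, &optval, sizeof optval));
}
#endif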

int
sogetopt(so, sopt)
	struct socket *so;
	struct sockopt *sopt;
{
	int error, optval;
	struct linger l;
	struct timeval tv;
#ifdef MAC
	struct mac extmac;
#endif

	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
			    (so, sopt));
		} else
			return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			error = do_getopt_accept_filter(so, sopt);
			break;
#endif
		case SO_LINGER:
			SOCK_LOCK(so);
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			SOCK_UNLOCK(so);
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_ACCEPTCONN:
		case SO_TIMESTAMP:
		case SO_BINTIME:
		case SO_NOSIGPIPE:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			optval = so->so_error;
			so->so_error = 0;
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.sb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.sb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.sb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.sb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
			    so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * tick;
#ifdef COMPAT_IA32
			if (curthread->td_proc->p_sysent == &ia32_freebsd_sysvec) {
				struct timeval32 tv32;

				CP(tv, tv32, tv_sec);
				CP(tv, tv32, tv_usec);
				error = sooptcopyout(sopt, &tv32, sizeof tv32);
			} else
#endif
				error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		case SO_LABEL:
#ifdef MAC
			error = sooptcopyin(sopt, &extmac, sizeof(extmac),
			    sizeof(extmac));
			if (error)
				return (error);
			error = mac_getsockopt_label(sopt->sopt_td->td_ucred,
			    so, &extmac);
			if (error)
				return (error);
			error = sooptcopyout(sopt, &extmac, sizeof extmac);
#else
			error = EOPNOTSUPP;
#endif
			break;

		case SO_PEERLABEL:
#ifdef MAC
			error = sooptcopyin(sopt, &extmac, sizeof(extmac),
			    sizeof(extmac));
			if (error)
				return (error);
			error = mac_getsockopt_peerlabel(
			    sopt->sopt_td->td_ucred, so, &extmac);
			if (error)
				return (error);
			error = sooptcopyout(sopt, &extmac, sizeof extmac);
#else
			error = EOPNOTSUPP;
#endif
			break;

		case SO_LISTENQLIMIT:
			optval = so->so_qlimit;
			goto integer;

		case SO_LISTENQLEN:
			optval = so->so_qlen;
			goto integer;

		case SO_LISTENINCQLEN:
			optval = so->so_incqlen;
			goto integer;

		default:
			error = ENOPROTOOPT;
			break;
		}
		return (error);
	}
}

/* XXX: prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize;

	MGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;
	if (sopt_size > MLEN) {
		MCLGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return ENOBUFS;
		}
		m->m_len = min(MCLBYTES, sopt_size);
	} else {
		m->m_len = min(MLEN, sopt_size);
	}
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size) {
		MGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			m_freem(*mp);
			return ENOBUFS;
		}
		if (sopt_size > MLEN) {
			MCLGET(m, sopt->sopt_td != NULL ? M_TRYWAIT :
			    M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				m_freem(*mp);
				return ENOBUFS;
			}
			m->m_len = min(MCLBYTES, sopt_size);
		} else {
			m->m_len = min(MLEN, sopt_size);
		}
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return (0);
}
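
/*
 * Illustrative sketch (not compiled): soopt_getm() is normally paired
 * with soopt_mcopyin(), below -- first size an mbuf chain to
 * sopt_valsize, then fill it from the option buffer.  Note that
 * soopt_mcopyin() consumes sopt_valsize and advances sopt_val as it
 * copies, and frees the chain itself on copyin() failure.
 */
#if 0
static int
example_sopt_to_mbuf(struct sockopt *sopt, struct mbuf **mp)
{
	int error;

	error = soopt_getm(sopt, mp);		/* allocate the chain */
	if (error != 0)
		return (error);
	error = soopt_mcopyin(sopt, *mp);	/* copy option data in */
	if (error != 0)
		*mp = NULL;			/* chain already freed */
	return (error);
}
#endif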

/* XXX: copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;

	if (sopt->sopt_val == NULL)
		return (0);
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_td != NULL) {
			int error;

			error = copyin(sopt->sopt_val, mtod(m, char *),
			    m->m_len);
			if (error != 0) {
				m_freem(m0);
				return (error);
			}
		} else
			bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
		sopt->sopt_valsize -= m->m_len;
		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
		m = m->m_next;
	}
	if (m != NULL)	/* enough mbufs should have been allocated by soopt_getm() */
		panic("ip6_sooptmcopyin");
	return (0);
}

/* XXX: copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;

	if (sopt->sopt_val == NULL)
		return (0);
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_td != NULL) {
			int error;

			error = copyout(mtod(m, char *), sopt->sopt_val,
			    m->m_len);
			if (error != 0) {
				m_freem(m0);
				return (error);
			}
		} else
			bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
		sopt->sopt_valsize -= m->m_len;
		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* the user-supplied buffer should have covered the chain */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return (0);
}

void
sohasoutofband(so)
	struct socket *so;
{
	if (so->so_sigio != NULL)
		pgsigio(&so->so_sigio, SIGURG, 0);
	selwakeuppri(&so->so_rcv.sb_sel, PSOCK);
}

int
sopoll(struct socket *so, int events, struct ucred *active_cred,
    struct thread *td)
{
	int revents = 0;

	SOCKBUF_LOCK(&so->so_snd);
	SOCKBUF_LOCK(&so->so_rcv);
	if (events & (POLLIN | POLLRDNORM))
		if (soreadable(so))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & POLLINIGNEOF)
		if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat ||
		    !TAILQ_EMPTY(&so->so_comp) || so->so_error)
			revents |= POLLINIGNEOF;

	if (events & (POLLOUT | POLLWRNORM))
		if (sowriteable(so))
			revents |= events & (POLLOUT | POLLWRNORM);

	if (events & (POLLPRI | POLLRDBAND))
		if (so->so_oobmark || (so->so_rcv.sb_state & SBS_RCVATMARK))
			revents |= events & (POLLPRI | POLLRDBAND);

	if (revents == 0) {
		if (events &
		    (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM |
		     POLLRDBAND)) {
			selrecord(td, &so->so_rcv.sb_sel);
			so->so_rcv.sb_flags |= SB_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(td, &so->so_snd.sb_sel);
			so->so_snd.sb_flags |= SB_SEL;
		}
	}

	SOCKBUF_UNLOCK(&so->so_rcv);
	SOCKBUF_UNLOCK(&so->so_snd);
	return (revents);
}
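
/*
 * Illustrative sketch (not compiled): a non-blocking readability check
 * built on sopoll().  sopoll() reports the subset of the requested
 * events that are currently true and, when none are, records select/
 * kevent interest on the relevant socket buffers; it never sleeps.
 */
#if 0
static int
example_is_readable(struct socket *so, struct ucred *cred,
    struct thread *td)
{

	return ((sopoll(so, POLLIN | POLLRDNORM, cred, td) &
	    (POLLIN | POLLRDNORM)) != 0);
}
#endif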

int
soo_kqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;
	struct sockbuf *sb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		sb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		sb = &so->so_snd;
		break;
	default:
		return (EINVAL);
	}

	SOCKBUF_LOCK(sb);
	knlist_add(&sb->sb_sel.si_note, kn, 1);
	sb->sb_flags |= SB_KNOTE;
	SOCKBUF_UNLOCK(sb);
	return (0);
}

static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;

	SOCKBUF_LOCK(&so->so_rcv);
	knlist_remove(&so->so_rcv.sb_sel.si_note, kn, 1);
	if (knlist_empty(&so->so_rcv.sb_sel.si_note))
		so->so_rcv.sb_flags &= ~SB_KNOTE;
	SOCKBUF_UNLOCK(&so->so_rcv);
}

/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so;

	so = kn->kn_fp->f_data;
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);

	kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	} else if (so->so_error)	/* temporary udp error */
		return (1);
	else if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	else
		return (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat);
}

static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;

	SOCKBUF_LOCK(&so->so_snd);
	knlist_remove(&so->so_snd.sb_sel.si_note, kn, 1);
	if (knlist_empty(&so->so_snd.sb_sel.si_note))
		so->so_snd.sb_flags &= ~SB_KNOTE;
	SOCKBUF_UNLOCK(&so->so_snd);
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so;

	so = kn->kn_fp->f_data;
	SOCKBUF_LOCK_ASSERT(&so->so_snd);
	kn->kn_data = sbspace(&so->so_snd);
	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	} else if (so->so_error)	/* temporary udp error */
		return (1);
	else if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	else if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	else
		return (kn->kn_data >= so->so_snd.sb_lowat);
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen;
	return (!TAILQ_EMPTY(&so->so_comp));
}

int
socheckuid(struct socket *so, uid_t uid)
{

	if (so == NULL)
		return (EPERM);
	if (so->so_cred->cr_uid != uid)
		return (EPERM);
	return (0);
}

static int
somaxconn_sysctl(SYSCTL_HANDLER_ARGS)
{
	int error;
	int val;

	val = somaxconn;
	error = sysctl_handle_int(oidp, &val, sizeof(int), req);
	if (error || !req->newptr)
		return (error);

	if (val < 1 || val > USHRT_MAX)
		return (EINVAL);

	somaxconn = val;
	return (0);
}
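
/*
 * Illustrative note: somaxconn_sysctl() follows the usual
 * read-modify-validate pattern for sysctl handlers -- report the
 * current value, and only if a new value was supplied (req->newptr is
 * non-NULL) range-check and commit it.  From userland the knob is
 * reached as, e.g.:
 *
 *	sysctl kern.ipc.somaxconn=1024
 *
 * Values outside [1, USHRT_MAX] are rejected with EINVAL because the
 * listen queue limit is held in an unsigned short.
 */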