/*
 * Copyright (c) 2004 The FreeBSD Foundation
 * Copyright (c) 2004 Robert Watson
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_mac.h"
#include "opt_zero.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>

#include <vm/uma.h>

static int	soreceive_rcvoob(struct socket *so, struct uio *uio,
		    int flags);

#ifdef INET
static int	do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
#endif

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static struct filterops solisten_filtops =
	{ 1, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ 1, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ 1, NULL, filt_sowdetach, filt_sowrite };

uma_zone_t socket_zone;
so_gen_t so_gencnt;	/* generation count for sockets */

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

SYSCTL_DECL(_kern_ipc);

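/*
 * The knobs below are runtime-tunable via sysctl(8); for example
 * (illustrative value), "sysctl kern.ipc.somaxconn=1024" raises the cap
 * on listen(2) backlogs enforced in solisten() below.
 */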
static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");
static int numopensockets;
SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
    &numopensockets, 0, "Number of open sockets");
#ifdef ZERO_COPY_SOCKETS
/* These aren't static because they're used in other files. */
int so_zero_copy_send = 1;
int so_zero_copy_receive = 1;
SYSCTL_NODE(_kern_ipc, OID_AUTO, zero_copy, CTLFLAG_RD, 0,
    "Zero copy controls");
SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, receive, CTLFLAG_RW,
    &so_zero_copy_receive, 0, "Enable zero copy receive");
SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, send, CTLFLAG_RW,
    &so_zero_copy_send, 0, "Enable zero copy send");
#endif /* ZERO_COPY_SOCKETS */

/*
 * accept_mtx locks down per-socket fields relating to accept queues.  See
 * socketvar.h for an annotation of the protected fields of struct socket.
 */
struct mtx accept_mtx;
MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);

/*
 * so_global_mtx protects so_gencnt, numopensockets, and the per-socket
 * so_gencnt field.
 *
 * XXXRW: These variables might be better manipulated using atomic operations
 * for improved efficiency.
 */
static struct mtx so_global_mtx;
MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_glabel", MTX_DEF);

/*
 * Socket operation routines.  These routines are called by the routines in
 * sys_socket.c or from a system process, and implement the semantics of
 * socket operations by switching out to the protocol specific routines.
 */

/*
 * Get a socket structure from our zone, and initialize it.  Note that it
 * would probably be better to allocate the socket and PCB at the same time,
 * but I'm not convinced that all the protocols can be easily modified to do
 * this.
 *
 * soalloc() returns a socket with a ref count of 0.
 */
struct socket *
soalloc(int mflags)
{
	struct socket *so;
#ifdef MAC
	int error;
#endif

	so = uma_zalloc(socket_zone, mflags | M_ZERO);
	if (so != NULL) {
#ifdef MAC
		error = mac_init_socket(so, mflags);
		if (error != 0) {
			uma_zfree(socket_zone, so);
			so = NULL;
			return so;
		}
#endif
		SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
		SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
		/* sx_init(&so->so_sxlock, "socket sxlock"); */
		TAILQ_INIT(&so->so_aiojobq);
		mtx_lock(&so_global_mtx);
		so->so_gencnt = ++so_gencnt;
		++numopensockets;
		mtx_unlock(&so_global_mtx);
	}
	return so;
}

/*
 * socreate() returns a socket with a ref count of 1.  The socket should be
 * closed with soclose().
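 *
 * A minimal kernel-internal usage sketch (illustrative, not code from this
 * file):
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_STREAM, 0, td->td_ucred, td);
 *	if (error == 0) {
 *		...
 *		soclose(so);	/* drops the reference from socreate() */
 *	}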
 */
int
socreate(dom, aso, type, proto, cred, td)
	int dom;
	struct socket **aso;
	int type;
	int proto;
	struct ucred *cred;
	struct thread *td;
{
	struct protosw *prp;
	struct socket *so;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL || prp->pr_usrreqs->pru_attach == NULL)
		return (EPROTONOSUPPORT);

	if (jailed(cred) && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(M_WAITOK);
	if (so == NULL)
		return (ENOBUFS);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(cred);
	so->so_proto = prp;
#ifdef MAC
	mac_create_socket(cred, so);
#endif
	SOCK_LOCK(so);
	soref(so);
	SOCK_UNLOCK(so);
	error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
	if (error) {
		SOCK_LOCK(so);
		so->so_state |= SS_NOFDREF;
		sorele(so);
		return (error);
	}
	*aso = so;
	return (0);
}

int
sobind(so, nam, td)
	struct socket *so;
	struct sockaddr *nam;
	struct thread *td;
{

	return ((*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td));
}

void
sodealloc(struct socket *so)
{

	KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	mtx_unlock(&so_global_mtx);
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
	/* Remove the accept filter if one is present. */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
#endif
#ifdef MAC
	mac_destroy_socket(so);
#endif
	crfree(so->so_cred);
	SOCKBUF_LOCK_DESTROY(&so->so_snd);
	SOCKBUF_LOCK_DESTROY(&so->so_rcv);
	/* sx_destroy(&so->so_sxlock); */
	uma_zfree(socket_zone, so);
	/*
	 * XXXRW: Seems like a shame to grab the mutex again down here, but
	 * we don't want to decrement the socket count until after we free
	 * the socket, and we can't increment the gencnt on the socket after
	 * we free it, so...
	 */
	mtx_lock(&so_global_mtx);
	--numopensockets;
	mtx_unlock(&so_global_mtx);
}

int
solisten(so, backlog, td)
	struct socket *so;
	int backlog;
	struct thread *td;
{
	int error;

	/*
	 * XXXRW: Ordering issue here -- perhaps we need to set
	 * SO_ACCEPTCONN before the call to pru_listen()?
	 * XXXRW: General atomic test-and-set concerns here also.
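	 *
	 * Typical caller sequence (illustrative sketch):
	 *
	 *	socreate(...); sobind(so, nam, td); solisten(so, backlog, td);
	 *
	 * after which accept(2) drains completed connections from so_comp.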
	 */
	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
	    SS_ISDISCONNECTING))
		return (EINVAL);
	error = (*so->so_proto->pr_usrreqs->pru_listen)(so, td);
	if (error)
		return (error);
	ACCEPT_LOCK();
	if (TAILQ_EMPTY(&so->so_comp)) {
		SOCK_LOCK(so);
		so->so_options |= SO_ACCEPTCONN;
		SOCK_UNLOCK(so);
	}
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	ACCEPT_UNLOCK();
	return (0);
}

void
sofree(so)
	struct socket *so;
{
	struct socket *head;

	KASSERT(so->so_count == 0, ("socket %p so_count not 0", so));
	SOCK_LOCK_ASSERT(so);

	if (so->so_pcb != NULL || (so->so_state & SS_NOFDREF) == 0) {
		SOCK_UNLOCK(so);
		return;
	}

	SOCK_UNLOCK(so);
	ACCEPT_LOCK();
	head = so->so_head;
	if (head != NULL) {
		KASSERT((so->so_qstate & SQ_COMP) != 0 ||
		    (so->so_qstate & SQ_INCOMP) != 0,
		    ("sofree: so_head != NULL, but neither SQ_COMP nor "
		    "SQ_INCOMP"));
		KASSERT((so->so_qstate & SQ_COMP) == 0 ||
		    (so->so_qstate & SQ_INCOMP) == 0,
		    ("sofree: so->so_qstate is SQ_COMP and also SQ_INCOMP"));
		/*
		 * accept(2) is responsible for draining the completed
		 * connection queue and freeing those sockets, so we just
		 * return here if this socket is currently on the completed
		 * connection queue.  Otherwise, accept(2) may hang after
		 * select(2) has indicated that a listening socket was
		 * ready.  If it's an incomplete connection, we remove it
		 * from the queue and free it; otherwise, it won't be
		 * released until the listening socket is closed.
		 */
		if ((so->so_qstate & SQ_COMP) != 0) {
			ACCEPT_UNLOCK();
			return;
		}
		TAILQ_REMOVE(&head->so_incomp, so, so_list);
		head->so_incqlen--;
		so->so_qstate &= ~SQ_INCOMP;
		so->so_head = NULL;
	}
	KASSERT((so->so_qstate & SQ_COMP) == 0 &&
	    (so->so_qstate & SQ_INCOMP) == 0,
	    ("sofree: so_head == NULL, but still SQ_COMP(%d) or SQ_INCOMP(%d)",
	    so->so_qstate & SQ_COMP, so->so_qstate & SQ_INCOMP));
	ACCEPT_UNLOCK();
	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_flags |= SB_NOINTR;
	(void)sblock(&so->so_snd, M_WAITOK);
	/*
	 * socantsendmore_locked() drops the socket buffer mutex so that it
	 * can safely perform wakeups.  Re-acquire the mutex before
	 * continuing.
	 */
	socantsendmore_locked(so);
	SOCKBUF_LOCK(&so->so_snd);
	sbunlock(&so->so_snd);
	sbrelease_locked(&so->so_snd, so);
	SOCKBUF_UNLOCK(&so->so_snd);
	sorflush(so);
	sodealloc(so);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 *
 * This function will sorele() the socket.  Note that soclose() may be
 * called prior to the ref count reaching zero.  The actual socket
 * structure will not be freed until the ref count reaches zero.
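 *
 * For example (illustrative): a protocol holding its own reference may
 * soref(so) before soclose() runs and sorele() afterwards; the socket
 * memory stays valid until that last sorele() drops so_count to zero.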
 */
int
soclose(so)
	struct socket *so;
{
	int error = 0;

	funsetown(&so->so_sigio);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;
		ACCEPT_LOCK();
		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			so->so_incqlen--;
			sp->so_qstate &= ~SQ_INCOMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			(void) soabort(sp);
			ACCEPT_LOCK();
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			sp->so_qstate &= ~SQ_COMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			(void) soabort(sp);
			ACCEPT_LOCK();
		}
		ACCEPT_UNLOCK();
	}
	if (so->so_pcb == NULL)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo,
				    PSOCK | PCATCH, "soclos",
				    so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb != NULL) {
		int error2 = (*so->so_proto->pr_usrreqs->pru_detach)(so);
		if (error == 0)
			error = error2;
	}
discard:
	SOCK_LOCK(so);
	KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF"));
	so->so_state |= SS_NOFDREF;
	sorele(so);
	return (error);
}

/*
 * soabort() must not be called with any socket locks held, as it calls
 * into the protocol, which will call back into the socket code causing
 * it to acquire additional socket locks that may cause recursion or lock
 * order reversals.
 */
int
soabort(so)
	struct socket *so;
{
	int error;

	error = (*so->so_proto->pr_usrreqs->pru_abort)(so);
	if (error) {
		SOCK_LOCK(so);
		sotryfree(so);	/* note: does not decrement the ref count */
		return error;
	}
	return (0);
}

int
soaccept(so, nam)
	struct socket *so;
	struct sockaddr **nam;
{
	int error;

	SOCK_LOCK(so);
	KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF"));
	so->so_state &= ~SS_NOFDREF;
	SOCK_UNLOCK(so);
	error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
	return (error);
}

int
soconnect(so, nam, td)
	struct socket *so;
	struct sockaddr *nam;
	struct thread *td;
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.  This allows
	 * user to disconnect by connecting to, e.g., a null address.
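	 * For instance (illustrative), a connected UDP socket can be
	 * "connected" again to a new destination, because the
	 * sodisconnect() attempted below succeeds first for datagram
	 * protocols.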
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, td);
	return (error);
}

int
soconnect2(so1, so2)
	struct socket *so1;
	struct socket *so2;
{

	return ((*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2));
}

int
sodisconnect(so)
	struct socket *so;
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);
	if (so->so_state & SS_ISDISCONNECTING)
		return (EALREADY);
	error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
	return (error);
}

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */

#ifdef ZERO_COPY_SOCKETS
struct so_zerocopy_stats {
	int size_ok;
	int align_ok;
	int found_ifp;
};
struct so_zerocopy_stats so_zerocp_stats = {0, 0, 0};
#include <netinet/in.h>
#include <net/route.h>
#include <netinet/in_pcb.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#endif /* ZERO_COPY_SOCKETS */

int
sosend(so, addr, uio, top, control, flags, td)
	struct socket *so;
	struct sockaddr *addr;
	struct uio *uio;
	struct mbuf *top;
	struct mbuf *control;
	int flags;
	struct thread *td;
{
	struct mbuf **mp;
	struct mbuf *m;
	long space, len = 0, resid;
	int clen = 0, error, dontroute;
	int atomic = sosendallatonce(so) || top;
#ifdef ZERO_COPY_SOCKETS
	int cow_send;
#endif /* ZERO_COPY_SOCKETS */

	if (uio != NULL)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.  However, space must be
	 * signed, as it might be less than 0 if we over-committed, and we
	 * must use a signed comparison of space and resid.  On the other
	 * hand, a negative resid causes us to loop sending 0-length
	 * segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
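	 * For example (illustrative), send(s, buf, len, MSG_EOR) on a TCP
	 * (SOCK_STREAM) socket is rejected below with EINVAL.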
	 */
	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td != NULL)
		td->td_proc->p_stats->p_ru.ru_msgsnd++;
	if (control != NULL)
		clen = control->m_len;
#define	snderr(errno)	{ error = (errno); goto release; }

	SOCKBUF_LOCK(&so->so_snd);
restart:
	SOCKBUF_LOCK_ASSERT(&so->so_snd);
	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out_locked;
	do {
		SOCKBUF_LOCK_ASSERT(&so->so_snd);
		if (so->so_snd.sb_state & SBS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					snderr(ENOTCONN);
			} else if (addr == NULL)
				snderr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
				    ENOTCONN : EDESTADDRREQ);
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat)
			snderr(EMSGSIZE);
		if (space < resid + clen &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO))
				snderr(EWOULDBLOCK);
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			if (error)
				goto out_locked;
			goto restart;
		}
		SOCKBUF_UNLOCK(&so->so_snd);
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
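				 * (Illustrative note: some kernel-internal
				 * callers hand a prebuilt mbuf chain in
				 * "top" instead of a uio.)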
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
#ifdef ZERO_COPY_SOCKETS
				cow_send = 0;
#endif /* ZERO_COPY_SOCKETS */
				if (resid >= MINCLSIZE) {
#ifdef ZERO_COPY_SOCKETS
					if (top == NULL) {
						MGETHDR(m, M_TRYWAIT, MT_DATA);
						if (m == NULL) {
							error = ENOBUFS;
							SOCKBUF_LOCK(&so->so_snd);
							goto release;
						}
						m->m_pkthdr.len = 0;
						m->m_pkthdr.rcvif = (struct ifnet *)0;
					} else {
						MGET(m, M_TRYWAIT, MT_DATA);
						if (m == NULL) {
							error = ENOBUFS;
							SOCKBUF_LOCK(&so->so_snd);
							goto release;
						}
					}
					if (so_zero_copy_send &&
					    resid >= PAGE_SIZE &&
					    space >= PAGE_SIZE &&
					    uio->uio_iov->iov_len >= PAGE_SIZE) {
						so_zerocp_stats.size_ok++;
						if (!((vm_offset_t)
						    uio->uio_iov->iov_base & PAGE_MASK)) {
							so_zerocp_stats.align_ok++;
							cow_send = socow_setup(m, uio);
						}
					}
					if (!cow_send) {
						MCLGET(m, M_TRYWAIT);
						if ((m->m_flags & M_EXT) == 0) {
							m_free(m);
							m = NULL;
						} else {
							len = min(min(MCLBYTES, resid), space);
						}
					} else
						len = PAGE_SIZE;
#else /* ZERO_COPY_SOCKETS */
					if (top == NULL) {
						m = m_getcl(M_TRYWAIT, MT_DATA, M_PKTHDR);
						m->m_pkthdr.len = 0;
						m->m_pkthdr.rcvif = (struct ifnet *)0;
					} else
						m = m_getcl(M_TRYWAIT, MT_DATA, 0);
					len = min(min(MCLBYTES, resid), space);
#endif /* ZERO_COPY_SOCKETS */
				} else {
					if (top == NULL) {
						m = m_gethdr(M_TRYWAIT, MT_DATA);
						m->m_pkthdr.len = 0;
						m->m_pkthdr.rcvif = (struct ifnet *)0;

						len = min(min(MHLEN, resid), space);
						/*
						 * For datagram protocols, leave room
						 * for protocol headers in first mbuf.
						 */
						if (atomic && m && len < MHLEN)
							MH_ALIGN(m, len);
					} else {
						m = m_get(M_TRYWAIT, MT_DATA);
						len = min(min(MLEN, resid), space);
					}
				}
				if (m == NULL) {
					error = ENOBUFS;
					SOCKBUF_LOCK(&so->so_snd);
					goto release;
				}

				space -= len;
#ifdef ZERO_COPY_SOCKETS
				if (cow_send)
					error = 0;
				else
#endif /* ZERO_COPY_SOCKETS */
				error = uiomove(mtod(m, void *), (int)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error) {
					SOCKBUF_LOCK(&so->so_snd);
					goto release;
				}
				mp = &m->m_next;
				if (resid <= 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);
			if (dontroute) {
				SOCK_LOCK(so);
				so->so_options |= SO_DONTROUTE;
				SOCK_UNLOCK(so);
			}
			/*
			 * XXX all the SBS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We
			 * could probably recheck again inside the splnet()
			 * protection here, but there are probably other
			 * places that this also happens.  We must rethink
			 * this.
			 */
			error = (*so->so_proto->pr_usrreqs->pru_send)(so,
			    (flags & MSG_OOB) ? PRUS_OOB :
			/*
			 * If the user set MSG_EOF, the protocol understands
			 * this flag, and there is nothing left to send, then
			 * use PRU_SEND_EOF instead of PRU_SEND.
			 */
			    ((flags & MSG_EOF) &&
			     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			     (resid <= 0)) ?
				PRUS_EOF :
			/* If there is more to send, set PRUS_MORETOCOME. */
			    (resid > 0 && space > 0) ?
				PRUS_MORETOCOME : 0,
			    top, addr, control, td);
			if (dontroute) {
				SOCK_LOCK(so);
				so->so_options &= ~SO_DONTROUTE;
				SOCK_UNLOCK(so);
			}
			clen = 0;
			control = NULL;
			top = NULL;
			mp = &top;
			if (error) {
				SOCKBUF_LOCK(&so->so_snd);
				goto release;
			}
		} while (resid && space > 0);
		SOCKBUF_LOCK(&so->so_snd);
	} while (resid);

release:
	SOCKBUF_LOCK_ASSERT(&so->so_snd);
	sbunlock(&so->so_snd);
out_locked:
	SOCKBUF_LOCK_ASSERT(&so->so_snd);
	SOCKBUF_UNLOCK(&so->so_snd);
out:
	if (top != NULL)
		m_freem(top);
	if (control != NULL)
		m_freem(control);
	return (error);
}

/*
 * The part of soreceive() that implements reading non-inline out-of-band
 * data from a socket.  For more complete comments, see soreceive(), from
 * which this code originated.
 *
 * XXXRW: Note that soreceive_rcvoob(), unlike the remainder of soreceive(),
 * is unable to return an mbuf chain to the caller.
 */
static int
soreceive_rcvoob(so, uio, flags)
	struct socket *so;
	struct uio *uio;
	int flags;
{
	struct protosw *pr = so->so_proto;
	struct mbuf *m;
	int error;

	KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0"));

	m = m_get(M_TRYWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
	if (error)
		goto bad;
	do {
#ifdef ZERO_COPY_SOCKETS
		if (so_zero_copy_receive) {
			vm_page_t pg;
			int disposable;

			if ((m->m_flags & M_EXT)
			 && (m->m_ext.ext_type == EXT_DISPOSABLE))
				disposable = 1;
			else
				disposable = 0;

			pg = PHYS_TO_VM_PAGE(vtophys(mtod(m, caddr_t)));
			if (uio->uio_offset == -1)
				uio->uio_offset = IDX_TO_OFF(pg->pindex);

			error = uiomoveco(mtod(m, void *),
			    min(uio->uio_resid, m->m_len),
			    uio, pg->object,
			    disposable);
		} else
#endif /* ZERO_COPY_SOCKETS */
		error = uiomove(mtod(m, void *),
		    (int) min(uio->uio_resid, m->m_len), uio);
		m = m_free(m);
	} while (uio->uio_resid && error == 0 && m);
bad:
	if (m != NULL)
		m_freem(m);
	return (error);
}

/*
 * Following replacement or removal of the first mbuf on the first mbuf
 * chain of a socket buffer, push necessary state changes back into the
 * socket buffer so that other consumers see the values consistently.
 * 'nextrecord' is the caller's locally stored value of the original value
 * of sb->sb_mb->m_nextpkt which must be restored when the lead mbuf
 * changes.  NOTE: 'nextrecord' may be NULL.
 */
static __inline void
sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord)
{

	SOCKBUF_LOCK_ASSERT(sb);
	/*
	 * First, update for the new value of nextrecord.  If necessary,
	 * make it the first record.
	 */
	if (sb->sb_mb != NULL)
		sb->sb_mb->m_nextpkt = nextrecord;
	else
		sb->sb_mb = nextrecord;

	/*
	 * Now update any dependent socket buffer fields to reflect the new
	 * state.  This is an expanded inline of SB_EMPTY_FIXUP(), with the
	 * addition of a second clause that takes care of the case where
	 * sb_mb has been updated, but remains the last record.
	 */
	if (sb->sb_mb == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (sb->sb_mb->m_nextpkt == NULL)
		sb->sb_lastrecord = sb->sb_mb;
}

/*
 * Implement receive operations on a socket.
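 * (Reached via the protocol's pru_soreceive hook, which for most
 * protocols points at this function -- an assumption worth checking
 * per-protocol.)
 *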
 * We depend on the way that records are added to the sockbuf by sbappend*.
 * In particular, each record (mbufs linked through m_next) must begin with
 * an address if the protocol so specifies, followed by an optional mbuf or
 * mbufs containing ancillary data, and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.  Although the
 * sockbuf is locked, new data may still be appended, and thus we must
 * maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying an
 * mbuf **mp0 for use in returning the chain.  The uio is then used only
 * for the count in uio_resid.
 */
int
soreceive(so, psa, uio, mp0, controlp, flagsp)
	struct socket *so;
	struct sockaddr **psa;
	struct uio *uio;
	struct mbuf **mp0;
	struct mbuf **controlp;
	int *flagsp;
{
	struct mbuf *m, **mp;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff, type = 0;
	int orig_resid = uio->uio_resid;

	mp = mp0;
	if (psa != NULL)
		*psa = NULL;
	if (controlp != NULL)
		*controlp = NULL;
	if (flagsp != NULL)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB)
		return (soreceive_rcvoob(so, uio, flags));
	if (mp != NULL)
		*mp = NULL;
	if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
		(*pr->pr_usrreqs->pru_rcvd)(so, 0);

	SOCKBUF_LOCK(&so->so_rcv);
restart:
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto out;

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark,
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), and
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive
	 * buffer, we have to do the receive in sections, and thus risk
	 * returning a short count if a timeout or signal occurs after we
	 * start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.sb_cc,
		    ("receive: m == %p so->so_rcv.sb_cc == %u",
		    m, so->so_rcv.sb_cc));
		if (so->so_error) {
			if (m != NULL)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m != NULL; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0)
			goto release;
		if ((so->so_state & SS_NBIO) ||
		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
			error = EWOULDBLOCK;
			goto release;
		}
		SBLASTRECORDCHK(&so->so_rcv);
		SBLASTMBUFCHK(&so->so_rcv);
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		if (error)
			goto out;
		goto restart;
	}
dontblock:
	/*
	 * From this point onward, we maintain 'nextrecord' as a cache of the
	 * pointer to the next record in the socket buffer.  We must keep the
	 * various socket buffer pointers and local stack versions of the
	 * pointers in sync, pushing out modifications before dropping the
	 * socket buffer mutex, and re-reading them when picking it up.
	 *
	 * Otherwise, we will race with the network stack appending new data
	 * or records onto the socket buffer by using inconsistent/stale
	 * versions of the field, possibly resulting in socket buffer
	 * corruption.
	 *
	 * By holding the high-level sblock(), we prevent simultaneous
	 * readers from pulling off the front of the socket buffer.
	 */
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (uio->uio_td)
		uio->uio_td->td_proc->p_stats->p_ru.ru_msgrcv++;
	KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME,
		    ("m->m_type == %d", m->m_type));
		orig_resid = 0;
		if (psa != NULL)
			*psa = sodupsockaddr(mtod(m, struct sockaddr *),
			    M_NOWAIT);
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			so->so_rcv.sb_mb = m_free(m);
			m = so->so_rcv.sb_mb;
			sockbuf_pushsync(&so->so_rcv, nextrecord);
		}
	}

	/*
	 * Process one or more MT_CONTROL mbufs present before any data
	 * mbufs in the first mbuf chain on the socket buffer.  If MSG_PEEK,
	 * we just copy the data; if !MSG_PEEK, we call into the protocol to
	 * perform externalization.
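	 * (For example, in the PF_LOCAL domain, dom_externalize() turns
	 * SCM_RIGHTS control data into open file descriptors in the
	 * receiving process.)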
	 */
	if (m != NULL && m->m_type == MT_CONTROL) {
		struct mbuf *cm = NULL;
		struct mbuf **cme = &cm;

		do {
			if (flags & MSG_PEEK) {
				if (controlp != NULL) {
					*controlp = m_copy(m, 0, m->m_len);
					controlp = &(*controlp)->m_next;
				}
				m = m->m_next;
			} else {
				sbfree(&so->so_rcv, m);
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = NULL;
				if (controlp) {
					/*
					 * Collect mbufs for processing below.
					 */
					*cme = m;
					cme = &(*cme)->m_next;
				} else
					m_free(m);
				m = so->so_rcv.sb_mb;
			}
		} while (m != NULL && m->m_type == MT_CONTROL);
		if ((flags & MSG_PEEK) == 0)
			sockbuf_pushsync(&so->so_rcv, nextrecord);
		if (cm != NULL) {
			if (pr->pr_domain->dom_externalize != NULL) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				error = (*pr->pr_domain->dom_externalize)
				    (cm, controlp);
				SOCKBUF_LOCK(&so->so_rcv);
			} else
				m_freem(cm);
		}
		nextrecord = so->so_rcv.sb_mb->m_nextpkt;
		orig_resid = 0;
	}
	if (m != NULL) {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(m->m_nextpkt == nextrecord,
			    ("soreceive: post-control, nextrecord !sync"));
			if (nextrecord == NULL) {
				KASSERT(so->so_rcv.sb_mb == m,
				    ("soreceive: post-control, sb_mb!=m"));
				KASSERT(so->so_rcv.sb_lastrecord == m,
				    ("soreceive: post-control, lastrecord!=m"));
			}
		}
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	} else {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(so->so_rcv.sb_mb == nextrecord,
			    ("soreceive: sb_mb != nextrecord"));
			if (so->so_rcv.sb_mb == NULL) {
				KASSERT(so->so_rcv.sb_lastrecord == NULL,
				    ("soreceive: sb_lastrecord != NULL"));
			}
		}
	}
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);

	/*
	 * Now continue to read any data mbufs off of the head of the socket
	 * buffer until the read request is satisfied.  Note that 'type' is
	 * used to store the type of any mbuf reads that have happened so far
	 * such that soreceive() can stop reading if the type changes, which
	 * causes soreceive() to return only one of regular data and inline
	 * out-of-band data in a single socket receive operation.
	 */
	moff = 0;
	offset = 0;
	while (m != NULL && uio->uio_resid > 0 && error == 0) {
		/*
		 * If the type of mbuf has changed since the last mbuf
		 * examined ('type'), end the receive operation.
		 */
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
		else
			KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
			    ("m->m_type == %d", m->m_type));
		so->so_rcv.sb_state &= ~SBS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.  Otherwise copy
		 * them out via the uio, then free.  Sockbuf must be
		 * consistent here (points to current mbuf, it points to
		 * next record) when we drop priority; we must note any
		 * additions to the sockbuf when we block interrupts again.
		 */
		if (mp == NULL) {
			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
			SBLASTRECORDCHK(&so->so_rcv);
			SBLASTMBUFCHK(&so->so_rcv);
			SOCKBUF_UNLOCK(&so->so_rcv);
#ifdef ZERO_COPY_SOCKETS
			if (so_zero_copy_receive) {
				vm_page_t pg;
				int disposable;

				if ((m->m_flags & M_EXT)
				 && (m->m_ext.ext_type == EXT_DISPOSABLE))
					disposable = 1;
				else
					disposable = 0;

				pg = PHYS_TO_VM_PAGE(vtophys(mtod(m, caddr_t) +
				    moff));

				if (uio->uio_offset == -1)
					uio->uio_offset = IDX_TO_OFF(pg->pindex);

				error = uiomoveco(mtod(m, char *) + moff,
				    (int)len, uio, pg->object,
				    disposable);
			} else
#endif /* ZERO_COPY_SOCKETS */
			error = uiomove(mtod(m, char *) + moff, (int)len, uio);
			SOCKBUF_LOCK(&so->so_rcv);
			if (error)
				goto release;
		} else
			uio->uio_resid -= len;
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp != NULL) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					so->so_rcv.sb_mb = m_free(m);
					m = so->so_rcv.sb_mb;
				}
				if (m != NULL) {
					m->m_nextpkt = nextrecord;
					if (nextrecord == NULL)
						so->so_rcv.sb_lastrecord = m;
				} else {
					so->so_rcv.sb_mb = nextrecord;
					SB_EMPTY_FIXUP(&so->so_rcv);
				}
				SBLASTRECORDCHK(&so->so_rcv);
				SBLASTMBUFCHK(&so->so_rcv);
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp != NULL) {
					SOCKBUF_UNLOCK(&so->so_rcv);
					*mp = m_copym(m, 0, len, M_TRYWAIT);
					SOCKBUF_LOCK(&so->so_rcv);
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_rcv.sb_state |= SBS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return with a
		 * short count but without error.  Keep sockbuf locked
		 * against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && nextrecord == NULL) {
			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
			if (so->so_error || so->so_rcv.sb_state & SBS_CANTRCVMORE)
				break;
			/*
			 * Notify the protocol that some data has been
			 * drained before blocking.
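			 * (For instance, TCP's pru_rcvd hook uses this to
			 * announce a newly opened receive window to the
			 * peer.)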
			 */
			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb != NULL) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				(*pr->pr_usrreqs->pru_rcvd)(so, flags);
				SOCKBUF_LOCK(&so->so_rcv);
			}
			SBLASTRECORDCHK(&so->so_rcv);
			SBLASTMBUFCHK(&so->so_rcv);
			error = sbwait(&so->so_rcv);
			if (error)
				goto release;
			m = so->so_rcv.sb_mb;
			if (m != NULL)
				nextrecord = m->m_nextpkt;
		}
	}

	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (m != NULL && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord_locked(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == NULL) {
			/*
			 * First part is an inline SB_EMPTY_FIXUP().  Second
			 * part makes sure sb_lastrecord is up-to-date if
			 * there is still data in the socket buffer.
			 */
			so->so_rcv.sb_mb = nextrecord;
			if (so->so_rcv.sb_mb == NULL) {
				so->so_rcv.sb_mbtail = NULL;
				so->so_rcv.sb_lastrecord = NULL;
			} else if (nextrecord->m_nextpkt == NULL)
				so->so_rcv.sb_lastrecord = nextrecord;
		}
		SBLASTRECORDCHK(&so->so_rcv);
		SBLASTMBUFCHK(&so->so_rcv);
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			(*pr->pr_usrreqs->pru_rcvd)(so, flags);
			SOCKBUF_LOCK(&so->so_rcv);
		}
	}
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 &&
	    (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp != NULL)
		*flagsp |= flags;
release:
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	sbunlock(&so->so_rcv);
out:
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	SOCKBUF_UNLOCK(&so->so_rcv);
	return (error);
}

int
soshutdown(so, how)
	struct socket *so;
	int how;
{
	struct protosw *pr = so->so_proto;

	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how != SHUT_WR)
		sorflush(so);
	if (how != SHUT_RD)
		return ((*pr->pr_usrreqs->pru_shutdown)(so));
	return (0);
}

void
sorflush(so)
	struct socket *so;
{
	struct sockbuf *sb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	struct sockbuf asb;

	/*
	 * XXXRW: This is quite ugly.  The existing code made a copy of the
	 * socket buffer, then zero'd the original to clear the buffer
	 * fields.  However, with mutexes in the socket buffer, this causes
	 * problems.  We only clear the zeroable bits of the original;
	 * however, we have to initialize and destroy the mutex in the copy
	 * so that dom_dispose() and sbrelease() can lock it as needed.
	 */
	SOCKBUF_LOCK(sb);
	sb->sb_flags |= SB_NOINTR;
	(void) sblock(sb, M_WAITOK);
	/*
	 * socantrcvmore_locked() drops the socket buffer mutex so that it
	 * can safely perform wakeups.  Re-acquire the mutex before
	 * continuing.
	 */
	socantrcvmore_locked(so);
	SOCKBUF_LOCK(sb);
	sbunlock(sb);
	/*
	 * Invalidate/clear most of the sockbuf structure, but leave
	 * selinfo and mutex data unchanged.
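	 * (struct sockbuf lays out its zeroable fields starting at the
	 * sb_startzero marker, which is what makes the partial bzero/bcopy
	 * below safe; the mutex and selinfo live before that marker.)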
	 */
	bzero(&asb, offsetof(struct sockbuf, sb_startzero));
	bcopy(&sb->sb_startzero, &asb.sb_startzero,
	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
	bzero(&sb->sb_startzero,
	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
	SOCKBUF_UNLOCK(sb);

	SOCKBUF_LOCK_INIT(&asb, "so_rcv");
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb, so);
	SOCKBUF_LOCK_DESTROY(&asb);
}

#ifdef INET
static int
do_setopt_accept_filter(so, sopt)
	struct socket *so;
	struct sockopt *sopt;
{
	struct accept_filter_arg *afap = NULL;
	struct accept_filter *afp;
	struct so_accf *af = so->so_accf;
	int error = 0;

	/* Do not set/remove accept filters on non-listening sockets. */
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto out;
	}

	/* Removing the filter. */
	if (sopt == NULL) {
		if (af != NULL) {
			if (af->so_accept_filter != NULL &&
			    af->so_accept_filter->accf_destroy != NULL) {
				af->so_accept_filter->accf_destroy(so);
			}
			if (af->so_accept_filter_str != NULL) {
				FREE(af->so_accept_filter_str, M_ACCF);
			}
			FREE(af, M_ACCF);
			so->so_accf = NULL;
		}
		so->so_options &= ~SO_ACCEPTFILTER;
		return (0);
	}
	/* Adding a filter; must remove any previous filter first. */
	if (af != NULL) {
		error = EINVAL;
		goto out;
	}
	/* Don't put large objects on the kernel stack. */
	MALLOC(afap, struct accept_filter_arg *, sizeof(*afap), M_TEMP,
	    M_WAITOK);
	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
	afap->af_name[sizeof(afap->af_name)-1] = '\0';
	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
	if (error)
		goto out;
	afp = accept_filt_get(afap->af_name);
	if (afp == NULL) {
		error = ENOENT;
		goto out;
	}
	MALLOC(af, struct so_accf *, sizeof(*af), M_ACCF, M_WAITOK | M_ZERO);
	if (afp->accf_create != NULL) {
		if (afap->af_name[0] != '\0') {
			int len = strlen(afap->af_name) + 1;

			MALLOC(af->so_accept_filter_str, char *, len, M_ACCF,
			    M_WAITOK);
			strcpy(af->so_accept_filter_str, afap->af_name);
		}
		af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
		if (af->so_accept_filter_arg == NULL) {
			FREE(af->so_accept_filter_str, M_ACCF);
			FREE(af, M_ACCF);
			so->so_accf = NULL;
			error = EINVAL;
			goto out;
		}
	}
	af->so_accept_filter = afp;
	so->so_accf = af;
	so->so_options |= SO_ACCEPTFILTER;
out:
	if (afap != NULL)
		FREE(afap, M_TEMP);
	return (error);
}
#endif /* INET */

/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in an
 * additional variant to handle the case where the option value needs to be
 * some kind of integer, but not a specific size.  In addition to their use
 * here, these functions are also called by the protocol-level pr_ctloutput()
 * routines.
 */
int
sooptcopyin(sopt, buf, len, minlen)
	struct sockopt *sopt;
	void *buf;
	size_t len;
	size_t minlen;
{
	size_t valsize;

	/*
	 * If the user gives us more than we wanted, we ignore it, but if we
	 * don't get the minimum length the caller wants, we return EINVAL.
	 * On success, sopt->sopt_valsize is set to however much we actually
	 * retrieved.
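	 *
	 * Typical caller pattern (taken from the option handlers below):
	 *
	 *	error = sooptcopyin(sopt, &optval, sizeof optval,
	 *	    sizeof optval);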
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return EINVAL;
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	if (sopt->sopt_td != NULL)
		return (copyin(sopt->sopt_val, buf, valsize));

	bcopy(sopt->sopt_val, buf, valsize);
	return 0;
}

/*
 * Kernel version of setsockopt(2).
 *
 * XXX: optlen is size_t, not socklen_t
 */
int
so_setsockopt(struct socket *so, int level, int optname, void *optval,
    size_t optlen)
{
	struct sockopt sopt;

	sopt.sopt_level = level;
	sopt.sopt_name = optname;
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_val = optval;
	sopt.sopt_valsize = optlen;
	sopt.sopt_td = NULL;
	return (sosetopt(so, &sopt));
}

int
sosetopt(so, sopt)
	struct socket *so;
	struct sockopt *sopt;
{
	int error, optval;
	struct linger l;
	struct timeval tv;
	u_long val;
#ifdef MAC
	struct mac extmac;
#endif

	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
			    (so, sopt));
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;
#endif
		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			SOCK_LOCK(so);
			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			SOCK_UNLOCK(so);
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_BINTIME:
		case SO_NOSIGPIPE:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				goto bad;
			SOCK_LOCK(so);
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			SOCK_UNLOCK(so);
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so, curthread) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				SOCKBUF_LOCK(&so->so_snd);
				so->so_snd.sb_lowat =
				    (optval > so->so_snd.sb_hiwat) ?
				    so->so_snd.sb_hiwat : optval;
				SOCKBUF_UNLOCK(&so->so_snd);
				break;
			case SO_RCVLOWAT:
				SOCKBUF_LOCK(&so->so_rcv);
				so->so_rcv.sb_lowat =
				    (optval > so->so_rcv.sb_hiwat) ?
				    so->so_rcv.sb_hiwat : optval;
				SOCKBUF_UNLOCK(&so->so_rcv);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
			    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > SHRT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - SHRT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
			if (val > SHRT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = val;
				break;
			}
			break;
		case SO_LABEL:
#ifdef MAC
			error = sooptcopyin(sopt, &extmac, sizeof extmac,
			    sizeof extmac);
			if (error)
				goto bad;
			error = mac_setsockopt_label(sopt->sopt_td->td_ucred,
			    so, &extmac);
#else
			error = EOPNOTSUPP;
#endif
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto != NULL &&
		    so->so_proto->pr_ctloutput != NULL) {
			(void) ((*so->so_proto->pr_ctloutput)
			    (so, sopt));
		}
	}
bad:
	return (error);
}

/* Helper routine for getsockopt. */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	int error;
	size_t valsize;

	error = 0;

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.  Traditional
	 * behavior is that we always tell the user precisely how much we
	 * copied, rather than something useful like the total amount we
	 * had available for her.  Note that this interface is not
	 * idempotent; the entire answer must be generated ahead of time.
	 */
	valsize = min(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != NULL) {
		if (sopt->sopt_td != NULL)
			error = copyout(buf, sopt->sopt_val, valsize);
		else
			bcopy(buf, sopt->sopt_val, valsize);
	}
	return error;
}

int
sogetopt(so, sopt)
	struct socket *so;
	struct sockopt *sopt;
{
	int error, optval;
	struct linger l;
	struct timeval tv;
#ifdef INET
	struct accept_filter_arg *afap;
#endif
#ifdef MAC
	struct mac extmac;
#endif

	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
			    (so, sopt));
		} else
			return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			if ((so->so_options & SO_ACCEPTCONN) == 0)
				return (EINVAL);
			MALLOC(afap, struct accept_filter_arg *, sizeof(*afap),
			    M_TEMP, M_WAITOK | M_ZERO);
			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
				strcpy(afap->af_name,
				    so->so_accf->so_accept_filter->accf_name);
				if (so->so_accf->so_accept_filter_str != NULL)
					strcpy(afap->af_arg,
					    so->so_accf->so_accept_filter_str);
			}
			error = sooptcopyout(sopt, afap, sizeof(*afap));
			FREE(afap, M_TEMP);
			break;
#endif

		case SO_LINGER:
			/*
			 * XXXRW: We grab the lock here to get a consistent
			 * snapshot of both fields.
			 * This may not really be necessary.
			 */
			SOCK_LOCK(so);
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			SOCK_UNLOCK(so);
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_BINTIME:
		case SO_NOSIGPIPE:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			optval = so->so_error;
			so->so_error = 0;
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.sb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.sb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.sb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.sb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
			    so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * tick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;
		case SO_LABEL:
#ifdef MAC
			error = sooptcopyin(sopt, &extmac, sizeof(extmac),
			    sizeof(extmac));
			if (error)
				return (error);
			error = mac_getsockopt_label(sopt->sopt_td->td_ucred,
			    so, &extmac);
			if (error)
				return (error);
			error = sooptcopyout(sopt, &extmac, sizeof extmac);
#else
			error = EOPNOTSUPP;
#endif
			break;
		case SO_PEERLABEL:
#ifdef MAC
			error = sooptcopyin(sopt, &extmac, sizeof(extmac),
			    sizeof(extmac));
			if (error)
				return (error);
			error = mac_getsockopt_peerlabel(
			    sopt->sopt_td->td_ucred, so, &extmac);
			if (error)
				return (error);
			error = sooptcopyout(sopt, &extmac, sizeof extmac);
#else
			error = EOPNOTSUPP;
#endif
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
		return (error);
	}
}

/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize;

	MGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;
	if (sopt_size > MLEN) {
		MCLGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return ENOBUFS;
		}
		m->m_len = min(MCLBYTES, sopt_size);
	} else {
		m->m_len = min(MLEN, sopt_size);
	}
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size) {
		MGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			m_freem(*mp);
			return ENOBUFS;
		}
		if (sopt_size > MLEN) {
			MCLGET(m, sopt->sopt_td != NULL ?
			    M_TRYWAIT : M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				m_freem(*mp);
				return ENOBUFS;
			}
			m->m_len = min(MCLBYTES, sopt_size);
		} else {
			m->m_len = min(MLEN, sopt_size);
		}
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return 0;
}

/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;

	if (sopt->sopt_val == NULL)
		return 0;
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_td != NULL) {
			int error;

			error = copyin(sopt->sopt_val, mtod(m, char *),
			    m->m_len);
			if (error != 0) {
				m_freem(m0);
				return (error);
			}
		} else
			bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
		sopt->sopt_valsize -= m->m_len;
		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
		m = m->m_next;
	}
	/* Should have been allocated large enough at ip6_sooptmcopyin(). */
	if (m != NULL)
		panic("ip6_sooptmcopyin");
	return 0;
}

/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;

	if (sopt->sopt_val == NULL)
		return 0;
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_td != NULL) {
			int error;

			error = copyout(mtod(m, char *), sopt->sopt_val,
			    m->m_len);
			if (error != 0) {
				m_freem(m0);
				return (error);
			}
		} else
			bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
		sopt->sopt_valsize -= m->m_len;
		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* A large enough soopt buffer should be given from user-land. */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}

void
sohasoutofband(so)
	struct socket *so;
{
	if (so->so_sigio != NULL)
		pgsigio(&so->so_sigio, SIGURG, 0);
	selwakeuppri(&so->so_rcv.sb_sel, PSOCK);
}

int
sopoll(struct socket *so, int events, struct ucred *active_cred,
    struct thread *td)
{
	int revents = 0;

	if (events & (POLLIN | POLLRDNORM))
		if (soreadable(so))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & POLLINIGNEOF)
		if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat ||
		    !TAILQ_EMPTY(&so->so_comp) || so->so_error)
			revents |= POLLINIGNEOF;

	if (events & (POLLOUT | POLLWRNORM))
		if (sowriteable(so))
			revents |= events & (POLLOUT | POLLWRNORM);

	if (events & (POLLPRI | POLLRDBAND))
		if (so->so_oobmark || (so->so_rcv.sb_state & SBS_RCVATMARK))
			revents |= events & (POLLPRI | POLLRDBAND);

	if (revents == 0) {
		if (events &
		    (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM |
		    POLLRDBAND)) {
			SOCKBUF_LOCK(&so->so_rcv);
			selrecord(td, &so->so_rcv.sb_sel);
			so->so_rcv.sb_flags |= SB_SEL;
			SOCKBUF_UNLOCK(&so->so_rcv);
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			SOCKBUF_LOCK(&so->so_snd);
			selrecord(td, &so->so_snd.sb_sel);
			so->so_snd.sb_flags |= SB_SEL;
			SOCKBUF_UNLOCK(&so->so_snd);
		}
	}

	return (revents);
}

int
soo_kqfilter(struct file *fp, struct knote *kn)
{
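	/*
	 * Attach the knote to the appropriate socket buffer's note list;
	 * e.g. (illustrative) EVFILT_READ on a listening socket selects
	 * solisten_filtops, whose filter reports the completed-connection
	 * queue length rather than buffered bytes.
	 */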
	struct socket *so = kn->kn_fp->f_data;
	struct sockbuf *sb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		sb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		sb = &so->so_snd;
		break;
	default:
		return (1);
	}

	SOCKBUF_LOCK(sb);
	SLIST_INSERT_HEAD(&sb->sb_sel.si_note, kn, kn_selnext);
	sb->sb_flags |= SB_KNOTE;
	SOCKBUF_UNLOCK(sb);
	return (0);
}

static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;

	SOCKBUF_LOCK(&so->so_rcv);
	SLIST_REMOVE(&so->so_rcv.sb_sel.si_note, kn, knote, kn_selnext);
	if (SLIST_EMPTY(&so->so_rcv.sb_sel.si_note))
		so->so_rcv.sb_flags &= ~SB_KNOTE;
	SOCKBUF_UNLOCK(&so->so_rcv);
}

/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;
	int need_lock, result;

	/*
	 * XXXRW: Conditional locking because filt_soread() can be called
	 * either from KNOTE() in the socket context where the socket buffer
	 * lock is already held, or from kqueue() itself.
	 */
	need_lock = !SOCKBUF_OWNED(&so->so_rcv);
	if (need_lock)
		SOCKBUF_LOCK(&so->so_rcv);
	kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		result = 1;
	} else if (so->so_error)	/* temporary udp error */
		result = 1;
	else if (kn->kn_sfflags & NOTE_LOWAT)
		result = (kn->kn_data >= kn->kn_sdata);
	else
		result = (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat);
	if (need_lock)
		SOCKBUF_UNLOCK(&so->so_rcv);
	return (result);
}

static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;

	SOCKBUF_LOCK(&so->so_snd);
	SLIST_REMOVE(&so->so_snd.sb_sel.si_note, kn, knote, kn_selnext);
	if (SLIST_EMPTY(&so->so_snd.sb_sel.si_note))
		so->so_snd.sb_flags &= ~SB_KNOTE;
	SOCKBUF_UNLOCK(&so->so_snd);
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;
	int need_lock, result;

	/*
	 * XXXRW: Conditional locking because filt_sowrite() can be called
	 * either from KNOTE() in the socket context where the socket buffer
	 * lock is already held, or from kqueue() itself.
	 */
	need_lock = !SOCKBUF_OWNED(&so->so_snd);
	if (need_lock)
		SOCKBUF_LOCK(&so->so_snd);
	kn->kn_data = sbspace(&so->so_snd);
	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		result = 1;
	} else if (so->so_error)	/* temporary udp error */
		result = 1;
	else if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		result = 0;
	else if (kn->kn_sfflags & NOTE_LOWAT)
		result = (kn->kn_data >= kn->kn_sdata);
	else
		result = (kn->kn_data >= so->so_snd.sb_lowat);
	if (need_lock)
		SOCKBUF_UNLOCK(&so->so_snd);
	return (result);
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen;
	return (!TAILQ_EMPTY(&so->so_comp));
}

int
socheckuid(struct socket *so, uid_t uid)
{

	if (so == NULL)
		return (EPERM);
	if (so->so_cred->cr_uid == uid)
		return (0);
	return (EPERM);
}