/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2004 The FreeBSD Foundation
 * Copyright (c) 2004-2008 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 */

/*
 * Comments on the socket life cycle:
 *
 * soalloc() sets up socket layer state for a socket, called only by
 * socreate() and sonewconn().  Socket layer private.
 *
 * sodealloc() tears down socket layer state for a socket, called only by
 * sofree() and sonewconn().  Socket layer private.
 *
 * pru_attach() associates protocol layer state with an allocated socket;
 * called only once, may fail, aborting socket allocation.  This is called
 * from socreate() and sonewconn().  Socket layer private.
 *
 * pru_detach() disassociates protocol layer state from an attached socket,
 * and will be called exactly once for sockets in which pru_attach() has
 * been successfully called.  If pru_attach() returned an error,
 * pru_detach() will not be called.  Socket layer private.
 *
 * pru_abort() and pru_close() notify the protocol layer that the last
 * consumer of a socket is starting to tear down the socket, and that the
 * protocol should terminate the connection.  Historically, pru_abort() also
 * detached protocol state from the socket state, but this is no longer the
 * case.
 *
 * socreate() creates a socket and attaches protocol state.  This is a public
 * interface that may be used by socket layer consumers to create new
 * sockets.
 *
 * sonewconn() creates a socket and attaches protocol state.  This is a
 * public interface that may be used by protocols to create new sockets when
 * a new connection is received and will be available for accept() on a
 * listen socket.
 *
 * soclose() destroys a socket after possibly waiting for it to disconnect.
 * This is a public interface that socket consumers should use to close and
 * release a socket when done with it.
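 *
 * As a minimal, illustrative sketch only (assuming an in-kernel consumer
 * creating a TCP socket, with error handling elided), the public life
 * cycle looks like:
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
 *	    td->td_ucred, td);
 *	...
 *	error = soclose(so);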
 *
 * soabort() destroys a socket without waiting for it to disconnect (used
 * only for incoming connections that are already partially or fully
 * connected).  This is used internally by the socket layer when clearing
 * listen socket queues (due to overflow or close on the listen socket), but
 * is also a public interface protocols may use to abort connections in
 * their incomplete listen queues should they no longer be required.  Sockets
 * placed in completed connection listen queues should not be aborted for
 * reasons described in the comment above the soclose() implementation.  This
 * is not a general purpose close routine, and except in the specific
 * circumstances described here, should not be used.
 *
 * sofree() will free a socket and its protocol state if all references on
 * the socket have been released, and is the public interface to attempt to
 * free a socket when a reference is removed.  This is a socket layer private
 * interface.
 *
 * NOTE: In addition to socreate() and soclose(), which provide a single
 * socket reference to the consumer to be managed as required, there are two
 * calls to explicitly manage socket references, soref(), and sorele().
 * Currently, these are generally required only when transitioning a socket
 * from a listen queue to a file descriptor, in order to prevent garbage
 * collection of the socket at an untimely moment.  For a number of reasons,
 * these interfaces are not preferred, and should be avoided.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_mac.h"
#include "opt_zero.h"
#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <net/route.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>

#include <security/mac/mac_framework.h>

#include <vm/uma.h>

#ifdef COMPAT_IA32
#include <sys/mount.h>
#include <compat/freebsd32/freebsd32.h>

extern struct sysentvec ia32_freebsd_sysvec;
#endif

static int	soreceive_rcvoob(struct socket *so, struct uio *uio,
		    int flags);

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static struct filterops solisten_filtops =
	{ 1, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ 1, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ 1, NULL, filt_sowdetach, filt_sowrite };

uma_zone_t socket_zone;
so_gen_t	so_gencnt;	/* generation count for sockets */

int	maxsockets;

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

static int somaxconn = SOMAXCONN;
static int sysctl_somaxconn(SYSCTL_HANDLER_ARGS);
/* XXX: we don't have SYSCTL_USHORT */
SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLTYPE_UINT | CTLFLAG_RW,
    0, sizeof(int), sysctl_somaxconn, "I", "Maximum pending socket connection "
    "queue size");
static int numopensockets;
SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
    &numopensockets, 0, "Number of open sockets");
#ifdef ZERO_COPY_SOCKETS
/* These aren't static because they're used in other files. */
int so_zero_copy_send = 1;
int so_zero_copy_receive = 1;
SYSCTL_NODE(_kern_ipc, OID_AUTO, zero_copy, CTLFLAG_RD, 0,
    "Zero copy controls");
SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, receive, CTLFLAG_RW,
    &so_zero_copy_receive, 0, "Enable zero copy receive");
SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, send, CTLFLAG_RW,
    &so_zero_copy_send, 0, "Enable zero copy send");
#endif /* ZERO_COPY_SOCKETS */

/*
 * accept_mtx locks down per-socket fields relating to accept queues.  See
 * socketvar.h for an annotation of the protected fields of struct socket.
 */
struct mtx accept_mtx;
MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);

/*
 * so_global_mtx protects so_gencnt, numopensockets, and the per-socket
 * so_gencnt field.
 */
static struct mtx so_global_mtx;
MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_glabel", MTX_DEF);

/*
 * General IPC sysctl name space, used by sockets and a variety of other IPC
 * types.
 */
SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");

/*
 * Sysctl to get and set the maximum global sockets limit.  Notify protocols
 * of the change so that they can update their dependent limits as required.
 */
static int
sysctl_maxsockets(SYSCTL_HANDLER_ARGS)
{
	int error, newmaxsockets;

	newmaxsockets = maxsockets;
	error = sysctl_handle_int(oidp, &newmaxsockets, 0, req);
	if (error == 0 && req->newptr) {
		if (newmaxsockets > maxsockets) {
			maxsockets = newmaxsockets;
			if (maxsockets > ((maxfiles / 4) * 3)) {
				maxfiles = (maxsockets * 5) / 4;
				maxfilesperproc = (maxfiles * 9) / 10;
			}
			EVENTHANDLER_INVOKE(maxsockets_change);
		} else
			error = EINVAL;
	}
	return (error);
}

SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets, CTLTYPE_INT|CTLFLAG_RW,
    &maxsockets, 0, sysctl_maxsockets, "IU",
    "Maximum number of sockets available");

/*
 * Initialise maxsockets.
 */
static void
init_maxsockets(void *ignored)
{

	TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
	maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters));
}
SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);

/*
 * Socket operation routines.  These routines are called by the routines in
 * sys_socket.c or from a system process, and implement the semantics of
 * socket operations by switching out to the protocol specific routines.
 */

/*
 * Get a socket structure from our zone, and initialize it.  Note that it
 * would probably be better to allocate socket and PCB at the same time, but
 * I'm not convinced that all the protocols can be easily modified to do
 * this.
 *
 * soalloc() returns a socket with a ref count of 0.
 */
static struct socket *
soalloc(void)
{
	struct socket *so;

	so = uma_zalloc(socket_zone, M_NOWAIT | M_ZERO);
	if (so == NULL)
		return (NULL);
#ifdef MAC
	if (mac_socket_init(so, M_NOWAIT) != 0) {
		uma_zfree(socket_zone, so);
		return (NULL);
	}
#endif
	SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
	SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
	sx_init(&so->so_snd.sb_sx, "so_snd_sx");
	sx_init(&so->so_rcv.sb_sx, "so_rcv_sx");
	TAILQ_INIT(&so->so_aiojobq);
	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	++numopensockets;
	mtx_unlock(&so_global_mtx);
	return (so);
}

/*
 * Free the storage associated with a socket at the socket layer, tear down
 * locks, labels, etc.  All protocol state is assumed already to have been
 * torn down (and possibly never set up) by the caller.
 */
static void
sodealloc(struct socket *so)
{

	KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
	KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL"));

	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	--numopensockets;	/* Could be below, but faster here. */
	mtx_unlock(&so_global_mtx);
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
	/* remove accept filter if one is present. */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
#endif
#ifdef MAC
	mac_socket_destroy(so);
#endif
	crfree(so->so_cred);
	sx_destroy(&so->so_snd.sb_sx);
	sx_destroy(&so->so_rcv.sb_sx);
	SOCKBUF_LOCK_DESTROY(&so->so_snd);
	SOCKBUF_LOCK_DESTROY(&so->so_rcv);
	uma_zfree(socket_zone, so);
}

/*
 * socreate returns a socket with a ref count of 1.  The socket should be
 * closed with soclose().
 */
int
socreate(int dom, struct socket **aso, int type, int proto,
    struct ucred *cred, struct thread *td)
{
	struct protosw *prp;
	struct socket *so;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL || prp->pr_usrreqs->pru_attach == NULL ||
	    prp->pr_usrreqs->pru_attach == pru_attach_notsupp)
		return (EPROTONOSUPPORT);

	if (jailed(cred) && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc();
	if (so == NULL)
		return (ENOBUFS);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(cred);
	if ((prp->pr_domain->dom_family == PF_INET) ||
	    (prp->pr_domain->dom_family == PF_ROUTE))
		so->so_fibnum = td->td_proc->p_fibnum;
	else
		so->so_fibnum = 0;
	so->so_proto = prp;
#ifdef MAC
	mac_socket_create(cred, so);
#endif
	knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv),
	    NULL, NULL, NULL);
	knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd),
	    NULL, NULL, NULL);
	so->so_count = 1;
	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
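	 * (For example, a protocol's attach routine may set SB_AUTOSIZE on
	 * so_snd and so_rcv; sonewconn() below propagates that flag to
	 * accepted sockets.)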
	 */
	error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
	if (error) {
		KASSERT(so->so_count == 1, ("socreate: so_count %d",
		    so->so_count));
		so->so_count = 0;
		sodealloc(so);
		return (error);
	}
	*aso = so;
	return (0);
}

#ifdef REGRESSION
static int regression_sonewconn_earlytest = 1;
SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW,
    &regression_sonewconn_earlytest, 0, "Perform early sonewconn limit test");
#endif

/*
 * When an attempt at a new connection is noted on a socket which accepts
 * connections, sonewconn is called.  If the connection is possible (subject
 * to space constraints, etc.) then we allocate a new structure, properly
 * linked into the data structure of the original socket, and return this.
 * Connstatus may be 0, or SO_ISCONFIRMING, or SO_ISCONNECTED.
 *
 * Note: the ref count on the socket is 0 on return.
 */
struct socket *
sonewconn(struct socket *head, int connstatus)
{
	struct socket *so;
	int over;

	ACCEPT_LOCK();
	over = (head->so_qlen > 3 * head->so_qlimit / 2);
	ACCEPT_UNLOCK();
#ifdef REGRESSION
	if (regression_sonewconn_earlytest && over)
#else
	if (over)
#endif
		return (NULL);
	so = soalloc();
	if (so == NULL)
		return (NULL);
	if ((head->so_options & SO_ACCEPTFILTER) != 0)
		connstatus = 0;
	so->so_head = head;
	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;
	so->so_state = head->so_state | SS_NOFDREF;
	so->so_proto = head->so_proto;
	so->so_cred = crhold(head->so_cred);
#ifdef MAC
	SOCK_LOCK(head);
	mac_socket_newconn(head, so);
	SOCK_UNLOCK(head);
#endif
	knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv),
	    NULL, NULL, NULL);
	knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd),
	    NULL, NULL, NULL);
	if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat) ||
	    (*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
		sodealloc(so);
		return (NULL);
	}
	so->so_rcv.sb_lowat = head->so_rcv.sb_lowat;
	so->so_snd.sb_lowat = head->so_snd.sb_lowat;
	so->so_rcv.sb_timeo = head->so_rcv.sb_timeo;
	so->so_snd.sb_timeo = head->so_snd.sb_timeo;
	so->so_rcv.sb_flags |= head->so_rcv.sb_flags & SB_AUTOSIZE;
	so->so_snd.sb_flags |= head->so_snd.sb_flags & SB_AUTOSIZE;
	so->so_state |= connstatus;
	ACCEPT_LOCK();
	if (connstatus) {
		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
		so->so_qstate |= SQ_COMP;
		head->so_qlen++;
	} else {
		/*
		 * Keep removing sockets from the head until there's room for
		 * us to insert on the tail.  In pre-locking revisions, this
		 * was a simple if(), but as we could be racing with other
		 * threads and soabort() requires dropping locks, we must
		 * loop waiting for the condition to be true.
		 */
		while (head->so_incqlen > head->so_qlimit) {
			struct socket *sp;
			sp = TAILQ_FIRST(&head->so_incomp);
			TAILQ_REMOVE(&head->so_incomp, sp, so_list);
			head->so_incqlen--;
			sp->so_qstate &= ~SQ_INCOMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			soabort(sp);
			ACCEPT_LOCK();
		}
		TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
		so->so_qstate |= SQ_INCOMP;
		head->so_incqlen++;
	}
	ACCEPT_UNLOCK();
	if (connstatus) {
		sorwakeup(head);
		wakeup_one(&head->so_timeo);
	}
	return (so);
}

int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{

	return ((*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td));
}

/*
 * solisten() transitions a socket from a non-listening state to a listening
 * state, but can also be used to update the listen queue depth on an
 * existing listen socket.  The protocol will call back into the sockets
 * layer using solisten_proto_check() and solisten_proto() to check and set
 * socket-layer listen state.  Call backs are used so that the protocol can
 * acquire both protocol and socket layer locks in whatever order is required
 * by the protocol.
 *
 * Protocol implementors are advised to hold the socket lock across the
 * socket-layer test and set to avoid races at the socket layer.
 */
int
solisten(struct socket *so, int backlog, struct thread *td)
{

	return ((*so->so_proto->pr_usrreqs->pru_listen)(so, backlog, td));
}

int
solisten_proto_check(struct socket *so)
{

	SOCK_LOCK_ASSERT(so);

	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
	    SS_ISDISCONNECTING))
		return (EINVAL);
	return (0);
}

void
solisten_proto(struct socket *so, int backlog)
{

	SOCK_LOCK_ASSERT(so);

	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	so->so_options |= SO_ACCEPTCONN;
}

/*
 * Attempt to free a socket.  This should really be sotryfree().
 *
 * sofree() will succeed if:
 *
 * - There are no outstanding file descriptor references or related consumers
 *   (so_count == 0).
 *
 * - The socket has been closed by user space, if ever open (SS_NOFDREF).
 *
 * - The protocol does not have an outstanding strong reference on the socket
 *   (SS_PROTOREF).
 *
 * - The socket is not in a completed connection queue, so a process has been
 *   notified that it is present.  If it is removed, the user process may
 *   block in accept() despite select() saying the socket was ready.
 *
 * Otherwise, it will quietly abort so that a future call to sofree(), when
 * conditions are right, can succeed.
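 *
 * sofree() is called with both the accept mutex and the socket lock held,
 * and drops both before returning, whether or not it frees the socket.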
 */
void
sofree(struct socket *so)
{
	struct protosw *pr = so->so_proto;
	struct socket *head;

	ACCEPT_LOCK_ASSERT();
	SOCK_LOCK_ASSERT(so);

	if ((so->so_state & SS_NOFDREF) == 0 || so->so_count != 0 ||
	    (so->so_state & SS_PROTOREF) || (so->so_qstate & SQ_COMP)) {
		SOCK_UNLOCK(so);
		ACCEPT_UNLOCK();
		return;
	}

	head = so->so_head;
	if (head != NULL) {
		KASSERT((so->so_qstate & SQ_COMP) != 0 ||
		    (so->so_qstate & SQ_INCOMP) != 0,
		    ("sofree: so_head != NULL, but neither SQ_COMP nor "
		    "SQ_INCOMP"));
		KASSERT((so->so_qstate & SQ_COMP) == 0 ||
		    (so->so_qstate & SQ_INCOMP) == 0,
		    ("sofree: so->so_qstate is SQ_COMP and also SQ_INCOMP"));
		TAILQ_REMOVE(&head->so_incomp, so, so_list);
		head->so_incqlen--;
		so->so_qstate &= ~SQ_INCOMP;
		so->so_head = NULL;
	}
	KASSERT((so->so_qstate & SQ_COMP) == 0 &&
	    (so->so_qstate & SQ_INCOMP) == 0,
	    ("sofree: so_head == NULL, but still SQ_COMP(%d) or SQ_INCOMP(%d)",
	    so->so_qstate & SQ_COMP, so->so_qstate & SQ_INCOMP));
	if (so->so_options & SO_ACCEPTCONN) {
		KASSERT((TAILQ_EMPTY(&so->so_comp)), ("sofree: so_comp populated"));
		KASSERT((TAILQ_EMPTY(&so->so_incomp)), ("sofree: so_incomp populated"));
	}
	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();

	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
		(*pr->pr_domain->dom_dispose)(so->so_rcv.sb_mb);
	if (pr->pr_usrreqs->pru_detach != NULL)
		(*pr->pr_usrreqs->pru_detach)(so);

	/*
	 * From this point on, we assume that no other references to this
	 * socket exist anywhere else in the stack.  Therefore, no locks need
	 * to be acquired or held.
	 *
	 * We used to do a lot of socket buffer and socket locking here, as
	 * well as invoke sorflush() and perform wakeups.  The direct call to
	 * dom_dispose() and sbrelease_internal() are an inlining of what was
	 * necessary from sorflush().
	 *
	 * Notice that the socket buffer and kqueue state are torn down
	 * before calling pru_detach.  This means that protocols should not
	 * assume they can perform socket wakeups, etc, in their detach code.
	 */
	sbdestroy(&so->so_snd, so);
	sbdestroy(&so->so_rcv, so);
	knlist_destroy(&so->so_rcv.sb_sel.si_note);
	knlist_destroy(&so->so_snd.sb_sel.si_note);
	sodealloc(so);
}

/*
 * Close a socket on last file table reference removal.  Initiate disconnect
 * if connected.  Free socket when disconnect complete.
 *
 * This function will sorele() the socket.  Note that soclose() may be called
 * prior to the ref count reaching zero.  The actual socket structure will
 * not be freed until the ref count reaches zero.
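 *
 * If the socket was a listen socket, soclose() also drains both the
 * incomplete and complete connection queues, aborting any queued
 * connections, before dropping its own reference.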
 */
int
soclose(struct socket *so)
{
	int error = 0;

	KASSERT(!(so->so_state & SS_NOFDREF), ("soclose: SS_NOFDREF on enter"));

	funsetown(&so->so_sigio);
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo,
				    PSOCK | PCATCH, "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}

drop:
	if (so->so_proto->pr_usrreqs->pru_close != NULL)
		(*so->so_proto->pr_usrreqs->pru_close)(so);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;
		ACCEPT_LOCK();
		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			so->so_incqlen--;
			sp->so_qstate &= ~SQ_INCOMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			soabort(sp);
			ACCEPT_LOCK();
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			sp->so_qstate &= ~SQ_COMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			soabort(sp);
			ACCEPT_LOCK();
		}
		ACCEPT_UNLOCK();
	}
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF"));
	so->so_state |= SS_NOFDREF;
	sorele(so);
	return (error);
}

/*
 * soabort() is used to abruptly tear down a connection, such as when a
 * resource limit is reached (listen queue depth exceeded), or if a listen
 * socket is closed while there are sockets waiting to be accepted.
 *
 * This interface is tricky, because it is called on an unreferenced socket,
 * and must be called only by a thread that has actually removed the socket
 * from the listen queue it was on, or races with other threads are risked.
 *
 * This interface will call into the protocol code, so must not be called
 * with any socket locks held.  Protocols do call it while holding their own
 * recursible protocol mutexes, but this is something that should be subject
 * to review in the future.
 */
void
soabort(struct socket *so)
{

	/*
	 * In as much as is possible, assert that no references to this
	 * socket are held.  This is not quite the same as asserting that the
	 * current thread is responsible for arranging for no references, but
	 * is as close as we can get for now.
	 */
	KASSERT(so->so_count == 0, ("soabort: so_count"));
	KASSERT((so->so_state & SS_PROTOREF) == 0, ("soabort: SS_PROTOREF"));
	KASSERT(so->so_state & SS_NOFDREF, ("soabort: !SS_NOFDREF"));
	KASSERT((so->so_qstate & SQ_COMP) == 0, ("soabort: SQ_COMP"));
	KASSERT((so->so_qstate & SQ_INCOMP) == 0, ("soabort: SQ_INCOMP"));

	if (so->so_proto->pr_usrreqs->pru_abort != NULL)
		(*so->so_proto->pr_usrreqs->pru_abort)(so);
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	sofree(so);
}

int
soaccept(struct socket *so, struct sockaddr **nam)
{
	int error;

	SOCK_LOCK(so);
	KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF"));
	so->so_state &= ~SS_NOFDREF;
	SOCK_UNLOCK(so);
	error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
	return (error);
}

int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.  This allows
	 * user to disconnect by connecting to, e.g., a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from previous connection from
		 * biting us.
		 */
		so->so_error = 0;
		error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, td);
	}

	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{

	return ((*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2));
}

int
sodisconnect(struct socket *so)
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);
	if (so->so_state & SS_ISDISCONNECTING)
		return (EALREADY);
	error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
	return (error);
}

#ifdef ZERO_COPY_SOCKETS
struct so_zerocopy_stats{
	int size_ok;
	int align_ok;
	int found_ifp;
};
struct so_zerocopy_stats so_zerocp_stats = {0,0,0};
#include <netinet/in.h>
#include <net/route.h>
#include <netinet/in_pcb.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>

/*
 * sosend_copyin() is only used if zero copy sockets are enabled.  Otherwise
 * sosend_dgram() and sosend_generic() use m_uiotombuf().
 *
 * sosend_copyin() accepts a uio and prepares an mbuf chain holding part or
 * all of the data referenced by the uio.  If desired, it uses zero-copy.
 * *space will be updated to reflect data copied in.
 *
 * NB: If atomic I/O is requested, the caller must already have checked that
 * space can hold resid bytes.
 *
 * NB: In the event of an error, the caller may need to free the partial
 * chain pointed to by *mpp.  The contents of both *uio and *space may be
 * modified even in the case of an error.
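 *
 * (Zero-copy is attempted only when so_zero_copy_send is enabled and the
 * remaining resid, the available space, and the current iovec are each at
 * least PAGE_SIZE; otherwise an ordinary cluster copy is performed.)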
 */
static int
sosend_copyin(struct uio *uio, struct mbuf **retmp, int atomic, long *space,
    int flags)
{
	struct mbuf *m, **mp, *top;
	long len, resid;
	int error;
#ifdef ZERO_COPY_SOCKETS
	int cow_send;
#endif

	*retmp = top = NULL;
	mp = &top;
	len = 0;
	resid = uio->uio_resid;
	error = 0;
	do {
#ifdef ZERO_COPY_SOCKETS
		cow_send = 0;
#endif /* ZERO_COPY_SOCKETS */
		if (resid >= MINCLSIZE) {
#ifdef ZERO_COPY_SOCKETS
			if (top == NULL) {
				m = m_gethdr(M_WAITOK, MT_DATA);
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;
			} else
				m = m_get(M_WAITOK, MT_DATA);
			if (so_zero_copy_send &&
			    resid >= PAGE_SIZE &&
			    *space >= PAGE_SIZE &&
			    uio->uio_iov->iov_len >= PAGE_SIZE) {
				so_zerocp_stats.size_ok++;
				so_zerocp_stats.align_ok++;
				cow_send = socow_setup(m, uio);
				len = cow_send;
			}
			if (!cow_send) {
				m_clget(m, M_WAITOK);
				len = min(min(MCLBYTES, resid), *space);
			}
#else /* ZERO_COPY_SOCKETS */
			if (top == NULL) {
				m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;
			} else
				m = m_getcl(M_WAIT, MT_DATA, 0);
			len = min(min(MCLBYTES, resid), *space);
#endif /* ZERO_COPY_SOCKETS */
		} else {
			if (top == NULL) {
				m = m_gethdr(M_WAIT, MT_DATA);
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;

				len = min(min(MHLEN, resid), *space);
				/*
				 * For datagram protocols, leave room
				 * for protocol headers in first mbuf.
				 */
				if (atomic && m && len < MHLEN)
					MH_ALIGN(m, len);
			} else {
				m = m_get(M_WAIT, MT_DATA);
				len = min(min(MLEN, resid), *space);
			}
		}
		if (m == NULL) {
			error = ENOBUFS;
			goto out;
		}

		*space -= len;
#ifdef ZERO_COPY_SOCKETS
		if (cow_send)
			error = 0;
		else
#endif /* ZERO_COPY_SOCKETS */
		error = uiomove(mtod(m, void *), (int)len, uio);
		resid = uio->uio_resid;
		m->m_len = len;
		*mp = m;
		top->m_pkthdr.len += len;
		if (error)
			goto out;
		mp = &m->m_next;
		if (resid <= 0) {
			if (flags & MSG_EOR)
				top->m_flags |= M_EOR;
			break;
		}
	} while (*space > 0 && atomic);
out:
	*retmp = top;
	return (error);
}
#endif /*ZERO_COPY_SOCKETS*/

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)

int
sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	long space, resid;
	int clen = 0, error, dontroute;
#ifdef ZERO_COPY_SOCKETS
	int atomic = sosendallatonce(so) || top;
#endif

	KASSERT(so->so_type == SOCK_DGRAM, ("sodgram_send: !SOCK_DGRAM"));
	KASSERT(so->so_proto->pr_flags & PR_ATOMIC,
	    ("sodgram_send: !PR_ATOMIC"));

	if (uio != NULL)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.  However, space must be
	 * signed, as it might be less than 0 if we over-committed, and we
	 * must use a signed comparison of space and resid.  On the other
	 * hand, a negative resid causes us to loop sending 0-length
	 * segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
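	 *
	 * (Only the resid check is performed below: this routine is asserted
	 * above to handle SOCK_DGRAM sockets only, so the SOCK_STREAM case
	 * cannot arise here.)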
	 */
	if (resid < 0) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0;
	if (td != NULL)
		td->td_ru.ru_msgsnd++;
	if (control != NULL)
		clen = control->m_len;

	SOCKBUF_LOCK(&so->so_snd);
	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		SOCKBUF_UNLOCK(&so->so_snd);
		error = EPIPE;
		goto out;
	}
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		SOCKBUF_UNLOCK(&so->so_snd);
		goto out;
	}
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		/*
		 * `sendto' and `sendmsg' are allowed on a connection-based
		 * socket if it supports implied connect.  Return ENOTCONN if
		 * not connected and no address is supplied.
		 */
		if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
		    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
			if ((so->so_state & SS_ISCONFIRMING) == 0 &&
			    !(resid == 0 && clen != 0)) {
				SOCKBUF_UNLOCK(&so->so_snd);
				error = ENOTCONN;
				goto out;
			}
		} else if (addr == NULL) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED)
				error = ENOTCONN;
			else
				error = EDESTADDRREQ;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto out;
		}
	}

	/*
	 * Do we need MSG_OOB support in SOCK_DGRAM?  Signs here may be a
	 * problem and need fixing.
	 */
	space = sbspace(&so->so_snd);
	if (flags & MSG_OOB)
		space += 1024;
	space -= clen;
	SOCKBUF_UNLOCK(&so->so_snd);
	if (resid > space) {
		error = EMSGSIZE;
		goto out;
	}
	if (uio == NULL) {
		resid = 0;
		if (flags & MSG_EOR)
			top->m_flags |= M_EOR;
	} else {
#ifdef ZERO_COPY_SOCKETS
		error = sosend_copyin(uio, &top, atomic, &space, flags);
		if (error)
			goto out;
#else
		/*
		 * Copy the data from userland into a mbuf chain.
		 * If no data is to be copied in, a single empty mbuf
		 * is returned.
		 */
		top = m_uiotombuf(uio, M_WAITOK, space, max_hdr,
		    (M_PKTHDR | ((flags & MSG_EOR) ? M_EOR : 0)));
		if (top == NULL) {
			error = EFAULT;	/* only possible error */
			goto out;
		}
		space -= resid - uio->uio_resid;
#endif
		resid = uio->uio_resid;
	}
	KASSERT(resid == 0, ("sosend_dgram: resid != 0"));
	/*
	 * XXXRW: Frobbing SO_DONTROUTE here is even worse without sblock
	 * than with.
	 */
	if (dontroute) {
		SOCK_LOCK(so);
		so->so_options |= SO_DONTROUTE;
		SOCK_UNLOCK(so);
	}
	/*
	 * XXX all the SBS_CANTSENDMORE checks previously done could be out
	 * of date.  We could have received a reset packet in an interrupt or
	 * maybe we slept while doing page faults in uiomove() etc.  We could
	 * probably recheck again inside the locking protection here, but
	 * there are probably other places that this also happens.  We must
	 * rethink this.
	 */
	error = (*so->so_proto->pr_usrreqs->pru_send)(so,
	    (flags & MSG_OOB) ? PRUS_OOB :
	/*
	 * If the user set MSG_EOF, the protocol understands this flag and
	 * nothing left to send then use PRU_SEND_EOF instead of PRU_SEND.
	 */
	    ((flags & MSG_EOF) &&
	     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
	     (resid <= 0)) ?
		PRUS_EOF :
	    /* If there is more to send set PRUS_MORETOCOME */
	    (resid > 0 && space > 0) ?
		PRUS_MORETOCOME : 0,
	    top, addr, control, td);
	if (dontroute) {
		SOCK_LOCK(so);
		so->so_options &= ~SO_DONTROUTE;
		SOCK_UNLOCK(so);
	}
	clen = 0;
	control = NULL;
	top = NULL;
out:
	if (top != NULL)
		m_freem(top);
	if (control != NULL)
		m_freem(control);
	return (error);
}

/*
 * Send on a socket.  If send must go all at once and message is larger than
 * send buffering, then hard error.  Lock against other senders.  If must go
 * all at once and not enough room now, then inform user that this would
 * block and do nothing.  Otherwise, if nonblocking, send as much as
 * possible.  The data to be sent is described by "uio" if nonzero, otherwise
 * by the mbuf chain "top" (which must be null if uio is not).  Data provided
 * in mbuf chain must be small enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers must check for short
 * counts if EINTR/ERESTART are returned.  Data and control buffers are freed
 * on return.
 */
int
sosend_generic(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	long space, resid;
	int clen = 0, error, dontroute;
	int atomic = sosendallatonce(so) || top;

	if (uio != NULL)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.  However, space must be
	 * signed, as it might be less than 0 if we over-committed, and we
	 * must use a signed comparison of space and resid.  On the other
	 * hand, a negative resid causes us to loop sending 0-length
	 * segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td != NULL)
		td->td_ru.ru_msgsnd++;
	if (control != NULL)
		clen = control->m_len;

	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

restart:
	do {
		SOCKBUF_LOCK(&so->so_snd);
		if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
			SOCKBUF_UNLOCK(&so->so_snd);
			error = EPIPE;
			goto release;
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0)) {
					SOCKBUF_UNLOCK(&so->so_snd);
					error = ENOTCONN;
					goto release;
				}
			} else if (addr == NULL) {
				SOCKBUF_UNLOCK(&so->so_snd);
				if (so->so_proto->pr_flags & PR_CONNREQUIRED)
					error = ENOTCONN;
				else
					error = EDESTADDRREQ;
				goto release;
			}
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat) {
			SOCKBUF_UNLOCK(&so->so_snd);
			error = EMSGSIZE;
			goto release;
		}
		if (space < resid + clen &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO)) {
				SOCKBUF_UNLOCK(&so->so_snd);
				error = EWOULDBLOCK;
				goto release;
			}
			error = sbwait(&so->so_snd);
			SOCKBUF_UNLOCK(&so->so_snd);
			if (error)
				goto release;
			goto restart;
		}
		SOCKBUF_UNLOCK(&so->so_snd);
		space -= clen;
		do {
			if (uio == NULL) {
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else {
#ifdef ZERO_COPY_SOCKETS
				error = sosend_copyin(uio, &top, atomic,
				    &space, flags);
				if (error != 0)
					goto release;
#else
				/*
				 * Copy the data from userland into a mbuf
				 * chain.  If no data is to be copied in,
				 * a single empty mbuf is returned.
				 */
				top = m_uiotombuf(uio, M_WAITOK, space,
				    (atomic ? max_hdr : 0),
				    (atomic ? M_PKTHDR : 0) |
				    ((flags & MSG_EOR) ? M_EOR : 0));
				if (top == NULL) {
					error = EFAULT; /* only possible error */
					goto release;
				}
				space -= resid - uio->uio_resid;
#endif
				resid = uio->uio_resid;
			}
			if (dontroute) {
				SOCK_LOCK(so);
				so->so_options |= SO_DONTROUTE;
				SOCK_UNLOCK(so);
			}
			/*
			 * XXX all the SBS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We
			 * could probably recheck again inside the locking
			 * protection here, but there are probably other
			 * places that this also happens.  We must rethink
			 * this.
			 */
			error = (*so->so_proto->pr_usrreqs->pru_send)(so,
			    (flags & MSG_OOB) ? PRUS_OOB :
			/*
			 * If the user set MSG_EOF, the protocol understands
			 * this flag and nothing left to send then use
			 * PRU_SEND_EOF instead of PRU_SEND.
			 */
			    ((flags & MSG_EOF) &&
			     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			     (resid <= 0)) ?
				PRUS_EOF :
			/* If there is more to send set PRUS_MORETOCOME. */
			    (resid > 0 && space > 0) ?
				PRUS_MORETOCOME : 0,
			    top, addr, control, td);
			if (dontroute) {
				SOCK_LOCK(so);
				so->so_options &= ~SO_DONTROUTE;
				SOCK_UNLOCK(so);
			}
			clen = 0;
			control = NULL;
			top = NULL;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	sbunlock(&so->so_snd);
out:
	if (top != NULL)
		m_freem(top);
	if (control != NULL)
		m_freem(control);
	return (error);
}

int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{

	return (so->so_proto->pr_usrreqs->pru_sosend(so, addr, uio, top,
	    control, flags, td));
}

/*
 * The part of soreceive() that implements reading non-inline out-of-band
 * data from a socket.  For more complete comments, see soreceive(), from
 * which this code originated.
 *
 * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is
 * unable to return an mbuf chain to the caller.
 */
static int
soreceive_rcvoob(struct socket *so, struct uio *uio, int flags)
{
	struct protosw *pr = so->so_proto;
	struct mbuf *m;
	int error;

	KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0"));

	m = m_get(M_WAIT, MT_DATA);
	error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
	if (error)
		goto bad;
	do {
#ifdef ZERO_COPY_SOCKETS
		if (so_zero_copy_receive) {
			int disposable;

			if ((m->m_flags & M_EXT)
			 && (m->m_ext.ext_type == EXT_DISPOSABLE))
				disposable = 1;
			else
				disposable = 0;

			error = uiomoveco(mtod(m, void *),
			    min(uio->uio_resid, m->m_len),
			    uio, disposable);
		} else
#endif /* ZERO_COPY_SOCKETS */
		error = uiomove(mtod(m, void *),
		    (int) min(uio->uio_resid, m->m_len), uio);
		m = m_free(m);
	} while (uio->uio_resid && error == 0 && m);
bad:
	if (m != NULL)
		m_freem(m);
	return (error);
}

/*
 * Following replacement or removal of the first mbuf on the first mbuf chain
 * of a socket buffer, push necessary state changes back into the socket
 * buffer so that other consumers see the values consistently.  'nextrecord'
 * is the caller's locally stored value of the original value of
 * sb->sb_mb->m_nextpkt which must be restored when the lead mbuf changes.
 * NOTE: 'nextrecord' may be NULL.
 */
static __inline void
sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord)
{

	SOCKBUF_LOCK_ASSERT(sb);
	/*
	 * First, update for the new value of nextrecord.  If necessary, make
	 * it the first record.
	 */
	if (sb->sb_mb != NULL)
		sb->sb_mb->m_nextpkt = nextrecord;
	else
		sb->sb_mb = nextrecord;

	/*
	 * Now update any dependent socket buffer fields to reflect the new
	 * state.  This is an expanded inline of SB_EMPTY_FIXUP(), with the
	 * addition of a second clause that takes care of the case where
	 * sb_mb has been updated, but remains the last record.
	 */
	if (sb->sb_mb == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (sb->sb_mb->m_nextpkt == NULL)
		sb->sb_lastrecord = sb->sb_mb;
}


/*
 * Implement receive operations on a socket.  We depend on the way that
 * records are added to the sockbuf by sbappend.
 * In particular, each record (mbufs linked through m_next) must begin with
 * an address if the protocol so specifies, followed by an optional mbuf or
 * mbufs containing ancillary data, and then zero or more mbufs of data.  In
 * order to allow parallelism between network receive and copying to user
 * space, as well as avoid sleeping with a mutex held, we release the socket
 * buffer mutex during the user space copy.  Although the sockbuf is locked,
 * new data may still be appended, and thus we must maintain consistency of
 * the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying an
 * mbuf **mp0 for use in returning the chain.  The uio is then used only for
 * the count in uio_resid.
 */
int
soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, **mp;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff, type = 0;
	int orig_resid = uio->uio_resid;

	mp = mp0;
	if (psa != NULL)
		*psa = NULL;
	if (controlp != NULL)
		*controlp = NULL;
	if (flagsp != NULL)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB)
		return (soreceive_rcvoob(so, uio, flags));
	if (mp != NULL)
		*mp = NULL;
	if ((pr->pr_flags & PR_WANTRCVD) && (so->so_state & SS_ISCONFIRMING)
	    && uio->uio_resid)
		(*pr->pr_usrreqs->pru_rcvd)(so, 0);

	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		return (error);

restart:
	SOCKBUF_LOCK(&so->so_rcv);
	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more (subject
	 * to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning a
	 * short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.sb_cc,
		    ("receive: m == %p so->so_rcv.sb_cc == %u",
		    m, so->so_rcv.sb_cc));
		if (so->so_error) {
			if (m != NULL)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			SOCKBUF_UNLOCK(&so->so_rcv);
			goto release;
		}
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			if (m == NULL) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				goto release;
			} else
				goto dontblock;
		}
		for (; m != NULL; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			goto release;
		}
		if ((so->so_state & SS_NBIO) ||
		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			error = EWOULDBLOCK;
			goto release;
		}
		SBLASTRECORDCHK(&so->so_rcv);
		SBLASTMBUFCHK(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		SOCKBUF_UNLOCK(&so->so_rcv);
		if (error)
			goto release;
		goto restart;
	}
dontblock:
	/*
	 * From this point onward, we maintain 'nextrecord' as a cache of the
	 * pointer to the next record in the socket buffer.  We must keep the
	 * various socket buffer pointers and local stack versions of the
	 * pointers in sync, pushing out modifications before dropping the
	 * socket buffer mutex, and re-reading them when picking it up.
	 *
	 * Otherwise, we will race with the network stack appending new data
	 * or records onto the socket buffer by using inconsistent/stale
	 * versions of the field, possibly resulting in socket buffer
	 * corruption.
	 *
	 * By holding the high-level sblock(), we prevent simultaneous
	 * readers from pulling off the front of the socket buffer.
	 */
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (uio->uio_td)
		uio->uio_td->td_ru.ru_msgrcv++;
	KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME,
		    ("m->m_type == %d", m->m_type));
		orig_resid = 0;
		if (psa != NULL)
			*psa = sodupsockaddr(mtod(m, struct sockaddr *),
			    M_NOWAIT);
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			so->so_rcv.sb_mb = m_free(m);
			m = so->so_rcv.sb_mb;
			sockbuf_pushsync(&so->so_rcv, nextrecord);
		}
	}

	/*
	 * Process one or more MT_CONTROL mbufs present before any data mbufs
	 * in the first mbuf chain on the socket buffer.  If MSG_PEEK, we
	 * just copy the data; if !MSG_PEEK, we call into the protocol to
	 * perform externalization (or freeing if controlp == NULL).
	 */
	if (m != NULL && m->m_type == MT_CONTROL) {
		struct mbuf *cm = NULL, *cmn;
		struct mbuf **cme = &cm;

		do {
			if (flags & MSG_PEEK) {
				if (controlp != NULL) {
					*controlp = m_copy(m, 0, m->m_len);
					controlp = &(*controlp)->m_next;
				}
				m = m->m_next;
			} else {
				sbfree(&so->so_rcv, m);
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = NULL;
				*cme = m;
				cme = &(*cme)->m_next;
				m = so->so_rcv.sb_mb;
			}
		} while (m != NULL && m->m_type == MT_CONTROL);
		if ((flags & MSG_PEEK) == 0)
			sockbuf_pushsync(&so->so_rcv, nextrecord);
		while (cm != NULL) {
			cmn = cm->m_next;
			cm->m_next = NULL;
			if (pr->pr_domain->dom_externalize != NULL) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				error = (*pr->pr_domain->dom_externalize)
				    (cm, controlp);
				SOCKBUF_LOCK(&so->so_rcv);
			} else if (controlp != NULL)
				*controlp = cm;
			else
				m_freem(cm);
			if (controlp != NULL) {
				orig_resid = 0;
				while (*controlp != NULL)
					controlp = &(*controlp)->m_next;
			}
			cm = cmn;
		}
		if (m != NULL)
			nextrecord = so->so_rcv.sb_mb->m_nextpkt;
		else
			nextrecord = so->so_rcv.sb_mb;
		orig_resid = 0;
	}
	if (m != NULL) {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(m->m_nextpkt == nextrecord,
			    ("soreceive: post-control, nextrecord !sync"));
			if (nextrecord == NULL) {
				KASSERT(so->so_rcv.sb_mb == m,
				    ("soreceive: post-control, sb_mb!=m"));
				KASSERT(so->so_rcv.sb_lastrecord == m,
				    ("soreceive: post-control, lastrecord!=m"));
			}
		}
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	} else {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(so->so_rcv.sb_mb == nextrecord,
			    ("soreceive: sb_mb != nextrecord"));
			if (so->so_rcv.sb_mb == NULL) {
				KASSERT(so->so_rcv.sb_lastrecord == NULL,
				    ("soreceive: sb_lastrecord != NULL"));
			}
		}
	}
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);

	/*
	 * Now continue to read any data mbufs off of the head of the socket
	 * buffer until the read request is satisfied.  Note that 'type' is
	 * used to store the type of any mbuf reads that have happened so far
	 * such that soreceive() can stop reading if the type changes, which
	 * causes soreceive() to return only one of regular data and inline
	 * out-of-band data in a single socket receive operation.
	 */
	moff = 0;
	offset = 0;
	while (m != NULL && uio->uio_resid > 0 && error == 0) {
		/*
		 * If the type of mbuf has changed since the last mbuf
		 * examined ('type'), end the receive operation.
		 */
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
		else
			KASSERT(m->m_type == MT_DATA,
			    ("m->m_type == %d", m->m_type));
		so->so_rcv.sb_state &= ~SBS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.  Otherwise copy
		 * them out via the uio, then free.  Sockbuf must be
		 * consistent here (points to current mbuf, it points to next
		 * record) when we drop priority; we must note any additions
		 * to the sockbuf when we block interrupts again.
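		 * (In the current locking model, "drop priority" and "block
		 * interrupts" correspond to releasing and reacquiring the
		 * socket buffer mutex around the uiomove() call below.)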
		 */
		if (mp == NULL) {
			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
			SBLASTRECORDCHK(&so->so_rcv);
			SBLASTMBUFCHK(&so->so_rcv);
			SOCKBUF_UNLOCK(&so->so_rcv);
#ifdef ZERO_COPY_SOCKETS
			if (so_zero_copy_receive) {
				int disposable;

				if ((m->m_flags & M_EXT)
				 && (m->m_ext.ext_type == EXT_DISPOSABLE))
					disposable = 1;
				else
					disposable = 0;

				error = uiomoveco(mtod(m, char *) + moff,
				    (int)len, uio,
				    disposable);
			} else
#endif /* ZERO_COPY_SOCKETS */
			error = uiomove(mtod(m, char *) + moff, (int)len, uio);
			SOCKBUF_LOCK(&so->so_rcv);
			if (error) {
				/*
				 * The MT_SONAME mbuf has already been removed
				 * from the record, so it is necessary to
				 * remove the data mbufs, if any, to preserve
				 * the invariant in the case of PR_ADDR that
				 * requires MT_SONAME mbufs at the head of
				 * each record.
				 */
				if (m && pr->pr_flags & PR_ATOMIC &&
				    ((flags & MSG_PEEK) == 0))
					(void)sbdroprecord_locked(&so->so_rcv);
				SOCKBUF_UNLOCK(&so->so_rcv);
				goto release;
			}
		} else
			uio->uio_resid -= len;
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp != NULL) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					so->so_rcv.sb_mb = m_free(m);
					m = so->so_rcv.sb_mb;
				}
				sockbuf_pushsync(&so->so_rcv, nextrecord);
				SBLASTRECORDCHK(&so->so_rcv);
				SBLASTMBUFCHK(&so->so_rcv);
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp != NULL) {
					int copy_flag;

					if (flags & MSG_DONTWAIT)
						copy_flag = M_DONTWAIT;
					else
						copy_flag = M_WAIT;
					if (copy_flag == M_WAIT)
						SOCKBUF_UNLOCK(&so->so_rcv);
					*mp = m_copym(m, 0, len, copy_flag);
					if (copy_flag == M_WAIT)
						SOCKBUF_LOCK(&so->so_rcv);
					if (*mp == NULL) {
						/*
						 * m_copym() couldn't
						 * allocate an mbuf.  Adjust
						 * uio_resid back (it was
						 * adjusted down by len
						 * bytes, which we didn't end
						 * up "copying" over).
						 */
						uio->uio_resid += len;
						break;
					}
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_rcv.sb_state |= SBS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket), we
		 * must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return with a
		 * short count but without error.  Keep sockbuf locked
		 * against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && nextrecord == NULL) {
			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
			if (so->so_error || so->so_rcv.sb_state & SBS_CANTRCVMORE)
				break;
			/*
			 * Notify the protocol that some data has been
			 * drained before blocking.
1776 */ 1777 if (pr->pr_flags & PR_WANTRCVD) { 1778 SOCKBUF_UNLOCK(&so->so_rcv); 1779 (*pr->pr_usrreqs->pru_rcvd)(so, flags); 1780 SOCKBUF_LOCK(&so->so_rcv); 1781 } 1782 SBLASTRECORDCHK(&so->so_rcv); 1783 SBLASTMBUFCHK(&so->so_rcv); 1784 error = sbwait(&so->so_rcv); 1785 if (error) { 1786 SOCKBUF_UNLOCK(&so->so_rcv); 1787 goto release; 1788 } 1789 m = so->so_rcv.sb_mb; 1790 if (m != NULL) 1791 nextrecord = m->m_nextpkt; 1792 } 1793 } 1794 1795 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1796 if (m != NULL && pr->pr_flags & PR_ATOMIC) { 1797 flags |= MSG_TRUNC; 1798 if ((flags & MSG_PEEK) == 0) 1799 (void) sbdroprecord_locked(&so->so_rcv); 1800 } 1801 if ((flags & MSG_PEEK) == 0) { 1802 if (m == NULL) { 1803 /* 1804 * First part is an inline SB_EMPTY_FIXUP(). Second 1805 * part makes sure sb_lastrecord is up-to-date if 1806 * there is still data in the socket buffer. 1807 */ 1808 so->so_rcv.sb_mb = nextrecord; 1809 if (so->so_rcv.sb_mb == NULL) { 1810 so->so_rcv.sb_mbtail = NULL; 1811 so->so_rcv.sb_lastrecord = NULL; 1812 } else if (nextrecord->m_nextpkt == NULL) 1813 so->so_rcv.sb_lastrecord = nextrecord; 1814 } 1815 SBLASTRECORDCHK(&so->so_rcv); 1816 SBLASTMBUFCHK(&so->so_rcv); 1817 /* 1818 * If soreceive() is being done from the socket callback, 1819 * then don't need to generate ACK to peer to update window, 1820 * since ACK will be generated on return to TCP. 1821 */ 1822 if (!(flags & MSG_SOCALLBCK) && 1823 (pr->pr_flags & PR_WANTRCVD)) { 1824 SOCKBUF_UNLOCK(&so->so_rcv); 1825 (*pr->pr_usrreqs->pru_rcvd)(so, flags); 1826 SOCKBUF_LOCK(&so->so_rcv); 1827 } 1828 } 1829 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1830 if (orig_resid == uio->uio_resid && orig_resid && 1831 (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) { 1832 SOCKBUF_UNLOCK(&so->so_rcv); 1833 goto restart; 1834 } 1835 SOCKBUF_UNLOCK(&so->so_rcv); 1836 1837 if (flagsp != NULL) 1838 *flagsp |= flags; 1839 release: 1840 sbunlock(&so->so_rcv); 1841 return (error); 1842 } 1843 1844 /* 1845 * Optimized version of soreceive() for simple datagram cases from userspace. 1846 * Unlike in the stream case, we're able to drop a datagram if copyout() 1847 * fails, and because we handle datagrams atomically, we don't need to use a 1848 * sleep lock to prevent I/O interlacing. 1849 */ 1850 int 1851 soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio, 1852 struct mbuf **mp0, struct mbuf **controlp, int *flagsp) 1853 { 1854 struct mbuf *m, *m2; 1855 int flags, len, error, offset; 1856 struct protosw *pr = so->so_proto; 1857 struct mbuf *nextrecord; 1858 1859 if (psa != NULL) 1860 *psa = NULL; 1861 if (controlp != NULL) 1862 *controlp = NULL; 1863 if (flagsp != NULL) 1864 flags = *flagsp &~ MSG_EOR; 1865 else 1866 flags = 0; 1867 1868 /* 1869 * For any complicated cases, fall back to the full 1870 * soreceive_generic(). 1871 */ 1872 if (mp0 != NULL || (flags & MSG_PEEK) || (flags & MSG_OOB)) 1873 return (soreceive_generic(so, psa, uio, mp0, controlp, 1874 flagsp)); 1875 1876 /* 1877 * Enforce restrictions on use. 1878 */ 1879 KASSERT((pr->pr_flags & PR_WANTRCVD) == 0, 1880 ("soreceive_dgram: wantrcvd")); 1881 KASSERT(pr->pr_flags & PR_ATOMIC, ("soreceive_dgram: !atomic")); 1882 KASSERT((so->so_rcv.sb_state & SBS_RCVATMARK) == 0, 1883 ("soreceive_dgram: SBS_RCVATMARK")); 1884 KASSERT((so->so_proto->pr_flags & PR_CONNREQUIRED) == 0, 1885 ("soreceive_dgram: P_CONNREQUIRED")); 1886 1887 /* 1888 * Loop blocking while waiting for a datagram. 
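 *
 * From the consumer's point of view this loop is the blocking
 * behaviour of recvfrom(2): a blocking socket sleeps here until a
 * datagram or an error/EOF condition shows up, while a non-blocking
 * one fails immediately.  Illustrative userland sketch (not kernel
 * code; s is an assumed datagram socket):
 *
 *	char buf[2048];
 *	ssize_t n;
 *
 *	n = recvfrom(s, buf, sizeof(buf), MSG_DONTWAIT, NULL, NULL);
 *	if (n == -1 && errno == EWOULDBLOCK)
 *		;	// nothing queued yet; matches the check below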
1889 */ 1890 SOCKBUF_LOCK(&so->so_rcv); 1891 while ((m = so->so_rcv.sb_mb) == NULL) { 1892 KASSERT(so->so_rcv.sb_cc == 0, 1893 ("soreceive_dgram: sb_mb NULL but sb_cc %u", 1894 so->so_rcv.sb_cc)); 1895 if (so->so_error) { 1896 error = so->so_error; 1897 so->so_error = 0; 1898 SOCKBUF_UNLOCK(&so->so_rcv); 1899 return (error); 1900 } 1901 if (so->so_rcv.sb_state & SBS_CANTRCVMORE || 1902 uio->uio_resid == 0) { 1903 SOCKBUF_UNLOCK(&so->so_rcv); 1904 return (0); 1905 } 1906 if ((so->so_state & SS_NBIO) || 1907 (flags & (MSG_DONTWAIT|MSG_NBIO))) { 1908 SOCKBUF_UNLOCK(&so->so_rcv); 1909 return (EWOULDBLOCK); 1910 } 1911 SBLASTRECORDCHK(&so->so_rcv); 1912 SBLASTMBUFCHK(&so->so_rcv); 1913 error = sbwait(&so->so_rcv); 1914 if (error) { 1915 SOCKBUF_UNLOCK(&so->so_rcv); 1916 return (error); 1917 } 1918 } 1919 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 1920 1921 if (uio->uio_td) 1922 uio->uio_td->td_ru.ru_msgrcv++; 1923 SBLASTRECORDCHK(&so->so_rcv); 1924 SBLASTMBUFCHK(&so->so_rcv); 1925 nextrecord = m->m_nextpkt; 1926 if (nextrecord == NULL) { 1927 KASSERT(so->so_rcv.sb_lastrecord == m, 1928 ("soreceive_dgram: lastrecord != m")); 1929 } 1930 1931 KASSERT(so->so_rcv.sb_mb->m_nextpkt == nextrecord, 1932 ("soreceive_dgram: m_nextpkt != nextrecord")); 1933 1934 /* 1935 * Pull 'm' and its chain off the front of the packet queue. 1936 */ 1937 so->so_rcv.sb_mb = NULL; 1938 sockbuf_pushsync(&so->so_rcv, nextrecord); 1939 1940 /* 1941 * Walk 'm's chain and free that many bytes from the socket buffer. 1942 */ 1943 for (m2 = m; m2 != NULL; m2 = m2->m_next) 1944 sbfree(&so->so_rcv, m2); 1945 1946 /* 1947 * Do a few last checks before we let go of the lock. 1948 */ 1949 SBLASTRECORDCHK(&so->so_rcv); 1950 SBLASTMBUFCHK(&so->so_rcv); 1951 SOCKBUF_UNLOCK(&so->so_rcv); 1952 1953 if (pr->pr_flags & PR_ADDR) { 1954 KASSERT(m->m_type == MT_SONAME, 1955 ("m->m_type == %d", m->m_type)); 1956 if (psa != NULL) 1957 *psa = sodupsockaddr(mtod(m, struct sockaddr *), 1958 M_NOWAIT); 1959 m = m_free(m); 1960 } 1961 if (m == NULL) { 1962 /* XXXRW: Can this happen? */ 1963 return (0); 1964 } 1965 1966 /* 1967 * Packet to copyout() is now in 'm' and it is disconnected from the 1968 * queue. 1969 * 1970 * Process one or more MT_CONTROL mbufs present before any data mbufs 1971 * in the first mbuf chain on the socket buffer. We call into the 1972 * protocol to perform externalization (or freeing if controlp == 1973 * NULL). 
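 *
 * Externalization converts the kernel representation of control data
 * into something the receiving process can use; the classic case is
 * unix(4) SCM_RIGHTS, where in-flight file references become open
 * descriptors in the receiver.  Illustrative userland sketch of the
 * receiving side (not kernel code; buf and s are assumed to exist):
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	char cbuf[CMSG_SPACE(sizeof(int))];
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *	    .msg_control = cbuf, .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cmsg;
 *	int fd = -1;
 *
 *	if (recvmsg(s, &msg, 0) != -1 &&
 *	    (cmsg = CMSG_FIRSTHDR(&msg)) != NULL &&
 *	    cmsg->cmsg_level == SOL_SOCKET &&
 *	    cmsg->cmsg_type == SCM_RIGHTS)
 *		memcpy(&fd, CMSG_DATA(cmsg), sizeof(fd));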
1974 */ 1975 if (m->m_type == MT_CONTROL) { 1976 struct mbuf *cm = NULL, *cmn; 1977 struct mbuf **cme = &cm; 1978 1979 do { 1980 m2 = m->m_next; 1981 m->m_next = NULL; 1982 *cme = m; 1983 cme = &(*cme)->m_next; 1984 m = m2; 1985 } while (m != NULL && m->m_type == MT_CONTROL); 1986 while (cm != NULL) { 1987 cmn = cm->m_next; 1988 cm->m_next = NULL; 1989 if (pr->pr_domain->dom_externalize != NULL) { 1990 error = (*pr->pr_domain->dom_externalize) 1991 (cm, controlp); 1992 } else if (controlp != NULL) 1993 *controlp = cm; 1994 else 1995 m_freem(cm); 1996 if (controlp != NULL) { 1997 while (*controlp != NULL) 1998 controlp = &(*controlp)->m_next; 1999 } 2000 cm = cmn; 2001 } 2002 } 2003 KASSERT(m->m_type == MT_DATA, ("soreceive_dgram: !data")); 2004 2005 offset = 0; 2006 while (m != NULL && uio->uio_resid > 0) { 2007 len = uio->uio_resid; 2008 if (len > m->m_len) 2009 len = m->m_len; 2010 error = uiomove(mtod(m, char *), (int)len, uio); 2011 if (error) { 2012 m_freem(m); 2013 return (error); 2014 } 2015 m = m_free(m); 2016 } 2017 if (m != NULL) 2018 flags |= MSG_TRUNC; 2019 m_freem(m); 2020 if (flagsp != NULL) 2021 *flagsp |= flags; 2022 return (0); 2023 } 2024 2025 int 2026 soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio, 2027 struct mbuf **mp0, struct mbuf **controlp, int *flagsp) 2028 { 2029 2030 return (so->so_proto->pr_usrreqs->pru_soreceive(so, psa, uio, mp0, 2031 controlp, flagsp)); 2032 } 2033 2034 int 2035 soshutdown(struct socket *so, int how) 2036 { 2037 struct protosw *pr = so->so_proto; 2038 2039 if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR)) 2040 return (EINVAL); 2041 if (pr->pr_usrreqs->pru_flush != NULL) { 2042 (*pr->pr_usrreqs->pru_flush)(so, how); 2043 } 2044 if (how != SHUT_WR) 2045 sorflush(so); 2046 if (how != SHUT_RD) 2047 return ((*pr->pr_usrreqs->pru_shutdown)(so)); 2048 return (0); 2049 } 2050 2051 void 2052 sorflush(struct socket *so) 2053 { 2054 struct sockbuf *sb = &so->so_rcv; 2055 struct protosw *pr = so->so_proto; 2056 struct sockbuf asb; 2057 2058 /* 2059 * In order to avoid calling dom_dispose with the socket buffer mutex 2060 * held, and in order to generally avoid holding the lock for a long 2061 * time, we make a copy of the socket buffer and clear the original 2062 * (except locks, state). The new socket buffer copy won't have 2063 * initialized locks so we can only call routines that won't use or 2064 * assert those locks. 2065 * 2066 * Dislodge threads currently blocked in receive and wait to acquire 2067 * a lock against other simultaneous readers before clearing the 2068 * socket buffer. Don't let our acquire be interrupted by a signal 2069 * despite any existing socket disposition on interruptable waiting. 2070 */ 2071 socantrcvmore(so); 2072 (void) sblock(sb, SBL_WAIT | SBL_NOINTR); 2073 2074 /* 2075 * Invalidate/clear most of the sockbuf structure, but leave selinfo 2076 * and mutex data unchanged. 2077 */ 2078 SOCKBUF_LOCK(sb); 2079 bzero(&asb, offsetof(struct sockbuf, sb_startzero)); 2080 bcopy(&sb->sb_startzero, &asb.sb_startzero, 2081 sizeof(*sb) - offsetof(struct sockbuf, sb_startzero)); 2082 bzero(&sb->sb_startzero, 2083 sizeof(*sb) - offsetof(struct sockbuf, sb_startzero)); 2084 SOCKBUF_UNLOCK(sb); 2085 sbunlock(sb); 2086 2087 /* 2088 * Dispose of special rights and flush the socket buffer. Don't call 2089 * any unsafe routines (that rely on locks being initialized) on asb. 
2090 */ 2091 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL) 2092 (*pr->pr_domain->dom_dispose)(asb.sb_mb); 2093 sbrelease_internal(&asb, so); 2094 } 2095 2096 /* 2097 * Perhaps this routine, and sooptcopyout(), below, ought to come in an 2098 * additional variant to handle the case where the option value needs to be 2099 * some kind of integer, but not a specific size. In addition to their use 2100 * here, these functions are also called by the protocol-level pr_ctloutput() 2101 * routines. 2102 */ 2103 int 2104 sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen) 2105 { 2106 size_t valsize; 2107 2108 /* 2109 * If the user gives us more than we wanted, we ignore it, but if we 2110 * don't get the minimum length the caller wants, we return EINVAL. 2111 * On success, sopt->sopt_valsize is set to however much we actually 2112 * retrieved. 2113 */ 2114 if ((valsize = sopt->sopt_valsize) < minlen) 2115 return EINVAL; 2116 if (valsize > len) 2117 sopt->sopt_valsize = valsize = len; 2118 2119 if (sopt->sopt_td != NULL) 2120 return (copyin(sopt->sopt_val, buf, valsize)); 2121 2122 bcopy(sopt->sopt_val, buf, valsize); 2123 return (0); 2124 } 2125 2126 /* 2127 * Kernel version of setsockopt(2). 2128 * 2129 * XXX: optlen is size_t, not socklen_t 2130 */ 2131 int 2132 so_setsockopt(struct socket *so, int level, int optname, void *optval, 2133 size_t optlen) 2134 { 2135 struct sockopt sopt; 2136 2137 sopt.sopt_level = level; 2138 sopt.sopt_name = optname; 2139 sopt.sopt_dir = SOPT_SET; 2140 sopt.sopt_val = optval; 2141 sopt.sopt_valsize = optlen; 2142 sopt.sopt_td = NULL; 2143 return (sosetopt(so, &sopt)); 2144 } 2145 2146 int 2147 sosetopt(struct socket *so, struct sockopt *sopt) 2148 { 2149 int error, optval; 2150 struct linger l; 2151 struct timeval tv; 2152 u_long val; 2153 #ifdef MAC 2154 struct mac extmac; 2155 #endif 2156 2157 error = 0; 2158 if (sopt->sopt_level != SOL_SOCKET) { 2159 if (so->so_proto && so->so_proto->pr_ctloutput) 2160 return ((*so->so_proto->pr_ctloutput) 2161 (so, sopt)); 2162 error = ENOPROTOOPT; 2163 } else { 2164 switch (sopt->sopt_name) { 2165 #ifdef INET 2166 case SO_ACCEPTFILTER: 2167 error = do_setopt_accept_filter(so, sopt); 2168 if (error) 2169 goto bad; 2170 break; 2171 #endif 2172 case SO_LINGER: 2173 error = sooptcopyin(sopt, &l, sizeof l, sizeof l); 2174 if (error) 2175 goto bad; 2176 2177 SOCK_LOCK(so); 2178 so->so_linger = l.l_linger; 2179 if (l.l_onoff) 2180 so->so_options |= SO_LINGER; 2181 else 2182 so->so_options &= ~SO_LINGER; 2183 SOCK_UNLOCK(so); 2184 break; 2185 2186 case SO_DEBUG: 2187 case SO_KEEPALIVE: 2188 case SO_DONTROUTE: 2189 case SO_USELOOPBACK: 2190 case SO_BROADCAST: 2191 case SO_REUSEADDR: 2192 case SO_REUSEPORT: 2193 case SO_OOBINLINE: 2194 case SO_TIMESTAMP: 2195 case SO_BINTIME: 2196 case SO_NOSIGPIPE: 2197 case SO_NO_DDP: 2198 case SO_NO_OFFLOAD: 2199 error = sooptcopyin(sopt, &optval, sizeof optval, 2200 sizeof optval); 2201 if (error) 2202 goto bad; 2203 SOCK_LOCK(so); 2204 if (optval) 2205 so->so_options |= sopt->sopt_name; 2206 else 2207 so->so_options &= ~sopt->sopt_name; 2208 SOCK_UNLOCK(so); 2209 break; 2210 2211 case SO_SETFIB: 2212 error = sooptcopyin(sopt, &optval, sizeof optval, 2213 sizeof optval); 2214 if (optval < 1 || optval > rt_numfibs) { 2215 error = EINVAL; 2216 goto bad; 2217 } 2218 if ((so->so_proto->pr_domain->dom_family == PF_INET) || 2219 (so->so_proto->pr_domain->dom_family == PF_ROUTE)) { 2220 so->so_fibnum = optval; 2221 } else { 2222 so->so_fibnum = 0; 2223 } 2224 break; 
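		/*
		 * Userland counterpart of the case above, as a hedged
		 * sketch: selecting routing table 1 for a socket on a
		 * kernel built with more than one FIB:
		 *
		 *	int fib = 1;
		 *
		 *	if (setsockopt(s, SOL_SOCKET, SO_SETFIB, &fib,
		 *	    sizeof(fib)) == -1)
		 *		err(1, "SO_SETFIB");
		 */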
2225 case SO_SNDBUF: 2226 case SO_RCVBUF: 2227 case SO_SNDLOWAT: 2228 case SO_RCVLOWAT: 2229 error = sooptcopyin(sopt, &optval, sizeof optval, 2230 sizeof optval); 2231 if (error) 2232 goto bad; 2233 2234 /* 2235 * Values < 1 make no sense for any of these options, 2236 * so disallow them. 2237 */ 2238 if (optval < 1) { 2239 error = EINVAL; 2240 goto bad; 2241 } 2242 2243 switch (sopt->sopt_name) { 2244 case SO_SNDBUF: 2245 case SO_RCVBUF: 2246 if (sbreserve(sopt->sopt_name == SO_SNDBUF ? 2247 &so->so_snd : &so->so_rcv, (u_long)optval, 2248 so, curthread) == 0) { 2249 error = ENOBUFS; 2250 goto bad; 2251 } 2252 (sopt->sopt_name == SO_SNDBUF ? &so->so_snd : 2253 &so->so_rcv)->sb_flags &= ~SB_AUTOSIZE; 2254 break; 2255 2256 /* 2257 * Make sure the low-water is never greater than the 2258 * high-water. 2259 */ 2260 case SO_SNDLOWAT: 2261 SOCKBUF_LOCK(&so->so_snd); 2262 so->so_snd.sb_lowat = 2263 (optval > so->so_snd.sb_hiwat) ? 2264 so->so_snd.sb_hiwat : optval; 2265 SOCKBUF_UNLOCK(&so->so_snd); 2266 break; 2267 case SO_RCVLOWAT: 2268 SOCKBUF_LOCK(&so->so_rcv); 2269 so->so_rcv.sb_lowat = 2270 (optval > so->so_rcv.sb_hiwat) ? 2271 so->so_rcv.sb_hiwat : optval; 2272 SOCKBUF_UNLOCK(&so->so_rcv); 2273 break; 2274 } 2275 break; 2276 2277 case SO_SNDTIMEO: 2278 case SO_RCVTIMEO: 2279 #ifdef COMPAT_IA32 2280 if (curthread->td_proc->p_sysent == &ia32_freebsd_sysvec) { 2281 struct timeval32 tv32; 2282 2283 error = sooptcopyin(sopt, &tv32, sizeof tv32, 2284 sizeof tv32); 2285 CP(tv32, tv, tv_sec); 2286 CP(tv32, tv, tv_usec); 2287 } else 2288 #endif 2289 error = sooptcopyin(sopt, &tv, sizeof tv, 2290 sizeof tv); 2291 if (error) 2292 goto bad; 2293 2294 /* assert(hz > 0); */ 2295 if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz || 2296 tv.tv_usec < 0 || tv.tv_usec >= 1000000) { 2297 error = EDOM; 2298 goto bad; 2299 } 2300 /* assert(tick > 0); */ 2301 /* assert(ULONG_MAX - INT_MAX >= 1000000); */ 2302 val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick; 2303 if (val > INT_MAX) { 2304 error = EDOM; 2305 goto bad; 2306 } 2307 if (val == 0 && tv.tv_usec != 0) 2308 val = 1; 2309 2310 switch (sopt->sopt_name) { 2311 case SO_SNDTIMEO: 2312 so->so_snd.sb_timeo = val; 2313 break; 2314 case SO_RCVTIMEO: 2315 so->so_rcv.sb_timeo = val; 2316 break; 2317 } 2318 break; 2319 2320 case SO_LABEL: 2321 #ifdef MAC 2322 error = sooptcopyin(sopt, &extmac, sizeof extmac, 2323 sizeof extmac); 2324 if (error) 2325 goto bad; 2326 error = mac_setsockopt_label(sopt->sopt_td->td_ucred, 2327 so, &extmac); 2328 #else 2329 error = EOPNOTSUPP; 2330 #endif 2331 break; 2332 2333 default: 2334 error = ENOPROTOOPT; 2335 break; 2336 } 2337 if (error == 0 && so->so_proto != NULL && 2338 so->so_proto->pr_ctloutput != NULL) { 2339 (void) ((*so->so_proto->pr_ctloutput) 2340 (so, sopt)); 2341 } 2342 } 2343 bad: 2344 return (error); 2345 } 2346 2347 /* 2348 * Helper routine for getsockopt. 2349 */ 2350 int 2351 sooptcopyout(struct sockopt *sopt, const void *buf, size_t len) 2352 { 2353 int error; 2354 size_t valsize; 2355 2356 error = 0; 2357 2358 /* 2359 * Documented get behavior is that we always return a value, possibly 2360 * truncated to fit in the user's buffer. Traditional behavior is 2361 * that we always tell the user precisely how much we copied, rather 2362 * than something useful like the total amount we had available for 2363 * her. Note that this interface is not idempotent; the entire 2364 * answer must generated ahead of time. 
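 *
 * A typical caller is a protocol's pr_ctloutput() get-handler
 * returning a fixed-size integer; a hedged sketch (foo_compute_value
 * is a hypothetical helper):
 *
 *	int optval;
 *
 *	optval = foo_compute_value(so);
 *	error = sooptcopyout(sopt, &optval, sizeof(optval));
 *
 * If the user supplied a buffer smaller than sizeof(optval), only that
 * many bytes are copied back and sopt_valsize reports the truncated
 * length.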
2365 */ 2366 valsize = min(len, sopt->sopt_valsize); 2367 sopt->sopt_valsize = valsize; 2368 if (sopt->sopt_val != NULL) { 2369 if (sopt->sopt_td != NULL) 2370 error = copyout(buf, sopt->sopt_val, valsize); 2371 else 2372 bcopy(buf, sopt->sopt_val, valsize); 2373 } 2374 return (error); 2375 } 2376 2377 int 2378 sogetopt(struct socket *so, struct sockopt *sopt) 2379 { 2380 int error, optval; 2381 struct linger l; 2382 struct timeval tv; 2383 #ifdef MAC 2384 struct mac extmac; 2385 #endif 2386 2387 error = 0; 2388 if (sopt->sopt_level != SOL_SOCKET) { 2389 if (so->so_proto && so->so_proto->pr_ctloutput) { 2390 return ((*so->so_proto->pr_ctloutput) 2391 (so, sopt)); 2392 } else 2393 return (ENOPROTOOPT); 2394 } else { 2395 switch (sopt->sopt_name) { 2396 #ifdef INET 2397 case SO_ACCEPTFILTER: 2398 error = do_getopt_accept_filter(so, sopt); 2399 break; 2400 #endif 2401 case SO_LINGER: 2402 SOCK_LOCK(so); 2403 l.l_onoff = so->so_options & SO_LINGER; 2404 l.l_linger = so->so_linger; 2405 SOCK_UNLOCK(so); 2406 error = sooptcopyout(sopt, &l, sizeof l); 2407 break; 2408 2409 case SO_USELOOPBACK: 2410 case SO_DONTROUTE: 2411 case SO_DEBUG: 2412 case SO_KEEPALIVE: 2413 case SO_REUSEADDR: 2414 case SO_REUSEPORT: 2415 case SO_BROADCAST: 2416 case SO_OOBINLINE: 2417 case SO_ACCEPTCONN: 2418 case SO_TIMESTAMP: 2419 case SO_BINTIME: 2420 case SO_NOSIGPIPE: 2421 optval = so->so_options & sopt->sopt_name; 2422 integer: 2423 error = sooptcopyout(sopt, &optval, sizeof optval); 2424 break; 2425 2426 case SO_TYPE: 2427 optval = so->so_type; 2428 goto integer; 2429 2430 case SO_ERROR: 2431 SOCK_LOCK(so); 2432 optval = so->so_error; 2433 so->so_error = 0; 2434 SOCK_UNLOCK(so); 2435 goto integer; 2436 2437 case SO_SNDBUF: 2438 optval = so->so_snd.sb_hiwat; 2439 goto integer; 2440 2441 case SO_RCVBUF: 2442 optval = so->so_rcv.sb_hiwat; 2443 goto integer; 2444 2445 case SO_SNDLOWAT: 2446 optval = so->so_snd.sb_lowat; 2447 goto integer; 2448 2449 case SO_RCVLOWAT: 2450 optval = so->so_rcv.sb_lowat; 2451 goto integer; 2452 2453 case SO_SNDTIMEO: 2454 case SO_RCVTIMEO: 2455 optval = (sopt->sopt_name == SO_SNDTIMEO ? 
2456 so->so_snd.sb_timeo : so->so_rcv.sb_timeo); 2457 2458 tv.tv_sec = optval / hz; 2459 tv.tv_usec = (optval % hz) * tick; 2460 #ifdef COMPAT_IA32 2461 if (curthread->td_proc->p_sysent == &ia32_freebsd_sysvec) { 2462 struct timeval32 tv32; 2463 2464 CP(tv, tv32, tv_sec); 2465 CP(tv, tv32, tv_usec); 2466 error = sooptcopyout(sopt, &tv32, sizeof tv32); 2467 } else 2468 #endif 2469 error = sooptcopyout(sopt, &tv, sizeof tv); 2470 break; 2471 2472 case SO_LABEL: 2473 #ifdef MAC 2474 error = sooptcopyin(sopt, &extmac, sizeof(extmac), 2475 sizeof(extmac)); 2476 if (error) 2477 return (error); 2478 error = mac_getsockopt_label(sopt->sopt_td->td_ucred, 2479 so, &extmac); 2480 if (error) 2481 return (error); 2482 error = sooptcopyout(sopt, &extmac, sizeof extmac); 2483 #else 2484 error = EOPNOTSUPP; 2485 #endif 2486 break; 2487 2488 case SO_PEERLABEL: 2489 #ifdef MAC 2490 error = sooptcopyin(sopt, &extmac, sizeof(extmac), 2491 sizeof(extmac)); 2492 if (error) 2493 return (error); 2494 error = mac_getsockopt_peerlabel( 2495 sopt->sopt_td->td_ucred, so, &extmac); 2496 if (error) 2497 return (error); 2498 error = sooptcopyout(sopt, &extmac, sizeof extmac); 2499 #else 2500 error = EOPNOTSUPP; 2501 #endif 2502 break; 2503 2504 case SO_LISTENQLIMIT: 2505 optval = so->so_qlimit; 2506 goto integer; 2507 2508 case SO_LISTENQLEN: 2509 optval = so->so_qlen; 2510 goto integer; 2511 2512 case SO_LISTENINCQLEN: 2513 optval = so->so_incqlen; 2514 goto integer; 2515 2516 default: 2517 error = ENOPROTOOPT; 2518 break; 2519 } 2520 return (error); 2521 } 2522 } 2523 2524 /* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */ 2525 int 2526 soopt_getm(struct sockopt *sopt, struct mbuf **mp) 2527 { 2528 struct mbuf *m, *m_prev; 2529 int sopt_size = sopt->sopt_valsize; 2530 2531 MGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT, MT_DATA); 2532 if (m == NULL) 2533 return ENOBUFS; 2534 if (sopt_size > MLEN) { 2535 MCLGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT); 2536 if ((m->m_flags & M_EXT) == 0) { 2537 m_free(m); 2538 return ENOBUFS; 2539 } 2540 m->m_len = min(MCLBYTES, sopt_size); 2541 } else { 2542 m->m_len = min(MLEN, sopt_size); 2543 } 2544 sopt_size -= m->m_len; 2545 *mp = m; 2546 m_prev = m; 2547 2548 while (sopt_size) { 2549 MGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT, MT_DATA); 2550 if (m == NULL) { 2551 m_freem(*mp); 2552 return ENOBUFS; 2553 } 2554 if (sopt_size > MLEN) { 2555 MCLGET(m, sopt->sopt_td != NULL ? M_WAIT : 2556 M_DONTWAIT); 2557 if ((m->m_flags & M_EXT) == 0) { 2558 m_freem(m); 2559 m_freem(*mp); 2560 return ENOBUFS; 2561 } 2562 m->m_len = min(MCLBYTES, sopt_size); 2563 } else { 2564 m->m_len = min(MLEN, sopt_size); 2565 } 2566 sopt_size -= m->m_len; 2567 m_prev->m_next = m; 2568 m_prev = m; 2569 } 2570 return (0); 2571 } 2572 2573 /* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. 
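 *
 * soopt_getm(), soopt_mcopyin() and soopt_mcopyout() below are
 * normally used as a set around a legacy routine that wants the option
 * value as an mbuf chain.  A hedged sketch (foo_legacy_ctloutput is
 * hypothetical; error handling and chain ownership simplified):
 *
 *	struct mbuf *m = NULL;
 *
 *	error = soopt_getm(sopt, &m);		// chain sized to sopt_valsize
 *	if (error == 0)
 *		error = soopt_mcopyin(sopt, m);	// fill it from sopt_val
 *	if (error == 0)
 *		error = foo_legacy_ctloutput(so, sopt, &m);
 *	if (error == 0)
 *		error = soopt_mcopyout(sopt, m);	// copy any result back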
*/ 2574 int 2575 soopt_mcopyin(struct sockopt *sopt, struct mbuf *m) 2576 { 2577 struct mbuf *m0 = m; 2578 2579 if (sopt->sopt_val == NULL) 2580 return (0); 2581 while (m != NULL && sopt->sopt_valsize >= m->m_len) { 2582 if (sopt->sopt_td != NULL) { 2583 int error; 2584 2585 error = copyin(sopt->sopt_val, mtod(m, char *), 2586 m->m_len); 2587 if (error != 0) { 2588 m_freem(m0); 2589 return(error); 2590 } 2591 } else 2592 bcopy(sopt->sopt_val, mtod(m, char *), m->m_len); 2593 sopt->sopt_valsize -= m->m_len; 2594 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len; 2595 m = m->m_next; 2596 } 2597 if (m != NULL) /* soopt_getm() should have allocated a large enough chain */ 2598 panic("soopt_mcopyin"); 2599 return (0); 2600 } 2601 2602 /* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */ 2603 int 2604 soopt_mcopyout(struct sockopt *sopt, struct mbuf *m) 2605 { 2606 struct mbuf *m0 = m; 2607 size_t valsize = 0; 2608 2609 if (sopt->sopt_val == NULL) 2610 return (0); 2611 while (m != NULL && sopt->sopt_valsize >= m->m_len) { 2612 if (sopt->sopt_td != NULL) { 2613 int error; 2614 2615 error = copyout(mtod(m, char *), sopt->sopt_val, 2616 m->m_len); 2617 if (error != 0) { 2618 m_freem(m0); 2619 return(error); 2620 } 2621 } else 2622 bcopy(mtod(m, char *), sopt->sopt_val, m->m_len); 2623 sopt->sopt_valsize -= m->m_len; 2624 sopt->sopt_val = (char *)sopt->sopt_val + m->m_len; 2625 valsize += m->m_len; 2626 m = m->m_next; 2627 } 2628 if (m != NULL) { 2629 /* the user-supplied option buffer should have been large enough */ 2630 m_freem(m0); 2631 return(EINVAL); 2632 } 2633 sopt->sopt_valsize = valsize; 2634 return (0); 2635 } 2636 2637 /* 2638 * sohasoutofband(): protocol notifies socket layer of the arrival of new 2639 * out-of-band data, which will then notify socket consumers.
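 *
 * The notifications consumers actually see are SIGURG, for a process
 * registered with fcntl(F_SETOWN), and exceptional-condition wakeups
 * from select(2)/poll(2).  Illustrative userland sketch (not kernel
 * code; urg_handler, s and buf are assumed to exist):
 *
 *	signal(SIGURG, urg_handler);
 *	fcntl(s, F_SETOWN, getpid());	// direct SIGURG at this process
 *
 *	struct pollfd pfd = { .fd = s, .events = POLLPRI };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLPRI))
 *		(void)recv(s, buf, sizeof(buf), MSG_OOB);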
2640 */ 2641 void 2642 sohasoutofband(struct socket *so) 2643 { 2644 2645 if (so->so_sigio != NULL) 2646 pgsigio(&so->so_sigio, SIGURG, 0); 2647 selwakeuppri(&so->so_rcv.sb_sel, PSOCK); 2648 } 2649 2650 int 2651 sopoll(struct socket *so, int events, struct ucred *active_cred, 2652 struct thread *td) 2653 { 2654 2655 return (so->so_proto->pr_usrreqs->pru_sopoll(so, events, active_cred, 2656 td)); 2657 } 2658 2659 int 2660 sopoll_generic(struct socket *so, int events, struct ucred *active_cred, 2661 struct thread *td) 2662 { 2663 int revents = 0; 2664 2665 SOCKBUF_LOCK(&so->so_snd); 2666 SOCKBUF_LOCK(&so->so_rcv); 2667 if (events & (POLLIN | POLLRDNORM)) 2668 if (soreadable(so)) 2669 revents |= events & (POLLIN | POLLRDNORM); 2670 2671 if (events & POLLINIGNEOF) 2672 if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat || 2673 !TAILQ_EMPTY(&so->so_comp) || so->so_error) 2674 revents |= POLLINIGNEOF; 2675 2676 if (events & (POLLOUT | POLLWRNORM)) 2677 if (sowriteable(so)) 2678 revents |= events & (POLLOUT | POLLWRNORM); 2679 2680 if (events & (POLLPRI | POLLRDBAND)) 2681 if (so->so_oobmark || (so->so_rcv.sb_state & SBS_RCVATMARK)) 2682 revents |= events & (POLLPRI | POLLRDBAND); 2683 2684 if (revents == 0) { 2685 if (events & 2686 (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM | 2687 POLLRDBAND)) { 2688 selrecord(td, &so->so_rcv.sb_sel); 2689 so->so_rcv.sb_flags |= SB_SEL; 2690 } 2691 2692 if (events & (POLLOUT | POLLWRNORM)) { 2693 selrecord(td, &so->so_snd.sb_sel); 2694 so->so_snd.sb_flags |= SB_SEL; 2695 } 2696 } 2697 2698 SOCKBUF_UNLOCK(&so->so_rcv); 2699 SOCKBUF_UNLOCK(&so->so_snd); 2700 return (revents); 2701 } 2702 2703 int 2704 soo_kqfilter(struct file *fp, struct knote *kn) 2705 { 2706 struct socket *so = kn->kn_fp->f_data; 2707 struct sockbuf *sb; 2708 2709 switch (kn->kn_filter) { 2710 case EVFILT_READ: 2711 if (so->so_options & SO_ACCEPTCONN) 2712 kn->kn_fop = &solisten_filtops; 2713 else 2714 kn->kn_fop = &soread_filtops; 2715 sb = &so->so_rcv; 2716 break; 2717 case EVFILT_WRITE: 2718 kn->kn_fop = &sowrite_filtops; 2719 sb = &so->so_snd; 2720 break; 2721 default: 2722 return (EINVAL); 2723 } 2724 2725 SOCKBUF_LOCK(sb); 2726 knlist_add(&sb->sb_sel.si_note, kn, 1); 2727 sb->sb_flags |= SB_KNOTE; 2728 SOCKBUF_UNLOCK(sb); 2729 return (0); 2730 } 2731 2732 /* 2733 * Some routines that return EOPNOTSUPP for entry points that are not 2734 * supported by a protocol. Fill in as needed. 
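 *
 * A protocol points the pr_usrreqs entries it does not implement at
 * these stubs rather than leaving them NULL; a hedged sketch
 * (foo_usrreqs and the foo_* handlers are hypothetical):
 *
 *	struct pr_usrreqs foo_usrreqs = {
 *		.pru_attach =		foo_attach,
 *		.pru_detach =		foo_detach,
 *		.pru_send =		foo_send,
 *		.pru_soreceive =	soreceive_dgram,
 *		.pru_listen =		pru_listen_notsupp,
 *		.pru_accept =		pru_accept_notsupp,
 *		.pru_rcvoob =		pru_rcvoob_notsupp,
 *	};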
2735 */ 2736 int 2737 pru_accept_notsupp(struct socket *so, struct sockaddr **nam) 2738 { 2739 2740 return EOPNOTSUPP; 2741 } 2742 2743 int 2744 pru_attach_notsupp(struct socket *so, int proto, struct thread *td) 2745 { 2746 2747 return EOPNOTSUPP; 2748 } 2749 2750 int 2751 pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td) 2752 { 2753 2754 return EOPNOTSUPP; 2755 } 2756 2757 int 2758 pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td) 2759 { 2760 2761 return EOPNOTSUPP; 2762 } 2763 2764 int 2765 pru_connect2_notsupp(struct socket *so1, struct socket *so2) 2766 { 2767 2768 return EOPNOTSUPP; 2769 } 2770 2771 int 2772 pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data, 2773 struct ifnet *ifp, struct thread *td) 2774 { 2775 2776 return EOPNOTSUPP; 2777 } 2778 2779 int 2780 pru_disconnect_notsupp(struct socket *so) 2781 { 2782 2783 return EOPNOTSUPP; 2784 } 2785 2786 int 2787 pru_listen_notsupp(struct socket *so, int backlog, struct thread *td) 2788 { 2789 2790 return EOPNOTSUPP; 2791 } 2792 2793 int 2794 pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam) 2795 { 2796 2797 return EOPNOTSUPP; 2798 } 2799 2800 int 2801 pru_rcvd_notsupp(struct socket *so, int flags) 2802 { 2803 2804 return EOPNOTSUPP; 2805 } 2806 2807 int 2808 pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags) 2809 { 2810 2811 return EOPNOTSUPP; 2812 } 2813 2814 int 2815 pru_send_notsupp(struct socket *so, int flags, struct mbuf *m, 2816 struct sockaddr *addr, struct mbuf *control, struct thread *td) 2817 { 2818 2819 return EOPNOTSUPP; 2820 } 2821 2822 /* 2823 * This isn't really a ``null'' operation, but it's the default one and 2824 * doesn't do anything destructive. 2825 */ 2826 int 2827 pru_sense_null(struct socket *so, struct stat *sb) 2828 { 2829 2830 sb->st_blksize = so->so_snd.sb_hiwat; 2831 return 0; 2832 } 2833 2834 int 2835 pru_shutdown_notsupp(struct socket *so) 2836 { 2837 2838 return EOPNOTSUPP; 2839 } 2840 2841 int 2842 pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam) 2843 { 2844 2845 return EOPNOTSUPP; 2846 } 2847 2848 int 2849 pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio, 2850 struct mbuf *top, struct mbuf *control, int flags, struct thread *td) 2851 { 2852 2853 return EOPNOTSUPP; 2854 } 2855 2856 int 2857 pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr, 2858 struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp) 2859 { 2860 2861 return EOPNOTSUPP; 2862 } 2863 2864 int 2865 pru_sopoll_notsupp(struct socket *so, int events, struct ucred *cred, 2866 struct thread *td) 2867 { 2868 2869 return EOPNOTSUPP; 2870 } 2871 2872 static void 2873 filt_sordetach(struct knote *kn) 2874 { 2875 struct socket *so = kn->kn_fp->f_data; 2876 2877 SOCKBUF_LOCK(&so->so_rcv); 2878 knlist_remove(&so->so_rcv.sb_sel.si_note, kn, 1); 2879 if (knlist_empty(&so->so_rcv.sb_sel.si_note)) 2880 so->so_rcv.sb_flags &= ~SB_KNOTE; 2881 SOCKBUF_UNLOCK(&so->so_rcv); 2882 } 2883 2884 /*ARGSUSED*/ 2885 static int 2886 filt_soread(struct knote *kn, long hint) 2887 { 2888 struct socket *so; 2889 2890 so = kn->kn_fp->f_data; 2891 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 2892 2893 kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl; 2894 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 2895 kn->kn_flags |= EV_EOF; 2896 kn->kn_fflags = so->so_error; 2897 return (1); 2898 } else if (so->so_error) /* temporary udp error */ 2899 return (1); 2900 else if (kn->kn_sfflags & NOTE_LOWAT) 2901 return 
(kn->kn_data >= kn->kn_sdata); 2902 else 2903 return (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat); 2904 } 2905 2906 static void 2907 filt_sowdetach(struct knote *kn) 2908 { 2909 struct socket *so = kn->kn_fp->f_data; 2910 2911 SOCKBUF_LOCK(&so->so_snd); 2912 knlist_remove(&so->so_snd.sb_sel.si_note, kn, 1); 2913 if (knlist_empty(&so->so_snd.sb_sel.si_note)) 2914 so->so_snd.sb_flags &= ~SB_KNOTE; 2915 SOCKBUF_UNLOCK(&so->so_snd); 2916 } 2917 2918 /*ARGSUSED*/ 2919 static int 2920 filt_sowrite(struct knote *kn, long hint) 2921 { 2922 struct socket *so; 2923 2924 so = kn->kn_fp->f_data; 2925 SOCKBUF_LOCK_ASSERT(&so->so_snd); 2926 kn->kn_data = sbspace(&so->so_snd); 2927 if (so->so_snd.sb_state & SBS_CANTSENDMORE) { 2928 kn->kn_flags |= EV_EOF; 2929 kn->kn_fflags = so->so_error; 2930 return (1); 2931 } else if (so->so_error) /* temporary udp error */ 2932 return (1); 2933 else if (((so->so_state & SS_ISCONNECTED) == 0) && 2934 (so->so_proto->pr_flags & PR_CONNREQUIRED)) 2935 return (0); 2936 else if (kn->kn_sfflags & NOTE_LOWAT) 2937 return (kn->kn_data >= kn->kn_sdata); 2938 else 2939 return (kn->kn_data >= so->so_snd.sb_lowat); 2940 } 2941 2942 /*ARGSUSED*/ 2943 static int 2944 filt_solisten(struct knote *kn, long hint) 2945 { 2946 struct socket *so = kn->kn_fp->f_data; 2947 2948 kn->kn_data = so->so_qlen; 2949 return (! TAILQ_EMPTY(&so->so_comp)); 2950 } 2951 2952 int 2953 socheckuid(struct socket *so, uid_t uid) 2954 { 2955 2956 if (so == NULL) 2957 return (EPERM); 2958 if (so->so_cred->cr_uid != uid) 2959 return (EPERM); 2960 return (0); 2961 } 2962 2963 static int 2964 sysctl_somaxconn(SYSCTL_HANDLER_ARGS) 2965 { 2966 int error; 2967 int val; 2968 2969 val = somaxconn; 2970 error = sysctl_handle_int(oidp, &val, 0, req); 2971 if (error || !req->newptr ) 2972 return (error); 2973 2974 if (val < 1 || val > USHRT_MAX) 2975 return (EINVAL); 2976 2977 somaxconn = val; 2978 return (0); 2979 } 2980 2981 /* 2982 * These functions are used by protocols to notify the socket layer (and its 2983 * consumers) of state changes in the sockets driven by protocol-side events. 2984 */ 2985 2986 /* 2987 * Procedures to manipulate state flags of socket and do appropriate wakeups. 2988 * 2989 * Normal sequence from the active (originating) side is that 2990 * soisconnecting() is called during processing of connect() call, resulting 2991 * in an eventual call to soisconnected() if/when the connection is 2992 * established. When the connection is torn down soisdisconnecting() is 2993 * called during processing of disconnect() call, and soisdisconnected() is 2994 * called when the connection to the peer is totally severed. The semantics 2995 * of these routines are such that connectionless protocols can call 2996 * soisconnected() and soisdisconnected() only, bypassing the in-progress 2997 * calls when setting up a ``connection'' takes no time. 2998 * 2999 * From the passive side, a socket is created with two queues of sockets: 3000 * so_incomp for connections in progress and so_comp for connections already 3001 * made and awaiting user acceptance. As a protocol is preparing incoming 3002 * connections, it creates a socket structure queued on so_incomp by calling 3003 * sonewconn(). When the connection is established, soisconnected() is 3004 * called, and transfers the socket structure to so_comp, making it available 3005 * to accept(). 3006 * 3007 * If a socket is closed with sockets on either so_incomp or so_comp, these 3008 * sockets are dropped. 
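 *
 * Putting this together, a connection-oriented protocol typically
 * makes the calls in roughly this order (a hedged sketch of the usual
 * sequence, not any one protocol's actual code):
 *
 *	active open:	soisconnecting(so);		during connect()
 *			soisconnected(so);		handshake completes
 *	passive open:	so = sonewconn(head, 0);	new request arrives
 *			soisconnected(so);		handshake completes
 *	teardown:	soisdisconnecting(so);		disconnect begins
 *			soisdisconnected(so);		peer fully gone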
3009 * 3010 * If higher-level protocols are implemented in the kernel, the wakeups done 3011 * here will sometimes cause software-interrupt process scheduling. 3012 */ 3013 void 3014 soisconnecting(struct socket *so) 3015 { 3016 3017 SOCK_LOCK(so); 3018 so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING); 3019 so->so_state |= SS_ISCONNECTING; 3020 SOCK_UNLOCK(so); 3021 } 3022 3023 void 3024 soisconnected(struct socket *so) 3025 { 3026 struct socket *head; 3027 3028 ACCEPT_LOCK(); 3029 SOCK_LOCK(so); 3030 so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING); 3031 so->so_state |= SS_ISCONNECTED; 3032 head = so->so_head; 3033 if (head != NULL && (so->so_qstate & SQ_INCOMP)) { 3034 if ((so->so_options & SO_ACCEPTFILTER) == 0) { 3035 SOCK_UNLOCK(so); 3036 TAILQ_REMOVE(&head->so_incomp, so, so_list); 3037 head->so_incqlen--; 3038 so->so_qstate &= ~SQ_INCOMP; 3039 TAILQ_INSERT_TAIL(&head->so_comp, so, so_list); 3040 head->so_qlen++; 3041 so->so_qstate |= SQ_COMP; 3042 ACCEPT_UNLOCK(); 3043 sorwakeup(head); 3044 wakeup_one(&head->so_timeo); 3045 } else { 3046 ACCEPT_UNLOCK(); 3047 so->so_upcall = 3048 head->so_accf->so_accept_filter->accf_callback; 3049 so->so_upcallarg = head->so_accf->so_accept_filter_arg; 3050 so->so_rcv.sb_flags |= SB_UPCALL; 3051 so->so_options &= ~SO_ACCEPTFILTER; 3052 SOCK_UNLOCK(so); 3053 so->so_upcall(so, so->so_upcallarg, M_DONTWAIT); 3054 } 3055 return; 3056 } 3057 SOCK_UNLOCK(so); 3058 ACCEPT_UNLOCK(); 3059 wakeup(&so->so_timeo); 3060 sorwakeup(so); 3061 sowwakeup(so); 3062 } 3063 3064 void 3065 soisdisconnecting(struct socket *so) 3066 { 3067 3068 /* 3069 * Note: This code assumes that SOCK_LOCK(so) and 3070 * SOCKBUF_LOCK(&so->so_rcv) are the same. 3071 */ 3072 SOCKBUF_LOCK(&so->so_rcv); 3073 so->so_state &= ~SS_ISCONNECTING; 3074 so->so_state |= SS_ISDISCONNECTING; 3075 so->so_rcv.sb_state |= SBS_CANTRCVMORE; 3076 sorwakeup_locked(so); 3077 SOCKBUF_LOCK(&so->so_snd); 3078 so->so_snd.sb_state |= SBS_CANTSENDMORE; 3079 sowwakeup_locked(so); 3080 wakeup(&so->so_timeo); 3081 } 3082 3083 void 3084 soisdisconnected(struct socket *so) 3085 { 3086 3087 /* 3088 * Note: This code assumes that SOCK_LOCK(so) and 3089 * SOCKBUF_LOCK(&so->so_rcv) are the same. 3090 */ 3091 SOCKBUF_LOCK(&so->so_rcv); 3092 so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING); 3093 so->so_state |= SS_ISDISCONNECTED; 3094 so->so_rcv.sb_state |= SBS_CANTRCVMORE; 3095 sorwakeup_locked(so); 3096 SOCKBUF_LOCK(&so->so_snd); 3097 so->so_snd.sb_state |= SBS_CANTSENDMORE; 3098 sbdrop_locked(&so->so_snd, so->so_snd.sb_cc); 3099 sowwakeup_locked(so); 3100 wakeup(&so->so_timeo); 3101 } 3102 3103 /* 3104 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME. 3105 */ 3106 struct sockaddr * 3107 sodupsockaddr(const struct sockaddr *sa, int mflags) 3108 { 3109 struct sockaddr *sa2; 3110 3111 sa2 = malloc(sa->sa_len, M_SONAME, mflags); 3112 if (sa2) 3113 bcopy(sa, sa2, sa->sa_len); 3114 return sa2; 3115 } 3116 3117 /* 3118 * Create an external-format (``xsocket'') structure using the information in 3119 * the kernel-format socket structure pointed to by so. This is done to 3120 * reduce the spew of irrelevant information over this interface, to isolate 3121 * user code from changes in the kernel structure, and potentially to provide 3122 * information-hiding if we decide that some of this information should be 3123 * hidden from users. 
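 *
 * A typical consumer is a monitoring sysctl that walks a pcb list and
 * exports one xsocket per socket; a hedged sketch of the per-socket
 * step (locking and the surrounding loop omitted):
 *
 *	struct xsocket xso;
 *
 *	sotoxsocket(so, &xso);
 *	error = SYSCTL_OUT(req, &xso, sizeof(xso));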
3124 */ 3125 void 3126 sotoxsocket(struct socket *so, struct xsocket *xso) 3127 { 3128 3129 xso->xso_len = sizeof *xso; 3130 xso->xso_so = so; 3131 xso->so_type = so->so_type; 3132 xso->so_options = so->so_options; 3133 xso->so_linger = so->so_linger; 3134 xso->so_state = so->so_state; 3135 xso->so_pcb = so->so_pcb; 3136 xso->xso_protocol = so->so_proto->pr_protocol; 3137 xso->xso_family = so->so_proto->pr_domain->dom_family; 3138 xso->so_qlen = so->so_qlen; 3139 xso->so_incqlen = so->so_incqlen; 3140 xso->so_qlimit = so->so_qlimit; 3141 xso->so_timeo = so->so_timeo; 3142 xso->so_error = so->so_error; 3143 xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0; 3144 xso->so_oobmark = so->so_oobmark; 3145 sbtoxsockbuf(&so->so_snd, &xso->so_snd); 3146 sbtoxsockbuf(&so->so_rcv, &xso->so_rcv); 3147 xso->so_uid = so->so_cred->cr_uid; 3148 } 3149 3150 3151 /* 3152 * Socket accessor functions to provide external consumers with 3153 * a safe interface to socket state 3154 * 3155 */ 3156 3157 void 3158 so_listeners_apply_all(struct socket *so, void (*func)(struct socket *, void *), void *arg) 3159 { 3160 3161 TAILQ_FOREACH(so, &so->so_comp, so_list) 3162 func(so, arg); 3163 } 3164 3165 struct sockbuf * 3166 so_sockbuf_rcv(struct socket *so) 3167 { 3168 3169 return (&so->so_rcv); 3170 } 3171 3172 struct sockbuf * 3173 so_sockbuf_snd(struct socket *so) 3174 { 3175 3176 return (&so->so_snd); 3177 } 3178 3179 int 3180 so_state_get(const struct socket *so) 3181 { 3182 3183 return (so->so_state); 3184 } 3185 3186 void 3187 so_state_set(struct socket *so, int val) 3188 { 3189 3190 so->so_state = val; 3191 } 3192 3193 int 3194 so_options_get(const struct socket *so) 3195 { 3196 3197 return (so->so_options); 3198 } 3199 3200 void 3201 so_options_set(struct socket *so, int val) 3202 { 3203 3204 so->so_options = val; 3205 } 3206 3207 int 3208 so_error_get(const struct socket *so) 3209 { 3210 3211 return (so->so_error); 3212 } 3213 3214 void 3215 so_error_set(struct socket *so, int val) 3216 { 3217 3218 so->so_error = val; 3219 } 3220 3221 int 3222 so_linger_get(const struct socket *so) 3223 { 3224 3225 return (so->so_linger); 3226 } 3227 3228 void 3229 so_linger_set(struct socket *so, int val) 3230 { 3231 3232 so->so_linger = val; 3233 } 3234 3235 struct protosw * 3236 so_protosw_get(const struct socket *so) 3237 { 3238 3239 return (so->so_proto); 3240 } 3241 3242 void 3243 so_protosw_set(struct socket *so, struct protosw *val) 3244 { 3245 3246 so->so_proto = val; 3247 } 3248 3249 void 3250 so_sorwakeup(struct socket *so) 3251 { 3252 3253 sorwakeup(so); 3254 } 3255 3256 void 3257 so_sowwakeup(struct socket *so) 3258 { 3259 3260 sowwakeup(so); 3261 } 3262 3263 void 3264 so_sorwakeup_locked(struct socket *so) 3265 { 3266 3267 sorwakeup_locked(so); 3268 } 3269 3270 void 3271 so_sowwakeup_locked(struct socket *so) 3272 { 3273 3274 sowwakeup_locked(so); 3275 } 3276 3277 void 3278 so_lock(struct socket *so) 3279 { 3280 SOCK_LOCK(so); 3281 } 3282 3283 void 3284 so_unlock(struct socket *so) 3285 { 3286 SOCK_UNLOCK(so); 3287 } 3288
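/*
 * Example use of the accessors above by an external consumer such as a
 * TCP-offload style module (an illustrative sketch only; foo_rx_complete
 * and its caller are hypothetical):
 *
 *	static void
 *	foo_rx_complete(struct socket *so, struct mbuf *m)
 *	{
 *		struct sockbuf *sb = so_sockbuf_rcv(so);
 *
 *		SOCKBUF_LOCK(sb);
 *		sbappendstream_locked(sb, m);
 *		so_sorwakeup_locked(so);	// wakeup drops the sockbuf lock
 *	}
 *
 * which keeps the consumer insulated from the layout of struct socket.
 */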