/*-
 * Copyright (c) 2004 The FreeBSD Foundation
 * Copyright (c) 2004-2005 Robert N. M. Watson
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_mac.h"
#include "opt_zero.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>

#include <vm/uma.h>


static int	soreceive_rcvoob(struct socket *so, struct uio *uio,
		    int flags);

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static struct filterops solisten_filtops =
	{ 1, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ 1, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ 1, NULL, filt_sowdetach, filt_sowrite };

uma_zone_t socket_zone;
so_gen_t	so_gencnt;	/* generation count for sockets */

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

SYSCTL_DECL(_kern_ipc);

static int somaxconn = SOMAXCONN;
static int somaxconn_sysctl(SYSCTL_HANDLER_ARGS);
/* XXX: we don't have SYSCTL_USHORT */
SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLTYPE_UINT | CTLFLAG_RW,
    0, sizeof(int), somaxconn_sysctl, "I", "Maximum pending socket connection "
    "queue size");
static int numopensockets;
SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
    &numopensockets, 0, "Number of open sockets");
#ifdef ZERO_COPY_SOCKETS
/* These aren't static because they're used in other files. */
int so_zero_copy_send = 1;
int so_zero_copy_receive = 1;
SYSCTL_NODE(_kern_ipc, OID_AUTO, zero_copy, CTLFLAG_RD, 0,
    "Zero copy controls");
SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, receive, CTLFLAG_RW,
    &so_zero_copy_receive, 0, "Enable zero copy receive");
SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, send, CTLFLAG_RW,
    &so_zero_copy_send, 0, "Enable zero copy send");
#endif /* ZERO_COPY_SOCKETS */

/*
 * accept_mtx locks down per-socket fields relating to accept queues.  See
 * socketvar.h for an annotation of the protected fields of struct socket.
 */
struct mtx accept_mtx;
MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);

/*
 * so_global_mtx protects so_gencnt, numopensockets, and the per-socket
 * so_gencnt field.
 */
static struct mtx so_global_mtx;
MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_glabel", MTX_DEF);

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */

/*
 * Get a socket structure from our zone, and initialize it.
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 *
 * soalloc() returns a socket with a ref count of 0.
 */
struct socket *
soalloc(int mflags)
{
	struct socket *so;

	so = uma_zalloc(socket_zone, mflags | M_ZERO);
	if (so != NULL) {
#ifdef MAC
		if (mac_init_socket(so, mflags) != 0) {
			uma_zfree(socket_zone, so);
			return (NULL);
		}
#endif
		SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
		SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
		/* sx_init(&so->so_sxlock, "socket sxlock"); */
		TAILQ_INIT(&so->so_aiojobq);
		mtx_lock(&so_global_mtx);
		so->so_gencnt = ++so_gencnt;
		++numopensockets;
		mtx_unlock(&so_global_mtx);
	}
	return (so);
}

/*
 * socreate returns a socket with a ref count of 1.  The socket should be
 * closed with soclose().
 */
int
socreate(dom, aso, type, proto, cred, td)
	int dom;
	struct socket **aso;
	int type;
	int proto;
	struct ucred *cred;
	struct thread *td;
{
	struct protosw *prp;
	struct socket *so;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL || prp->pr_usrreqs->pru_attach == NULL ||
	    prp->pr_usrreqs->pru_attach == pru_attach_notsupp)
		return (EPROTONOSUPPORT);

	if (jailed(cred) && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(M_WAITOK);
	if (so == NULL)
		return (ENOBUFS);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(cred);
	so->so_proto = prp;
#ifdef MAC
	mac_create_socket(cred, so);
#endif
	SOCK_LOCK(so);
	knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv));
	knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd));
	soref(so);
	SOCK_UNLOCK(so);
	error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
	if (error) {
		ACCEPT_LOCK();
		SOCK_LOCK(so);
		so->so_state |= SS_NOFDREF;
		sorele(so);
		return (error);
	}
	*aso = so;
	return (0);
}
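
/*
 * Example (an illustrative sketch only): a typical in-kernel consumer of
 * socreate() might create a UDP socket and later release the reference
 * that socreate() took via soclose(), assuming a valid thread pointer
 * 'td':
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_DGRAM, IPPROTO_UDP,
 *	    td->td_ucred, td);
 *	if (error)
 *		return (error);
 *	...
 *	soclose(so);
 */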

int
sobind(so, nam, td)
	struct socket *so;
	struct sockaddr *nam;
	struct thread *td;
{

	return ((*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td));
}

void
sodealloc(struct socket *so)
{

	KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
	mtx_lock(&so_global_mtx);
	so->so_gencnt = ++so_gencnt;
	mtx_unlock(&so_global_mtx);
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
	/* remove accept filter if one is present. */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
#endif
#ifdef MAC
	mac_destroy_socket(so);
#endif
	crfree(so->so_cred);
	SOCKBUF_LOCK_DESTROY(&so->so_snd);
	SOCKBUF_LOCK_DESTROY(&so->so_rcv);
	/* sx_destroy(&so->so_sxlock); */
	uma_zfree(socket_zone, so);
	mtx_lock(&so_global_mtx);
	--numopensockets;
	mtx_unlock(&so_global_mtx);
}

/*
 * solisten() transitions a socket from a non-listening state to a listening
 * state, but can also be used to update the listen queue depth on an
 * existing listen socket.  The protocol will call back into the sockets
 * layer using solisten_proto_check() and solisten_proto() to check and set
 * socket-layer listen state.  Callbacks are used so that the protocol can
 * acquire both protocol and socket layer locks in whatever order is required
 * by the protocol.
 *
 * Protocol implementors are advised to hold the socket lock across the
 * socket-layer test and set to avoid races at the socket layer.
 */
int
solisten(so, backlog, td)
	struct socket *so;
	int backlog;
	struct thread *td;
{
	int error;

	error = (*so->so_proto->pr_usrreqs->pru_listen)(so, td);
	if (error)
		return (error);

	/*
	 * XXXRW: The following state adjustment should occur in
	 * solisten_proto(), but we don't currently pass the backlog request
	 * to the protocol via pru_listen().
	 */
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	return (0);
}

int
solisten_proto_check(so)
	struct socket *so;
{

	SOCK_LOCK_ASSERT(so);

	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
	    SS_ISDISCONNECTING))
		return (EINVAL);
	return (0);
}

void
solisten_proto(so)
	struct socket *so;
{

	SOCK_LOCK_ASSERT(so);

	so->so_options |= SO_ACCEPTCONN;
}
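
/*
 * Example (a sketch of the callback pattern described above; hypothetical
 * protocol code): a protocol's pru_listen implementation might perform
 * the socket-layer test and set under the socket lock, having already
 * taken whatever protocol-layer lock it needs, roughly as follows:
 *
 *	SOCK_LOCK(so);
 *	error = solisten_proto_check(so);
 *	if (error == 0)
 *		solisten_proto(so);
 *	SOCK_UNLOCK(so);
 *	return (error);
 */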

/*
 * Attempt to free a socket.  This should really be sotryfree().
 *
 * We free the socket if the protocol is no longer interested in the socket,
 * there's no file descriptor reference, and the refcount is 0.  While the
 * calling macro sotryfree() tests the refcount, sofree() has to test it
 * again as it's possible to race with an accept()ing thread if the socket is
 * in a listen queue of a listen socket, as being in the listen queue
 * doesn't elevate the reference count.  sofree() acquires the accept mutex
 * early for this test in order to avoid that race.
 */
void
sofree(so)
	struct socket *so;
{
	struct socket *head;

	ACCEPT_LOCK_ASSERT();
	SOCK_LOCK_ASSERT(so);

	if (so->so_pcb != NULL || (so->so_state & SS_NOFDREF) == 0 ||
	    so->so_count != 0) {
		SOCK_UNLOCK(so);
		ACCEPT_UNLOCK();
		return;
	}

	head = so->so_head;
	if (head != NULL) {
		KASSERT((so->so_qstate & SQ_COMP) != 0 ||
		    (so->so_qstate & SQ_INCOMP) != 0,
		    ("sofree: so_head != NULL, but neither SQ_COMP nor "
		    "SQ_INCOMP"));
		KASSERT((so->so_qstate & SQ_COMP) == 0 ||
		    (so->so_qstate & SQ_INCOMP) == 0,
		    ("sofree: so->so_qstate is SQ_COMP and also SQ_INCOMP"));
		/*
		 * accept(2) is responsible for draining the completed
		 * connection queue and freeing those sockets, so
		 * we just return here if this socket is currently
		 * on the completed connection queue.  Otherwise,
		 * accept(2) may hang after select(2) has indicated
		 * that a listening socket was ready.  If it's an
		 * incomplete connection, we remove it from the queue
		 * and free it; otherwise, it won't be released until
		 * the listening socket is closed.
		 */
		if ((so->so_qstate & SQ_COMP) != 0) {
			SOCK_UNLOCK(so);
			ACCEPT_UNLOCK();
			return;
		}
		TAILQ_REMOVE(&head->so_incomp, so, so_list);
		head->so_incqlen--;
		so->so_qstate &= ~SQ_INCOMP;
		so->so_head = NULL;
	}
	KASSERT((so->so_qstate & SQ_COMP) == 0 &&
	    (so->so_qstate & SQ_INCOMP) == 0,
	    ("sofree: so_head == NULL, but still SQ_COMP(%d) or SQ_INCOMP(%d)",
	    so->so_qstate & SQ_COMP, so->so_qstate & SQ_INCOMP));
	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();
	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_flags |= SB_NOINTR;
	(void)sblock(&so->so_snd, M_WAITOK);
	/*
	 * socantsendmore_locked() drops the socket buffer mutex so that it
	 * can safely perform wakeups.  Re-acquire the mutex before
	 * continuing.
	 */
	socantsendmore_locked(so);
	SOCKBUF_LOCK(&so->so_snd);
	sbunlock(&so->so_snd);
	sbrelease_locked(&so->so_snd, so);
	SOCKBUF_UNLOCK(&so->so_snd);
	sorflush(so);
	knlist_destroy(&so->so_rcv.sb_sel.si_note);
	knlist_destroy(&so->so_snd.sb_sel.si_note);
	sodealloc(so);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 *
 * This function will sorele() the socket.  Note that soclose() may be
 * called prior to the ref count reaching zero.  The actual socket
 * structure will not be freed until the ref count reaches zero.
 */
int
soclose(so)
	struct socket *so;
{
	int error = 0;

	KASSERT(!(so->so_state & SS_NOFDREF), ("soclose: SS_NOFDREF on enter"));

	funsetown(&so->so_sigio);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;
		ACCEPT_LOCK();
		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			so->so_incqlen--;
			sp->so_qstate &= ~SQ_INCOMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			(void) soabort(sp);
			ACCEPT_LOCK();
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			sp->so_qstate &= ~SQ_COMP;
			sp->so_head = NULL;
			ACCEPT_UNLOCK();
			(void) soabort(sp);
			ACCEPT_LOCK();
		}
		ACCEPT_UNLOCK();
	}
	if (so->so_pcb == NULL)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo,
				    PSOCK | PCATCH, "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb != NULL) {
		int error2 = (*so->so_proto->pr_usrreqs->pru_detach)(so);
		if (error == 0)
			error = error2;
	}
discard:
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF"));
	so->so_state |= SS_NOFDREF;
	sorele(so);
	return (error);
}

/*
 * soabort() must not be called with any socket locks held, as it calls
 * into the protocol, which will call back into the socket code causing
 * it to acquire additional socket locks that may cause recursion or lock
 * order reversals.
 */
int
soabort(so)
	struct socket *so;
{
	int error;

	error = (*so->so_proto->pr_usrreqs->pru_abort)(so);
	if (error) {
		ACCEPT_LOCK();
		SOCK_LOCK(so);
		sotryfree(so);	/* note: does not decrement the ref count */
		return error;
	}
	return (0);
}

int
soaccept(so, nam)
	struct socket *so;
	struct sockaddr **nam;
{
	int error;

	SOCK_LOCK(so);
	KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF"));
	so->so_state &= ~SS_NOFDREF;
	SOCK_UNLOCK(so);
	error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
	return (error);
}

int
soconnect(so, nam, td)
	struct socket *so;
	struct sockaddr *nam;
	struct thread *td;
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from previous connection
		 * from biting us.
		 */
		so->so_error = 0;
		error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, td);
	}

	return (error);
}

int
soconnect2(so1, so2)
	struct socket *so1;
	struct socket *so2;
{

	return ((*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2));
}

int
sodisconnect(so)
	struct socket *so;
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);
	if (so->so_state & SS_ISDISCONNECTING)
		return (EALREADY);
	error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
	return (error);
}

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */

#ifdef ZERO_COPY_SOCKETS
struct so_zerocopy_stats {
	int size_ok;
	int align_ok;
	int found_ifp;
};
struct so_zerocopy_stats so_zerocp_stats = {0,0,0};
#include <netinet/in.h>
#include <net/route.h>
#include <netinet/in_pcb.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#endif /*ZERO_COPY_SOCKETS*/

int
sosend(so, addr, uio, top, control, flags, td)
	struct socket *so;
	struct sockaddr *addr;
	struct uio *uio;
	struct mbuf *top;
	struct mbuf *control;
	int flags;
	struct thread *td;
{
	struct mbuf **mp;
	struct mbuf *m;
	long space, len = 0, resid;
	int clen = 0, error, dontroute;
	int atomic = sosendallatonce(so) || top;
#ifdef ZERO_COPY_SOCKETS
	int cow_send;
#endif /* ZERO_COPY_SOCKETS */

	if (uio != NULL)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid.  On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td != NULL)
		td->td_proc->p_stats->p_ru.ru_msgsnd++;
	if (control != NULL)
		clen = control->m_len;
#define	snderr(errno)	{ error = (errno); goto release; }

	SOCKBUF_LOCK(&so->so_snd);
restart:
	SOCKBUF_LOCK_ASSERT(&so->so_snd);
	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out_locked;
	do {
		SOCKBUF_LOCK_ASSERT(&so->so_snd);
		if (so->so_snd.sb_state & SBS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					snderr(ENOTCONN);
			} else if (addr == NULL)
			    snderr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
				   ENOTCONN : EDESTADDRREQ);
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat)
			snderr(EMSGSIZE);
		if (space < resid + clen &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO))
				snderr(EWOULDBLOCK);
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			if (error)
				goto out_locked;
			goto restart;
		}
		SOCKBUF_UNLOCK(&so->so_snd);
		mp = &top;
		space -= clen;
		do {
		    if (uio == NULL) {
			/*
			 * Data is prepackaged in "top".
			 */
			resid = 0;
			if (flags & MSG_EOR)
				top->m_flags |= M_EOR;
		    } else do {
#ifdef ZERO_COPY_SOCKETS
			cow_send = 0;
#endif /* ZERO_COPY_SOCKETS */
			if (resid >= MINCLSIZE) {
#ifdef ZERO_COPY_SOCKETS
				if (top == NULL) {
					MGETHDR(m, M_TRYWAIT, MT_DATA);
					if (m == NULL) {
						error = ENOBUFS;
						SOCKBUF_LOCK(&so->so_snd);
						goto release;
					}
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = (struct ifnet *)0;
				} else {
					MGET(m, M_TRYWAIT, MT_DATA);
					if (m == NULL) {
						error = ENOBUFS;
						SOCKBUF_LOCK(&so->so_snd);
						goto release;
					}
				}
				if (so_zero_copy_send &&
				    resid>=PAGE_SIZE &&
				    space>=PAGE_SIZE &&
				    uio->uio_iov->iov_len>=PAGE_SIZE) {
					so_zerocp_stats.size_ok++;
					if (!((vm_offset_t)
					  uio->uio_iov->iov_base & PAGE_MASK)){
						so_zerocp_stats.align_ok++;
						cow_send = socow_setup(m, uio);
					}
				}
				if (!cow_send) {
					MCLGET(m, M_TRYWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						m_free(m);
						m = NULL;
					} else {
						len = min(min(MCLBYTES, resid), space);
					}
				} else
					len = PAGE_SIZE;
#else /* ZERO_COPY_SOCKETS */
				if (top == NULL) {
					m = m_getcl(M_TRYWAIT, MT_DATA, M_PKTHDR);
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = (struct ifnet *)0;
				} else
					m = m_getcl(M_TRYWAIT, MT_DATA, 0);
				len = min(min(MCLBYTES, resid), space);
#endif /* ZERO_COPY_SOCKETS */
			} else {
				if (top == NULL) {
					m = m_gethdr(M_TRYWAIT, MT_DATA);
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = (struct ifnet *)0;

					len = min(min(MHLEN, resid), space);
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && m && len < MHLEN)
						MH_ALIGN(m, len);
				} else {
					m = m_get(M_TRYWAIT, MT_DATA);
					len = min(min(MLEN, resid), space);
				}
			}
			if (m == NULL) {
				error = ENOBUFS;
				SOCKBUF_LOCK(&so->so_snd);
				goto release;
			}

			space -= len;
#ifdef ZERO_COPY_SOCKETS
			if (cow_send)
				error = 0;
			else
#endif /* ZERO_COPY_SOCKETS */
			error = uiomove(mtod(m, void *), (int)len, uio);
			resid = uio->uio_resid;
			m->m_len = len;
			*mp = m;
			top->m_pkthdr.len += len;
			if (error) {
				SOCKBUF_LOCK(&so->so_snd);
				goto release;
			}
			mp = &m->m_next;
			if (resid <= 0) {
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
				break;
			}
		    } while (space > 0 && atomic);
		    if (dontroute) {
			    SOCK_LOCK(so);
			    so->so_options |= SO_DONTROUTE;
			    SOCK_UNLOCK(so);
		    }
		    /*
		     * XXX all the SBS_CANTSENDMORE checks previously
		     * done could be out of date.  We could have received
		     * a reset packet in an interrupt or maybe we slept
		     * while doing page faults in uiomove() etc.  We could
		     * probably recheck again inside the locking protection
		     * here, but there are probably other places that this
		     * also happens.  We must rethink this.
		     */
		    error = (*so->so_proto->pr_usrreqs->pru_send)(so,
			(flags & MSG_OOB) ? PRUS_OOB :
			/*
			 * If the user set MSG_EOF, the protocol
			 * understands this flag, and there is nothing
			 * left to send, then use PRU_SEND_EOF instead
			 * of PRU_SEND.
			 */
			((flags & MSG_EOF) &&
			 (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			 (resid <= 0)) ?
				PRUS_EOF :
			/* If there is more to send set PRUS_MORETOCOME */
			(resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
			top, addr, control, td);
		    if (dontroute) {
			    SOCK_LOCK(so);
			    so->so_options &= ~SO_DONTROUTE;
			    SOCK_UNLOCK(so);
		    }
		    clen = 0;
		    control = NULL;
		    top = NULL;
		    mp = &top;
		    if (error) {
			SOCKBUF_LOCK(&so->so_snd);
			goto release;
		    }
		} while (resid && space > 0);
		SOCKBUF_LOCK(&so->so_snd);
	} while (resid);

release:
	SOCKBUF_LOCK_ASSERT(&so->so_snd);
	sbunlock(&so->so_snd);
out_locked:
	SOCKBUF_LOCK_ASSERT(&so->so_snd);
	SOCKBUF_UNLOCK(&so->so_snd);
out:
	if (top != NULL)
		m_freem(top);
	if (control != NULL)
		m_freem(control);
	return (error);
}
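
/*
 * Example (sketch): a kernel caller sending a prepackaged mbuf chain on a
 * connected socket would pass it as 'top' with no uio, e.g.:
 *
 *	error = sosend(so, NULL, NULL, top, NULL, 0, curthread);
 *
 * Per the contract above, data and control buffers are freed on return,
 * on success and failure alike, so the caller must not reference 'top'
 * after the call.
 */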

/*
 * The part of soreceive() that implements reading non-inline out-of-band
 * data from a socket.  For more complete comments, see soreceive(), from
 * which this code originated.
 *
 * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is
 * unable to return an mbuf chain to the caller.
 */
static int
soreceive_rcvoob(so, uio, flags)
	struct socket *so;
	struct uio *uio;
	int flags;
{
	struct protosw *pr = so->so_proto;
	struct mbuf *m;
	int error;

	KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0"));

	m = m_get(M_TRYWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
	if (error)
		goto bad;
	do {
#ifdef ZERO_COPY_SOCKETS
		if (so_zero_copy_receive) {
			int disposable;

			if ((m->m_flags & M_EXT)
			 && (m->m_ext.ext_type == EXT_DISPOSABLE))
				disposable = 1;
			else
				disposable = 0;

			error = uiomoveco(mtod(m, void *),
					  min(uio->uio_resid, m->m_len),
					  uio, disposable);
		} else
#endif /* ZERO_COPY_SOCKETS */
		error = uiomove(mtod(m, void *),
		    (int) min(uio->uio_resid, m->m_len), uio);
		m = m_free(m);
	} while (uio->uio_resid && error == 0 && m);
bad:
	if (m != NULL)
		m_freem(m);
	return (error);
}

/*
 * Following replacement or removal of the first mbuf on the first mbuf chain
 * of a socket buffer, push necessary state changes back into the socket
 * buffer so that other consumers see the values consistently.  'nextrecord'
 * is the caller's locally stored value of the original value of
 * sb->sb_mb->m_nextpkt which must be restored when the lead mbuf changes.
 * NOTE: 'nextrecord' may be NULL.
 */
static __inline void
sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord)
{

	SOCKBUF_LOCK_ASSERT(sb);
	/*
	 * First, update for the new value of nextrecord.  If necessary, make
	 * it the first record.
	 */
	if (sb->sb_mb != NULL)
		sb->sb_mb->m_nextpkt = nextrecord;
	else
		sb->sb_mb = nextrecord;

	/*
	 * Now update any dependent socket buffer fields to reflect the new
	 * state.  This is an expanded inline of SB_EMPTY_FIXUP(), with the
	 * addition of a second clause that takes care of the case where
	 * sb_mb has been updated, but remains the last record.
	 */
	if (sb->sb_mb == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (sb->sb_mb->m_nextpkt == NULL)
		sb->sb_lastrecord = sb->sb_mb;
}


/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
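
/*
 * For reference, a sketch of the record layout described above (one
 * record; records are chained through m_nextpkt, mbufs within a record
 * through m_next):
 *
 *	MT_SONAME -> MT_CONTROL ... -> MT_DATA -> MT_DATA ...
 *	(address,    (ancillary data,  (zero or more data mbufs)
 *	 if any)      if any)
 */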
int
soreceive(so, psa, uio, mp0, controlp, flagsp)
	struct socket *so;
	struct sockaddr **psa;
	struct uio *uio;
	struct mbuf **mp0;
	struct mbuf **controlp;
	int *flagsp;
{
	struct mbuf *m, **mp;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff, type = 0;
	int orig_resid = uio->uio_resid;

	mp = mp0;
	if (psa != NULL)
		*psa = NULL;
	if (controlp != NULL)
		*controlp = NULL;
	if (flagsp != NULL)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB)
		return (soreceive_rcvoob(so, uio, flags));
	if (mp != NULL)
		*mp = NULL;
	if ((pr->pr_flags & PR_WANTRCVD) && (so->so_state & SS_ISCONFIRMING)
	    && uio->uio_resid)
		(*pr->pr_usrreqs->pru_rcvd)(so, 0);

	SOCKBUF_LOCK(&so->so_rcv);
restart:
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto out;

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), and
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.sb_cc,
		    ("receive: m == %p so->so_rcv.sb_cc == %u",
		    m, so->so_rcv.sb_cc));
		if (so->so_error) {
			if (m != NULL)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m != NULL; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0)
			goto release;
		if ((so->so_state & SS_NBIO) ||
		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
			error = EWOULDBLOCK;
			goto release;
		}
		SBLASTRECORDCHK(&so->so_rcv);
		SBLASTMBUFCHK(&so->so_rcv);
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		if (error)
			goto out;
		goto restart;
	}
dontblock:
	/*
	 * From this point onward, we maintain 'nextrecord' as a cache of the
	 * pointer to the next record in the socket buffer.  We must keep the
	 * various socket buffer pointers and local stack versions of the
	 * pointers in sync, pushing out modifications before dropping the
	 * socket buffer mutex, and re-reading them when picking it up.
	 *
	 * Otherwise, we will race with the network stack appending new data
	 * or records onto the socket buffer by using inconsistent/stale
	 * versions of the field, possibly resulting in socket buffer
	 * corruption.
	 *
	 * By holding the high-level sblock(), we prevent simultaneous
	 * readers from pulling off the front of the socket buffer.
	 */
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (uio->uio_td)
		uio->uio_td->td_proc->p_stats->p_ru.ru_msgrcv++;
	KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME,
		    ("m->m_type == %d", m->m_type));
		orig_resid = 0;
		if (psa != NULL)
			*psa = sodupsockaddr(mtod(m, struct sockaddr *),
			    M_NOWAIT);
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			so->so_rcv.sb_mb = m_free(m);
			m = so->so_rcv.sb_mb;
			sockbuf_pushsync(&so->so_rcv, nextrecord);
		}
	}

	/*
	 * Process one or more MT_CONTROL mbufs present before any data mbufs
	 * in the first mbuf chain on the socket buffer.  If MSG_PEEK, we
	 * just copy the data; if !MSG_PEEK, we call into the protocol to
	 * perform externalization (or freeing if controlp == NULL).
	 */
	if (m != NULL && m->m_type == MT_CONTROL) {
		struct mbuf *cm = NULL, *cmn;
		struct mbuf **cme = &cm;

		do {
			if (flags & MSG_PEEK) {
				if (controlp != NULL) {
					*controlp = m_copy(m, 0, m->m_len);
					controlp = &(*controlp)->m_next;
				}
				m = m->m_next;
			} else {
				sbfree(&so->so_rcv, m);
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = NULL;
				*cme = m;
				cme = &(*cme)->m_next;
				m = so->so_rcv.sb_mb;
			}
		} while (m != NULL && m->m_type == MT_CONTROL);
		if ((flags & MSG_PEEK) == 0)
			sockbuf_pushsync(&so->so_rcv, nextrecord);
		while (cm != NULL) {
			cmn = cm->m_next;
			cm->m_next = NULL;
			if (pr->pr_domain->dom_externalize != NULL) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				error = (*pr->pr_domain->dom_externalize)
				    (cm, controlp);
				SOCKBUF_LOCK(&so->so_rcv);
			} else if (controlp != NULL)
				*controlp = cm;
			else
				m_freem(cm);
			if (controlp != NULL) {
				orig_resid = 0;
				while (*controlp != NULL)
					controlp = &(*controlp)->m_next;
			}
			cm = cmn;
		}
		nextrecord = so->so_rcv.sb_mb->m_nextpkt;
		orig_resid = 0;
	}
	if (m != NULL) {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(m->m_nextpkt == nextrecord,
			    ("soreceive: post-control, nextrecord !sync"));
			if (nextrecord == NULL) {
				KASSERT(so->so_rcv.sb_mb == m,
				    ("soreceive: post-control, sb_mb!=m"));
				KASSERT(so->so_rcv.sb_lastrecord == m,
				    ("soreceive: post-control, lastrecord!=m"));
			}
		}
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	} else {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(so->so_rcv.sb_mb == nextrecord,
			    ("soreceive: sb_mb != nextrecord"));
			if (so->so_rcv.sb_mb == NULL) {
				KASSERT(so->so_rcv.sb_lastrecord == NULL,
				    ("soreceive: sb_lastrecord != NULL"));
			}
		}
	}
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	SBLASTRECORDCHK(&so->so_rcv);
	SBLASTMBUFCHK(&so->so_rcv);

	/*
	 * Now continue to read any data mbufs off of the head of the socket
	 * buffer until the read request is satisfied.  Note that 'type' is
	 * used to store the type of any mbuf reads that have happened so far
	 * such that soreceive() can stop reading if the type changes, which
	 * causes soreceive() to return only one of regular data and inline
	 * out-of-band data in a single socket receive operation.
	 */
	moff = 0;
	offset = 0;
	while (m != NULL && uio->uio_resid > 0 && error == 0) {
		/*
		 * If the type of mbuf has changed since the last mbuf
		 * examined ('type'), end the receive operation.
		 */
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
		else
		    KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
			("m->m_type == %d", m->m_type));
		so->so_rcv.sb_state &= ~SBS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * Sockbuf must be consistent here (points to current mbuf,
		 * it points to next record) when we drop priority;
		 * we must note any additions to the sockbuf when we
		 * block interrupts again.
		 */
		if (mp == NULL) {
			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
			SBLASTRECORDCHK(&so->so_rcv);
			SBLASTMBUFCHK(&so->so_rcv);
			SOCKBUF_UNLOCK(&so->so_rcv);
#ifdef ZERO_COPY_SOCKETS
			if (so_zero_copy_receive) {
				int disposable;

				if ((m->m_flags & M_EXT)
				 && (m->m_ext.ext_type == EXT_DISPOSABLE))
					disposable = 1;
				else
					disposable = 0;

				error = uiomoveco(mtod(m, char *) + moff,
						  (int)len, uio,
						  disposable);
			} else
#endif /* ZERO_COPY_SOCKETS */
			error = uiomove(mtod(m, char *) + moff, (int)len, uio);
			SOCKBUF_LOCK(&so->so_rcv);
			if (error)
				goto release;
		} else
			uio->uio_resid -= len;
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp != NULL) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					so->so_rcv.sb_mb = m_free(m);
					m = so->so_rcv.sb_mb;
				}
				if (m != NULL) {
					m->m_nextpkt = nextrecord;
					if (nextrecord == NULL)
						so->so_rcv.sb_lastrecord = m;
				} else {
					so->so_rcv.sb_mb = nextrecord;
					SB_EMPTY_FIXUP(&so->so_rcv);
				}
				SBLASTRECORDCHK(&so->so_rcv);
				SBLASTMBUFCHK(&so->so_rcv);
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp != NULL) {
					int copy_flag;

					if (flags & MSG_DONTWAIT)
						copy_flag = M_DONTWAIT;
					else
						copy_flag = M_TRYWAIT;
					if (copy_flag == M_TRYWAIT)
						SOCKBUF_UNLOCK(&so->so_rcv);
					*mp = m_copym(m, 0, len, copy_flag);
					if (copy_flag == M_TRYWAIT)
						SOCKBUF_LOCK(&so->so_rcv);
					if (*mp == NULL) {
						/*
						 * m_copym() couldn't allocate an mbuf.
						 * Adjust uio_resid back (it was adjusted
						 * down by len bytes, which we didn't end
						 * up "copying" over).
						 */
						uio->uio_resid += len;
						break;
					}
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_rcv.sb_state |= SBS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for a non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && nextrecord == NULL) {
			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
			if (so->so_error || so->so_rcv.sb_state & SBS_CANTRCVMORE)
				break;
			/*
			 * Notify the protocol that some data has been
			 * drained before blocking.
			 */
			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb != NULL) {
				SOCKBUF_UNLOCK(&so->so_rcv);
				(*pr->pr_usrreqs->pru_rcvd)(so, flags);
				SOCKBUF_LOCK(&so->so_rcv);
			}
			SBLASTRECORDCHK(&so->so_rcv);
			SBLASTMBUFCHK(&so->so_rcv);
			error = sbwait(&so->so_rcv);
			if (error)
				goto release;
			m = so->so_rcv.sb_mb;
			if (m != NULL)
				nextrecord = m->m_nextpkt;
		}
	}

	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (m != NULL && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord_locked(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == NULL) {
			/*
			 * First part is an inline SB_EMPTY_FIXUP().  Second
			 * part makes sure sb_lastrecord is up-to-date if
			 * there is still data in the socket buffer.
			 */
			so->so_rcv.sb_mb = nextrecord;
			if (so->so_rcv.sb_mb == NULL) {
				so->so_rcv.sb_mbtail = NULL;
				so->so_rcv.sb_lastrecord = NULL;
			} else if (nextrecord->m_nextpkt == NULL)
				so->so_rcv.sb_lastrecord = nextrecord;
		}
		SBLASTRECORDCHK(&so->so_rcv);
		SBLASTMBUFCHK(&so->so_rcv);
		/*
		 * If soreceive() is being done from the socket callback, then
		 * we don't need to generate an ACK to the peer to update the
		 * window, since the ACK will be generated on return to TCP.
		 */
		if (!(flags & MSG_SOCALLBCK) &&
		    (pr->pr_flags & PR_WANTRCVD) && so->so_pcb) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			(*pr->pr_usrreqs->pru_rcvd)(so, flags);
			SOCKBUF_LOCK(&so->so_rcv);
		}
	}
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp != NULL)
		*flagsp |= flags;
release:
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	sbunlock(&so->so_rcv);
out:
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
	SOCKBUF_UNLOCK(&so->so_rcv);
	return (error);
}

int
soshutdown(so, how)
	struct socket *so;
	int how;
{
	struct protosw *pr = so->so_proto;

	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how != SHUT_WR)
		sorflush(so);
	if (how != SHUT_RD)
		return ((*pr->pr_usrreqs->pru_shutdown)(so));
	return (0);
}

void
sorflush(so)
	struct socket *so;
{
	struct sockbuf *sb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	struct sockbuf asb;

	/*
	 * XXXRW: This is quite ugly.  Previously, this code made a copy of
	 * the socket buffer, then zero'd the original to clear the buffer
	 * fields.  However, with mutexes in the socket buffer, this causes
	 * problems.  We only clear the zeroable bits of the original;
	 * however, we have to initialize and destroy the mutex in the copy
	 * so that dom_dispose() and sbrelease() can lock it as needed.
	 */
	SOCKBUF_LOCK(sb);
	sb->sb_flags |= SB_NOINTR;
	(void) sblock(sb, M_WAITOK);
	/*
	 * socantrcvmore_locked() drops the socket buffer mutex so that it
	 * can safely perform wakeups.  Re-acquire the mutex before
	 * continuing.
	 */
	socantrcvmore_locked(so);
	SOCKBUF_LOCK(sb);
	sbunlock(sb);
	/*
	 * Invalidate/clear most of the sockbuf structure, but leave
	 * selinfo and mutex data unchanged.
	 */
	bzero(&asb, offsetof(struct sockbuf, sb_startzero));
	bcopy(&sb->sb_startzero, &asb.sb_startzero,
	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
	bzero(&sb->sb_startzero,
	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
	SOCKBUF_UNLOCK(sb);

	SOCKBUF_LOCK_INIT(&asb, "so_rcv");
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb, so);
	SOCKBUF_LOCK_DESTROY(&asb);
}

/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
int
sooptcopyin(sopt, buf, len, minlen)
	struct	sockopt *sopt;
	void	*buf;
	size_t	len;
	size_t	minlen;
{
	size_t	valsize;

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return EINVAL;
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	if (sopt->sopt_td != NULL)
		return (copyin(sopt->sopt_val, buf, valsize));

	bcopy(sopt->sopt_val, buf, valsize);
	return 0;
}

/*
 * Kernel version of setsockopt(2).
 * XXX: optlen is size_t, not socklen_t
 */
int
so_setsockopt(struct socket *so, int level, int optname, void *optval,
    size_t optlen)
{
	struct sockopt sopt;

	sopt.sopt_level = level;
	sopt.sopt_name = optname;
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_val = optval;
	sopt.sopt_valsize = optlen;
	sopt.sopt_td = NULL;
	return (sosetopt(so, &sopt));
}
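
/*
 * Example (sketch): because sopt_td is NULL above, the option value is
 * taken from kernel space, so a kernel caller can enable an option
 * directly, e.g.:
 *
 *	int on = 1;
 *
 *	error = so_setsockopt(so, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
 */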

int
sosetopt(so, sopt)
	struct socket *so;
	struct sockopt *sopt;
{
	int	error, optval;
	struct	linger l;
	struct	timeval tv;
	u_long  val;
#ifdef MAC
	struct mac extmac;
#endif

	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
				  (so, sopt));
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;
#endif
		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			SOCK_LOCK(so);
			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			SOCK_UNLOCK(so);
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_BINTIME:
		case SO_NOSIGPIPE:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;
			SOCK_LOCK(so);
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			SOCK_UNLOCK(so);
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so, curthread) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				SOCKBUF_LOCK(&so->so_snd);
				so->so_snd.sb_lowat =
				    (optval > so->so_snd.sb_hiwat) ?
				    so->so_snd.sb_hiwat : optval;
				SOCKBUF_UNLOCK(&so->so_snd);
				break;
			case SO_RCVLOWAT:
				SOCKBUF_LOCK(&so->so_rcv);
				so->so_rcv.sb_lowat =
				    (optval > so->so_rcv.sb_hiwat) ?
				    so->so_rcv.sb_hiwat : optval;
				SOCKBUF_UNLOCK(&so->so_rcv);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
					    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
			if (val > INT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = val;
				break;
			}
			break;
		case SO_LABEL:
#ifdef MAC
			error = sooptcopyin(sopt, &extmac, sizeof extmac,
			    sizeof extmac);
			if (error)
				goto bad;
			error = mac_setsockopt_label(sopt->sopt_td->td_ucred,
			    so, &extmac);
#else
			error = EOPNOTSUPP;
#endif
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto != NULL &&
		    so->so_proto->pr_ctloutput != NULL) {
			(void) ((*so->so_proto->pr_ctloutput)
				  (so, sopt));
		}
	}
bad:
	return (error);
}

/* Helper routine for getsockopt */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	int	error;
	size_t	valsize;

	error = 0;

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire answer must
	 * be generated ahead of time.
	 */
	valsize = min(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != NULL) {
		if (sopt->sopt_td != NULL)
			error = copyout(buf, sopt->sopt_val, valsize);
		else
			bcopy(buf, sopt->sopt_val, valsize);
	}
	return error;
}
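
/*
 * Example (sketch): a getsockopt path typically finishes by handing a
 * local copy of the value to sooptcopyout(), which truncates to the
 * user's buffer as described above, e.g.:
 *
 *	int optval;
 *
 *	optval = so->so_snd.sb_lowat;
 *	error = sooptcopyout(sopt, &optval, sizeof(optval));
 */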
1743 
1744 int
1745 sogetopt(so, sopt)
1746 	struct socket *so;
1747 	struct sockopt *sopt;
1748 {
1749 	int	error, optval;
1750 	struct	linger l;
1751 	struct	timeval tv;
1752 #ifdef INET
1753 	struct accept_filter_arg *afap;
1754 #endif
1755 #ifdef MAC
1756 	struct mac extmac;
1757 #endif
1758 
1759 	error = 0;
1760 	if (sopt->sopt_level != SOL_SOCKET) {
1761 		if (so->so_proto && so->so_proto->pr_ctloutput) {
1762 			return ((*so->so_proto->pr_ctloutput)
1763 				  (so, sopt));
1764 		} else
1765 			return (ENOPROTOOPT);
1766 	} else {
1767 		switch (sopt->sopt_name) {
1768 #ifdef INET
1769 		case SO_ACCEPTFILTER:
1770 			/* Unlocked read. */
1771 			if ((so->so_options & SO_ACCEPTCONN) == 0)
1772 				return (EINVAL);
1773 			MALLOC(afap, struct accept_filter_arg *, sizeof(*afap),
1774 				M_TEMP, M_WAITOK | M_ZERO);
1775 			SOCK_LOCK(so);
1776 			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
1777 				strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
1778 				if (so->so_accf->so_accept_filter_str != NULL)
1779 					strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
1780 			}
1781 			SOCK_UNLOCK(so);
1782 			error = sooptcopyout(sopt, afap, sizeof(*afap));
1783 			FREE(afap, M_TEMP);
1784 			break;
1785 #endif
1786 
1787 		case SO_LINGER:
1788 			SOCK_LOCK(so);
1789 			l.l_onoff = so->so_options & SO_LINGER;
1790 			l.l_linger = so->so_linger;
1791 			SOCK_UNLOCK(so);
1792 			error = sooptcopyout(sopt, &l, sizeof l);
1793 			break;
1794 
1795 		case SO_USELOOPBACK:
1796 		case SO_DONTROUTE:
1797 		case SO_DEBUG:
1798 		case SO_KEEPALIVE:
1799 		case SO_REUSEADDR:
1800 		case SO_REUSEPORT:
1801 		case SO_BROADCAST:
1802 		case SO_OOBINLINE:
1803 		case SO_TIMESTAMP:
1804 		case SO_BINTIME:
1805 		case SO_NOSIGPIPE:
1806 			optval = so->so_options & sopt->sopt_name;
1807 integer:
1808 			error = sooptcopyout(sopt, &optval, sizeof optval);
1809 			break;
1810 
1811 		case SO_TYPE:
1812 			optval = so->so_type;
1813 			goto integer;
1814 
1815 		case SO_ERROR:
1816 			optval = so->so_error;
1817 			so->so_error = 0;
1818 			goto integer;
1819 
1820 		case SO_SNDBUF:
1821 			optval = so->so_snd.sb_hiwat;
1822 			goto integer;
1823 
1824 		case SO_RCVBUF:
1825 			optval = so->so_rcv.sb_hiwat;
1826 			goto integer;
1827 
1828 		case SO_SNDLOWAT:
1829 			optval = so->so_snd.sb_lowat;
1830 			goto integer;
1831 
1832 		case SO_RCVLOWAT:
1833 			optval = so->so_rcv.sb_lowat;
1834 			goto integer;
1835 
1836 		case SO_SNDTIMEO:
1837 		case SO_RCVTIMEO:
1838 			optval = (sopt->sopt_name == SO_SNDTIMEO ?
1839 				  so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
1840 
1841 			tv.tv_sec = optval / hz;
1842 			tv.tv_usec = (optval % hz) * tick;
1843 			error = sooptcopyout(sopt, &tv, sizeof tv);
1844 			break;
1845 		case SO_LABEL:
1846 #ifdef MAC
1847 			error = sooptcopyin(sopt, &extmac, sizeof(extmac),
1848 			    sizeof(extmac));
1849 			if (error)
1850 				return (error);
1851 			error = mac_getsockopt_label(sopt->sopt_td->td_ucred,
1852 			    so, &extmac);
1853 			if (error)
1854 				return (error);
1855 			error = sooptcopyout(sopt, &extmac, sizeof extmac);
1856 #else
1857 			error = EOPNOTSUPP;
1858 #endif
1859 			break;
1860 		case SO_PEERLABEL:
1861 #ifdef MAC
1862 			error = sooptcopyin(sopt, &extmac, sizeof(extmac),
1863 			    sizeof(extmac));
1864 			if (error)
1865 				return (error);
1866 			error = mac_getsockopt_peerlabel(
1867 			    sopt->sopt_td->td_ucred, so, &extmac);
1868 			if (error)
1869 				return (error);
1870 			error = sooptcopyout(sopt, &extmac, sizeof extmac);
1871 #else
1872 			error = EOPNOTSUPP;
1873 #endif
1874 			break;
1875 		default:
1876 			error = ENOPROTOOPT;
1877 			break;
1878 		}
1879 		return (error);
1880 	}
1881 }
1882 
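/*
 * Userland view (editor's sketch): the SO_ERROR case above is
 * clear-on-read (so_error is zeroed), so a second getsockopt(2) call
 * returns 0 unless a new error has been posted meanwhile.  Guarded out
 * of compilation; illustrative only.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>

static int
fetch_pending_error(int s)
{
	int err;
	socklen_t len = sizeof(err);

	if (getsockopt(s, SOL_SOCKET, SO_ERROR, &err, &len) == -1)
		return (-1);
	return (err);	/* the pending error, now cleared in the kernel */
}
#endif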
1883 /* XXX: prepare mbuf for (__FreeBSD__ < 3) routines. */
1884 int
1885 soopt_getm(struct sockopt *sopt, struct mbuf **mp)
1886 {
1887 	struct mbuf *m, *m_prev;
1888 	int sopt_size = sopt->sopt_valsize;
1889 
1890 	MGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT, MT_DATA);
1891 	if (m == NULL)
1892 		return (ENOBUFS);
1893 	if (sopt_size > MLEN) {
1894 		MCLGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT);
1895 		if ((m->m_flags & M_EXT) == 0) {
1896 			m_free(m);
1897 			return (ENOBUFS);
1898 		}
1899 		m->m_len = min(MCLBYTES, sopt_size);
1900 	} else {
1901 		m->m_len = min(MLEN, sopt_size);
1902 	}
1903 	sopt_size -= m->m_len;
1904 	*mp = m;
1905 	m_prev = m;
1906 
1907 	while (sopt_size) {
1908 		MGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT, MT_DATA);
1909 		if (m == NULL) {
1910 			m_freem(*mp);
1911 			return (ENOBUFS);
1912 		}
1913 		if (sopt_size > MLEN) {
1914 			MCLGET(m, sopt->sopt_td != NULL ? M_TRYWAIT :
1915 			    M_DONTWAIT);
1916 			if ((m->m_flags & M_EXT) == 0) {
1917 				m_freem(m);
1918 				m_freem(*mp);
1919 				return (ENOBUFS);
1920 			}
1921 			m->m_len = min(MCLBYTES, sopt_size);
1922 		} else {
1923 			m->m_len = min(MLEN, sopt_size);
1924 		}
1925 		sopt_size -= m->m_len;
1926 		m_prev->m_next = m;
1927 		m_prev = m;
1928 	}
1929 	return (0);
1930 }
1931 
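/*
 * Worked example (editor's note, with illustrative values MLEN ~ 224
 * and MCLBYTES = 2048): for sopt_valsize = 3000 the code above builds
 * a two-mbuf chain.  3000 > MLEN, so the first mbuf gets a cluster and
 * holds min(2048, 3000) = 2048 bytes; the remaining 952 bytes still
 * exceed MLEN, so the second mbuf also gets a cluster and holds the
 * final 952 bytes.
 */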
1932 /* XXX: copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
1933 int
1934 soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
1935 {
1936 	struct mbuf *m0 = m;
1937 
1938 	if (sopt->sopt_val == NULL)
1939 		return (0);
1940 	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
1941 		if (sopt->sopt_td != NULL) {
1942 			int error;
1943 
1944 			error = copyin(sopt->sopt_val, mtod(m, char *),
1945 				       m->m_len);
1946 			if (error != 0) {
1947 				m_freem(m0);
1948 				return (error);
1949 			}
1950 		} else
1951 			bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
1952 		sopt->sopt_valsize -= m->m_len;
1953 		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
1954 		m = m->m_next;
1955 	}
1956 	if (m != NULL) /* enough mbufs should have been allocated at ip6_sooptmcopyin() */
1957 		panic("ip6_sooptmcopyin");
1958 	return (0);
1959 }
1960 
1961 /* XXX: copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
1962 int
1963 soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
1964 {
1965 	struct mbuf *m0 = m;
1966 	size_t valsize = 0;
1967 
1968 	if (sopt->sopt_val == NULL)
1969 		return (0);
1970 	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
1971 		if (sopt->sopt_td != NULL) {
1972 			int error;
1973 
1974 			error = copyout(mtod(m, char *), sopt->sopt_val,
1975 				       m->m_len);
1976 			if (error != 0) {
1977 				m_freem(m0);
1978 				return (error);
1979 			}
1980 		} else
1981 			bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
1982 		sopt->sopt_valsize -= m->m_len;
1983 		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
1984 		valsize += m->m_len;
1985 		m = m->m_next;
1986 	}
1987 	if (m != NULL) {
1988 		/* Enough buffer space should have been supplied from user-land. */
1989 		m_freem(m0);
1990 		return (EINVAL);
1991 	}
1992 	sopt->sopt_valsize = valsize;
1993 	return (0);
1994 }
1995 
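/*
 * Usage sketch (editor's addition, hypothetical caller): soopt_getm()
 * and soopt_mcopyin() are used together to hand option data to the
 * legacy mbuf-based handlers; soopt_mcopyout() is the return path.
 * Both copy routines advance sopt_val and consume sopt_valsize, so a
 * caller doing a full round trip must save and restore those fields
 * in between.  Guarded out of compilation.
 */
#if 0
static int
sopt_to_mbuf_chain(struct sockopt *sopt, struct mbuf **mp)
{
	int error;

	error = soopt_getm(sopt, mp);		/* chain sized to sopt_valsize */
	if (error != 0)
		return (error);
	return (soopt_mcopyin(sopt, *mp));	/* frees the chain on error */
}
#endif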
1996 void
1997 sohasoutofband(struct socket *so)
1999 {
2000 	if (so->so_sigio != NULL)
2001 		pgsigio(&so->so_sigio, SIGURG, 0);
2002 	selwakeuppri(&so->so_rcv.sb_sel, PSOCK);
2003 }
2004 
2005 int
2006 sopoll(struct socket *so, int events, struct ucred *active_cred,
2007     struct thread *td)
2008 {
2009 	int revents = 0;
2010 
2011 	SOCKBUF_LOCK(&so->so_snd);
2012 	SOCKBUF_LOCK(&so->so_rcv);
2013 	if (events & (POLLIN | POLLRDNORM))
2014 		if (soreadable(so))
2015 			revents |= events & (POLLIN | POLLRDNORM);
2016 
2017 	if (events & POLLINIGNEOF)
2018 		if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat ||
2019 		    !TAILQ_EMPTY(&so->so_comp) || so->so_error)
2020 			revents |= POLLINIGNEOF;
2021 
2022 	if (events & (POLLOUT | POLLWRNORM))
2023 		if (sowriteable(so))
2024 			revents |= events & (POLLOUT | POLLWRNORM);
2025 
2026 	if (events & (POLLPRI | POLLRDBAND))
2027 		if (so->so_oobmark || (so->so_rcv.sb_state & SBS_RCVATMARK))
2028 			revents |= events & (POLLPRI | POLLRDBAND);
2029 
2030 	if (revents == 0) {
2031 		if (events &
2032 		    (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM |
2033 		     POLLRDBAND)) {
2034 			selrecord(td, &so->so_rcv.sb_sel);
2035 			so->so_rcv.sb_flags |= SB_SEL;
2036 		}
2037 
2038 		if (events & (POLLOUT | POLLWRNORM)) {
2039 			selrecord(td, &so->so_snd.sb_sel);
2040 			so->so_snd.sb_flags |= SB_SEL;
2041 		}
2042 	}
2043 
2044 	SOCKBUF_UNLOCK(&so->so_rcv);
2045 	SOCKBUF_UNLOCK(&so->so_snd);
2046 	return (revents);
2047 }
2048 
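/*
 * Userland view (editor's sketch): POLLPRI/POLLRDBAND correspond to
 * the out-of-band mark test above, POLLIN/POLLRDNORM to soreadable().
 * Guarded out of compilation; illustrative only.
 */
#if 0
#include <poll.h>

static int
wait_readable_or_oob(int s)
{
	struct pollfd pfd;

	pfd.fd = s;
	pfd.events = POLLIN | POLLPRI;
	pfd.revents = 0;
	return (poll(&pfd, 1, -1));	/* wakes on data, EOF, or the OOB mark */
}
#endif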
2049 int
2050 soo_kqfilter(struct file *fp, struct knote *kn)
2051 {
2052 	struct socket *so = kn->kn_fp->f_data;
2053 	struct sockbuf *sb;
2054 
2055 	switch (kn->kn_filter) {
2056 	case EVFILT_READ:
2057 		if (so->so_options & SO_ACCEPTCONN)
2058 			kn->kn_fop = &solisten_filtops;
2059 		else
2060 			kn->kn_fop = &soread_filtops;
2061 		sb = &so->so_rcv;
2062 		break;
2063 	case EVFILT_WRITE:
2064 		kn->kn_fop = &sowrite_filtops;
2065 		sb = &so->so_snd;
2066 		break;
2067 	default:
2068 		return (EINVAL);
2069 	}
2070 
2071 	SOCKBUF_LOCK(sb);
2072 	knlist_add(&sb->sb_sel.si_note, kn, 1);
2073 	sb->sb_flags |= SB_KNOTE;
2074 	SOCKBUF_UNLOCK(sb);
2075 	return (0);
2076 }
2077 
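/*
 * Userland view (editor's sketch): an EVFILT_READ registration with
 * NOTE_LOWAT drives the kn_sfflags & NOTE_LOWAT branch of
 * filt_soread() below; on a listening socket the same registration is
 * routed to filt_solisten() and kn_data reports the completed
 * connection queue length instead.  Guarded out of compilation.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

static int
wait_for_lowat(int kq, int s, int lowat)
{
	struct kevent kev;

	EV_SET(&kev, s, EVFILT_READ, EV_ADD, NOTE_LOWAT, lowat, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		return (-1);
	/* Blocks until at least lowat bytes are buffered on s. */
	return (kevent(kq, NULL, 0, &kev, 1, NULL));
}
#endif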
2078 static void
2079 filt_sordetach(struct knote *kn)
2080 {
2081 	struct socket *so = kn->kn_fp->f_data;
2082 
2083 	SOCKBUF_LOCK(&so->so_rcv);
2084 	knlist_remove(&so->so_rcv.sb_sel.si_note, kn, 1);
2085 	if (knlist_empty(&so->so_rcv.sb_sel.si_note))
2086 		so->so_rcv.sb_flags &= ~SB_KNOTE;
2087 	SOCKBUF_UNLOCK(&so->so_rcv);
2088 }
2089 
2090 /*ARGSUSED*/
2091 static int
2092 filt_soread(struct knote *kn, long hint)
2093 {
2094 	struct socket *so;
2095 
2096 	so = kn->kn_fp->f_data;
2097 	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2098 
2099 	kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
2100 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
2101 		kn->kn_flags |= EV_EOF;
2102 		kn->kn_fflags = so->so_error;
2103 		return (1);
2104 	} else if (so->so_error)	/* temporary udp error */
2105 		return (1);
2106 	else if (kn->kn_sfflags & NOTE_LOWAT)
2107 		return (kn->kn_data >= kn->kn_sdata);
2108 	else
2109 		return (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat);
2110 }
2111 
2112 static void
2113 filt_sowdetach(struct knote *kn)
2114 {
2115 	struct socket *so = kn->kn_fp->f_data;
2116 
2117 	SOCKBUF_LOCK(&so->so_snd);
2118 	knlist_remove(&so->so_snd.sb_sel.si_note, kn, 1);
2119 	if (knlist_empty(&so->so_snd.sb_sel.si_note))
2120 		so->so_snd.sb_flags &= ~SB_KNOTE;
2121 	SOCKBUF_UNLOCK(&so->so_snd);
2122 }
2123 
2124 /*ARGSUSED*/
2125 static int
2126 filt_sowrite(struct knote *kn, long hint)
2127 {
2128 	struct socket *so;
2129 
2130 	so = kn->kn_fp->f_data;
2131 	SOCKBUF_LOCK_ASSERT(&so->so_snd);
2132 	kn->kn_data = sbspace(&so->so_snd);
2133 	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
2134 		kn->kn_flags |= EV_EOF;
2135 		kn->kn_fflags = so->so_error;
2136 		return (1);
2137 	} else if (so->so_error)	/* temporary udp error */
2138 		return (1);
2139 	else if (((so->so_state & SS_ISCONNECTED) == 0) &&
2140 	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
2141 		return (0);
2142 	else if (kn->kn_sfflags & NOTE_LOWAT)
2143 		return (kn->kn_data >= kn->kn_sdata);
2144 	else
2145 		return (kn->kn_data >= so->so_snd.sb_lowat);
2146 }
2147 
2148 /*ARGSUSED*/
2149 static int
2150 filt_solisten(struct knote *kn, long hint)
2151 {
2152 	struct socket *so = kn->kn_fp->f_data;
2153 
2154 	kn->kn_data = so->so_qlen;
2155 	return (!TAILQ_EMPTY(&so->so_comp));
2156 }
2157 
2158 int
2159 socheckuid(struct socket *so, uid_t uid)
2160 {
2161 
2162 	if (so == NULL)
2163 		return (EPERM);
2164 	if (so->so_cred->cr_uid != uid)
2165 		return (EPERM);
2166 	return (0);
2167 }
2168 
2169 static int
2170 somaxconn_sysctl(SYSCTL_HANDLER_ARGS)
2171 {
2172 	int error;
2173 	int val;
2174 
2175 	val = somaxconn;
2176 	error = sysctl_handle_int(oidp, &val, sizeof(int), req);
2177 	if (error || !req->newptr)
2178 		return (error);
2179 
2180 	if (val < 1 || val > USHRT_MAX)
2181 		return (EINVAL);
2182 
2183 	somaxconn = val;
2184 	return (0);
2185 }
2186
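/*
 * Administration note (editor's addition): this handler is the
 * validation half of the kern.ipc.somaxconn sysctl (the SYSCTL_PROC
 * registration is not shown here), so e.g. `sysctl
 * kern.ipc.somaxconn=1024` raises the listen(2) backlog ceiling;
 * values outside [1, USHRT_MAX] are rejected with EINVAL as above.
 */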