xref: /freebsd/sys/kern/uipc_socket.c (revision 6af83ee0d2941d18880b6aaa2b4facd1d30c6106)
1 /*-
2  * Copyright (c) 2004 The FreeBSD Foundation
3  * Copyright (c) 2004 Robert Watson
4  * Copyright (c) 1982, 1986, 1988, 1990, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 4. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include "opt_inet.h"
38 #include "opt_mac.h"
39 #include "opt_zero.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/fcntl.h>
44 #include <sys/limits.h>
45 #include <sys/lock.h>
46 #include <sys/mac.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/mutex.h>
50 #include <sys/domain.h>
51 #include <sys/file.h>			/* for struct knote */
52 #include <sys/kernel.h>
53 #include <sys/event.h>
54 #include <sys/poll.h>
55 #include <sys/proc.h>
56 #include <sys/protosw.h>
57 #include <sys/socket.h>
58 #include <sys/socketvar.h>
59 #include <sys/resourcevar.h>
60 #include <sys/signalvar.h>
61 #include <sys/sysctl.h>
62 #include <sys/uio.h>
63 #include <sys/jail.h>
64 
65 #include <vm/uma.h>
66 
67 
68 static int	soreceive_rcvoob(struct socket *so, struct uio *uio,
69 		    int flags);
70 
71 #ifdef INET
72 static int	 do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
73 #endif
74 
75 static void	filt_sordetach(struct knote *kn);
76 static int	filt_soread(struct knote *kn, long hint);
77 static void	filt_sowdetach(struct knote *kn);
78 static int	filt_sowrite(struct knote *kn, long hint);
79 static int	filt_solisten(struct knote *kn, long hint);
80 
81 static struct filterops solisten_filtops =
82 	{ 1, NULL, filt_sordetach, filt_solisten };
83 static struct filterops soread_filtops =
84 	{ 1, NULL, filt_sordetach, filt_soread };
85 static struct filterops sowrite_filtops =
86 	{ 1, NULL, filt_sowdetach, filt_sowrite };
87 
88 uma_zone_t socket_zone;
89 so_gen_t	so_gencnt;	/* generation count for sockets */
90 
91 MALLOC_DEFINE(M_SONAME, "soname", "socket name");
92 MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
93 
94 SYSCTL_DECL(_kern_ipc);
95 
96 static int somaxconn = SOMAXCONN;
97 static int somaxconn_sysctl(SYSCTL_HANDLER_ARGS);
98 /* XXX: we don't have SYSCTL_USHORT */
99 SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLTYPE_UINT | CTLFLAG_RW,
100     0, sizeof(int), somaxconn_sysctl, "I", "Maximum pending socket connection "
101     "queue size");
102 static int numopensockets;
103 SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
104     &numopensockets, 0, "Number of open sockets");
105 #ifdef ZERO_COPY_SOCKETS
106 /* These aren't static because they're used in other files. */
107 int so_zero_copy_send = 1;
108 int so_zero_copy_receive = 1;
109 SYSCTL_NODE(_kern_ipc, OID_AUTO, zero_copy, CTLFLAG_RD, 0,
110     "Zero copy controls");
111 SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, receive, CTLFLAG_RW,
112     &so_zero_copy_receive, 0, "Enable zero copy receive");
113 SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, send, CTLFLAG_RW,
114     &so_zero_copy_send, 0, "Enable zero copy send");
115 #endif /* ZERO_COPY_SOCKETS */
116 
117 /*
118  * accept_mtx locks down per-socket fields relating to accept queues.  See
119  * socketvar.h for an annotation of the protected fields of struct socket.
120  */
121 struct mtx accept_mtx;
122 MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);
123 
124 /*
125  * so_global_mtx protects so_gencnt, numopensockets, and the per-socket
126  * so_gencnt field.
127  */
128 static struct mtx so_global_mtx;
129 MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_global", MTX_DEF);
130 
131 /*
132  * Socket operation routines.
133  * These routines are called by the routines in
134  * sys_socket.c or from a system process, and
135  * implement the semantics of socket operations by
136  * switching out to the protocol specific routines.
137  */
138 
139 /*
140  * Get a socket structure from our zone, and initialize it.
141  * Note that it would probably be better to allocate socket
142  * and PCB at the same time, but I'm not convinced that all
143  * the protocols can be easily modified to do this.
144  *
145  * soalloc() returns a socket with a ref count of 0.
146  */
147 struct socket *
148 soalloc(int mflags)
149 {
150 	struct socket *so;
151 
152 	so = uma_zalloc(socket_zone, mflags | M_ZERO);
153 	if (so != NULL) {
154 #ifdef MAC
155 		if (mac_init_socket(so, mflags) != 0) {
156 			uma_zfree(socket_zone, so);
157 			return (NULL);
158 		}
159 #endif
160 		SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
161 		SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
162 		/* sx_init(&so->so_sxlock, "socket sxlock"); */
163 		TAILQ_INIT(&so->so_aiojobq);
164 		mtx_lock(&so_global_mtx);
165 		so->so_gencnt = ++so_gencnt;
166 		++numopensockets;
167 		mtx_unlock(&so_global_mtx);
168 	}
169 	return (so);
170 }
171 
172 /*
173  * socreate returns a socket with a ref count of 1.  The socket should be
174  * closed with soclose().
175  */
176 int
177 socreate(dom, aso, type, proto, cred, td)
178 	int dom;
179 	struct socket **aso;
180 	int type;
181 	int proto;
182 	struct ucred *cred;
183 	struct thread *td;
184 {
185 	struct protosw *prp;
186 	struct socket *so;
187 	int error;
188 
189 	if (proto)
190 		prp = pffindproto(dom, proto, type);
191 	else
192 		prp = pffindtype(dom, type);
193 
194 	if (prp == NULL || prp->pr_usrreqs->pru_attach == NULL ||
195 	    prp->pr_usrreqs->pru_attach == pru_attach_notsupp)
196 		return (EPROTONOSUPPORT);
197 
198 	if (jailed(cred) && jail_socket_unixiproute_only &&
199 	    prp->pr_domain->dom_family != PF_LOCAL &&
200 	    prp->pr_domain->dom_family != PF_INET &&
201 	    prp->pr_domain->dom_family != PF_ROUTE) {
202 		return (EPROTONOSUPPORT);
203 	}
204 
205 	if (prp->pr_type != type)
206 		return (EPROTOTYPE);
207 	so = soalloc(M_WAITOK);
208 	if (so == NULL)
209 		return (ENOBUFS);
210 
211 	TAILQ_INIT(&so->so_incomp);
212 	TAILQ_INIT(&so->so_comp);
213 	so->so_type = type;
214 	so->so_cred = crhold(cred);
215 	so->so_proto = prp;
216 #ifdef MAC
217 	mac_create_socket(cred, so);
218 #endif
219 	SOCK_LOCK(so);
220 	knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv));
221 	knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd));
222 	soref(so);
223 	SOCK_UNLOCK(so);
224 	error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
225 	if (error) {
226 		ACCEPT_LOCK();
227 		SOCK_LOCK(so);
228 		so->so_state |= SS_NOFDREF;
229 		sorele(so);
230 		return (error);
231 	}
232 	*aso = so;
233 	return (0);
234 }
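/*
 * Example usage (a sketch, not part of the original code): a kernel
 * consumer might pair socreate() with soclose() roughly as follows,
 * where 'td' is the calling thread and error handling is abbreviated:
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(PF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
 *	    td->td_ucred, td);
 *	if (error == 0)
 *		error = soclose(so);
 */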
235 
236 int
237 sobind(so, nam, td)
238 	struct socket *so;
239 	struct sockaddr *nam;
240 	struct thread *td;
241 {
242 
243 	return ((*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td));
244 }
245 
246 void
247 sodealloc(struct socket *so)
248 {
249 
250 	KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
251 	mtx_lock(&so_global_mtx);
252 	so->so_gencnt = ++so_gencnt;
253 	mtx_unlock(&so_global_mtx);
254 	if (so->so_rcv.sb_hiwat)
255 		(void)chgsbsize(so->so_cred->cr_uidinfo,
256 		    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
257 	if (so->so_snd.sb_hiwat)
258 		(void)chgsbsize(so->so_cred->cr_uidinfo,
259 		    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
260 #ifdef INET
261 	/* remove accept filter if one is present. */
262 	if (so->so_accf != NULL)
263 		do_setopt_accept_filter(so, NULL);
264 #endif
265 #ifdef MAC
266 	mac_destroy_socket(so);
267 #endif
268 	crfree(so->so_cred);
269 	SOCKBUF_LOCK_DESTROY(&so->so_snd);
270 	SOCKBUF_LOCK_DESTROY(&so->so_rcv);
271 	/* sx_destroy(&so->so_sxlock); */
272 	uma_zfree(socket_zone, so);
273 	mtx_lock(&so_global_mtx);
274 	--numopensockets;
275 	mtx_unlock(&so_global_mtx);
276 }
277 
278 int
279 solisten(so, backlog, td)
280 	struct socket *so;
281 	int backlog;
282 	struct thread *td;
283 {
284 	int error;
285 
286 	/*
287 	 * XXXRW: Ordering issue here -- perhaps we need to set
288 	 * SO_ACCEPTCONN before the call to pru_listen()?
289 	 * XXXRW: General atomic test-and-set concerns here also.
290 	 */
291 	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
292 			    SS_ISDISCONNECTING))
293 		return (EINVAL);
294 	error = (*so->so_proto->pr_usrreqs->pru_listen)(so, td);
295 	if (error)
296 		return (error);
297 	ACCEPT_LOCK();
298 	if (TAILQ_EMPTY(&so->so_comp)) {
299 		SOCK_LOCK(so);
300 		so->so_options |= SO_ACCEPTCONN;
301 		SOCK_UNLOCK(so);
302 	}
303 	if (backlog < 0 || backlog > somaxconn)
304 		backlog = somaxconn;
305 	so->so_qlimit = backlog;
306 	ACCEPT_UNLOCK();
307 	return (0);
308 }
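/*
 * Example usage (sketch): a kernel caller typically binds and then marks
 * the socket as listening; 'sin' is a hypothetical sockaddr_in prepared
 * by the caller.  A backlog outside [0, somaxconn] is replaced by
 * somaxconn above.
 *
 *	error = sobind(so, (struct sockaddr *)&sin, td);
 *	if (error == 0)
 *		error = solisten(so, 128, td);
 */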
309 
310 /*
311  * Attempt to free a socket.  This should really be sotryfree().
312  *
313  * We free the socket if the protocol is no longer interested in the socket,
314  * there's no file descriptor reference, and the refcount is 0.  While the
315  * calling macro sotryfree() tests the refcount, sofree() has to test it
316  * again as it's possible to race with an accept()ing thread if the socket is
317  * in a listen queue of a listen socket, as being in the listen queue
318  * doesn't elevate the reference count.  The caller acquires the accept
319  * mutex early for this test in order to avoid that race.
320  */
321 void
322 sofree(so)
323 	struct socket *so;
324 {
325 	struct socket *head;
326 
327 	ACCEPT_LOCK_ASSERT();
328 	SOCK_LOCK_ASSERT(so);
329 
330 	if (so->so_pcb != NULL || (so->so_state & SS_NOFDREF) == 0 ||
331 	    so->so_count != 0) {
332 		SOCK_UNLOCK(so);
333 		ACCEPT_UNLOCK();
334 		return;
335 	}
336 
337 	head = so->so_head;
338 	if (head != NULL) {
339 		KASSERT((so->so_qstate & SQ_COMP) != 0 ||
340 		    (so->so_qstate & SQ_INCOMP) != 0,
341 		    ("sofree: so_head != NULL, but neither SQ_COMP nor "
342 		    "SQ_INCOMP"));
343 		KASSERT((so->so_qstate & SQ_COMP) == 0 ||
344 		    (so->so_qstate & SQ_INCOMP) == 0,
345 		    ("sofree: so->so_qstate is SQ_COMP and also SQ_INCOMP"));
346 		/*
347 		 * accept(2) is responsible for draining the completed
348 		 * connection queue and freeing those sockets, so
349 		 * we just return here if this socket is currently
350 		 * on the completed connection queue.  Otherwise,
351 		 * accept(2) may hang after select(2) has indicated
352 		 * that a listening socket was ready.  If it's an
353 		 * incomplete connection, we remove it from the queue
354 		 * and free it; otherwise, it won't be released until
355 		 * the listening socket is closed.
356 		 */
357 		if ((so->so_qstate & SQ_COMP) != 0) {
358 			SOCK_UNLOCK(so);
359 			ACCEPT_UNLOCK();
360 			return;
361 		}
362 		TAILQ_REMOVE(&head->so_incomp, so, so_list);
363 		head->so_incqlen--;
364 		so->so_qstate &= ~SQ_INCOMP;
365 		so->so_head = NULL;
366 	}
367 	KASSERT((so->so_qstate & SQ_COMP) == 0 &&
368 	    (so->so_qstate & SQ_INCOMP) == 0,
369 	    ("sofree: so_head == NULL, but still SQ_COMP(%d) or SQ_INCOMP(%d)",
370 	    so->so_qstate & SQ_COMP, so->so_qstate & SQ_INCOMP));
371 	SOCK_UNLOCK(so);
372 	ACCEPT_UNLOCK();
373 	SOCKBUF_LOCK(&so->so_snd);
374 	so->so_snd.sb_flags |= SB_NOINTR;
375 	(void)sblock(&so->so_snd, M_WAITOK);
376 	/*
377 	 * socantsendmore_locked() drops the socket buffer mutex so that it
378 	 * can safely perform wakeups.  Re-acquire the mutex before
379 	 * continuing.
380 	 */
381 	socantsendmore_locked(so);
382 	SOCKBUF_LOCK(&so->so_snd);
383 	sbunlock(&so->so_snd);
384 	sbrelease_locked(&so->so_snd, so);
385 	SOCKBUF_UNLOCK(&so->so_snd);
386 	sorflush(so);
387 	knlist_destroy(&so->so_rcv.sb_sel.si_note);
388 	knlist_destroy(&so->so_snd.sb_sel.si_note);
389 	sodealloc(so);
390 }
391 
392 /*
393  * Close a socket on last file table reference removal.
394  * Initiate disconnect if connected.
395  * Free socket when disconnect complete.
396  *
397  * This function will sorele() the socket.  Note that soclose() may be
398  * called prior to the ref count reaching zero.  The actual socket
399  * structure will not be freed until the ref count reaches zero.
400  */
401 int
402 soclose(so)
403 	struct socket *so;
404 {
405 	int error = 0;
406 
407 	KASSERT(!(so->so_state & SS_NOFDREF), ("soclose: SS_NOFDREF on enter"));
408 
409 	funsetown(&so->so_sigio);
410 	if (so->so_options & SO_ACCEPTCONN) {
411 		struct socket *sp;
412 		ACCEPT_LOCK();
413 		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
414 			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
415 			so->so_incqlen--;
416 			sp->so_qstate &= ~SQ_INCOMP;
417 			sp->so_head = NULL;
418 			ACCEPT_UNLOCK();
419 			(void) soabort(sp);
420 			ACCEPT_LOCK();
421 		}
422 		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
423 			TAILQ_REMOVE(&so->so_comp, sp, so_list);
424 			so->so_qlen--;
425 			sp->so_qstate &= ~SQ_COMP;
426 			sp->so_head = NULL;
427 			ACCEPT_UNLOCK();
428 			(void) soabort(sp);
429 			ACCEPT_LOCK();
430 		}
431 		ACCEPT_UNLOCK();
432 	}
433 	if (so->so_pcb == NULL)
434 		goto discard;
435 	if (so->so_state & SS_ISCONNECTED) {
436 		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
437 			error = sodisconnect(so);
438 			if (error)
439 				goto drop;
440 		}
441 		if (so->so_options & SO_LINGER) {
442 			if ((so->so_state & SS_ISDISCONNECTING) &&
443 			    (so->so_state & SS_NBIO))
444 				goto drop;
445 			while (so->so_state & SS_ISCONNECTED) {
446 				error = tsleep(&so->so_timeo,
447 				    PSOCK | PCATCH, "soclos", so->so_linger * hz);
448 				if (error)
449 					break;
450 			}
451 		}
452 	}
453 drop:
454 	if (so->so_pcb != NULL) {
455 		int error2 = (*so->so_proto->pr_usrreqs->pru_detach)(so);
456 		if (error == 0)
457 			error = error2;
458 	}
459 discard:
460 	ACCEPT_LOCK();
461 	SOCK_LOCK(so);
462 	KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF"));
463 	so->so_state |= SS_NOFDREF;
464 	sorele(so);
465 	return (error);
466 }
467 
468 /*
469  * soabort() must not be called with any socket locks held, as it calls
470  * into the protocol, which will call back into the socket code causing
471  * it to acquire additional socket locks that may cause recursion or lock
472  * order reversals.
473  */
474 int
475 soabort(so)
476 	struct socket *so;
477 {
478 	int error;
479 
480 	error = (*so->so_proto->pr_usrreqs->pru_abort)(so);
481 	if (error) {
482 		ACCEPT_LOCK();
483 		SOCK_LOCK(so);
484 		sotryfree(so);	/* note: does not decrement the ref count */
485 		return error;
486 	}
487 	return (0);
488 }
489 
490 int
491 soaccept(so, nam)
492 	struct socket *so;
493 	struct sockaddr **nam;
494 {
495 	int error;
496 
497 	SOCK_LOCK(so);
498 	KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF"));
499 	so->so_state &= ~SS_NOFDREF;
500 	SOCK_UNLOCK(so);
501 	error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
502 	return (error);
503 }
504 
505 int
506 soconnect(so, nam, td)
507 	struct socket *so;
508 	struct sockaddr *nam;
509 	struct thread *td;
510 {
511 	int error;
512 
513 	if (so->so_options & SO_ACCEPTCONN)
514 		return (EOPNOTSUPP);
515 	/*
516 	 * If protocol is connection-based, can only connect once.
517 	 * Otherwise, if connected, try to disconnect first.
518 	 * This allows user to disconnect by connecting to, e.g.,
519 	 * a null address.
520 	 */
521 	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
522 	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
523 	    (error = sodisconnect(so)))) {
524 		error = EISCONN;
525 	} else {
526 		SOCK_LOCK(so);
527 		/*
528 		 * Prevent accumulated error from previous connection
529 		 * from biting us.
530 		 */
531 		so->so_error = 0;
532 		SOCK_UNLOCK(so);
533 		error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, td);
534 	}
535 
536 	return (error);
537 }
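/*
 * Example usage (sketch): initiating a connection from kernel code, where
 * 'sa' is a hypothetical destination address prepared by the caller:
 *
 *	error = soconnect(so, sa, td);
 */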
538 
539 int
540 soconnect2(so1, so2)
541 	struct socket *so1;
542 	struct socket *so2;
543 {
544 
545 	return ((*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2));
546 }
547 
548 int
549 sodisconnect(so)
550 	struct socket *so;
551 {
552 	int error;
553 
554 	if ((so->so_state & SS_ISCONNECTED) == 0)
555 		return (ENOTCONN);
556 	if (so->so_state & SS_ISDISCONNECTING)
557 		return (EALREADY);
558 	error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
559 	return (error);
560 }
561 
562 #define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
563 /*
564  * Send on a socket.
565  * If send must go all at once and message is larger than
566  * send buffering, then hard error.
567  * Lock against other senders.
568  * If must go all at once and not enough room now, then
569  * inform user that this would block and do nothing.
570  * Otherwise, if nonblocking, send as much as possible.
571  * The data to be sent is described by "uio" if nonzero,
572  * otherwise by the mbuf chain "top" (which must be null
573  * if uio is not).  Data provided in mbuf chain must be small
574  * enough to send all at once.
575  *
576  * Returns nonzero on error, timeout or signal; callers
577  * must check for short counts if EINTR/ERESTART are returned.
578  * Data and control buffers are freed on return.
579  */
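/*
 * Example usage (sketch): sending a caller-built mbuf chain on a datagram
 * socket; 'm' is a hypothetical mbuf chain with a valid pkthdr and 'sa'
 * the destination.  uio is NULL because "top" carries the data; the data
 * and any control mbufs are consumed even on error, per the comment above:
 *
 *	error = sosend(so, sa, NULL, m, NULL, 0, td);
 */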
580 
581 #ifdef ZERO_COPY_SOCKETS
582 struct so_zerocopy_stats{
583 	int size_ok;
584 	int align_ok;
585 	int found_ifp;
586 };
587 struct so_zerocopy_stats so_zerocp_stats = {0,0,0};
588 #include <netinet/in.h>
589 #include <net/route.h>
590 #include <netinet/in_pcb.h>
591 #include <vm/vm.h>
592 #include <vm/vm_page.h>
593 #include <vm/vm_object.h>
594 #endif /*ZERO_COPY_SOCKETS*/
595 
596 int
597 sosend(so, addr, uio, top, control, flags, td)
598 	struct socket *so;
599 	struct sockaddr *addr;
600 	struct uio *uio;
601 	struct mbuf *top;
602 	struct mbuf *control;
603 	int flags;
604 	struct thread *td;
605 {
606 	struct mbuf **mp;
607 	struct mbuf *m;
608 	long space, len = 0, resid;
609 	int clen = 0, error, dontroute;
610 	int atomic = sosendallatonce(so) || top;
611 #ifdef ZERO_COPY_SOCKETS
612 	int cow_send;
613 #endif /* ZERO_COPY_SOCKETS */
614 
615 	if (uio != NULL)
616 		resid = uio->uio_resid;
617 	else
618 		resid = top->m_pkthdr.len;
619 	/*
620 	 * In theory resid should be unsigned.
621 	 * However, space must be signed, as it might be less than 0
622 	 * if we over-committed, and we must use a signed comparison
623 	 * of space and resid.  On the other hand, a negative resid
624 	 * causes us to loop sending 0-length segments to the protocol.
625 	 *
626 	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
627 	 * type sockets since that's an error.
628 	 */
629 	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
630 		error = EINVAL;
631 		goto out;
632 	}
633 
634 	dontroute =
635 	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
636 	    (so->so_proto->pr_flags & PR_ATOMIC);
637 	if (td != NULL)
638 		td->td_proc->p_stats->p_ru.ru_msgsnd++;
639 	if (control != NULL)
640 		clen = control->m_len;
641 #define	snderr(errno)	{ error = (errno); goto release; }
642 
643 	SOCKBUF_LOCK(&so->so_snd);
644 restart:
645 	SOCKBUF_LOCK_ASSERT(&so->so_snd);
646 	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
647 	if (error)
648 		goto out_locked;
649 	do {
650 		SOCKBUF_LOCK_ASSERT(&so->so_snd);
651 		if (so->so_snd.sb_state & SBS_CANTSENDMORE)
652 			snderr(EPIPE);
653 		if (so->so_error) {
654 			error = so->so_error;
655 			so->so_error = 0;
656 			goto release;
657 		}
658 		if ((so->so_state & SS_ISCONNECTED) == 0) {
659 			/*
660 			 * `sendto' and `sendmsg' are allowed on a connection-
661 			 * based socket if it supports implied connect.
662 			 * Return ENOTCONN if not connected and no address is
663 			 * supplied.
664 			 */
665 			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
666 			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
667 				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
668 				    !(resid == 0 && clen != 0))
669 					snderr(ENOTCONN);
670 			} else if (addr == NULL)
671 			    snderr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
672 				   ENOTCONN : EDESTADDRREQ);
673 		}
674 		space = sbspace(&so->so_snd);
675 		if (flags & MSG_OOB)
676 			space += 1024;
677 		if ((atomic && resid > so->so_snd.sb_hiwat) ||
678 		    clen > so->so_snd.sb_hiwat)
679 			snderr(EMSGSIZE);
680 		if (space < resid + clen &&
681 		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
682 			if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO))
683 				snderr(EWOULDBLOCK);
684 			sbunlock(&so->so_snd);
685 			error = sbwait(&so->so_snd);
686 			if (error)
687 				goto out_locked;
688 			goto restart;
689 		}
690 		SOCKBUF_UNLOCK(&so->so_snd);
691 		mp = &top;
692 		space -= clen;
693 		do {
694 		    if (uio == NULL) {
695 			/*
696 			 * Data is prepackaged in "top".
697 			 */
698 			resid = 0;
699 			if (flags & MSG_EOR)
700 				top->m_flags |= M_EOR;
701 		    } else do {
702 #ifdef ZERO_COPY_SOCKETS
703 			cow_send = 0;
704 #endif /* ZERO_COPY_SOCKETS */
705 			if (resid >= MINCLSIZE) {
706 #ifdef ZERO_COPY_SOCKETS
707 				if (top == NULL) {
708 					MGETHDR(m, M_TRYWAIT, MT_DATA);
709 					if (m == NULL) {
710 						error = ENOBUFS;
711 						SOCKBUF_LOCK(&so->so_snd);
712 						goto release;
713 					}
714 					m->m_pkthdr.len = 0;
715 					m->m_pkthdr.rcvif = (struct ifnet *)0;
716 				} else {
717 					MGET(m, M_TRYWAIT, MT_DATA);
718 					if (m == NULL) {
719 						error = ENOBUFS;
720 						SOCKBUF_LOCK(&so->so_snd);
721 						goto release;
722 					}
723 				}
724 				if (so_zero_copy_send &&
725 				    resid>=PAGE_SIZE &&
726 				    space>=PAGE_SIZE &&
727 				    uio->uio_iov->iov_len>=PAGE_SIZE) {
728 					so_zerocp_stats.size_ok++;
729 					if (!((vm_offset_t)
730 					  uio->uio_iov->iov_base & PAGE_MASK)){
731 						so_zerocp_stats.align_ok++;
732 						cow_send = socow_setup(m, uio);
733 					}
734 				}
735 				if (!cow_send) {
736 					MCLGET(m, M_TRYWAIT);
737 					if ((m->m_flags & M_EXT) == 0) {
738 						m_free(m);
739 						m = NULL;
740 					} else {
741 						len = min(min(MCLBYTES, resid), space);
742 					}
743 				} else
744 					len = PAGE_SIZE;
745 #else /* ZERO_COPY_SOCKETS */
746 				if (top == NULL) {
747 					m = m_getcl(M_TRYWAIT, MT_DATA, M_PKTHDR);
748 					m->m_pkthdr.len = 0;
749 					m->m_pkthdr.rcvif = (struct ifnet *)0;
750 				} else
751 					m = m_getcl(M_TRYWAIT, MT_DATA, 0);
752 				len = min(min(MCLBYTES, resid), space);
753 #endif /* ZERO_COPY_SOCKETS */
754 			} else {
755 				if (top == NULL) {
756 					m = m_gethdr(M_TRYWAIT, MT_DATA);
757 					m->m_pkthdr.len = 0;
758 					m->m_pkthdr.rcvif = (struct ifnet *)0;
759 
760 					len = min(min(MHLEN, resid), space);
761 					/*
762 					 * For datagram protocols, leave room
763 					 * for protocol headers in first mbuf.
764 					 */
765 					if (atomic && m && len < MHLEN)
766 						MH_ALIGN(m, len);
767 				} else {
768 					m = m_get(M_TRYWAIT, MT_DATA);
769 					len = min(min(MLEN, resid), space);
770 				}
771 			}
772 			if (m == NULL) {
773 				error = ENOBUFS;
774 				SOCKBUF_LOCK(&so->so_snd);
775 				goto release;
776 			}
777 
778 			space -= len;
779 #ifdef ZERO_COPY_SOCKETS
780 			if (cow_send)
781 				error = 0;
782 			else
783 #endif /* ZERO_COPY_SOCKETS */
784 			error = uiomove(mtod(m, void *), (int)len, uio);
785 			resid = uio->uio_resid;
786 			m->m_len = len;
787 			*mp = m;
788 			top->m_pkthdr.len += len;
789 			if (error) {
790 				SOCKBUF_LOCK(&so->so_snd);
791 				goto release;
792 			}
793 			mp = &m->m_next;
794 			if (resid <= 0) {
795 				if (flags & MSG_EOR)
796 					top->m_flags |= M_EOR;
797 				break;
798 			}
799 		    } while (space > 0 && atomic);
800 		    if (dontroute) {
801 			    SOCK_LOCK(so);
802 			    so->so_options |= SO_DONTROUTE;
803 			    SOCK_UNLOCK(so);
804 		    }
805 		    /*
806 		     * XXX all the SBS_CANTSENDMORE checks previously
807 		     * done could be out of date.  We could have received
808 		     * a reset packet in an interrupt or maybe we slept
809 		     * while doing page faults in uiomove() etc. We could
810 		     * probably recheck again inside the locking protection
811 		     * here, but there are probably other places that this
812 		     * also happens.  We must rethink this.
813 		     */
814 		    error = (*so->so_proto->pr_usrreqs->pru_send)(so,
815 			(flags & MSG_OOB) ? PRUS_OOB :
816 			/*
817 			 * If the user set MSG_EOF, the protocol
818 			 * understands this flag and nothing left to
819 			 * send then use PRU_SEND_EOF instead of PRU_SEND.
820 			 */
821 			((flags & MSG_EOF) &&
822 			 (so->so_proto->pr_flags & PR_IMPLOPCL) &&
823 			 (resid <= 0)) ?
824 				PRUS_EOF :
825 			/* If there is more to send set PRUS_MORETOCOME */
826 			(resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
827 			top, addr, control, td);
828 		    if (dontroute) {
829 			    SOCK_LOCK(so);
830 			    so->so_options &= ~SO_DONTROUTE;
831 			    SOCK_UNLOCK(so);
832 		    }
833 		    clen = 0;
834 		    control = NULL;
835 		    top = NULL;
836 		    mp = &top;
837 		    if (error) {
838 			SOCKBUF_LOCK(&so->so_snd);
839 			goto release;
840 		    }
841 		} while (resid && space > 0);
842 		SOCKBUF_LOCK(&so->so_snd);
843 	} while (resid);
844 
845 release:
846 	SOCKBUF_LOCK_ASSERT(&so->so_snd);
847 	sbunlock(&so->so_snd);
848 out_locked:
849 	SOCKBUF_LOCK_ASSERT(&so->so_snd);
850 	SOCKBUF_UNLOCK(&so->so_snd);
851 out:
852 	if (top != NULL)
853 		m_freem(top);
854 	if (control != NULL)
855 		m_freem(control);
856 	return (error);
857 }
858 
859 /*
860  * The part of soreceive() that implements reading non-inline out-of-band
861  * data from a socket.  For more complete comments, see soreceive(), from
862  * which this code originated.
863  *
864  * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is
865  * unable to return an mbuf chain to the caller.
866  */
867 static int
868 soreceive_rcvoob(so, uio, flags)
869 	struct socket *so;
870 	struct uio *uio;
871 	int flags;
872 {
873 	struct protosw *pr = so->so_proto;
874 	struct mbuf *m;
875 	int error;
876 
877 	KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0"));
878 
879 	m = m_get(M_TRYWAIT, MT_DATA);
880 	if (m == NULL)
881 		return (ENOBUFS);
882 	error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
883 	if (error)
884 		goto bad;
885 	do {
886 #ifdef ZERO_COPY_SOCKETS
887 		if (so_zero_copy_receive) {
888 			int disposable;
889 
890 			if ((m->m_flags & M_EXT)
891 			 && (m->m_ext.ext_type == EXT_DISPOSABLE))
892 				disposable = 1;
893 			else
894 				disposable = 0;
895 
896 			error = uiomoveco(mtod(m, void *),
897 					  min(uio->uio_resid, m->m_len),
898 					  uio, disposable);
899 		} else
900 #endif /* ZERO_COPY_SOCKETS */
901 		error = uiomove(mtod(m, void *),
902 		    (int) min(uio->uio_resid, m->m_len), uio);
903 		m = m_free(m);
904 	} while (uio->uio_resid && error == 0 && m);
905 bad:
906 	if (m != NULL)
907 		m_freem(m);
908 	return (error);
909 }
910 
911 /*
912  * Following replacement or removal of the first mbuf on the first mbuf chain
913  * of a socket buffer, push necessary state changes back into the socket
914  * buffer so that other consumers see the values consistently.  'nextrecord'
915  * is the caller's locally stored value of the original value of
916  * sb->sb_mb->m_nextpkt which must be restored when the lead mbuf changes.
917  * NOTE: 'nextrecord' may be NULL.
918  */
919 static __inline void
920 sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord)
921 {
922 
923 	SOCKBUF_LOCK_ASSERT(sb);
924 	/*
925 	 * First, update for the new value of nextrecord.  If necessary, make
926 	 * it the first record.
927 	 */
928 	if (sb->sb_mb != NULL)
929 		sb->sb_mb->m_nextpkt = nextrecord;
930 	else
931 		sb->sb_mb = nextrecord;
932 
933 	/*
934 	 * Now update any dependent socket buffer fields to reflect the new
935 	 * state.  This is an expanded inline of SB_EMPTY_FIXUP(), with the
936 	 * addition of a second clause that takes care of the case where
937 	 * sb_mb has been updated, but remains the last record.
938 	 */
939 	if (sb->sb_mb == NULL) {
940 		sb->sb_mbtail = NULL;
941 		sb->sb_lastrecord = NULL;
942 	} else if (sb->sb_mb->m_nextpkt == NULL)
943 		sb->sb_lastrecord = sb->sb_mb;
944 }
945 
946 
947 /*
948  * Implement receive operations on a socket.
949  * We depend on the way that records are added to the sockbuf
950  * by sbappend*.  In particular, each record (mbufs linked through m_next)
951  * must begin with an address if the protocol so specifies,
952  * followed by an optional mbuf or mbufs containing ancillary data,
953  * and then zero or more mbufs of data.
954  * In order to avoid holding the socket buffer mutex for the entire time
955  * here, we release it while doing the actual copy to user space.
956  * Because new data may be appended while the mutex is dropped,
957  * we must maintain consistency of the sockbuf during that time.
958  *
959  * The caller may receive the data as a single mbuf chain by supplying
960  * an mbuf **mp0 for use in returning the chain.  The uio is then used
961  * only for the count in uio_resid.
962  */
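/*
 * Example usage (sketch): a non-blocking kernel read into a caller-prepared
 * uio, discarding the source address and any control data; MSG_* results
 * are returned through 'flags':
 *
 *	int flags = MSG_DONTWAIT;
 *
 *	error = soreceive(so, NULL, uio, NULL, NULL, &flags);
 */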
963 int
964 soreceive(so, psa, uio, mp0, controlp, flagsp)
965 	struct socket *so;
966 	struct sockaddr **psa;
967 	struct uio *uio;
968 	struct mbuf **mp0;
969 	struct mbuf **controlp;
970 	int *flagsp;
971 {
972 	struct mbuf *m, **mp;
973 	int flags, len, error, offset;
974 	struct protosw *pr = so->so_proto;
975 	struct mbuf *nextrecord;
976 	int moff, type = 0;
977 	int orig_resid = uio->uio_resid;
978 
979 	mp = mp0;
980 	if (psa != NULL)
981 		*psa = NULL;
982 	if (controlp != NULL)
983 		*controlp = NULL;
984 	if (flagsp != NULL)
985 		flags = *flagsp &~ MSG_EOR;
986 	else
987 		flags = 0;
988 	if (flags & MSG_OOB)
989 		return (soreceive_rcvoob(so, uio, flags));
990 	if (mp != NULL)
991 		*mp = NULL;
992 	if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
993 		(*pr->pr_usrreqs->pru_rcvd)(so, 0);
994 
995 	SOCKBUF_LOCK(&so->so_rcv);
996 restart:
997 	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
998 	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
999 	if (error)
1000 		goto out;
1001 
1002 	m = so->so_rcv.sb_mb;
1003 	/*
1004 	 * If we have less data than requested, block awaiting more
1005 	 * (subject to any timeout), provided MSG_DONTWAIT is not set,
1006 	 * if either:
1007 	 *   1. the current count is less than the low water mark, or
1008 	 *   2. MSG_WAITALL is set, and it is possible to do the entire
1009 	 *	receive operation at once if we block (resid <= hiwat).
1010 	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
1011 	 * we have to do the receive in sections, and thus risk returning
1012 	 * a short count if a timeout or signal occurs after we start.
1013 	 */
1014 	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
1015 	    so->so_rcv.sb_cc < uio->uio_resid) &&
1016 	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
1017 	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
1018 	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
1019 		KASSERT(m != NULL || !so->so_rcv.sb_cc,
1020 		    ("receive: m == %p so->so_rcv.sb_cc == %u",
1021 		    m, so->so_rcv.sb_cc));
1022 		if (so->so_error) {
1023 			if (m != NULL)
1024 				goto dontblock;
1025 			error = so->so_error;
1026 			if ((flags & MSG_PEEK) == 0)
1027 				so->so_error = 0;
1028 			goto release;
1029 		}
1030 		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1031 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
1032 			if (m)
1033 				goto dontblock;
1034 			else
1035 				goto release;
1036 		}
1037 		for (; m != NULL; m = m->m_next)
1038 			if (m->m_type == MT_OOBDATA  || (m->m_flags & M_EOR)) {
1039 				m = so->so_rcv.sb_mb;
1040 				goto dontblock;
1041 			}
1042 		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
1043 		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
1044 			error = ENOTCONN;
1045 			goto release;
1046 		}
1047 		if (uio->uio_resid == 0)
1048 			goto release;
1049 		if ((so->so_state & SS_NBIO) ||
1050 		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
1051 			error = EWOULDBLOCK;
1052 			goto release;
1053 		}
1054 		SBLASTRECORDCHK(&so->so_rcv);
1055 		SBLASTMBUFCHK(&so->so_rcv);
1056 		sbunlock(&so->so_rcv);
1057 		error = sbwait(&so->so_rcv);
1058 		if (error)
1059 			goto out;
1060 		goto restart;
1061 	}
1062 dontblock:
1063 	/*
1064 	 * From this point onward, we maintain 'nextrecord' as a cache of the
1065 	 * pointer to the next record in the socket buffer.  We must keep the
1066 	 * various socket buffer pointers and local stack versions of the
1067 	 * pointers in sync, pushing out modifications before dropping the
1068 	 * socket buffer mutex, and re-reading them when picking it up.
1069 	 *
1070 	 * Otherwise, we will race with the network stack appending new data
1071 	 * or records onto the socket buffer by using inconsistent/stale
1072 	 * versions of the field, possibly resulting in socket buffer
1073 	 * corruption.
1074 	 *
1075 	 * By holding the high-level sblock(), we prevent simultaneous
1076 	 * readers from pulling off the front of the socket buffer.
1077 	 */
1078 	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1079 	if (uio->uio_td)
1080 		uio->uio_td->td_proc->p_stats->p_ru.ru_msgrcv++;
1081 	KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
1082 	SBLASTRECORDCHK(&so->so_rcv);
1083 	SBLASTMBUFCHK(&so->so_rcv);
1084 	nextrecord = m->m_nextpkt;
1085 	if (pr->pr_flags & PR_ADDR) {
1086 		KASSERT(m->m_type == MT_SONAME,
1087 		    ("m->m_type == %d", m->m_type));
1088 		orig_resid = 0;
1089 		if (psa != NULL)
1090 			*psa = sodupsockaddr(mtod(m, struct sockaddr *),
1091 			    M_NOWAIT);
1092 		if (flags & MSG_PEEK) {
1093 			m = m->m_next;
1094 		} else {
1095 			sbfree(&so->so_rcv, m);
1096 			so->so_rcv.sb_mb = m_free(m);
1097 			m = so->so_rcv.sb_mb;
1098 			sockbuf_pushsync(&so->so_rcv, nextrecord);
1099 		}
1100 	}
1101 
1102 	/*
1103 	 * Process one or more MT_CONTROL mbufs present before any data mbufs
1104 	 * in the first mbuf chain on the socket buffer.  If MSG_PEEK, we
1105 	 * just copy the data; if !MSG_PEEK, we call into the protocol to
1106 	 * perform externalization (or freeing if controlp == NULL).
1107 	 */
1108 	if (m != NULL && m->m_type == MT_CONTROL) {
1109 		struct mbuf *cm = NULL, *cmn;
1110 		struct mbuf **cme = &cm;
1111 
1112 		do {
1113 			if (flags & MSG_PEEK) {
1114 				if (controlp != NULL) {
1115 					*controlp = m_copy(m, 0, m->m_len);
1116 					controlp = &(*controlp)->m_next;
1117 				}
1118 				m = m->m_next;
1119 			} else {
1120 				sbfree(&so->so_rcv, m);
1121 				so->so_rcv.sb_mb = m->m_next;
1122 				m->m_next = NULL;
1123 				*cme = m;
1124 				cme = &(*cme)->m_next;
1125 				m = so->so_rcv.sb_mb;
1126 			}
1127 		} while (m != NULL && m->m_type == MT_CONTROL);
1128 		if ((flags & MSG_PEEK) == 0)
1129 			sockbuf_pushsync(&so->so_rcv, nextrecord);
1130 		while (cm != NULL) {
1131 			cmn = cm->m_next;
1132 			cm->m_next = NULL;
1133 			if (pr->pr_domain->dom_externalize != NULL) {
1134 				SOCKBUF_UNLOCK(&so->so_rcv);
1135 				error = (*pr->pr_domain->dom_externalize)
1136 				    (cm, controlp);
1137 				SOCKBUF_LOCK(&so->so_rcv);
1138 			} else if (controlp != NULL)
1139 				*controlp = cm;
1140 			else
1141 				m_freem(cm);
1142 			if (controlp != NULL) {
1143 				orig_resid = 0;
1144 				while (*controlp != NULL)
1145 					controlp = &(*controlp)->m_next;
1146 			}
1147 			cm = cmn;
1148 		}
1149 		nextrecord = so->so_rcv.sb_mb->m_nextpkt;
1150 		orig_resid = 0;
1151 	}
1152 	if (m != NULL) {
1153 		if ((flags & MSG_PEEK) == 0) {
1154 			KASSERT(m->m_nextpkt == nextrecord,
1155 			    ("soreceive: post-control, nextrecord !sync"));
1156 			if (nextrecord == NULL) {
1157 				KASSERT(so->so_rcv.sb_mb == m,
1158 				    ("soreceive: post-control, sb_mb!=m"));
1159 				KASSERT(so->so_rcv.sb_lastrecord == m,
1160 				    ("soreceive: post-control, lastrecord!=m"));
1161 			}
1162 		}
1163 		type = m->m_type;
1164 		if (type == MT_OOBDATA)
1165 			flags |= MSG_OOB;
1166 	} else {
1167 		if ((flags & MSG_PEEK) == 0) {
1168 			KASSERT(so->so_rcv.sb_mb == nextrecord,
1169 			    ("soreceive: sb_mb != nextrecord"));
1170 			if (so->so_rcv.sb_mb == NULL) {
1171 				KASSERT(so->so_rcv.sb_lastrecord == NULL,
1172 				    ("soreceive: sb_lastrecord != NULL"));
1173 			}
1174 		}
1175 	}
1176 	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1177 	SBLASTRECORDCHK(&so->so_rcv);
1178 	SBLASTMBUFCHK(&so->so_rcv);
1179 
1180 	/*
1181 	 * Now continue to read any data mbufs off of the head of the socket
1182 	 * buffer until the read request is satisfied.  Note that 'type' is
1183 	 * used to store the type of any mbuf reads that have happened so far
1184 	 * such that soreceive() can stop reading if the type changes, which
1185 	 * causes soreceive() to return either regular data or inline
1186 	 * out-of-band data, but not both, in a single receive operation.
1187 	 */
1188 	moff = 0;
1189 	offset = 0;
1190 	while (m != NULL && uio->uio_resid > 0 && error == 0) {
1191 		/*
1192 		 * If the type of mbuf has changed since the last mbuf
1193 		 * examined ('type'), end the receive operation.
1194 		 */
1195 		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1196 		if (m->m_type == MT_OOBDATA) {
1197 			if (type != MT_OOBDATA)
1198 				break;
1199 		} else if (type == MT_OOBDATA)
1200 			break;
1201 		else
1202 		    KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
1203 			("m->m_type == %d", m->m_type));
1204 		so->so_rcv.sb_state &= ~SBS_RCVATMARK;
1205 		len = uio->uio_resid;
1206 		if (so->so_oobmark && len > so->so_oobmark - offset)
1207 			len = so->so_oobmark - offset;
1208 		if (len > m->m_len - moff)
1209 			len = m->m_len - moff;
1210 		/*
1211 		 * If mp is set, just pass back the mbufs.
1212 		 * Otherwise copy them out via the uio, then free.
1213 		 * The sockbuf must be consistent here (it points to the
1214 		 * current mbuf and to the next record) when we drop the mutex;
1215 		 * we must note any additions to the sockbuf when we
1216 		 * reacquire it.
1217 		 */
1218 		if (mp == NULL) {
1219 			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1220 			SBLASTRECORDCHK(&so->so_rcv);
1221 			SBLASTMBUFCHK(&so->so_rcv);
1222 			SOCKBUF_UNLOCK(&so->so_rcv);
1223 #ifdef ZERO_COPY_SOCKETS
1224 			if (so_zero_copy_receive) {
1225 				int disposable;
1226 
1227 				if ((m->m_flags & M_EXT)
1228 				 && (m->m_ext.ext_type == EXT_DISPOSABLE))
1229 					disposable = 1;
1230 				else
1231 					disposable = 0;
1232 
1233 				error = uiomoveco(mtod(m, char *) + moff,
1234 						  (int)len, uio,
1235 						  disposable);
1236 			} else
1237 #endif /* ZERO_COPY_SOCKETS */
1238 			error = uiomove(mtod(m, char *) + moff, (int)len, uio);
1239 			SOCKBUF_LOCK(&so->so_rcv);
1240 			if (error)
1241 				goto release;
1242 		} else
1243 			uio->uio_resid -= len;
1244 		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1245 		if (len == m->m_len - moff) {
1246 			if (m->m_flags & M_EOR)
1247 				flags |= MSG_EOR;
1248 			if (flags & MSG_PEEK) {
1249 				m = m->m_next;
1250 				moff = 0;
1251 			} else {
1252 				nextrecord = m->m_nextpkt;
1253 				sbfree(&so->so_rcv, m);
1254 				if (mp != NULL) {
1255 					*mp = m;
1256 					mp = &m->m_next;
1257 					so->so_rcv.sb_mb = m = m->m_next;
1258 					*mp = NULL;
1259 				} else {
1260 					so->so_rcv.sb_mb = m_free(m);
1261 					m = so->so_rcv.sb_mb;
1262 				}
1263 				if (m != NULL) {
1264 					m->m_nextpkt = nextrecord;
1265 					if (nextrecord == NULL)
1266 						so->so_rcv.sb_lastrecord = m;
1267 				} else {
1268 					so->so_rcv.sb_mb = nextrecord;
1269 					SB_EMPTY_FIXUP(&so->so_rcv);
1270 				}
1271 				SBLASTRECORDCHK(&so->so_rcv);
1272 				SBLASTMBUFCHK(&so->so_rcv);
1273 			}
1274 		} else {
1275 			if (flags & MSG_PEEK)
1276 				moff += len;
1277 			else {
1278 				if (mp != NULL) {
1279 					int copy_flag;
1280 
1281 					if (flags & MSG_DONTWAIT)
1282 						copy_flag = M_DONTWAIT;
1283 					else
1284 						copy_flag = M_TRYWAIT;
1285 					if (copy_flag == M_TRYWAIT)
1286 						SOCKBUF_UNLOCK(&so->so_rcv);
1287 					*mp = m_copym(m, 0, len, copy_flag);
1288 					if (copy_flag == M_TRYWAIT)
1289 						SOCKBUF_LOCK(&so->so_rcv);
1290 					if (*mp == NULL) {
1291 						/*
1292 						 * m_copym() couldn't allocate an mbuf.
1293 						 * Adjust uio_resid back (it was adjusted
1294 						 * down by len bytes, which we didn't end
1295 						 * up "copying" over).
1296 						 */
1297 						uio->uio_resid += len;
1298 						break;
1299 					}
1300 				}
1301 				m->m_data += len;
1302 				m->m_len -= len;
1303 				so->so_rcv.sb_cc -= len;
1304 			}
1305 		}
1306 		SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1307 		if (so->so_oobmark) {
1308 			if ((flags & MSG_PEEK) == 0) {
1309 				so->so_oobmark -= len;
1310 				if (so->so_oobmark == 0) {
1311 					so->so_rcv.sb_state |= SBS_RCVATMARK;
1312 					break;
1313 				}
1314 			} else {
1315 				offset += len;
1316 				if (offset == so->so_oobmark)
1317 					break;
1318 			}
1319 		}
1320 		if (flags & MSG_EOR)
1321 			break;
1322 		/*
1323 		 * If the MSG_WAITALL flag is set (for non-atomic socket),
1324 		 * we must not quit until "uio->uio_resid == 0" or an error
1325 		 * termination.  If a signal/timeout occurs, return
1326 		 * with a short count but without error.
1327 		 * Keep sockbuf locked against other readers.
1328 		 */
1329 		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
1330 		    !sosendallatonce(so) && nextrecord == NULL) {
1331 			SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1332 			if (so->so_error || so->so_rcv.sb_state & SBS_CANTRCVMORE)
1333 				break;
1334 			/*
1335 			 * Notify the protocol that some data has been
1336 			 * drained before blocking.
1337 			 */
1338 			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb != NULL) {
1339 				SOCKBUF_UNLOCK(&so->so_rcv);
1340 				(*pr->pr_usrreqs->pru_rcvd)(so, flags);
1341 				SOCKBUF_LOCK(&so->so_rcv);
1342 			}
1343 			SBLASTRECORDCHK(&so->so_rcv);
1344 			SBLASTMBUFCHK(&so->so_rcv);
1345 			error = sbwait(&so->so_rcv);
1346 			if (error)
1347 				goto release;
1348 			m = so->so_rcv.sb_mb;
1349 			if (m != NULL)
1350 				nextrecord = m->m_nextpkt;
1351 		}
1352 	}
1353 
1354 	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1355 	if (m != NULL && pr->pr_flags & PR_ATOMIC) {
1356 		flags |= MSG_TRUNC;
1357 		if ((flags & MSG_PEEK) == 0)
1358 			(void) sbdroprecord_locked(&so->so_rcv);
1359 	}
1360 	if ((flags & MSG_PEEK) == 0) {
1361 		if (m == NULL) {
1362 			/*
1363 			 * First part is an inline SB_EMPTY_FIXUP().  Second
1364 			 * part makes sure sb_lastrecord is up-to-date if
1365 			 * there is still data in the socket buffer.
1366 			 */
1367 			so->so_rcv.sb_mb = nextrecord;
1368 			if (so->so_rcv.sb_mb == NULL) {
1369 				so->so_rcv.sb_mbtail = NULL;
1370 				so->so_rcv.sb_lastrecord = NULL;
1371 			} else if (nextrecord->m_nextpkt == NULL)
1372 				so->so_rcv.sb_lastrecord = nextrecord;
1373 		}
1374 		SBLASTRECORDCHK(&so->so_rcv);
1375 		SBLASTMBUFCHK(&so->so_rcv);
1376 		/*
1377 		 * If soreceive() is being done from the socket callback, then
1378 		 * we don't need to generate an ACK to the peer to update the
1379 		 * window, since an ACK will be generated on return to TCP.
1380 		 */
1381 		if (!(flags & MSG_SOCALLBCK) &&
1382 		    (pr->pr_flags & PR_WANTRCVD) && so->so_pcb) {
1383 			SOCKBUF_UNLOCK(&so->so_rcv);
1384 			(*pr->pr_usrreqs->pru_rcvd)(so, flags);
1385 			SOCKBUF_LOCK(&so->so_rcv);
1386 		}
1387 	}
1388 	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1389 	if (orig_resid == uio->uio_resid && orig_resid &&
1390 	    (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
1391 		sbunlock(&so->so_rcv);
1392 		goto restart;
1393 	}
1394 
1395 	if (flagsp != NULL)
1396 		*flagsp |= flags;
1397 release:
1398 	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1399 	sbunlock(&so->so_rcv);
1400 out:
1401 	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1402 	SOCKBUF_UNLOCK(&so->so_rcv);
1403 	return (error);
1404 }
1405 
1406 int
1407 soshutdown(so, how)
1408 	struct socket *so;
1409 	int how;
1410 {
1411 	struct protosw *pr = so->so_proto;
1412 
1413 	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
1414 		return (EINVAL);
1415 
1416 	if (how != SHUT_WR)
1417 		sorflush(so);
1418 	if (how != SHUT_RD)
1419 		return ((*pr->pr_usrreqs->pru_shutdown)(so));
1420 	return (0);
1421 }
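/*
 * Example usage (sketch): half-closing the send side from kernel code,
 * leaving the receive side readable:
 *
 *	error = soshutdown(so, SHUT_WR);
 */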
1422 
1423 void
1424 sorflush(so)
1425 	struct socket *so;
1426 {
1427 	struct sockbuf *sb = &so->so_rcv;
1428 	struct protosw *pr = so->so_proto;
1429 	struct sockbuf asb;
1430 
1431 	/*
1432 	 * XXXRW: This is quite ugly.  Previously, this code made a copy of
1433 	 * the socket buffer, then zeroed the original to clear the buffer
1434 	 * fields.  However, with mutexes in the socket buffer, this causes
1435 	 * problems.  We only clear the zeroable bits of the original;
1436 	 * however, we have to initialize and destroy the mutex in the copy
1437 	 * so that dom_dispose() and sbrelease() can lock it as needed.
1438 	 */
1439 	SOCKBUF_LOCK(sb);
1440 	sb->sb_flags |= SB_NOINTR;
1441 	(void) sblock(sb, M_WAITOK);
1442 	/*
1443 	 * socantrcvmore_locked() drops the socket buffer mutex so that it
1444 	 * can safely perform wakeups.  Re-acquire the mutex before
1445 	 * continuing.
1446 	 */
1447 	socantrcvmore_locked(so);
1448 	SOCKBUF_LOCK(sb);
1449 	sbunlock(sb);
1450 	/*
1451 	 * Invalidate/clear most of the sockbuf structure, but leave
1452 	 * selinfo and mutex data unchanged.
1453 	 */
1454 	bzero(&asb, offsetof(struct sockbuf, sb_startzero));
1455 	bcopy(&sb->sb_startzero, &asb.sb_startzero,
1456 	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
1457 	bzero(&sb->sb_startzero,
1458 	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
1459 	SOCKBUF_UNLOCK(sb);
1460 
1461 	SOCKBUF_LOCK_INIT(&asb, "so_rcv");
1462 	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
1463 		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
1464 	sbrelease(&asb, so);
1465 	SOCKBUF_LOCK_DESTROY(&asb);
1466 }
1467 
1468 #ifdef INET
1469 static int
1470 do_setopt_accept_filter(so, sopt)
1471 	struct	socket *so;
1472 	struct	sockopt *sopt;
1473 {
1474 	struct accept_filter_arg	*afap;
1475 	struct accept_filter	*afp;
1476 	struct so_accf	*newaf;
1477 	int	error = 0;
1478 
1479 	newaf = NULL;
1480 	afap = NULL;
1481 
1482 	/*
1483 	 * XXXRW: Configuring accept filters should be an atomic test-and-set
1484 	 * operation to prevent races during setup and attach.  There may be
1485 	 * more general issues of racing and ordering here that are not yet
1486 	 * addressed by locking.
1487 	 */
1488 	/* do not set/remove accept filters on non-listening sockets */
1489 	SOCK_LOCK(so);
1490 	if ((so->so_options & SO_ACCEPTCONN) == 0) {
1491 		SOCK_UNLOCK(so);
1492 		return (EINVAL);
1493 	}
1494 
1495 	/* removing the filter */
1496 	if (sopt == NULL) {
1497 		if (so->so_accf != NULL) {
1498 			struct so_accf *af = so->so_accf;
1499 			if (af->so_accept_filter != NULL &&
1500 				af->so_accept_filter->accf_destroy != NULL) {
1501 				af->so_accept_filter->accf_destroy(so);
1502 			}
1503 			if (af->so_accept_filter_str != NULL) {
1504 				FREE(af->so_accept_filter_str, M_ACCF);
1505 			}
1506 			FREE(af, M_ACCF);
1507 			so->so_accf = NULL;
1508 		}
1509 		so->so_options &= ~SO_ACCEPTFILTER;
1510 		SOCK_UNLOCK(so);
1511 		return (0);
1512 	}
1513 	SOCK_UNLOCK(so);
1514 
1515 	/*-
1516 	 * Adding a filter.
1517 	 *
1518 	 * Do memory allocation, copyin, and filter lookup now while we're
1519 	 * not holding any locks.  Avoids sleeping with a mutex, as well as
1520 	 * introducing a lock order between accept filter locks and socket
1521 	 * locks here.
1522 	 */
1523 	MALLOC(afap, struct accept_filter_arg *, sizeof(*afap), M_TEMP,
1524 	    M_WAITOK);
1525 	/* don't put large objects on the kernel stack */
1526 	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
1527 	afap->af_name[sizeof(afap->af_name)-1] = '\0';
1528 	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
1529 	if (error) {
1530 		FREE(afap, M_TEMP);
1531 		return (error);
1532 	}
1533 	afp = accept_filt_get(afap->af_name);
1534 	if (afp == NULL) {
1535 		FREE(afap, M_TEMP);
1536 		return (ENOENT);
1537 	}
1538 
1539 	/*
1540 	 * Allocate the new accept filter instance storage.  We may have to
1541 	 * free it again later if we fail to attach it.  If attached
1542 	 * properly, 'newaf' is NULLed to avoid a free() while in use.
1543 	 */
1544 	MALLOC(newaf, struct so_accf *, sizeof(*newaf), M_ACCF, M_WAITOK |
1545 	    M_ZERO);
1546 	if (afp->accf_create != NULL && afap->af_name[0] != '\0') {
1547 		int len = strlen(afap->af_name) + 1;
1548 		MALLOC(newaf->so_accept_filter_str, char *, len, M_ACCF,
1549 		    M_WAITOK);
1550 		strcpy(newaf->so_accept_filter_str, afap->af_name);
1551 	}
1552 
1553 	SOCK_LOCK(so);
1554 	/* must remove previous filter first */
1555 	if (so->so_accf != NULL) {
1556 		error = EINVAL;
1557 		goto out;
1558 	}
1559 	/*
1560 	 * Invoke the accf_create() method of the filter if required.
1561 	 * XXXRW: the socket mutex is held over this call, so the create
1562 	 * method cannot block.  This may be something we have to change, but
1563 	 * it would require addressing possible races.
1564 	 */
1565 	if (afp->accf_create != NULL) {
1566 		newaf->so_accept_filter_arg =
1567 		    afp->accf_create(so, afap->af_arg);
1568 		if (newaf->so_accept_filter_arg == NULL) {
1569 			error = EINVAL;
1570 			goto out;
1571 		}
1572 	}
1573 	newaf->so_accept_filter = afp;
1574 	so->so_accf = newaf;
1575 	so->so_options |= SO_ACCEPTFILTER;
1576 	newaf = NULL;
1577 out:
1578 	SOCK_UNLOCK(so);
1579 	if (newaf != NULL) {
1580 		if (newaf->so_accept_filter_str != NULL)
1581 			FREE(newaf->so_accept_filter_str, M_ACCF);
1582 		FREE(newaf, M_ACCF);
1583 	}
1584 	if (afap != NULL)
1585 		FREE(afap, M_TEMP);
1586 	return (error);
1587 }
1588 #endif /* INET */
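/*
 * Example usage (sketch): from userland, the filter installed by
 * do_setopt_accept_filter() above is requested with setsockopt(2);
 * "dataready" here assumes the accf_data(9) module is loaded:
 *
 *	struct accept_filter_arg afa;
 *
 *	bzero(&afa, sizeof(afa));
 *	strcpy(afa.af_name, "dataready");
 *	setsockopt(s, SOL_SOCKET, SO_ACCEPTFILTER, &afa, sizeof(afa));
 */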
1589 
1590 /*
1591  * Perhaps this routine, and sooptcopyout(), below, ought to come in
1592  * an additional variant to handle the case where the option value needs
1593  * to be some kind of integer, but not a specific size.
1594  * In addition to their use here, these functions are also called by the
1595  * protocol-level pr_ctloutput() routines.
1596  */
1597 int
1598 sooptcopyin(sopt, buf, len, minlen)
1599 	struct	sockopt *sopt;
1600 	void	*buf;
1601 	size_t	len;
1602 	size_t	minlen;
1603 {
1604 	size_t	valsize;
1605 
1606 	/*
1607 	 * If the user gives us more than we wanted, we ignore it,
1608 	 * but if we don't get the minimum length the caller
1609 	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
1610 	 * is set to however much we actually retrieved.
1611 	 */
1612 	if ((valsize = sopt->sopt_valsize) < minlen)
1613 		return EINVAL;
1614 	if (valsize > len)
1615 		sopt->sopt_valsize = valsize = len;
1616 
1617 	if (sopt->sopt_td != NULL)
1618 		return (copyin(sopt->sopt_val, buf, valsize));
1619 
1620 	bcopy(sopt->sopt_val, buf, valsize);
1621 	return 0;
1622 }
1623 
1624 /*
1625  * Kernel version of setsockopt(2).
1626  * XXX: optlen is size_t, not socklen_t
1627  */
1628 int
1629 so_setsockopt(struct socket *so, int level, int optname, void *optval,
1630     size_t optlen)
1631 {
1632 	struct sockopt sopt;
1633 
1634 	sopt.sopt_level = level;
1635 	sopt.sopt_name = optname;
1636 	sopt.sopt_dir = SOPT_SET;
1637 	sopt.sopt_val = optval;
1638 	sopt.sopt_valsize = optlen;
1639 	sopt.sopt_td = NULL;
1640 	return (sosetopt(so, &sopt));
1641 }
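/*
 * Example usage (sketch): enabling SO_REUSEADDR through the kernel-level
 * helper above; note that optlen is a size_t rather than a socklen_t:
 *
 *	int one = 1;
 *
 *	error = so_setsockopt(so, SOL_SOCKET, SO_REUSEADDR, &one,
 *	    sizeof(one));
 */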
1642 
1643 int
1644 sosetopt(so, sopt)
1645 	struct socket *so;
1646 	struct sockopt *sopt;
1647 {
1648 	int	error, optval;
1649 	struct	linger l;
1650 	struct	timeval tv;
1651 	u_long  val;
1652 #ifdef MAC
1653 	struct mac extmac;
1654 #endif
1655 
1656 	error = 0;
1657 	if (sopt->sopt_level != SOL_SOCKET) {
1658 		if (so->so_proto && so->so_proto->pr_ctloutput)
1659 			return ((*so->so_proto->pr_ctloutput)
1660 				  (so, sopt));
1661 		error = ENOPROTOOPT;
1662 	} else {
1663 		switch (sopt->sopt_name) {
1664 #ifdef INET
1665 		case SO_ACCEPTFILTER:
1666 			error = do_setopt_accept_filter(so, sopt);
1667 			if (error)
1668 				goto bad;
1669 			break;
1670 #endif
1671 		case SO_LINGER:
1672 			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
1673 			if (error)
1674 				goto bad;
1675 
1676 			SOCK_LOCK(so);
1677 			so->so_linger = l.l_linger;
1678 			if (l.l_onoff)
1679 				so->so_options |= SO_LINGER;
1680 			else
1681 				so->so_options &= ~SO_LINGER;
1682 			SOCK_UNLOCK(so);
1683 			break;
1684 
1685 		case SO_DEBUG:
1686 		case SO_KEEPALIVE:
1687 		case SO_DONTROUTE:
1688 		case SO_USELOOPBACK:
1689 		case SO_BROADCAST:
1690 		case SO_REUSEADDR:
1691 		case SO_REUSEPORT:
1692 		case SO_OOBINLINE:
1693 		case SO_TIMESTAMP:
1694 		case SO_BINTIME:
1695 		case SO_NOSIGPIPE:
1696 			error = sooptcopyin(sopt, &optval, sizeof optval,
1697 					    sizeof optval);
1698 			if (error)
1699 				goto bad;
1700 			SOCK_LOCK(so);
1701 			if (optval)
1702 				so->so_options |= sopt->sopt_name;
1703 			else
1704 				so->so_options &= ~sopt->sopt_name;
1705 			SOCK_UNLOCK(so);
1706 			break;
1707 
1708 		case SO_SNDBUF:
1709 		case SO_RCVBUF:
1710 		case SO_SNDLOWAT:
1711 		case SO_RCVLOWAT:
1712 			error = sooptcopyin(sopt, &optval, sizeof optval,
1713 					    sizeof optval);
1714 			if (error)
1715 				goto bad;
1716 
1717 			/*
1718 			 * Values < 1 make no sense for any of these
1719 			 * options, so disallow them.
1720 			 */
1721 			if (optval < 1) {
1722 				error = EINVAL;
1723 				goto bad;
1724 			}
1725 
1726 			switch (sopt->sopt_name) {
1727 			case SO_SNDBUF:
1728 			case SO_RCVBUF:
1729 				if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
1730 				    &so->so_snd : &so->so_rcv, (u_long)optval,
1731 				    so, curthread) == 0) {
1732 					error = ENOBUFS;
1733 					goto bad;
1734 				}
1735 				break;
1736 
1737 			/*
1738 			 * Make sure the low-water is never greater than
1739 			 * the high-water.
1740 			 */
1741 			case SO_SNDLOWAT:
1742 				SOCKBUF_LOCK(&so->so_snd);
1743 				so->so_snd.sb_lowat =
1744 				    (optval > so->so_snd.sb_hiwat) ?
1745 				    so->so_snd.sb_hiwat : optval;
1746 				SOCKBUF_UNLOCK(&so->so_snd);
1747 				break;
1748 			case SO_RCVLOWAT:
1749 				SOCKBUF_LOCK(&so->so_rcv);
1750 				so->so_rcv.sb_lowat =
1751 				    (optval > so->so_rcv.sb_hiwat) ?
1752 				    so->so_rcv.sb_hiwat : optval;
1753 				SOCKBUF_UNLOCK(&so->so_rcv);
1754 				break;
1755 			}
1756 			break;
1757 
1758 		case SO_SNDTIMEO:
1759 		case SO_RCVTIMEO:
1760 			error = sooptcopyin(sopt, &tv, sizeof tv,
1761 					    sizeof tv);
1762 			if (error)
1763 				goto bad;
1764 
1765 			/* assert(hz > 0); */
1766 			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
1767 			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
1768 				error = EDOM;
1769 				goto bad;
1770 			}
1771 			/* assert(tick > 0); */
1772 			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
1773 			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
1774 			if (val > INT_MAX) {
1775 				error = EDOM;
1776 				goto bad;
1777 			}
1778 			if (val == 0 && tv.tv_usec != 0)
1779 				val = 1;
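			/*
			 * Worked example (assuming hz = 100, hence tick =
			 * 10000 usec): tv = { 2, 500000 } converts to
			 * val = 2 * 100 + 500000 / 10000 = 250 ticks.
			 */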
1780 
1781 			switch (sopt->sopt_name) {
1782 			case SO_SNDTIMEO:
1783 				so->so_snd.sb_timeo = val;
1784 				break;
1785 			case SO_RCVTIMEO:
1786 				so->so_rcv.sb_timeo = val;
1787 				break;
1788 			}
1789 			break;
1790 		case SO_LABEL:
1791 #ifdef MAC
1792 			error = sooptcopyin(sopt, &extmac, sizeof extmac,
1793 			    sizeof extmac);
1794 			if (error)
1795 				goto bad;
1796 			error = mac_setsockopt_label(sopt->sopt_td->td_ucred,
1797 			    so, &extmac);
1798 #else
1799 			error = EOPNOTSUPP;
1800 #endif
1801 			break;
1802 		default:
1803 			error = ENOPROTOOPT;
1804 			break;
1805 		}
1806 		if (error == 0 && so->so_proto != NULL &&
1807 		    so->so_proto->pr_ctloutput != NULL) {
1808 			(void) ((*so->so_proto->pr_ctloutput)
1809 				  (so, sopt));
1810 		}
1811 	}
1812 bad:
1813 	return (error);
1814 }
1815 
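/*
 * Example (userland sketch, not part of this file; "s" is an open socket):
 * the SO_SNDTIMEO and SO_RCVTIMEO cases above convert a struct timeval to
 * scheduler ticks and reject out-of-range values with EDOM:
 *
 *	struct timeval tv = { 5, 0 };		// 5-second receive timeout
 *
 *	if (setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) == -1)
 *		perror("setsockopt");		// EDOM if tv was out of range
 */
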
1816 /* Helper routine for getsockopt */
1817 int
1818 sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
1819 {
1820 	int	error;
1821 	size_t	valsize;
1822 
1823 	error = 0;
1824 
1825 	/*
1826 	 * Documented get behavior is that we always return a value,
1827 	 * possibly truncated to fit in the user's buffer.
1828 	 * Traditional behavior is that we always tell the user
1829 	 * precisely how much we copied, rather than something useful
1830 	 * like the total amount we had available for her.
1831 	 * Note that this interface is not idempotent; the entire answer must
1832 	 * be generated ahead of time.
1833 	 */
1834 	valsize = min(len, sopt->sopt_valsize);
1835 	sopt->sopt_valsize = valsize;
1836 	if (sopt->sopt_val != NULL) {
1837 		if (sopt->sopt_td != NULL)
1838 			error = copyout(buf, sopt->sopt_val, valsize);
1839 		else
1840 			bcopy(buf, sopt->sopt_val, valsize);
1841 	}
1842 	return (error);
1843 }
1844 
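/*
 * Example (sketch): a protocol's pr_ctloutput GET handler typically builds
 * its complete answer first and then hands it to sooptcopyout(), which
 * truncates it to the user's buffer.  MYPROTO_STATE and myproto_getstate()
 * below are hypothetical names, shown only for illustration:
 *
 *	case MYPROTO_STATE: {
 *		int state = myproto_getstate(so);	// hypothetical helper
 *
 *		error = sooptcopyout(sopt, &state, sizeof(state));
 *		break;
 *	}
 */
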
1845 int
1846 sogetopt(struct socket *so, struct sockopt *sopt)
1849 {
1850 	int	error, optval;
1851 	struct	linger l;
1852 	struct	timeval tv;
1853 #ifdef INET
1854 	struct accept_filter_arg *afap;
1855 #endif
1856 #ifdef MAC
1857 	struct mac extmac;
1858 #endif
1859 
1860 	error = 0;
1861 	if (sopt->sopt_level != SOL_SOCKET) {
1862 		if (so->so_proto && so->so_proto->pr_ctloutput) {
1863 			return ((*so->so_proto->pr_ctloutput)
1864 				  (so, sopt));
1865 		} else
1866 			return (ENOPROTOOPT);
1867 	} else {
1868 		switch (sopt->sopt_name) {
1869 #ifdef INET
1870 		case SO_ACCEPTFILTER:
1871 			/* Unlocked read. */
1872 			if ((so->so_options & SO_ACCEPTCONN) == 0)
1873 				return (EINVAL);
1874 			MALLOC(afap, struct accept_filter_arg *, sizeof(*afap),
1875 				M_TEMP, M_WAITOK | M_ZERO);
1876 			SOCK_LOCK(so);
1877 			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
1878 				strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
1879 				if (so->so_accf->so_accept_filter_str != NULL)
1880 					strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
1881 			}
1882 			SOCK_UNLOCK(so);
1883 			error = sooptcopyout(sopt, afap, sizeof(*afap));
1884 			FREE(afap, M_TEMP);
1885 			break;
1886 #endif
1887 
1888 		case SO_LINGER:
1889 			SOCK_LOCK(so);
1890 			l.l_onoff = so->so_options & SO_LINGER;
1891 			l.l_linger = so->so_linger;
1892 			SOCK_UNLOCK(so);
1893 			error = sooptcopyout(sopt, &l, sizeof l);
1894 			break;
1895 
1896 		case SO_USELOOPBACK:
1897 		case SO_DONTROUTE:
1898 		case SO_DEBUG:
1899 		case SO_KEEPALIVE:
1900 		case SO_REUSEADDR:
1901 		case SO_REUSEPORT:
1902 		case SO_BROADCAST:
1903 		case SO_OOBINLINE:
1904 		case SO_TIMESTAMP:
1905 		case SO_BINTIME:
1906 		case SO_NOSIGPIPE:
1907 			optval = so->so_options & sopt->sopt_name;
1908 integer:
1909 			error = sooptcopyout(sopt, &optval, sizeof optval);
1910 			break;
1911 
1912 		case SO_TYPE:
1913 			optval = so->so_type;
1914 			goto integer;
1915 
1916 		case SO_ERROR:
1917 			optval = so->so_error;
1918 			so->so_error = 0;
1919 			goto integer;
1920 
1921 		case SO_SNDBUF:
1922 			optval = so->so_snd.sb_hiwat;
1923 			goto integer;
1924 
1925 		case SO_RCVBUF:
1926 			optval = so->so_rcv.sb_hiwat;
1927 			goto integer;
1928 
1929 		case SO_SNDLOWAT:
1930 			optval = so->so_snd.sb_lowat;
1931 			goto integer;
1932 
1933 		case SO_RCVLOWAT:
1934 			optval = so->so_rcv.sb_lowat;
1935 			goto integer;
1936 
1937 		case SO_SNDTIMEO:
1938 		case SO_RCVTIMEO:
1939 			optval = (sopt->sopt_name == SO_SNDTIMEO ?
1940 				  so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
1941 
1942 			tv.tv_sec = optval / hz;
1943 			tv.tv_usec = (optval % hz) * tick;
1944 			error = sooptcopyout(sopt, &tv, sizeof tv);
1945 			break;
1946 		case SO_LABEL:
1947 #ifdef MAC
1948 			error = sooptcopyin(sopt, &extmac, sizeof(extmac),
1949 			    sizeof(extmac));
1950 			if (error)
1951 				return (error);
1952 			error = mac_getsockopt_label(sopt->sopt_td->td_ucred,
1953 			    so, &extmac);
1954 			if (error)
1955 				return (error);
1956 			error = sooptcopyout(sopt, &extmac, sizeof extmac);
1957 #else
1958 			error = EOPNOTSUPP;
1959 #endif
1960 			break;
1961 		case SO_PEERLABEL:
1962 #ifdef MAC
1963 			error = sooptcopyin(sopt, &extmac, sizeof(extmac),
1964 			    sizeof(extmac));
1965 			if (error)
1966 				return (error);
1967 			error = mac_getsockopt_peerlabel(
1968 			    sopt->sopt_td->td_ucred, so, &extmac);
1969 			if (error)
1970 				return (error);
1971 			error = sooptcopyout(sopt, &extmac, sizeof extmac);
1972 #else
1973 			error = EOPNOTSUPP;
1974 #endif
1975 			break;
1976 		default:
1977 			error = ENOPROTOOPT;
1978 			break;
1979 		}
1980 		return (error);
1981 	}
1982 }
1983 
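/*
 * Example (userland sketch; "s" is an open socket): the SO_ERROR case above
 * returns the pending error and clears it, so a second getsockopt() reports
 * 0.  This is the usual idiom for checking a non-blocking connect():
 *
 *	int err;
 *	socklen_t len = sizeof(err);
 *
 *	if (getsockopt(s, SOL_SOCKET, SO_ERROR, &err, &len) == 0 && err != 0)
 *		errno = err;			// the deferred connect() error
 */
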
1984 /* XXX: prepare mbuf for (__FreeBSD__ < 3) routines. */
1985 int
1986 soopt_getm(struct sockopt *sopt, struct mbuf **mp)
1987 {
1988 	struct mbuf *m, *m_prev;
1989 	int sopt_size = sopt->sopt_valsize;
1990 
1991 	MGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT, MT_DATA);
1992 	if (m == NULL)
1993 		return (ENOBUFS);
1994 	if (sopt_size > MLEN) {
1995 		MCLGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT);
1996 		if ((m->m_flags & M_EXT) == 0) {
1997 			m_free(m);
1998 			return (ENOBUFS);
1999 		}
2000 		m->m_len = min(MCLBYTES, sopt_size);
2001 	} else {
2002 		m->m_len = min(MLEN, sopt_size);
2003 	}
2004 	sopt_size -= m->m_len;
2005 	*mp = m;
2006 	m_prev = m;
2007 
2008 	while (sopt_size) {
2009 		MGET(m, sopt->sopt_td ? M_TRYWAIT : M_DONTWAIT, MT_DATA);
2010 		if (m == NULL) {
2011 			m_freem(*mp);
2012 			return (ENOBUFS);
2013 		}
2014 		if (sopt_size > MLEN) {
2015 			MCLGET(m, sopt->sopt_td != NULL ? M_TRYWAIT :
2016 			    M_DONTWAIT);
2017 			if ((m->m_flags & M_EXT) == 0) {
2018 				m_freem(m);
2019 				m_freem(*mp);
2020 				return (ENOBUFS);
2021 			}
2022 			m->m_len = min(MCLBYTES, sopt_size);
2023 		} else {
2024 			m->m_len = min(MLEN, sopt_size);
2025 		}
2026 		sopt_size -= m->m_len;
2027 		m_prev->m_next = m;
2028 		m_prev = m;
2029 	}
2030 	return (0);
2031 }
2032 
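/*
 * Example (sketch): soopt_getm() fills each mbuf from MLEN and switches to
 * an MCLBYTES cluster when the remaining option data is larger.  Assuming
 * MCLBYTES == 2048, a 3000-byte option yields a two-mbuf chain:
 *
 *	struct mbuf *m;
 *
 *	sopt->sopt_valsize = 3000;
 *	if (soopt_getm(sopt, &m) == 0) {
 *		// m->m_len == 2048, m->m_next->m_len == 952
 *		m_freem(m);
 *	}
 */
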
2033 /* XXX: copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
2034 int
2035 soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
2036 {
2037 	struct mbuf *m0 = m;
2038 
2039 	if (sopt->sopt_val == NULL)
2040 		return (0);
2041 	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
2042 		if (sopt->sopt_td != NULL) {
2043 			int error;
2044 
2045 			error = copyin(sopt->sopt_val, mtod(m, char *),
2046 				       m->m_len);
2047 			if (error != 0) {
2048 				m_freem(m0);
2049 				return (error);
2050 			}
2051 		} else
2052 			bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
2053 		sopt->sopt_valsize -= m->m_len;
2054 		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
2055 		m = m->m_next;
2056 	}
2057 	if (m != NULL) /* enough mbufs should have been allocated at ip6_sooptmcopyin() */
2058 		panic("ip6_sooptmcopyin");
2059 	return (0);
2060 }
2061 
2062 /* XXX: copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
2063 int
2064 soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
2065 {
2066 	struct mbuf *m0 = m;
2067 	size_t valsize = 0;
2068 
2069 	if (sopt->sopt_val == NULL)
2070 		return (0);
2071 	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
2072 		if (sopt->sopt_td != NULL) {
2073 			int error;
2074 
2075 			error = copyout(mtod(m, char *), sopt->sopt_val,
2076 				       m->m_len);
2077 			if (error != 0) {
2078 				m_freem(m0);
2079 				return (error);
2080 			}
2081 		} else
2082 			bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
2083 		sopt->sopt_valsize -= m->m_len;
2084 		sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
2085 		valsize += m->m_len;
2086 		m = m->m_next;
2087 	}
2088 	if (m != NULL) {
2089 		/* a large enough soopt buffer should have been provided from user-land */
2090 		m_freem(m0);
2091 		return (EINVAL);
2092 	}
2093 	sopt->sopt_valsize = valsize;
2094 	return (0);
2095 }
2096 
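/*
 * Example (sketch): the three routines above bridge sockopt data into the
 * older mbuf-based interfaces.  A set-option path allocates a chain sized
 * to the option, copies the user data in, and hands the chain to the
 * legacy routine; soopt_mcopyout() is the mirror used on the get side:
 *
 *	struct mbuf *m;
 *	int error;
 *
 *	if ((error = soopt_getm(sopt, &m)) != 0)
 *		return (error);
 *	if ((error = soopt_mcopyin(sopt, m)) != 0)	// frees m on error
 *		return (error);
 *	// ... pass "m" to a legacy routine, which takes ownership ...
 */
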
2097 void
2098 sohasoutofband(struct socket *so)
2100 {
2101 	if (so->so_sigio != NULL)
2102 		pgsigio(&so->so_sigio, SIGURG, 0);
2103 	selwakeuppri(&so->so_rcv.sb_sel, PSOCK);
2104 }
2105 
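/*
 * Example (userland sketch; "s" is an open socket): a process only sees the
 * SIGURG raised above after claiming ownership of the socket:
 *
 *	signal(SIGURG, urg_handler);		// urg_handler is hypothetical
 *	fcntl(s, F_SETOWN, getpid());		// direct SIGURG/SIGIO here
 */
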
2106 int
2107 sopoll(struct socket *so, int events, struct ucred *active_cred,
2108     struct thread *td)
2109 {
2110 	int revents = 0;
2111 
2112 	SOCKBUF_LOCK(&so->so_snd);
2113 	SOCKBUF_LOCK(&so->so_rcv);
2114 	if (events & (POLLIN | POLLRDNORM))
2115 		if (soreadable(so))
2116 			revents |= events & (POLLIN | POLLRDNORM);
2117 
2118 	if (events & POLLINIGNEOF)
2119 		if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat ||
2120 		    !TAILQ_EMPTY(&so->so_comp) || so->so_error)
2121 			revents |= POLLINIGNEOF;
2122 
2123 	if (events & (POLLOUT | POLLWRNORM))
2124 		if (sowriteable(so))
2125 			revents |= events & (POLLOUT | POLLWRNORM);
2126 
2127 	if (events & (POLLPRI | POLLRDBAND))
2128 		if (so->so_oobmark || (so->so_rcv.sb_state & SBS_RCVATMARK))
2129 			revents |= events & (POLLPRI | POLLRDBAND);
2130 
2131 	if (revents == 0) {
2132 		if (events &
2133 		    (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM |
2134 		     POLLRDBAND)) {
2135 			selrecord(td, &so->so_rcv.sb_sel);
2136 			so->so_rcv.sb_flags |= SB_SEL;
2137 		}
2138 
2139 		if (events & (POLLOUT | POLLWRNORM)) {
2140 			selrecord(td, &so->so_snd.sb_sel);
2141 			so->so_snd.sb_flags |= SB_SEL;
2142 		}
2143 	}
2144 
2145 	SOCKBUF_UNLOCK(&so->so_rcv);
2146 	SOCKBUF_UNLOCK(&so->so_snd);
2147 	return (revents);
2148 }
2149 
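/*
 * Example (userland sketch; "s" is an open socket): sopoll() computes the
 * revents tested above; POLLPRI reports pending out-of-band data:
 *
 *	struct pollfd pfd = { .fd = s, .events = POLLIN | POLLPRI };
 *	char buf[1];
 *
 *	if (poll(&pfd, 1, INFTIM) > 0 && (pfd.revents & POLLPRI))
 *		recv(s, buf, sizeof(buf), MSG_OOB);	// fetch the OOB byte
 */
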
2150 int
2151 soo_kqfilter(struct file *fp, struct knote *kn)
2152 {
2153 	struct socket *so = kn->kn_fp->f_data;
2154 	struct sockbuf *sb;
2155 
2156 	switch (kn->kn_filter) {
2157 	case EVFILT_READ:
2158 		if (so->so_options & SO_ACCEPTCONN)
2159 			kn->kn_fop = &solisten_filtops;
2160 		else
2161 			kn->kn_fop = &soread_filtops;
2162 		sb = &so->so_rcv;
2163 		break;
2164 	case EVFILT_WRITE:
2165 		kn->kn_fop = &sowrite_filtops;
2166 		sb = &so->so_snd;
2167 		break;
2168 	default:
2169 		return (EINVAL);
2170 	}
2171 
2172 	SOCKBUF_LOCK(sb);
2173 	knlist_add(&sb->sb_sel.si_note, kn, 1);
2174 	sb->sb_flags |= SB_KNOTE;
2175 	SOCKBUF_UNLOCK(sb);
2176 	return (0);
2177 }
2178 
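/*
 * Example (userland sketch; "kq" comes from kqueue()): an EVFILT_READ
 * registration lands in soo_kqfilter() above; NOTE_LOWAT makes
 * filt_soread() compare against the caller's low-water mark (in kev.data)
 * instead of sb_lowat:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, s, EVFILT_READ, EV_ADD, NOTE_LOWAT, 128, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	// fires at >= 128 bytes queued
 */
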
2179 static void
2180 filt_sordetach(struct knote *kn)
2181 {
2182 	struct socket *so = kn->kn_fp->f_data;
2183 
2184 	SOCKBUF_LOCK(&so->so_rcv);
2185 	knlist_remove(&so->so_rcv.sb_sel.si_note, kn, 1);
2186 	if (knlist_empty(&so->so_rcv.sb_sel.si_note))
2187 		so->so_rcv.sb_flags &= ~SB_KNOTE;
2188 	SOCKBUF_UNLOCK(&so->so_rcv);
2189 }
2190 
2191 /*ARGSUSED*/
2192 static int
2193 filt_soread(struct knote *kn, long hint)
2194 {
2195 	struct socket *so;
2196 
2197 	so = kn->kn_fp->f_data;
2198 	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
2199 
2200 	kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
2201 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
2202 		kn->kn_flags |= EV_EOF;
2203 		kn->kn_fflags = so->so_error;
2204 		return (1);
2205 	} else if (so->so_error)	/* temporary udp error */
2206 		return (1);
2207 	else if (kn->kn_sfflags & NOTE_LOWAT)
2208 		return (kn->kn_data >= kn->kn_sdata);
2209 	else
2210 		return (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat);
2211 }
2212 
2213 static void
2214 filt_sowdetach(struct knote *kn)
2215 {
2216 	struct socket *so = kn->kn_fp->f_data;
2217 
2218 	SOCKBUF_LOCK(&so->so_snd);
2219 	knlist_remove(&so->so_snd.sb_sel.si_note, kn, 1);
2220 	if (knlist_empty(&so->so_snd.sb_sel.si_note))
2221 		so->so_snd.sb_flags &= ~SB_KNOTE;
2222 	SOCKBUF_UNLOCK(&so->so_snd);
2223 }
2224 
2225 /*ARGSUSED*/
2226 static int
2227 filt_sowrite(struct knote *kn, long hint)
2228 {
2229 	struct socket *so;
2230 
2231 	so = kn->kn_fp->f_data;
2232 	SOCKBUF_LOCK_ASSERT(&so->so_snd);
2233 	kn->kn_data = sbspace(&so->so_snd);
2234 	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
2235 		kn->kn_flags |= EV_EOF;
2236 		kn->kn_fflags = so->so_error;
2237 		return (1);
2238 	} else if (so->so_error)	/* temporary udp error */
2239 		return (1);
2240 	else if (((so->so_state & SS_ISCONNECTED) == 0) &&
2241 	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
2242 		return (0);
2243 	else if (kn->kn_sfflags & NOTE_LOWAT)
2244 		return (kn->kn_data >= kn->kn_sdata);
2245 	else
2246 		return (kn->kn_data >= so->so_snd.sb_lowat);
2247 }
2248 
2249 /*ARGSUSED*/
2250 static int
2251 filt_solisten(struct knote *kn, long hint)
2252 {
2253 	struct socket *so = kn->kn_fp->f_data;
2254 
2255 	kn->kn_data = so->so_qlen;
2256 	return (!TAILQ_EMPTY(&so->so_comp));
2257 }
2258 
2259 int
2260 socheckuid(struct socket *so, uid_t uid)
2261 {
2262 
2263 	if (so == NULL)
2264 		return (EPERM);
2265 	if (so->so_cred->cr_uid == uid)
2266 		return (0);
2267 	return (EPERM);
2268 }
2269 
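/*
 * Example (sketch): socheckuid() lets in-kernel consumers test socket
 * ownership without touching so_cred directly, e.g. a rule that applies
 * only to one user's sockets (rule_uid is illustrative):
 *
 *	if (socheckuid(so, rule_uid) != 0)
 *		return (0);			// not owned by rule_uid
 */
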
2270 static int
2271 somaxconn_sysctl(SYSCTL_HANDLER_ARGS)
2272 {
2273 	int error;
2274 	int val;
2275 
2276 	val = somaxconn;
2277 	error = sysctl_handle_int(oidp, &val, sizeof(int), req);
2278 	if (error || !req->newptr )
2279 	if (error || !req->newptr)
2280 
2281 	if (val < 1 || val > USHRT_MAX)
2282 		return (EINVAL);
2283 
2284 	somaxconn = val;
2285 	return (0);
2286 }
2287
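/*
 * Example: the handler above backs the kern.ipc.somaxconn sysctl, so the
 * clamp on listen(2) backlogs can be tuned at runtime, e.g.:
 *
 *	sysctl kern.ipc.somaxconn=1024
 *
 * Values outside [1, USHRT_MAX] are rejected with EINVAL.
 */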