xref: /freebsd/sys/kern/uipc_socket.c (revision 05c7a37afb48ddd5ee1bd921a5d46fe59cc70b15)
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $Id: uipc_socket.c,v 1.15 1996/02/13 18:16:20 wollman Exp $
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>

static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern, KERN_SOMAXCONN, somaxconn, CTLFLAG_RW, &somaxconn, 0, "");

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */
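
/*
 * Added note (illustrative, derived from the calls below): every routine
 * in this file ultimately dispatches through the protocol switch's
 * user-request entry, whose signature in this era is
 *
 *	int (*pr_usrreq)(struct socket *so, int req,
 *	    struct mbuf *m, struct mbuf *nam, struct mbuf *control);
 *
 * Unused mbuf arguments are passed as (struct mbuf *)0, and some
 * requests overload them (PRU_ATTACH smuggles the protocol number in
 * the "nam" slot, PRU_CONNECT2 a second socket).
 */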
/*ARGSUSED*/
int
socreate(dom, aso, type, proto, p)
	int dom;
	struct socket **aso;
	register int type;
	int proto;
	struct proc *p;
{
	register struct protosw *prp;
	register struct socket *so;
	register int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == 0 || prp->pr_usrreq == 0)
		return (EPROTONOSUPPORT);
	if (prp->pr_type != type)
		return (EPROTOTYPE);
	MALLOC(so, struct socket *, sizeof(*so), M_SOCKET, M_WAIT);
	bzero((caddr_t)so, sizeof(*so));
	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	if (p->p_ucred->cr_uid == 0)
		so->so_state = SS_PRIV;
	so->so_proto = prp;
	error =
	    (*prp->pr_usrreq)(so, PRU_ATTACH,
		(struct mbuf *)0, (struct mbuf *)proto, (struct mbuf *)0);
	if (error) {
		so->so_state |= SS_NOFDREF;
		sofree(so);
		return (error);
	}
	*aso = so;
	return (0);
}
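
/*
 * Illustrative sketch (assumed caller, not part of this file): the
 * socket(2) system call layer would use socreate() roughly as follows,
 * with "p" the calling process and descriptor allocation omitted:
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_STREAM, 0, p);
 *	if (error)
 *		return (error);
 *	... wire "so" into a file descriptor ...
 */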

int
sobind(so, nam)
	struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	error =
	    (*so->so_proto->pr_usrreq)(so, PRU_BIND,
		(struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

int
solisten(so, backlog)
	register struct socket *so;
	int backlog;
{
	int s = splnet(), error;

	error =
	    (*so->so_proto->pr_usrreq)(so, PRU_LISTEN,
		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
	if (error) {
		splx(s);
		return (error);
	}
	if (so->so_comp.tqh_first == NULL)
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	splx(s);
	return (0);
}
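
/*
 * Added note: a backlog outside the range [0, somaxconn] is silently
 * clamped, so e.g. listen(s, -1) and listen(s, 100000) both leave
 * so_qlimit == somaxconn.  The ceiling is run-time tunable through the
 * kern.somaxconn sysctl declared at the top of this file.
 */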

void
sofree(so)
	register struct socket *so;
{
	struct socket *head = so->so_head;

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
		return;
	if (head != NULL) {
		if (so->so_state & SS_INCOMP) {
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
		} else if (so->so_state & SS_COMP) {
			TAILQ_REMOVE(&head->so_comp, so, so_list);
		} else {
			panic("sofree: not queued");
		}
		head->so_qlen--;
		so->so_state &= ~(SS_INCOMP|SS_COMP);
		so->so_head = NULL;
	}
	sbrelease(&so->so_snd);
	sorflush(so);
	FREE(so, M_SOCKET);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(so)
	register struct socket *so;
{
	int s = splnet();		/* conservative */
	int error = 0;

	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;

		for (sp = so->so_incomp.tqh_first; sp != NULL; sp = sp->so_list.tqe_next)
			(void) soabort(sp);
		for (sp = so->so_comp.tqh_first; sp != NULL; sp = sp->so_list.tqe_next)
			(void) soabort(sp);
	}
	if (so->so_pcb == 0)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep((caddr_t)&so->so_timeo,
				    PSOCK | PCATCH, "soclos", so->so_linger);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2 =
		    (*so->so_proto->pr_usrreq)(so, PRU_DETACH,
			(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
		if (error == 0)
			error = error2;
	}
discard:
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;
	sofree(so);
	splx(s);
	return (error);
}
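
/*
 * Added note: with SO_LINGER set, a blocking close sleeps in the loop
 * above until the disconnect completes, a signal arrives, or tsleep()
 * times out (the timeout argument is taken directly from so_linger); a
 * non-blocking (SS_NBIO) socket that is still disconnecting skips the
 * wait and drops straight through.
 */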

/*
 * Must be called at splnet...
 */
int
soabort(so)
	struct socket *so;
{

	return (
	    (*so->so_proto->pr_usrreq)(so, PRU_ABORT,
		(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
}

int
soaccept(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	so->so_state &= ~SS_NOFDREF;
	error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT,
	    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

int
soconnect(so, nam)
	register struct socket *so;
	struct mbuf *nam;
{
	int s;
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	s = splnet();
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
		    (struct mbuf *)0, nam, (struct mbuf *)0);
	splx(s);
	return (error);
}

int
soconnect2(so1, so2)
	register struct socket *so1;
	struct socket *so2;
{
	int s = splnet();
	int error;

	error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2,
	    (struct mbuf *)0, (struct mbuf *)so2, (struct mbuf *)0);
	splx(s);
	return (error);
}

int
sodisconnect(so)
	register struct socket *so;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT,
	    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
bad:
	splx(s);
	return (error);
}

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
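/*
 * Added note: SBLOCKWAIT() maps the caller's MSG_DONTWAIT flag onto the
 * sockbuf-lock wait flag, so a non-blocking request never sleeps in
 * sblock() waiting for another sender's or receiver's lock to clear.
 */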
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(so, addr, uio, top, control, flags)
	register struct socket *so;
	struct mbuf *addr;
	struct uio *uio;
	struct mbuf *top;
	struct mbuf *control;
	int flags;
{
	struct proc *p = curproc;		/* XXX */
	struct mbuf **mp;
	register struct mbuf *m;
	register long space, len, resid;
	int clen = 0, error, s, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid.  On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 */
	if (resid < 0)
		return (EINVAL);
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	p->p_stats->p_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	snderr(errno)	{ error = errno; splx(s); goto release; }

restart:
	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;
	do {
		s = splnet();
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error)
			snderr(so->so_error);
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					snderr(ENOTCONN);
			} else if (addr == 0)
			    snderr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
				   ENOTCONN : EDESTADDRREQ);
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat)
			snderr(EMSGSIZE);
		if (space < resid + clen && uio &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if (so->so_state & SS_NBIO)
				snderr(EWOULDBLOCK);
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			splx(s);
			if (error)
				goto out;
			goto restart;
		}
		splx(s);
		mp = &top;
		space -= clen;
		do {
		    if (uio == NULL) {
			/*
			 * Data is prepackaged in "top".
			 */
			resid = 0;
			if (flags & MSG_EOR)
				top->m_flags |= M_EOR;
		    } else do {
			if (top == 0) {
				MGETHDR(m, M_WAIT, MT_DATA);
				mlen = MHLEN;
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = (struct ifnet *)0;
			} else {
				MGET(m, M_WAIT, MT_DATA);
				mlen = MLEN;
			}
			if (resid >= MINCLSIZE) {
				MCLGET(m, M_WAIT);
				if ((m->m_flags & M_EXT) == 0)
					goto nopages;
				mlen = MCLBYTES;
				len = min(min(mlen, resid), space);
			} else {
nopages:
				len = min(min(mlen, resid), space);
				/*
				 * For datagram protocols, leave room
				 * for protocol headers in first mbuf.
				 */
				if (atomic && top == 0 && len < mlen)
					MH_ALIGN(m, len);
			}
			space -= len;
			error = uiomove(mtod(m, caddr_t), (int)len, uio);
			resid = uio->uio_resid;
			m->m_len = len;
			*mp = m;
			top->m_pkthdr.len += len;
			if (error)
				goto release;
			mp = &m->m_next;
			if (resid <= 0) {
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
				break;
			}
		    } while (space > 0 && atomic);
		    if (dontroute)
			    so->so_options |= SO_DONTROUTE;
		    s = splnet();				/* XXX */
		    error = (*so->so_proto->pr_usrreq)(so,
			(flags & MSG_OOB) ? PRU_SENDOOB :
			/*
			 * If the user set MSG_EOF, the protocol
			 * understands this flag, and there is nothing
			 * left to send, then use PRU_SEND_EOF instead
			 * of PRU_SEND.
			 */
			((flags & MSG_EOF) &&
			 (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			 (resid <= 0)) ?
				PRU_SEND_EOF : PRU_SEND,
			top, addr, control);
		    splx(s);
		    if (dontroute)
			    so->so_options &= ~SO_DONTROUTE;
		    clen = 0;
		    control = 0;
		    top = 0;
		    mp = &top;
		    if (error)
			goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	sbunlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
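
/*
 * Illustrative sketch (assumed caller, not part of this file): the
 * sendto()/sendmsg() path builds a struct uio over the user's iovecs,
 * packages any address and control data into mbufs, and calls
 *
 *	error = sosend(so, nam, &auio, (struct mbuf *)0, control, flags);
 *
 * Passing top == NULL selects the uio-driven copy loop above; a caller
 * with a prebuilt mbuf chain would instead pass uio == NULL and a
 * non-null top, which must be small enough to send all at once.
 */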

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(so, paddr, uio, mp0, controlp, flagsp)
	register struct socket *so;
	struct mbuf **paddr;
	struct uio *uio;
	struct mbuf **mp0;
	struct mbuf **controlp;
	int *flagsp;
{
	register struct mbuf *m, **mp;
	register int flags, len, error, s, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff, type = 0;
	int orig_resid = uio->uio_resid;

	mp = mp0;
	if (paddr)
		*paddr = 0;
	if (controlp)
		*controlp = 0;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		error = (*pr->pr_usrreq)(so, PRU_RCVOOB,
		    m, (struct mbuf *)(flags & MSG_PEEK), (struct mbuf *)0);
		if (error)
			goto bad;
		do {
			error = uiomove(mtod(m, caddr_t),
			    (int) min(uio->uio_resid, m->m_len), uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if (mp)
		*mp = (struct mbuf *)0;
	if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
		(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
		    (struct mbuf *)0, (struct mbuf *)0);

restart:
	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		return (error);
	s = splnet();

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) provided MSG_DONTWAIT is not set
	 * and either:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == 0 || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
#ifdef DIAGNOSTIC
		if (m == 0 && so->so_rcv.sb_cc)
			panic("receive 1");
#endif
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0)
			goto release;
		if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		splx(s);
		if (error)
			return (error);
		goto restart;
	}
dontblock:
	if (uio->uio_procp)
		uio->uio_procp->p_stats->p_ru.ru_msgrcv++;
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("receive 1a");
#endif
		orig_resid = 0;
		if (flags & MSG_PEEK) {
			if (paddr)
				*paddr = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (paddr) {
				*paddr = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
	}
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (controlp) {
				if (pr->pr_domain->dom_externalize &&
				    mtod(m, struct cmsghdr *)->cmsg_type ==
				    SCM_RIGHTS)
				   error = (*pr->pr_domain->dom_externalize)(m);
				*controlp = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
		}
		if (controlp) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}
	if (m) {
		if ((flags & MSG_PEEK) == 0)
			m->m_nextpkt = nextrecord;
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}
	moff = 0;
	offset = 0;
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
#ifdef DIAGNOSTIC
		else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
			panic("receive 3");
#endif
		so->so_state &= ~SS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * The sockbuf must be consistent (sb_mb pointing to the
		 * current mbuf, its m_nextpkt to the next record) when we
		 * drop priority; we must note any additions to the sockbuf
		 * when we block interrupts again.
		 */
		if (mp == 0) {
			splx(s);
			error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
			s = splnet();
		} else
			uio->uio_resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = (struct mbuf *)0;
				} else {
					MFREE(m, so->so_rcv.sb_mb);
					m = so->so_rcv.sb_mb;
				}
				if (m)
					m->m_nextpkt = nextrecord;
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp)
					*mp = m_copym(m, 0, len, M_WAIT);
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for a non-atomic socket),
		 * we must not quit until uio->uio_resid == 0 or an error
		 * terminates the transfer.  If a signal/timeout occurs,
		 * return with a short count but without error.
		 * Keep the sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			error = sbwait(&so->so_rcv);
			if (error) {
				sbunlock(&so->so_rcv);
				splx(s);
				return (0);
			}
			m = so->so_rcv.sb_mb;
			if (m)
				nextrecord = m->m_nextpkt;
		}
	}

	if (m && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == 0)
			so->so_rcv.sb_mb = nextrecord;
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
			    (struct mbuf *)flags, (struct mbuf *)0);
	}
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		splx(s);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	sbunlock(&so->so_rcv);
	splx(s);
	return (error);
}
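
/*
 * Illustrative sketch (assumed caller, not part of this file): the
 * recvfrom()/recvmsg() path wraps the user buffer in a struct uio and
 * collects the source address, control data, and result flags:
 *
 *	struct mbuf *from = 0, *control = 0;
 *	int flags = user_msg_flags;
 *
 *	error = soreceive(so, &from, &auio, (struct mbuf **)0,
 *	    &control, &flags);
 *
 * Passing a non-null mp0 instead returns the data as a raw mbuf chain
 * and uses the uio only for its uio_resid byte count.
 */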

int
soshutdown(so, how)
	register struct socket *so;
	register int how;
{
	register struct protosw *pr = so->so_proto;

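	/*
	 * Added note: incrementing "how" maps the shutdown(2) argument
	 * onto the FREAD/FWRITE bits: 0 (no more receives) -> FREAD,
	 * 1 (no more sends) -> FWRITE, 2 -> FREAD|FWRITE.
	 */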
	how++;
	if (how & FREAD)
		sorflush(so);
	if (how & FWRITE)
		return ((*pr->pr_usrreq)(so, PRU_SHUTDOWN,
		    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
	return (0);
}

void
sorflush(so)
	register struct socket *so;
{
	register struct sockbuf *sb = &so->so_rcv;
	register struct protosw *pr = so->so_proto;
	register int s;
	struct sockbuf asb;

	sb->sb_flags |= SB_NOINTR;
	(void) sblock(sb, M_WAITOK);
	s = splimp();
	socantrcvmore(so);
	sbunlock(sb);
	asb = *sb;
	bzero((caddr_t)sb, sizeof (*sb));
	splx(s);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb);
}
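
/*
 * Added note: the snapshot-and-clear sequence above (asb = *sb, then
 * bzero of the live sockbuf) lets the queued mbufs be disposed of and
 * released after splx(), so passed-rights cleanup and sbrelease() run
 * without keeping interrupts blocked.
 */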

int
sosetopt(so, level, optname, m0)
	register struct socket *so;
	int level, optname;
	struct mbuf *m0;
{
	int error = 0;
	register struct mbuf *m = m0;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_SETOPT, so, level, optname, &m0));
		error = ENOPROTOOPT;
	} else {
		switch (optname) {

		case SO_LINGER:
			if (m == NULL || m->m_len != sizeof (struct linger)) {
				error = EINVAL;
				goto bad;
			}
			so->so_linger = mtod(m, struct linger *)->l_linger;
			/* fall thru... */

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			if (*mtod(m, int *))
				so->so_options |= optname;
			else
				so->so_options &= ~optname;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			switch (optname) {

			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(optname == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv,
				    (u_long) *mtod(m, int *)) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			case SO_SNDLOWAT:
				so->so_snd.sb_lowat = *mtod(m, int *);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat = *mtod(m, int *);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			struct timeval *tv;
			short val;

			if (m == NULL || m->m_len < sizeof (*tv)) {
				error = EINVAL;
				goto bad;
			}
			tv = mtod(m, struct timeval *);
			if (tv->tv_sec > SHRT_MAX / hz - hz) {
				error = EDOM;
				goto bad;
			}
			val = tv->tv_sec * hz + tv->tv_usec / tick;

			switch (optname) {

			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = val;
				break;
			}
			break;
		    }

		case SO_PRIVSTATE:
			/* we don't care what the parameter is... */
			so->so_state &= ~SS_PRIV;
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) ((*so->so_proto->pr_ctloutput)
				  (PRCO_SETOPT, so, level, optname, &m0));
			m = NULL;	/* freed by protocol */
		}
	}
bad:
	if (m)
		(void) m_free(m);
	return (error);
}
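
/*
 * Illustrative sketch (assumed caller, not part of this file):
 * setsockopt(2) copies the user's value into an MT_SOOPTS mbuf and
 * hands it here; enabling SO_REUSEADDR, for example, amounts to
 *
 *	m = m_get(M_WAIT, MT_SOOPTS);
 *	m->m_len = sizeof (int);
 *	*mtod(m, int *) = 1;
 *	error = sosetopt(so, SOL_SOCKET, SO_REUSEADDR, m);
 *
 * sosetopt() consumes the mbuf on every path (directly or via the
 * protocol's ctloutput routine), so the caller must not free it.
 */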

int
sogetopt(so, level, optname, mp)
	register struct socket *so;
	int level, optname;
	struct mbuf **mp;
{
	register struct mbuf *m;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_GETOPT, so, level, optname, mp));
		} else
			return (ENOPROTOOPT);
	} else {
		m = m_get(M_WAIT, MT_SOOPTS);
		m->m_len = sizeof (int);

		switch (optname) {

		case SO_LINGER:
			m->m_len = sizeof (struct linger);
			mtod(m, struct linger *)->l_onoff =
				so->so_options & SO_LINGER;
			mtod(m, struct linger *)->l_linger = so->so_linger;
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
			*mtod(m, int *) = so->so_options & optname;
			break;

		case SO_PRIVSTATE:
			*mtod(m, int *) = so->so_state & SS_PRIV;
			break;

		case SO_TYPE:
			*mtod(m, int *) = so->so_type;
			break;

		case SO_ERROR:
			*mtod(m, int *) = so->so_error;
			so->so_error = 0;
			break;

		case SO_SNDBUF:
			*mtod(m, int *) = so->so_snd.sb_hiwat;
			break;

		case SO_RCVBUF:
			*mtod(m, int *) = so->so_rcv.sb_hiwat;
			break;

		case SO_SNDLOWAT:
			*mtod(m, int *) = so->so_snd.sb_lowat;
			break;

		case SO_RCVLOWAT:
			*mtod(m, int *) = so->so_rcv.sb_lowat;
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			int val = (optname == SO_SNDTIMEO ?
			     so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

			m->m_len = sizeof(struct timeval);
			mtod(m, struct timeval *)->tv_sec = val / hz;
			mtod(m, struct timeval *)->tv_usec =
			    (val % hz) * tick;
			break;
		    }

		default:
			(void)m_free(m);
			return (ENOPROTOOPT);
		}
		*mp = m;
		return (0);
	}
}
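
/*
 * Illustrative sketch (assumed caller, not part of this file):
 * getsockopt(2) receives the value as an mbuf that it must free:
 *
 *	struct mbuf *m;
 *	int type;
 *
 *	error = sogetopt(so, SOL_SOCKET, SO_TYPE, &m);
 *	if (error == 0) {
 *		type = *mtod(m, int *);
 *		(void) m_free(m);
 *	}
 */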

void
sohasoutofband(so)
	register struct socket *so;
{
	struct proc *p;

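	/*
	 * Added note: so_pgid follows the usual convention: a negative
	 * value names a process group (signalled via gsignal()), a
	 * positive value a single process (via psignal()), and zero
	 * means nobody has asked for SIGURG.
	 */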
	if (so->so_pgid < 0)
		gsignal(-so->so_pgid, SIGURG);
	else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
		psignal(p, SIGURG);
	selwakeup(&so->so_rcv.sb_sel);
}