xref: /freebsd/sys/kern/uipc_sockbuf.c (revision 35a04710d7286aa9538917fd7f8e417dbee95b82)
1 /*-
2  * Copyright (c) 1982, 1986, 1988, 1990, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 4. Neither the name of the University nor the names of its contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  *	@(#)uipc_socket2.c	8.1 (Berkeley) 6/10/93
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include "opt_param.h"
36 
37 #include <sys/param.h>
38 #include <sys/aio.h> /* for aio_swake proto */
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/mbuf.h>
42 #include <sys/mutex.h>
43 #include <sys/proc.h>
44 #include <sys/protosw.h>
45 #include <sys/resourcevar.h>
46 #include <sys/signalvar.h>
47 #include <sys/socket.h>
48 #include <sys/socketvar.h>
49 #include <sys/sx.h>
50 #include <sys/sysctl.h>
51 
52 /*
53  * Function pointer set by the AIO routines so that the socket buffer code
54  * can call back into the AIO module if it is loaded.
55  */
56 void	(*aio_swake)(struct socket *, struct sockbuf *);
57 
58 /*
59  * Primitive routines for operating on socket buffers
60  */
61 
62 u_long	sb_max = SB_MAX;
63 u_long sb_max_adj =
64        SB_MAX * MCLBYTES / (MSIZE + MCLBYTES); /* adjusted sb_max */
65 
66 static	u_long sb_efficiency = 8;	/* parameter for sbreserve() */
67 
68 static void	sbdrop_internal(struct sockbuf *sb, int len);
69 static void	sbflush_internal(struct sockbuf *sb);
70 static void	sbrelease_internal(struct sockbuf *sb, struct socket *so);
71 
72 /*
73  * Socantsendmore indicates that no more data will be sent on the socket;
74  * it is normally applied to a socket by the protocol code when the user
75  * informs the system that no more data is to be sent (the PRU_SHUTDOWN
76  * case).  Socantrcvmore indicates that no more data will be received, and
77  * is normally applied to the socket by a protocol when it detects that the
78  * peer will send no more data.  Data already queued for reading in the
79  * socket may still be read.
80  */
81 void
82 socantsendmore_locked(struct socket *so)
83 {
84 
85 	SOCKBUF_LOCK_ASSERT(&so->so_snd);
86 
87 	so->so_snd.sb_state |= SBS_CANTSENDMORE;
88 	sowwakeup_locked(so);
89 	mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
90 }
91 
92 void
93 socantsendmore(struct socket *so)
94 {
95 
96 	SOCKBUF_LOCK(&so->so_snd);
97 	socantsendmore_locked(so);
98 	mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
99 }
100 
101 void
102 socantrcvmore_locked(struct socket *so)
103 {
104 
105 	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
106 
107 	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
108 	sorwakeup_locked(so);
109 	mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
110 }
111 
112 void
113 socantrcvmore(struct socket *so)
114 {
115 
116 	SOCKBUF_LOCK(&so->so_rcv);
117 	socantrcvmore_locked(so);
118 	mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
119 }
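
/*
 * Illustrative sketch (not compiled): roughly how a protocol might apply
 * these markers.  A PRU_SHUTDOWN handler calls socantsendmore() once the
 * user has finished writing, and an input path that learns the peer will
 * send no more data calls socantrcvmore(), after which data already queued
 * may still be read.  The function names below are hypothetical.
 */
#if 0
static int
example_pru_shutdown(struct socket *so)
{

	socantsendmore(so);
	return (0);
}

static void
example_peer_closed_input(struct socket *so)
{

	socantrcvmore(so);
}
#endif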
120 
121 /*
122  * Wait for data to arrive at/drain from a socket buffer.
123  */
124 int
125 sbwait(struct sockbuf *sb)
126 {
127 
128 	SOCKBUF_LOCK_ASSERT(sb);
129 
130 	sb->sb_flags |= SB_WAIT;
131 	return (msleep(&sb->sb_cc, &sb->sb_mtx,
132 	    (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, "sbwait",
133 	    sb->sb_timeo));
134 }
135 
136 int
137 sblock(struct sockbuf *sb, int flags)
138 {
139 
140 	if (flags == M_WAITOK) {
141 		if (sb->sb_flags & SB_NOINTR) {
142 			sx_xlock(&sb->sb_sx);
143 			return (0);
144 		}
145 		return (sx_xlock_sig(&sb->sb_sx));
146 	} else {
147 		if (sx_try_xlock(&sb->sb_sx) == 0)
148 			return (EWOULDBLOCK);
149 		return (0);
150 	}
151 }
152 
153 void
154 sbunlock(struct sockbuf *sb)
155 {
156 
157 	sx_xunlock(&sb->sb_sx);
158 }
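
/*
 * Illustrative sketch (not compiled): the usual consumer-side pattern for
 * these primitives -- take the sockbuf I/O (sx) lock with sblock(), then
 * hold the sockbuf mutex while sleeping in sbwait() until data arrives or
 * the peer closes.  example_wait_for_data() is a hypothetical name and
 * error handling is reduced to the essentials.
 */
#if 0
static int
example_wait_for_data(struct socket *so)
{
	struct sockbuf *sb = &so->so_rcv;
	int error;

	error = sblock(sb, M_WAITOK);
	if (error)
		return (error);
	SOCKBUF_LOCK(sb);
	while (sb->sb_cc == 0 && (sb->sb_state & SBS_CANTRCVMORE) == 0) {
		error = sbwait(sb);
		if (error)
			break;
	}
	SOCKBUF_UNLOCK(sb);
	sbunlock(sb);
	return (error);
}
#endif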
159 
160 /*
161  * Wakeup processes waiting on a socket buffer.  Do asynchronous notification
162  * via SIGIO if the socket has the SS_ASYNC flag set.
163  *
164  * Called with the socket buffer lock held; will release the lock by the end
165  * of the function.  This allows the caller to acquire the socket buffer lock
166  * while testing for the need for various sorts of wakeup and hold it through
167  * to the point where it's no longer required.  We currently hold the lock
168  * through calls out to other subsystems (with the exception of kqueue), and
169  * then release it to avoid lock order issues.  It's not clear that's
170  * correct.
171  */
172 void
173 sowakeup(struct socket *so, struct sockbuf *sb)
174 {
175 
176 	SOCKBUF_LOCK_ASSERT(sb);
177 
178 	selwakeuppri(&sb->sb_sel, PSOCK);
179 	sb->sb_flags &= ~SB_SEL;
180 	if (sb->sb_flags & SB_WAIT) {
181 		sb->sb_flags &= ~SB_WAIT;
182 		wakeup(&sb->sb_cc);
183 	}
184 	KNOTE_LOCKED(&sb->sb_sel.si_note, 0);
185 	SOCKBUF_UNLOCK(sb);
186 	if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
187 		pgsigio(&so->so_sigio, SIGIO, 0);
188 	if (sb->sb_flags & SB_UPCALL)
189 		(*so->so_upcall)(so, so->so_upcallarg, M_DONTWAIT);
190 	if (sb->sb_flags & SB_AIO)
191 		aio_swake(so, sb);
192 	mtx_assert(SOCKBUF_MTX(sb), MA_NOTOWNED);
193 }
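
/*
 * Illustrative sketch (not compiled): sowakeup() is normally reached via
 * the sorwakeup()/sowwakeup() wrappers.  A protocol input path typically
 * locks the receive buffer, appends data, and then calls the _locked
 * wakeup variant, which hands the still-held lock to sowakeup() for
 * release.  example_deliver() is a hypothetical name.
 */
#if 0
static void
example_deliver(struct socket *so, struct mbuf *m)
{

	SOCKBUF_LOCK(&so->so_rcv);
	sbappend_locked(&so->so_rcv, m);
	sorwakeup_locked(so);		/* Drops the sockbuf lock. */
}
#endif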
194 
195 /*
196  * Socket buffer (struct sockbuf) utility routines.
197  *
198  * Each socket contains two socket buffers: one for sending data and one for
199  * receiving data.  Each buffer contains a queue of mbufs, information about
200  * the number of mbufs and amount of data in the queue, and other fields
201  * allowing select() statements and notification on data availability to be
202  * implemented.
203  *
204  * Data stored in a socket buffer is maintained as a list of records.  Each
205  * record is a list of mbufs chained together with the m_next field.  Records
206  * are chained together with the m_nextpkt field. The upper level routine
207  * soreceive() expects the following conventions to be observed when placing
208  * information in the receive buffer:
209  *
210  * 1. If the protocol requires each message be preceded by the sender's name,
211  *    then a record containing that name must be present before any
212  *    associated data (mbuf's must be of type MT_SONAME).
213  * 2. If the protocol supports the exchange of ``access rights'' (really just
214  *    additional data associated with the message), and there are ``rights''
215  *    to be received, then a record containing this data should be present
216  *    (mbuf's must be of type MT_RIGHTS).
217  * 3. If a name or rights record exists, then it must be followed by a data
218  *    record, perhaps of zero length.
219  *
220  * Before using a new socket structure it is first necessary to reserve
221  * buffer space to the socket, by calling sbreserve().  This should commit
222  * some of the available buffer space in the system buffer pool for the
223  * socket (currently, it does nothing but enforce limits).  The space should
224  * be released by calling sbrelease() when the socket is destroyed.
225  */
226 int
227 soreserve(struct socket *so, u_long sndcc, u_long rcvcc)
228 {
229 	struct thread *td = curthread;
230 
231 	SOCKBUF_LOCK(&so->so_snd);
232 	SOCKBUF_LOCK(&so->so_rcv);
233 	if (sbreserve_locked(&so->so_snd, sndcc, so, td) == 0)
234 		goto bad;
235 	if (sbreserve_locked(&so->so_rcv, rcvcc, so, td) == 0)
236 		goto bad2;
237 	if (so->so_rcv.sb_lowat == 0)
238 		so->so_rcv.sb_lowat = 1;
239 	if (so->so_snd.sb_lowat == 0)
240 		so->so_snd.sb_lowat = MCLBYTES;
241 	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
242 		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
243 	SOCKBUF_UNLOCK(&so->so_rcv);
244 	SOCKBUF_UNLOCK(&so->so_snd);
245 	return (0);
246 bad2:
247 	sbrelease_locked(&so->so_snd, so);
248 bad:
249 	SOCKBUF_UNLOCK(&so->so_rcv);
250 	SOCKBUF_UNLOCK(&so->so_snd);
251 	return (ENOBUFS);
252 }
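
/*
 * Illustrative sketch (not compiled): a protocol attach routine normally
 * reserves buffer space before the socket is used, along the lines of the
 * per-protocol send/receive space constants used by UDP and TCP.  The
 * names example_attach, EX_SENDSPACE and EX_RECVSPACE are hypothetical.
 */
#if 0
#define	EX_SENDSPACE	9216
#define	EX_RECVSPACE	(40 * 1024)

static int
example_attach(struct socket *so, int proto, struct thread *td)
{
	int error;

	error = soreserve(so, EX_SENDSPACE, EX_RECVSPACE);
	if (error)
		return (error);
	/* Protocol-specific control block allocation would follow here. */
	return (0);
}
#endif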
253 
254 static int
255 sysctl_handle_sb_max(SYSCTL_HANDLER_ARGS)
256 {
257 	int error = 0;
258 	u_long tmp_sb_max = sb_max;
259 
260 	error = sysctl_handle_long(oidp, &tmp_sb_max, arg2, req);
261 	if (error || !req->newptr)
262 		return (error);
263 	if (tmp_sb_max < MSIZE + MCLBYTES)
264 		return (EINVAL);
265 	sb_max = tmp_sb_max;
266 	sb_max_adj = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
267 	return (0);
268 }
269 
270 /*
271  * Allot mbufs to a sockbuf.  Attempt to scale mbmax so that mbcnt doesn't
272  * become limiting if buffering efficiency is near the normal case.
273  */
274 int
275 sbreserve_locked(struct sockbuf *sb, u_long cc, struct socket *so,
276     struct thread *td)
277 {
278 	rlim_t sbsize_limit;
279 
280 	SOCKBUF_LOCK_ASSERT(sb);
281 
282 	/*
283 	 * td will only be NULL when we're in an interrupt (e.g. in
284 	 * tcp_input()).
285 	 *
286 	 * XXXRW: This comment needs updating, as might the code.
287 	 */
288 	if (cc > sb_max_adj)
289 		return (0);
290 	if (td != NULL) {
291 		PROC_LOCK(td->td_proc);
292 		sbsize_limit = lim_cur(td->td_proc, RLIMIT_SBSIZE);
293 		PROC_UNLOCK(td->td_proc);
294 	} else
295 		sbsize_limit = RLIM_INFINITY;
296 	if (!chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, cc,
297 	    sbsize_limit))
298 		return (0);
299 	sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
300 	if (sb->sb_lowat > sb->sb_hiwat)
301 		sb->sb_lowat = sb->sb_hiwat;
302 	return (1);
303 }
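
/*
 * Worked example of the scaling above (a sketch using this file's
 * defaults): reserving cc = 64 kB of data space with sb_efficiency = 8
 * gives sb_mbmax = min(64 kB * 8, sb_max) = min(512 kB, sb_max).  With the
 * stock SB_MAX of 256 kB the mbuf accounting limit saturates at sb_max, so
 * for ordinary cluster-backed buffering it is sb_hiwat, not sb_mbmax, that
 * limits the buffer.
 */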
304 
305 int
306 sbreserve(struct sockbuf *sb, u_long cc, struct socket *so,
307     struct thread *td)
308 {
309 	int error;
310 
311 	SOCKBUF_LOCK(sb);
312 	error = sbreserve_locked(sb, cc, so, td);
313 	SOCKBUF_UNLOCK(sb);
314 	return (error);
315 }
316 
317 /*
318  * Free mbufs held by a socket, and reserved mbuf space.
319  */
320 static void
321 sbrelease_internal(struct sockbuf *sb, struct socket *so)
322 {
323 
324 	sbflush_internal(sb);
325 	(void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0,
326 	    RLIM_INFINITY);
327 	sb->sb_mbmax = 0;
328 }
329 
330 void
331 sbrelease_locked(struct sockbuf *sb, struct socket *so)
332 {
333 
334 	SOCKBUF_LOCK_ASSERT(sb);
335 
336 	sbrelease_internal(sb, so);
337 }
338 
339 void
340 sbrelease(struct sockbuf *sb, struct socket *so)
341 {
342 
343 	SOCKBUF_LOCK(sb);
344 	sbrelease_locked(sb, so);
345 	SOCKBUF_UNLOCK(sb);
346 }
347 
348 void
349 sbdestroy(struct sockbuf *sb, struct socket *so)
350 {
351 
352 	sbrelease_internal(sb, so);
353 }
354 
355 /*
356  * Routines to add and remove data from an mbuf queue.
357  *
358  * The routines sbappend() or sbappendrecord() are normally called to append
359  * new mbufs to a socket buffer, after checking that adequate space is
360  * available, comparing the function sbspace() with the amount of data to be
361  * added.  sbappendrecord() differs from sbappend() in that data supplied is
362  * treated as the beginning of a new record.  To place a sender's address,
363  * optional access rights, and data in a socket receive buffer,
364  * sbappendaddr() should be used.  To place access rights and data in a
365  * socket receive buffer, sbappendrights() should be used.  In either case,
366  * the new data begins a new record.  Note that unlike sbappend() and
367  * sbappendrecord(), these routines check for the caller that there will be
368  * enough space to store the data.  Each fails if there is not enough space,
369  * or if it cannot find mbufs to store additional information in.
370  *
371  * Reliable protocols may use the socket send buffer to hold data awaiting
372  * acknowledgement.  Data is normally copied from a socket send buffer in a
373  * protocol with m_copy for output to a peer, and then removing the data from
374  * the socket buffer with sbdrop() or sbdroprecord() when the data is
375  * acknowledged by the peer.
376  */
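
/*
 * Illustrative sketch (not compiled): the send-buffer life cycle described
 * above, as a reliable protocol might drive it.  Data already appended by
 * the socket layer is copied (not removed) for transmission with m_copy(),
 * and only dropped with sbdrop_locked() once the peer acknowledges it.
 * The function name and the off/len/acked bookkeeping are hypothetical;
 * allocation-failure handling is omitted.
 */
#if 0
static void
example_output_and_ack(struct socket *so, int off, int len, int acked)
{
	struct mbuf *m;

	SOCKBUF_LOCK(&so->so_snd);
	/* Copy 'len' bytes starting at offset 'off' for (re)transmission. */
	m = m_copy(so->so_snd.sb_mb, off, len);
	SOCKBUF_UNLOCK(&so->so_snd);
	/* ... hand 'm' to the lower layers ... */

	/* Later, when the peer acknowledges 'acked' bytes: */
	SOCKBUF_LOCK(&so->so_snd);
	sbdrop_locked(&so->so_snd, acked);
	sowwakeup_locked(so);		/* Drops the sockbuf lock. */
}
#endif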
377 #ifdef SOCKBUF_DEBUG
378 void
379 sblastrecordchk(struct sockbuf *sb, const char *file, int line)
380 {
381 	struct mbuf *m = sb->sb_mb;
382 
383 	SOCKBUF_LOCK_ASSERT(sb);
384 
385 	while (m && m->m_nextpkt)
386 		m = m->m_nextpkt;
387 
388 	if (m != sb->sb_lastrecord) {
389 		printf("%s: sb_mb %p sb_lastrecord %p last %p\n",
390 			__func__, sb->sb_mb, sb->sb_lastrecord, m);
391 		printf("packet chain:\n");
392 		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt)
393 			printf("\t%p\n", m);
394 		panic("%s from %s:%u", __func__, file, line);
395 	}
396 }
397 
398 void
399 sblastmbufchk(struct sockbuf *sb, const char *file, int line)
400 {
401 	struct mbuf *m = sb->sb_mb;
402 	struct mbuf *n;
403 
404 	SOCKBUF_LOCK_ASSERT(sb);
405 
406 	while (m && m->m_nextpkt)
407 		m = m->m_nextpkt;
408 
409 	while (m && m->m_next)
410 		m = m->m_next;
411 
412 	if (m != sb->sb_mbtail) {
413 		printf("%s: sb_mb %p sb_mbtail %p last %p\n",
414 			__func__, sb->sb_mb, sb->sb_mbtail, m);
415 		printf("packet tree:\n");
416 		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) {
417 			printf("\t");
418 			for (n = m; n != NULL; n = n->m_next)
419 				printf("%p ", n);
420 			printf("\n");
421 		}
422 		panic("%s from %s:%u", __func__, file, line);
423 	}
424 }
425 #endif /* SOCKBUF_DEBUG */
426 
427 #define SBLINKRECORD(sb, m0) do {					\
428 	SOCKBUF_LOCK_ASSERT(sb);					\
429 	if ((sb)->sb_lastrecord != NULL)				\
430 		(sb)->sb_lastrecord->m_nextpkt = (m0);			\
431 	else								\
432 		(sb)->sb_mb = (m0);					\
433 	(sb)->sb_lastrecord = (m0);					\
434 } while (/*CONSTCOND*/0)
435 
436 /*
437  * Append mbuf chain m to the last record in the socket buffer sb.  The
438  * additional space associated with the mbuf chain is recorded in sb.
439  * Empty mbufs are discarded and mbufs are compacted where possible.
440  */
441 void
442 sbappend_locked(struct sockbuf *sb, struct mbuf *m)
443 {
444 	struct mbuf *n;
445 
446 	SOCKBUF_LOCK_ASSERT(sb);
447 
448 	if (m == 0)
449 		return;
450 
451 	SBLASTRECORDCHK(sb);
452 	n = sb->sb_mb;
453 	if (n) {
454 		while (n->m_nextpkt)
455 			n = n->m_nextpkt;
456 		do {
457 			if (n->m_flags & M_EOR) {
458 				sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
459 				return;
460 			}
461 		} while (n->m_next && (n = n->m_next));
462 	} else {
463 		/*
464 		 * XXX Would like to simply use sb_mbtail here, but
465 		 * XXX I need to verify that I won't miss an EOR that
466 		 * XXX way.
467 		 */
468 		if ((n = sb->sb_lastrecord) != NULL) {
469 			do {
470 				if (n->m_flags & M_EOR) {
471 					sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
472 					return;
473 				}
474 			} while (n->m_next && (n = n->m_next));
475 		} else {
476 			/*
477 			 * If this is the first record in the socket buffer,
478 			 * it's also the last record.
479 			 */
480 			sb->sb_lastrecord = m;
481 		}
482 	}
483 	sbcompress(sb, m, n);
484 	SBLASTRECORDCHK(sb);
485 }
486 
487 /*
488  * Append mbuf chain m to the last record in the socket buffer sb.  The
489  * additional space associated with the mbuf chain is recorded in sb.
490  * Empty mbufs are discarded and mbufs are compacted where possible.
491  */
492 void
493 sbappend(struct sockbuf *sb, struct mbuf *m)
494 {
495 
496 	SOCKBUF_LOCK(sb);
497 	sbappend_locked(sb, m);
498 	SOCKBUF_UNLOCK(sb);
499 }
500 
501 /*
502  * This version of sbappend() should only be used when the caller absolutely
503  * knows that there will never be more than one record in the socket buffer,
504  * that is, a stream protocol (such as TCP).
505  */
506 void
507 sbappendstream_locked(struct sockbuf *sb, struct mbuf *m)
508 {
509 	SOCKBUF_LOCK_ASSERT(sb);
510 
511 	KASSERT(m->m_nextpkt == NULL,("sbappendstream 0"));
512 	KASSERT(sb->sb_mb == sb->sb_lastrecord,("sbappendstream 1"));
513 
514 	SBLASTMBUFCHK(sb);
515 
516 	sbcompress(sb, m, sb->sb_mbtail);
517 
518 	sb->sb_lastrecord = sb->sb_mb;
519 	SBLASTRECORDCHK(sb);
520 }
521 
522 /*
523  * This version of sbappend() should only be used when the caller absolutely
524  * knows that there will never be more than one record in the socket buffer,
525  * that is, a stream protocol (such as TCP).
526  */
527 void
528 sbappendstream(struct sockbuf *sb, struct mbuf *m)
529 {
530 
531 	SOCKBUF_LOCK(sb);
532 	sbappendstream_locked(sb, m);
533 	SOCKBUF_UNLOCK(sb);
534 }
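
/*
 * Illustrative sketch (not compiled): how a stream protocol's input path
 * might queue received data, in the spirit of what tcp_input() does --
 * append under the sockbuf lock unless the receive side has been shut
 * down, then let the _locked wakeup drop the lock.  example_stream_input()
 * is a hypothetical name.
 */
#if 0
static void
example_stream_input(struct socket *so, struct mbuf *m)
{

	SOCKBUF_LOCK(&so->so_rcv);
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
		m_freem(m);
	else
		sbappendstream_locked(&so->so_rcv, m);
	sorwakeup_locked(so);		/* Drops the sockbuf lock. */
}
#endif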
535 
536 #ifdef SOCKBUF_DEBUG
537 void
538 sbcheck(struct sockbuf *sb)
539 {
540 	struct mbuf *m;
541 	struct mbuf *n = 0;
542 	u_long len = 0, mbcnt = 0;
543 
544 	SOCKBUF_LOCK_ASSERT(sb);
545 
546 	for (m = sb->sb_mb; m; m = n) {
547 	    n = m->m_nextpkt;
548 	    for (; m; m = m->m_next) {
549 		len += m->m_len;
550 		mbcnt += MSIZE;
551 		if (m->m_flags & M_EXT) /*XXX*/ /* pretty sure this is bogus */
552 			mbcnt += m->m_ext.ext_size;
553 	    }
554 	}
555 	if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
556 		printf("cc %lu != %u || mbcnt %lu != %u\n", len, sb->sb_cc,
557 		    mbcnt, sb->sb_mbcnt);
558 		panic("sbcheck");
559 	}
560 }
561 #endif
562 
563 /*
564  * As above, except the mbuf chain begins a new record.
565  */
566 void
567 sbappendrecord_locked(struct sockbuf *sb, struct mbuf *m0)
568 {
569 	struct mbuf *m;
570 
571 	SOCKBUF_LOCK_ASSERT(sb);
572 
573 	if (m0 == 0)
574 		return;
575 	m = sb->sb_mb;
576 	if (m)
577 		while (m->m_nextpkt)
578 			m = m->m_nextpkt;
579 	/*
580 	 * Put the first mbuf on the queue.  Note this permits zero length
581 	 * records.
582 	 */
583 	sballoc(sb, m0);
584 	SBLASTRECORDCHK(sb);
585 	SBLINKRECORD(sb, m0);
586 	if (m)
587 		m->m_nextpkt = m0;
588 	else
589 		sb->sb_mb = m0;
590 	m = m0->m_next;
591 	m0->m_next = 0;
592 	if (m && (m0->m_flags & M_EOR)) {
593 		m0->m_flags &= ~M_EOR;
594 		m->m_flags |= M_EOR;
595 	}
596 	sbcompress(sb, m, m0);
597 }
598 
599 /*
600  * As above, except the mbuf chain begins a new record.
601  */
602 void
603 sbappendrecord(struct sockbuf *sb, struct mbuf *m0)
604 {
605 
606 	SOCKBUF_LOCK(sb);
607 	sbappendrecord_locked(sb, m0);
608 	SOCKBUF_UNLOCK(sb);
609 }
610 
611 /*
612  * Append address and data, and optionally, control (ancillary) data to the
613  * receive queue of a socket.  If present, m0 must include a packet header
614  * with total length.  Returns 0 if no space in sockbuf or insufficient
615  * mbufs.
616  */
617 int
618 sbappendaddr_locked(struct sockbuf *sb, const struct sockaddr *asa,
619     struct mbuf *m0, struct mbuf *control)
620 {
621 	struct mbuf *m, *n, *nlast;
622 	int space = asa->sa_len;
623 
624 	SOCKBUF_LOCK_ASSERT(sb);
625 
626 	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
627 		panic("sbappendaddr_locked");
628 	if (m0)
629 		space += m0->m_pkthdr.len;
630 	space += m_length(control, &n);
631 
632 	if (space > sbspace(sb))
633 		return (0);
634 #if MSIZE <= 256
635 	if (asa->sa_len > MLEN)
636 		return (0);
637 #endif
638 	MGET(m, M_DONTWAIT, MT_SONAME);
639 	if (m == 0)
640 		return (0);
641 	m->m_len = asa->sa_len;
642 	bcopy(asa, mtod(m, caddr_t), asa->sa_len);
643 	if (n)
644 		n->m_next = m0;		/* concatenate data to control */
645 	else
646 		control = m0;
647 	m->m_next = control;
648 	for (n = m; n->m_next != NULL; n = n->m_next)
649 		sballoc(sb, n);
650 	sballoc(sb, n);
651 	nlast = n;
652 	SBLINKRECORD(sb, m);
653 
654 	sb->sb_mbtail = nlast;
655 	SBLASTMBUFCHK(sb);
656 
657 	SBLASTRECORDCHK(sb);
658 	return (1);
659 }
660 
661 /*
662  * Append address and data, and optionally, control (ancillary) data to the
663  * receive queue of a socket.  If present, m0 must include a packet header
664  * with total length.  Returns 0 if no space in sockbuf or insufficient
665  * mbufs.
666  */
667 int
668 sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa,
669     struct mbuf *m0, struct mbuf *control)
670 {
671 	int retval;
672 
673 	SOCKBUF_LOCK(sb);
674 	retval = sbappendaddr_locked(sb, asa, m0, control);
675 	SOCKBUF_UNLOCK(sb);
676 	return (retval);
677 }
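
/*
 * Illustrative sketch (not compiled): how a datagram protocol typically
 * queues a received packet together with the sender's address, in the
 * spirit of what udp_input() does.  "from" would be a sockaddr filled in
 * from the packet headers and "opts" an optional chain of control mbufs;
 * example_dgram_input() is a hypothetical name.
 */
#if 0
static void
example_dgram_input(struct socket *so, struct sockaddr *from,
    struct mbuf *m, struct mbuf *opts)
{

	SOCKBUF_LOCK(&so->so_rcv);
	if (sbappendaddr_locked(&so->so_rcv, from, m, opts) == 0) {
		/* No room or no mbufs: the datagram is dropped. */
		SOCKBUF_UNLOCK(&so->so_rcv);
		m_freem(m);
		if (opts)
			m_freem(opts);
		return;
	}
	sorwakeup_locked(so);		/* Drops the sockbuf lock. */
}
#endif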
678 
679 int
680 sbappendcontrol_locked(struct sockbuf *sb, struct mbuf *m0,
681     struct mbuf *control)
682 {
683 	struct mbuf *m, *n, *mlast;
684 	int space;
685 
686 	SOCKBUF_LOCK_ASSERT(sb);
687 
688 	if (control == 0)
689 		panic("sbappendcontrol_locked");
690 	space = m_length(control, &n) + m_length(m0, NULL);
691 
692 	if (space > sbspace(sb))
693 		return (0);
694 	n->m_next = m0;			/* concatenate data to control */
695 
696 	SBLASTRECORDCHK(sb);
697 
698 	for (m = control; m->m_next; m = m->m_next)
699 		sballoc(sb, m);
700 	sballoc(sb, m);
701 	mlast = m;
702 	SBLINKRECORD(sb, control);
703 
704 	sb->sb_mbtail = mlast;
705 	SBLASTMBUFCHK(sb);
706 
707 	SBLASTRECORDCHK(sb);
708 	return (1);
709 }
710 
711 int
712 sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control)
713 {
714 	int retval;
715 
716 	SOCKBUF_LOCK(sb);
717 	retval = sbappendcontrol_locked(sb, m0, control);
718 	SOCKBUF_UNLOCK(sb);
719 	return (retval);
720 }
721 
722 /*
723  * Append the data in mbuf chain (m) into the socket buffer sb following mbuf
724  * (n).  If (n) is NULL, the buffer is presumed empty.
725  *
726  * When the data is compressed, mbufs in the chain may be handled in one of
727  * three ways:
728  *
729  * (1) The mbuf may simply be dropped, if it contributes nothing (no data, no
730  *     record boundary, and no change in data type).
731  *
732  * (2) The mbuf may be coalesced -- i.e., data in the mbuf may be copied into
733  *     an mbuf already in the socket buffer.  This can occur if an
734  *     appropriate mbuf exists, there is room, and no merging of data types
735  *     will occur.
736  *
737  * (3) The mbuf may be appended to the end of the existing mbuf chain.
738  *
739  * If any of the new mbufs is marked as M_EOR, mark the last mbuf appended as
740  * end-of-record.
741  */
742 void
743 sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
744 {
745 	int eor = 0;
746 	struct mbuf *o;
747 
748 	SOCKBUF_LOCK_ASSERT(sb);
749 
750 	while (m) {
751 		eor |= m->m_flags & M_EOR;
752 		if (m->m_len == 0 &&
753 		    (eor == 0 ||
754 		     (((o = m->m_next) || (o = n)) &&
755 		      o->m_type == m->m_type))) {
756 			if (sb->sb_lastrecord == m)
757 				sb->sb_lastrecord = m->m_next;
758 			m = m_free(m);
759 			continue;
760 		}
761 		if (n && (n->m_flags & M_EOR) == 0 &&
762 		    M_WRITABLE(n) &&
763 		    m->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
764 		    m->m_len <= M_TRAILINGSPACE(n) &&
765 		    n->m_type == m->m_type) {
766 			bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len,
767 			    (unsigned)m->m_len);
768 			n->m_len += m->m_len;
769 			sb->sb_cc += m->m_len;
770 			if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
771 				/* XXX: Probably don't need.*/
772 				sb->sb_ctl += m->m_len;
773 			m = m_free(m);
774 			continue;
775 		}
776 		if (n)
777 			n->m_next = m;
778 		else
779 			sb->sb_mb = m;
780 		sb->sb_mbtail = m;
781 		sballoc(sb, m);
782 		n = m;
783 		m->m_flags &= ~M_EOR;
784 		m = m->m_next;
785 		n->m_next = 0;
786 	}
787 	if (eor) {
788 		KASSERT(n != NULL, ("sbcompress: eor && n == NULL"));
789 		n->m_flags |= eor;
790 	}
791 	SBLASTMBUFCHK(sb);
792 }
793 
794 /*
795  * Free all mbufs in a sockbuf.  Check that all resources are reclaimed.
796  */
797 static void
798 sbflush_internal(struct sockbuf *sb)
799 {
800 
801 	while (sb->sb_mbcnt) {
802 		/*
803 		 * Don't call sbdrop(sb, 0) if the leading mbuf is non-empty:
804 		 * we would loop forever. Panic instead.
805 		 */
806 		if (!sb->sb_cc && (sb->sb_mb == NULL || sb->sb_mb->m_len))
807 			break;
808 		sbdrop_internal(sb, (int)sb->sb_cc);
809 	}
810 	if (sb->sb_cc || sb->sb_mb || sb->sb_mbcnt)
811 		panic("sbflush_internal: cc %u || mb %p || mbcnt %u",
812 		    sb->sb_cc, (void *)sb->sb_mb, sb->sb_mbcnt);
813 }
814 
815 void
816 sbflush_locked(struct sockbuf *sb)
817 {
818 
819 	SOCKBUF_LOCK_ASSERT(sb);
820 	sbflush_internal(sb);
821 }
822 
823 void
824 sbflush(struct sockbuf *sb)
825 {
826 
827 	SOCKBUF_LOCK(sb);
828 	sbflush_locked(sb);
829 	SOCKBUF_UNLOCK(sb);
830 }
831 
832 /*
833  * Drop data from (the front of) a sockbuf.
834  */
835 static void
836 sbdrop_internal(struct sockbuf *sb, int len)
837 {
838 	struct mbuf *m;
839 	struct mbuf *next;
840 
841 	next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
842 	while (len > 0) {
843 		if (m == 0) {
844 			if (next == 0)
845 				panic("sbdrop");
846 			m = next;
847 			next = m->m_nextpkt;
848 			continue;
849 		}
850 		if (m->m_len > len) {
851 			m->m_len -= len;
852 			m->m_data += len;
853 			sb->sb_cc -= len;
854 			if (sb->sb_sndptroff != 0)
855 				sb->sb_sndptroff -= len;
856 			if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
857 				sb->sb_ctl -= len;
858 			break;
859 		}
860 		len -= m->m_len;
861 		sbfree(sb, m);
862 		m = m_free(m);
863 	}
864 	while (m && m->m_len == 0) {
865 		sbfree(sb, m);
866 		m = m_free(m);
867 	}
868 	if (m) {
869 		sb->sb_mb = m;
870 		m->m_nextpkt = next;
871 	} else
872 		sb->sb_mb = next;
873 	/*
874 	 * First part is an inline SB_EMPTY_FIXUP().  Second part makes sure
875 	 * sb_lastrecord is up-to-date if we dropped part of the last record.
876 	 */
877 	m = sb->sb_mb;
878 	if (m == NULL) {
879 		sb->sb_mbtail = NULL;
880 		sb->sb_lastrecord = NULL;
881 	} else if (m->m_nextpkt == NULL) {
882 		sb->sb_lastrecord = m;
883 	}
884 }
885 
886 /*
887  * Drop data from (the front of) a sockbuf.
888  */
889 void
890 sbdrop_locked(struct sockbuf *sb, int len)
891 {
892 
893 	SOCKBUF_LOCK_ASSERT(sb);
894 
895 	sbdrop_internal(sb, len);
896 }
897 
898 void
899 sbdrop(struct sockbuf *sb, int len)
900 {
901 
902 	SOCKBUF_LOCK(sb);
903 	sbdrop_locked(sb, len);
904 	SOCKBUF_UNLOCK(sb);
905 }
906 
907 /*
908  * Maintain a pointer and offset pair into the socket buffer mbuf chain to
909  * avoid traversal of the entire socket buffer for larger offsets.
910  */
911 struct mbuf *
912 sbsndptr(struct sockbuf *sb, u_int off, u_int len, u_int *moff)
913 {
914 	struct mbuf *m, *ret;
915 
916 	KASSERT(sb->sb_mb != NULL, ("%s: sb_mb is NULL", __func__));
917 	KASSERT(off + len <= sb->sb_cc, ("%s: beyond sb", __func__));
918 	KASSERT(sb->sb_sndptroff <= sb->sb_cc, ("%s: sndptroff broken", __func__));
919 
920 	/*
921 	 * Is off below stored offset? Happens on retransmits.
922 	 * Just return, we can't help here.
923 	 */
924 	if (sb->sb_sndptroff > off) {
925 		*moff = off;
926 		return (sb->sb_mb);
927 	}
928 
929 	/* Return closest mbuf in chain for current offset. */
930 	*moff = off - sb->sb_sndptroff;
931 	m = ret = sb->sb_sndptr ? sb->sb_sndptr : sb->sb_mb;
932 
933 	/* Advance by len to be as close as possible for the next transmit. */
934 	for (off = off - sb->sb_sndptroff + len - 1;
935 	     off > 0 && off >= m->m_len;
936 	     m = m->m_next) {
937 		sb->sb_sndptroff += m->m_len;
938 		off -= m->m_len;
939 	}
940 	sb->sb_sndptr = m;
941 
942 	return (ret);
943 }
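
/*
 * Illustrative sketch (not compiled): how a stream protocol's output path
 * might use sbsndptr() to find the mbuf holding the next byte to transmit
 * without walking the whole chain, roughly as tcp_output() does before
 * copying data for a segment.  The example assumes the caller serializes
 * access to the send buffer; names other than the sockbuf routines are
 * hypothetical.
 */
#if 0
static struct mbuf *
example_copy_for_transmit(struct socket *so, u_int off, u_int len)
{
	struct mbuf *mb;
	u_int moff;

	/*
	 * Locate the mbuf containing offset 'off'; 'moff' becomes the
	 * offset into that mbuf.
	 */
	mb = sbsndptr(&so->so_snd, off, len, &moff);
	/* Copy 'len' bytes beginning at that point (NULL on failure). */
	return (m_copy(mb, moff, (int)len));
}
#endif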
944 
945 /*
946  * Drop a record off the front of a sockbuf and move the next record to the
947  * front.
948  */
949 void
950 sbdroprecord_locked(struct sockbuf *sb)
951 {
952 	struct mbuf *m;
953 
954 	SOCKBUF_LOCK_ASSERT(sb);
955 
956 	m = sb->sb_mb;
957 	if (m) {
958 		sb->sb_mb = m->m_nextpkt;
959 		do {
960 			sbfree(sb, m);
961 			m = m_free(m);
962 		} while (m);
963 	}
964 	SB_EMPTY_FIXUP(sb);
965 }
966 
967 /*
968  * Drop a record off the front of a sockbuf and move the next record to the
969  * front.
970  */
971 void
972 sbdroprecord(struct sockbuf *sb)
973 {
974 
975 	SOCKBUF_LOCK(sb);
976 	sbdroprecord_locked(sb);
977 	SOCKBUF_UNLOCK(sb);
978 }
979 
980 /*
981  * Create a "control" mbuf containing the specified data with the specified
982  * type for presentation on a socket buffer.
983  */
984 struct mbuf *
985 sbcreatecontrol(caddr_t p, int size, int type, int level)
986 {
987 	struct cmsghdr *cp;
988 	struct mbuf *m;
989 
990 	if (CMSG_SPACE((u_int)size) > MCLBYTES)
991 		return ((struct mbuf *) NULL);
992 	if (CMSG_SPACE((u_int)size) > MLEN)
993 		m = m_getcl(M_DONTWAIT, MT_CONTROL, 0);
994 	else
995 		m = m_get(M_DONTWAIT, MT_CONTROL);
996 	if (m == NULL)
997 		return ((struct mbuf *) NULL);
998 	cp = mtod(m, struct cmsghdr *);
999 	m->m_len = 0;
1000 	KASSERT(CMSG_SPACE((u_int)size) <= M_TRAILINGSPACE(m),
1001 	    ("sbcreatecontrol: short mbuf"));
1002 	if (p != NULL)
1003 		(void)memcpy(CMSG_DATA(cp), p, size);
1004 	m->m_len = CMSG_SPACE(size);
1005 	cp->cmsg_len = CMSG_LEN(size);
1006 	cp->cmsg_level = level;
1007 	cp->cmsg_type = type;
1008 	return (m);
1009 }
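
/*
 * Illustrative sketch (not compiled): building a control mbuf for delivery
 * alongside a datagram, e.g. a receive timestamp, in the style of the
 * SO_TIMESTAMP handling in the UDP input path.  The function name is
 * hypothetical; a NULL return means no mbuf could be allocated.
 */
#if 0
static struct mbuf *
example_make_timestamp_control(void)
{
	struct timeval tv;

	microtime(&tv);
	return (sbcreatecontrol((caddr_t)&tv, sizeof(tv), SCM_TIMESTAMP,
	    SOL_SOCKET));
}
#endif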
1010 
1011 /*
1012  * This does the same for socket buffers that sotoxsocket does for sockets:
1013  * generate a user-format data structure describing the socket buffer.  Note
1014  * that the xsockbuf structure, since it is always embedded in a socket, does
1015  * not include a self pointer nor a length.  We make this entry point public
1016  * in case some other mechanism needs it.
1017  */
1018 void
1019 sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb)
1020 {
1021 
1022 	xsb->sb_cc = sb->sb_cc;
1023 	xsb->sb_hiwat = sb->sb_hiwat;
1024 	xsb->sb_mbcnt = sb->sb_mbcnt;
1025 	xsb->sb_mbmax = sb->sb_mbmax;
1026 	xsb->sb_lowat = sb->sb_lowat;
1027 	xsb->sb_flags = sb->sb_flags;
1028 	xsb->sb_timeo = sb->sb_timeo;
1029 }
1030 
1031 /* This takes the place of kern.maxsockbuf, which moved to kern.ipc. */
1032 static int dummy;
1033 SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, "");
1034 SYSCTL_OID(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLTYPE_ULONG|CTLFLAG_RW,
1035     &sb_max, 0, sysctl_handle_sb_max, "LU", "Maximum socket buffer size");
1036 SYSCTL_ULONG(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW,
1037     &sb_efficiency, 0, "");
1038