/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket2.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kern_tls.h"
#include "opt_param.h"

#include <sys/param.h>
#include <sys/aio.h> /* for aio_swake proto */
#include <sys/kernel.h>
#include <sys/ktls.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

/*
 * Function pointer set by the AIO routines so that the socket buffer code
 * can call back into the AIO module if it is loaded.
 */
void	(*aio_swake)(struct socket *, struct sockbuf *);

/*
 * Primitive routines for operating on socket buffers
 */

u_long	sb_max = SB_MAX;
u_long sb_max_adj =
       (quad_t)SB_MAX * MCLBYTES / (MSIZE + MCLBYTES); /* adjusted sb_max */

static	u_long sb_efficiency = 8;	/* parameter for sbreserve() */

#ifdef KERN_TLS
static void	sbcompress_ktls_rx(struct sockbuf *sb, struct mbuf *m,
    struct mbuf *n);
#endif
static struct mbuf	*sbcut_internal(struct sockbuf *sb, int len);
static void	sbflush_internal(struct sockbuf *sb);

/*
 * Our own version of m_clrprotoflags() that can preserve M_NOTREADY.
 */
static void
sbm_clrprotoflags(struct mbuf *m, int flags)
{
	int mask;

	mask = ~M_PROTOFLAGS;
	if (flags & PRUS_NOTREADY)
		mask |= M_NOTREADY;
	while (m) {
		m->m_flags &= mask;
		m = m->m_next;
	}
}

/*
 * Compress M_NOTREADY mbufs after they have been readied by sbready().
 *
 * sbcompress() skips M_NOTREADY mbufs since the data is not available to
 * be copied at the time of sbcompress().  This function combines small
 * mbufs similar to sbcompress() once mbufs are ready.  'm0' is the first
 * mbuf sbready() marked ready, and 'end' is the first mbuf still not
 * ready.
 */
static void
sbready_compress(struct sockbuf *sb, struct mbuf *m0, struct mbuf *end)
{
	struct mbuf *m, *n;
	int ext_size;

	SOCKBUF_LOCK_ASSERT(sb);

	if ((sb->sb_flags & SB_NOCOALESCE) != 0)
		return;

	for (m = m0; m != end; m = m->m_next) {
		MPASS((m->m_flags & M_NOTREADY) == 0);
		/*
		 * NB: In sbcompress(), 'n' is the last mbuf in the
		 * socket buffer and 'm' is the new mbuf being copied
		 * into the trailing space of 'n'.  Here, the roles
		 * are reversed and 'n' is the next mbuf after 'm'
		 * that is being copied into the trailing space of
		 * 'm'.
		 */
		n = m->m_next;
#ifdef KERN_TLS
		/* Try to coalesce adjacent ktls mbuf hdr/trailers. */
		if ((n != NULL) && (n != end) && (m->m_flags & M_EOR) == 0 &&
		    (m->m_flags & M_EXTPG) &&
		    (n->m_flags & M_EXTPG) &&
		    !mbuf_has_tls_session(m) &&
		    !mbuf_has_tls_session(n)) {
			int hdr_len, trail_len;

			hdr_len = n->m_epg_hdrlen;
			trail_len = m->m_epg_trllen;
			if (trail_len != 0 && hdr_len != 0 &&
			    trail_len + hdr_len <= MBUF_PEXT_TRAIL_LEN) {
				/* copy n's header to m's trailer */
				memcpy(&m->m_epg_trail[trail_len],
				    n->m_epg_hdr, hdr_len);
				m->m_epg_trllen += hdr_len;
				m->m_len += hdr_len;
				n->m_epg_hdrlen = 0;
				n->m_len -= hdr_len;
			}
		}
#endif

		/* Compress small unmapped mbufs into plain mbufs. */
		if ((m->m_flags & M_EXTPG) && m->m_len <= MLEN &&
		    !mbuf_has_tls_session(m)) {
			ext_size = m->m_ext.ext_size;
			if (mb_unmapped_compress(m) == 0)
				sb->sb_mbcnt -= ext_size;
		}

		while ((n != NULL) && (n != end) && (m->m_flags & M_EOR) == 0 &&
		    M_WRITABLE(m) &&
		    (m->m_flags & M_EXTPG) == 0 &&
		    !mbuf_has_tls_session(n) &&
		    !mbuf_has_tls_session(m) &&
		    n->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
		    n->m_len <= M_TRAILINGSPACE(m) &&
		    m->m_type == n->m_type) {
			KASSERT(sb->sb_lastrecord != n,
		    ("%s: merging start of record (%p) into previous mbuf (%p)",
			    __func__, n, m));
			m_copydata(n, 0, n->m_len, mtodo(m, m->m_len));
			m->m_len += n->m_len;
			m->m_next = n->m_next;
			m->m_flags |= n->m_flags & M_EOR;
			if (sb->sb_mbtail == n)
				sb->sb_mbtail = m;

			sb->sb_mbcnt -= MSIZE;
			if (n->m_flags & M_EXT)
				sb->sb_mbcnt -= n->m_ext.ext_size;
			m_free(n);
			n = m->m_next;
		}
	}
	SBLASTRECORDCHK(sb);
	SBLASTMBUFCHK(sb);
}

/*
 * Mark ready "count" units of I/O starting with "m".  Most mbufs
 * count as a single unit of I/O except for M_EXTPG mbufs which
 * are backed by multiple pages.
 */
int
sbready(struct sockbuf *sb, struct mbuf *m0, int count)
{
	struct mbuf *m;
	u_int blocker;

	SOCKBUF_LOCK_ASSERT(sb);
	KASSERT(sb->sb_fnrdy != NULL, ("%s: sb %p NULL fnrdy", __func__, sb));
	KASSERT(count > 0, ("%s: invalid count %d", __func__, count));

	m = m0;
	blocker = (sb->sb_fnrdy == m) ? M_BLOCKED : 0;

	while (count > 0) {
		KASSERT(m->m_flags & M_NOTREADY,
		    ("%s: m %p !M_NOTREADY", __func__, m));
		if ((m->m_flags & M_EXTPG) != 0 && m->m_epg_npgs != 0) {
			if (count < m->m_epg_nrdy) {
				m->m_epg_nrdy -= count;
				count = 0;
				break;
			}
			count -= m->m_epg_nrdy;
			m->m_epg_nrdy = 0;
		} else
			count--;

		m->m_flags &= ~(M_NOTREADY | blocker);
		if (blocker)
			sb->sb_acc += m->m_len;
		m = m->m_next;
	}

	/*
	 * If the first mbuf is still not fully ready because only
	 * some of its backing pages were readied, no further progress
	 * can be made.
	 */
	if (m0 == m) {
		MPASS(m->m_flags & M_NOTREADY);
		return (EINPROGRESS);
	}

	if (!blocker) {
		sbready_compress(sb, m0, m);
		return (EINPROGRESS);
	}

	/* This one was blocking all the queue. */
	for (; m && (m->m_flags & M_NOTREADY) == 0; m = m->m_next) {
		KASSERT(m->m_flags & M_BLOCKED,
		    ("%s: m %p !M_BLOCKED", __func__, m));
		m->m_flags &= ~M_BLOCKED;
		sb->sb_acc += m->m_len;
	}

	sb->sb_fnrdy = m;
	sbready_compress(sb, m0, m);

	return (0);
}
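
/*
 * Illustrative sketch (assumed, not from this file): an asynchronous
 * writer such as sendfile(2) appends mbufs with M_NOTREADY set (via
 * PRUS_NOTREADY) and marks them ready from its I/O completion path,
 * roughly:
 *
 *	SOCKBUF_LOCK(sb);
 *	error = sbready(sb, m0, npages);
 *	SOCKBUF_UNLOCK(sb);
 *
 * A return of 0 means readied data became visible at the front of the
 * queue; EINPROGRESS means earlier not-ready mbufs still block it.
 */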

/*
 * Adjust sockbuf state reflecting allocation of m.
 */
void
sballoc(struct sockbuf *sb, struct mbuf *m)
{

	SOCKBUF_LOCK_ASSERT(sb);

	sb->sb_ccc += m->m_len;

	if (sb->sb_fnrdy == NULL) {
		if (m->m_flags & M_NOTREADY)
			sb->sb_fnrdy = m;
		else
			sb->sb_acc += m->m_len;
	} else
		m->m_flags |= M_BLOCKED;

	if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
		sb->sb_ctl += m->m_len;

	sb->sb_mbcnt += MSIZE;

	if (m->m_flags & M_EXT)
		sb->sb_mbcnt += m->m_ext.ext_size;
}

/*
 * Adjust sockbuf state reflecting freeing of m.
 */
void
sbfree(struct sockbuf *sb, struct mbuf *m)
{

#if 0	/* XXX: not yet: soclose() call path comes here w/o lock. */
	SOCKBUF_LOCK_ASSERT(sb);
#endif

	sb->sb_ccc -= m->m_len;

	if (!(m->m_flags & M_NOTAVAIL))
		sb->sb_acc -= m->m_len;

	if (m == sb->sb_fnrdy) {
		struct mbuf *n;

		KASSERT(m->m_flags & M_NOTREADY,
		    ("%s: m %p !M_NOTREADY", __func__, m));

		n = m->m_next;
		while (n != NULL && !(n->m_flags & M_NOTREADY)) {
			n->m_flags &= ~M_BLOCKED;
			sb->sb_acc += n->m_len;
			n = n->m_next;
		}
		sb->sb_fnrdy = n;
	}

	if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
		sb->sb_ctl -= m->m_len;

	sb->sb_mbcnt -= MSIZE;
	if (m->m_flags & M_EXT)
		sb->sb_mbcnt -= m->m_ext.ext_size;

	if (sb->sb_sndptr == m) {
		sb->sb_sndptr = NULL;
		sb->sb_sndptroff = 0;
	}
	if (sb->sb_sndptroff != 0)
		sb->sb_sndptroff -= m->m_len;
}

#ifdef KERN_TLS
/*
 * Similar to sballoc/sbfree but does not adjust state associated with
 * the sb_mb chain such as sb_fnrdy or sb_sndptr*.  Also assumes mbufs
 * are not ready.
 */
void
sballoc_ktls_rx(struct sockbuf *sb, struct mbuf *m)
{

	SOCKBUF_LOCK_ASSERT(sb);

	sb->sb_ccc += m->m_len;
	sb->sb_tlscc += m->m_len;

	sb->sb_mbcnt += MSIZE;

	if (m->m_flags & M_EXT)
		sb->sb_mbcnt += m->m_ext.ext_size;
}

void
sbfree_ktls_rx(struct sockbuf *sb, struct mbuf *m)
{

#if 0	/* XXX: not yet: soclose() call path comes here w/o lock. */
	SOCKBUF_LOCK_ASSERT(sb);
#endif

	sb->sb_ccc -= m->m_len;
	sb->sb_tlscc -= m->m_len;

	sb->sb_mbcnt -= MSIZE;

	if (m->m_flags & M_EXT)
		sb->sb_mbcnt -= m->m_ext.ext_size;
}
#endif

/*
 * Socantsendmore indicates that no more data will be sent on the socket; it
 * is normally applied to a socket by the protocol code when the user informs
 * the system that no more data is to be sent (e.g., in the case of
 * PRU_SHUTDOWN).  Socantrcvmore indicates that no more data will be
 * received, and will normally be applied to the socket by a protocol when it
 * detects that the peer will send no more data.  Data queued for reading in
 * the socket may yet be read.
 */
void
socantsendmore_locked(struct socket *so)
{

	SOCK_SENDBUF_LOCK_ASSERT(so);

	so->so_snd.sb_state |= SBS_CANTSENDMORE;
	sowwakeup_locked(so);
	SOCK_SENDBUF_UNLOCK_ASSERT(so);
}

void
socantsendmore(struct socket *so)
{

	SOCK_SENDBUF_LOCK(so);
	socantsendmore_locked(so);
	SOCK_SENDBUF_UNLOCK_ASSERT(so);
}

void
socantrcvmore_locked(struct socket *so)
{

	SOCK_RECVBUF_LOCK_ASSERT(so);

	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
#ifdef KERN_TLS
	if (so->so_rcv.sb_flags & SB_TLS_RX)
		ktls_check_rx(&so->so_rcv);
#endif
	sorwakeup_locked(so);
	SOCK_RECVBUF_UNLOCK_ASSERT(so);
}

void
socantrcvmore(struct socket *so)
{

	SOCK_RECVBUF_LOCK(so);
	socantrcvmore_locked(so);
	SOCK_RECVBUF_UNLOCK_ASSERT(so);
}
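
/*
 * Illustrative sketch (assumed): a stream protocol's input path signals
 * EOF to readers once the peer has shut down its side, e.g. on receipt
 * of a FIN:
 *
 *	socantrcvmore(so);	(readers drain queued data, then see EOF)
 *
 * whereas a local shutdown(2) of the write side reaches the protocol's
 * shutdown method, which ends with socantsendmore(so).
 */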

void
soroverflow_locked(struct socket *so)
{

	SOCK_RECVBUF_LOCK_ASSERT(so);

	if (so->so_options & SO_RERROR) {
		so->so_rerror = ENOBUFS;
		sorwakeup_locked(so);
	} else
		SOCK_RECVBUF_UNLOCK(so);

	SOCK_RECVBUF_UNLOCK_ASSERT(so);
}

void
soroverflow(struct socket *so)
{

	SOCK_RECVBUF_LOCK(so);
	soroverflow_locked(so);
	SOCK_RECVBUF_UNLOCK_ASSERT(so);
}

/*
 * Wait for data to arrive at/drain from a socket buffer.
 */
int
sbwait(struct socket *so, sb_which which)
{
	struct sockbuf *sb;

	SOCK_BUF_LOCK_ASSERT(so, which);

	sb = sobuf(so, which);
	sb->sb_flags |= SB_WAIT;
	return (msleep_sbt(&sb->sb_acc, soeventmtx(so, which),
	    (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, "sbwait",
	    sb->sb_timeo, 0, 0));
}
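
/*
 * Illustrative sketch (assumed): receive paths wait for data in a loop,
 * re-checking the condition after every wakeup, since sbwait() makes no
 * guarantee about the buffer state when it returns:
 *
 *	SOCK_RECVBUF_LOCK(so);
 *	while (sbavail(&so->so_rcv) == 0 &&
 *	    (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
 *		error = sbwait(so, SO_RCV);
 *		if (error != 0)
 *			break;			(signal or timeout)
 *	}
 *	SOCK_RECVBUF_UNLOCK(so);
 */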

/*
 * Wakeup processes waiting on a socket buffer.  Do asynchronous notification
 * via SIGIO if the socket has the SS_ASYNC flag set.
 *
 * Called with the socket buffer lock held; will release the lock by the end
 * of the function.  This allows the caller to acquire the socket buffer lock
 * while testing for the need for various sorts of wakeup and hold it through
 * to the point where it's no longer required.  We currently hold the lock
 * through calls out to other subsystems (with the exception of kqueue), and
 * then release it to avoid lock order issues.  It's not clear that's
 * correct.
 */
static __always_inline void
sowakeup(struct socket *so, const sb_which which)
{
	struct sockbuf *sb;
	int ret;

	SOCK_BUF_LOCK_ASSERT(so, which);

	sb = sobuf(so, which);
	selwakeuppri(sb->sb_sel, PSOCK);
	if (!SEL_WAITING(sb->sb_sel))
		sb->sb_flags &= ~SB_SEL;
	if (sb->sb_flags & SB_WAIT) {
		sb->sb_flags &= ~SB_WAIT;
		wakeup(&sb->sb_acc);
	}
	KNOTE_LOCKED(&sb->sb_sel->si_note, 0);
	if (sb->sb_upcall != NULL) {
		ret = sb->sb_upcall(so, sb->sb_upcallarg, M_NOWAIT);
		if (ret == SU_ISCONNECTED) {
			KASSERT(sb == &so->so_rcv,
			    ("SO_SND upcall returned SU_ISCONNECTED"));
			soupcall_clear(so, SO_RCV);
		}
	} else
		ret = SU_OK;
	if (sb->sb_flags & SB_AIO)
		sowakeup_aio(so, which);
	SOCK_BUF_UNLOCK(so, which);
	if (ret == SU_ISCONNECTED)
		soisconnected(so);
	if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
		pgsigio(&so->so_sigio, SIGIO, 0);
	SOCK_BUF_UNLOCK_ASSERT(so, which);
}

/*
 * Do we need to notify the other side when I/O is possible?
 */
static __always_inline bool
sb_notify(const struct sockbuf *sb)
{
	return ((sb->sb_flags & (SB_WAIT | SB_SEL | SB_ASYNC |
	    SB_UPCALL | SB_AIO | SB_KNOTE)) != 0);
}

void
sorwakeup_locked(struct socket *so)
{
	SOCK_RECVBUF_LOCK_ASSERT(so);
	if (sb_notify(&so->so_rcv))
		sowakeup(so, SO_RCV);
	else
		SOCK_RECVBUF_UNLOCK(so);
}

void
sowwakeup_locked(struct socket *so)
{
	SOCK_SENDBUF_LOCK_ASSERT(so);
	if (sb_notify(&so->so_snd))
		sowakeup(so, SO_SND);
	else
		SOCK_SENDBUF_UNLOCK(so);
}
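
/*
 * Illustrative sketch (assumed): the *_locked wakeup routines consume
 * the buffer lock, so a producer can test and signal atomically without
 * a separate unlock:
 *
 *	SOCK_RECVBUF_LOCK(so);
 *	(... queue new data into so->so_rcv ...)
 *	sorwakeup_locked(so);		(returns with the lock released)
 */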

/*
 * Socket buffer (struct sockbuf) utility routines.
 *
 * Each socket contains two socket buffers: one for sending data and one for
 * receiving data.  Each buffer contains a queue of mbufs, information about
 * the number of mbufs and amount of data in the queue, and other fields
 * allowing select() statements and notification on data availability to be
 * implemented.
 *
 * Data stored in a socket buffer is maintained as a list of records.  Each
 * record is a list of mbufs chained together with the m_next field.  Records
 * are chained together with the m_nextpkt field.  The upper level routine
 * soreceive() expects the following conventions to be observed when placing
 * information in the receive buffer:
 *
 * 1. If the protocol requires each message be preceded by the sender's name,
 *    then a record containing that name must be present before any
 *    associated data (mbuf's must be of type MT_SONAME).
 * 2. If the protocol supports the exchange of ``access rights'' (really just
 *    additional data associated with the message), and there are ``rights''
 *    to be received, then a record containing this data should be present
 *    (mbuf's must be of type MT_RIGHTS).
 * 3. If a name or rights record exists, then it must be followed by a data
 *    record, perhaps of zero length.
 *
 * Before using a new socket structure it is first necessary to reserve
 * buffer space to the socket, by calling sbreserve().  This should commit
 * some of the available buffer space in the system buffer pool for the
 * socket (currently, it does nothing but enforce limits).  The space should
 * be released by calling sbrelease() when the socket is destroyed.
 */
int
soreserve(struct socket *so, u_long sndcc, u_long rcvcc)
{
	struct thread *td = curthread;

	SOCK_SENDBUF_LOCK(so);
	SOCK_RECVBUF_LOCK(so);
	if (sbreserve_locked(so, SO_SND, sndcc, td) == 0)
		goto bad;
	if (sbreserve_locked(so, SO_RCV, rcvcc, td) == 0)
		goto bad2;
	if (so->so_rcv.sb_lowat == 0)
		so->so_rcv.sb_lowat = 1;
	if (so->so_snd.sb_lowat == 0)
		so->so_snd.sb_lowat = MCLBYTES;
	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
	SOCK_RECVBUF_UNLOCK(so);
	SOCK_SENDBUF_UNLOCK(so);
	return (0);
bad2:
	sbrelease_locked(so, SO_SND);
bad:
	SOCK_RECVBUF_UNLOCK(so);
	SOCK_SENDBUF_UNLOCK(so);
	return (ENOBUFS);
}
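
/*
 * Illustrative sketch (assumed): a protocol's attach routine typically
 * sizes both buffers up front with protocol-specific defaults, e.g.:
 *
 *	error = soreserve(so, udp_sendspace, udp_recvspace);
 *	if (error != 0)
 *		return (error);
 *
 * where udp_sendspace/udp_recvspace stand for whatever tunables the
 * protocol uses.
 */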

static int
sysctl_handle_sb_max(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	u_long tmp_sb_max = sb_max;

	error = sysctl_handle_long(oidp, &tmp_sb_max, arg2, req);
	if (error || !req->newptr)
		return (error);
	if (tmp_sb_max < MSIZE + MCLBYTES)
		return (EINVAL);
	sb_max = tmp_sb_max;
	sb_max_adj = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
	return (0);
}
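
/*
 * Worked example (assuming the common MSIZE = 256 and MCLBYTES = 2048):
 * sb_max_adj = sb_max * 2048 / 2304, about 89% of kern.ipc.maxsockbuf.
 * The handler above recomputes it whenever the sysctl is written, so
 * that a reservation consisting purely of 2K clusters plus their mbuf
 * headers still fits under sb_max.
 */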

/*
 * Allot mbufs to a sockbuf.  Attempt to scale mbmax so that mbcnt doesn't
 * become limiting if buffering efficiency is near the normal case.
 */
bool
sbreserve_locked(struct socket *so, sb_which which, u_long cc,
    struct thread *td)
{
	struct sockbuf *sb = sobuf(so, which);
	rlim_t sbsize_limit;

	SOCK_BUF_LOCK_ASSERT(so, which);

	/*
	 * When a thread is passed, we take into account the thread's socket
	 * buffer size limit.  The caller will generally pass curthread, but
	 * in the TCP input path, NULL will be passed to indicate that no
	 * appropriate thread resource limits are available.  In that case,
	 * we don't apply a process limit.
	 */
	if (cc > sb_max_adj)
		return (false);
	if (td != NULL) {
		sbsize_limit = lim_cur(td, RLIMIT_SBSIZE);
	} else
		sbsize_limit = RLIM_INFINITY;
	if (!chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, cc,
	    sbsize_limit))
		return (false);
	sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
	if (sb->sb_lowat > sb->sb_hiwat)
		sb->sb_lowat = sb->sb_hiwat;
	return (true);
}

int
sbsetopt(struct socket *so, int cmd, u_long cc)
{
	struct sockbuf *sb;
	sb_which wh;
	short *flags;
	u_int *hiwat, *lowat;
	int error;

	sb = NULL;
	SOCK_LOCK(so);
	if (SOLISTENING(so)) {
		switch (cmd) {
			case SO_SNDLOWAT:
			case SO_SNDBUF:
				lowat = &so->sol_sbsnd_lowat;
				hiwat = &so->sol_sbsnd_hiwat;
				flags = &so->sol_sbsnd_flags;
				break;
			case SO_RCVLOWAT:
			case SO_RCVBUF:
				lowat = &so->sol_sbrcv_lowat;
				hiwat = &so->sol_sbrcv_hiwat;
				flags = &so->sol_sbrcv_flags;
				break;
		}
	} else {
		switch (cmd) {
			case SO_SNDLOWAT:
			case SO_SNDBUF:
				sb = &so->so_snd;
				wh = SO_SND;
				break;
			case SO_RCVLOWAT:
			case SO_RCVBUF:
				sb = &so->so_rcv;
				wh = SO_RCV;
				break;
		}
		flags = &sb->sb_flags;
		hiwat = &sb->sb_hiwat;
		lowat = &sb->sb_lowat;
		SOCK_BUF_LOCK(so, wh);
	}

	error = 0;
	switch (cmd) {
	case SO_SNDBUF:
	case SO_RCVBUF:
		if (SOLISTENING(so)) {
			if (cc > sb_max_adj) {
				error = ENOBUFS;
				break;
			}
			*hiwat = cc;
			if (*lowat > *hiwat)
				*lowat = *hiwat;
		} else {
			if (!sbreserve_locked(so, wh, cc, curthread))
				error = ENOBUFS;
		}
		if (error == 0)
			*flags &= ~SB_AUTOSIZE;
		break;
	case SO_SNDLOWAT:
	case SO_RCVLOWAT:
		/*
		 * Make sure the low-water is never greater than the
		 * high-water.
		 */
		*lowat = (cc > *hiwat) ? *hiwat : cc;
		break;
	}

	if (!SOLISTENING(so))
		SOCK_BUF_UNLOCK(so, wh);
	SOCK_UNLOCK(so);
	return (error);
}
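
/*
 * Illustrative sketch (assumed): this is the kernel end of the userland
 * buffer-sizing knobs, e.g.:
 *
 *	int val = 262144;
 *	setsockopt(s, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *
 * which arrives here as sbsetopt(so, SO_RCVBUF, 262144) and, on
 * success, clears SB_AUTOSIZE so the kernel stops auto-tuning that
 * buffer.
 */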

/*
 * Free mbufs held by a socket, and reserved mbuf space.
 */
static void
sbrelease_internal(struct socket *so, sb_which which)
{
	struct sockbuf *sb = sobuf(so, which);

	sbflush_internal(sb);
	(void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0,
	    RLIM_INFINITY);
	sb->sb_mbmax = 0;
}

void
sbrelease_locked(struct socket *so, sb_which which)
{

	SOCK_BUF_LOCK_ASSERT(so, which);

	sbrelease_internal(so, which);
}

void
sbrelease(struct socket *so, sb_which which)
{

	SOCK_BUF_LOCK(so, which);
	sbrelease_locked(so, which);
	SOCK_BUF_UNLOCK(so, which);
}

void
sbdestroy(struct socket *so, sb_which which)
{
#ifdef KERN_TLS
	struct sockbuf *sb = sobuf(so, which);

	if (sb->sb_tls_info != NULL)
		ktls_free(sb->sb_tls_info);
	sb->sb_tls_info = NULL;
#endif
	sbrelease_internal(so, which);
}

/*
 * Routines to add and remove data from an mbuf queue.
 *
 * The routines sbappend() or sbappendrecord() are normally called to append
 * new mbufs to a socket buffer, after checking that adequate space is
 * available, comparing the function sbspace() with the amount of data to be
 * added.  sbappendrecord() differs from sbappend() in that data supplied is
 * treated as the beginning of a new record.  To place a sender's address,
 * optional access rights, and data in a socket receive buffer,
 * sbappendaddr() should be used.  To place access rights and data in a
 * socket receive buffer, sbappendrights() should be used.  In either case,
 * the new data begins a new record.  Note that unlike sbappend() and
 * sbappendrecord(), these routines check on behalf of the caller that there
 * will be enough space to store the data.  Each fails if there is not enough
 * space, or if it cannot find mbufs to store additional information in.
 *
 * Reliable protocols may use the socket send buffer to hold data awaiting
 * acknowledgement.  Data is normally copied from a socket send buffer in a
 * protocol with m_copy for output to a peer; the data is then removed from
 * the socket buffer with sbdrop() or sbdroprecord() when it is acknowledged
 * by the peer.
 */
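
/*
 * Illustrative sketch (assumed): a datagram protocol delivering a
 * packet together with the sender's address follows the conventions
 * above, e.g.:
 *
 *	SOCKBUF_LOCK(&so->so_rcv);
 *	if (sbappendaddr_locked(&so->so_rcv,
 *	    (struct sockaddr *)&from, m, control) == 0) {
 *		soroverflow_locked(so);		(drops the lock)
 *		m_freem(m);
 *		m_freem(control);
 *	} else
 *		sorwakeup_locked(so);		(drops the lock)
 */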
#ifdef SOCKBUF_DEBUG
void
sblastrecordchk(struct sockbuf *sb, const char *file, int line)
{
	struct mbuf *m = sb->sb_mb;

	SOCKBUF_LOCK_ASSERT(sb);

	while (m && m->m_nextpkt)
		m = m->m_nextpkt;

	if (m != sb->sb_lastrecord) {
		printf("%s: sb_mb %p sb_lastrecord %p last %p\n",
			__func__, sb->sb_mb, sb->sb_lastrecord, m);
		printf("packet chain:\n");
		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt)
			printf("\t%p\n", m);
		panic("%s from %s:%u", __func__, file, line);
	}
}

void
sblastmbufchk(struct sockbuf *sb, const char *file, int line)
{
	struct mbuf *m = sb->sb_mb;
	struct mbuf *n;

	SOCKBUF_LOCK_ASSERT(sb);

	while (m && m->m_nextpkt)
		m = m->m_nextpkt;

	while (m && m->m_next)
		m = m->m_next;

	if (m != sb->sb_mbtail) {
		printf("%s: sb_mb %p sb_mbtail %p last %p\n",
			__func__, sb->sb_mb, sb->sb_mbtail, m);
		printf("packet tree:\n");
		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) {
			printf("\t");
			for (n = m; n != NULL; n = n->m_next)
				printf("%p ", n);
			printf("\n");
		}
		panic("%s from %s:%u", __func__, file, line);
	}

#ifdef KERN_TLS
	m = sb->sb_mtls;
	while (m && m->m_next)
		m = m->m_next;

	if (m != sb->sb_mtlstail) {
		printf("%s: sb_mtls %p sb_mtlstail %p last %p\n",
			__func__, sb->sb_mtls, sb->sb_mtlstail, m);
		printf("TLS packet tree:\n");
		printf("\t");
		for (m = sb->sb_mtls; m != NULL; m = m->m_next) {
			printf("%p ", m);
		}
		printf("\n");
		panic("%s from %s:%u", __func__, file, line);
	}
#endif
}
#endif /* SOCKBUF_DEBUG */

#define SBLINKRECORD(sb, m0) do {					\
	SOCKBUF_LOCK_ASSERT(sb);					\
	if ((sb)->sb_lastrecord != NULL)				\
		(sb)->sb_lastrecord->m_nextpkt = (m0);			\
	else								\
		(sb)->sb_mb = (m0);					\
	(sb)->sb_lastrecord = (m0);					\
} while (/*CONSTCOND*/0)

/*
 * Append mbuf chain m to the last record in the socket buffer sb.  The
 * additional space associated with the mbuf chain is recorded in sb.  Empty
 * mbufs are discarded and mbufs are compacted where possible.
 */
void
sbappend_locked(struct sockbuf *sb, struct mbuf *m, int flags)
{
	struct mbuf *n;

	SOCKBUF_LOCK_ASSERT(sb);

	if (m == NULL)
		return;
	sbm_clrprotoflags(m, flags);
	SBLASTRECORDCHK(sb);
	n = sb->sb_mb;
	if (n) {
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		do {
			if (n->m_flags & M_EOR) {
				sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
				return;
			}
		} while (n->m_next && (n = n->m_next));
	} else {
		/*
		 * XXX Would like to simply use sb_mbtail here, but
		 * XXX I need to verify that I won't miss an EOR that
		 * XXX way.
		 */
		if ((n = sb->sb_lastrecord) != NULL) {
			do {
				if (n->m_flags & M_EOR) {
					sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
					return;
				}
			} while (n->m_next && (n = n->m_next));
		} else {
			/*
			 * If this is the first record in the socket buffer,
			 * it's also the last record.
			 */
			sb->sb_lastrecord = m;
		}
	}
	sbcompress(sb, m, n);
	SBLASTRECORDCHK(sb);
}

/*
 * Append mbuf chain m to the last record in the socket buffer sb.  The
 * additional space associated with the mbuf chain is recorded in sb.  Empty
 * mbufs are discarded and mbufs are compacted where possible.
 */
void
sbappend(struct sockbuf *sb, struct mbuf *m, int flags)
{

	SOCKBUF_LOCK(sb);
	sbappend_locked(sb, m, flags);
	SOCKBUF_UNLOCK(sb);
}

#ifdef KERN_TLS
/*
 * Append an mbuf containing encrypted TLS data.  The data
 * is marked M_NOTREADY until it has been decrypted and
 * stored as a TLS record.
 */
static void
sbappend_ktls_rx(struct sockbuf *sb, struct mbuf *m)
{
	struct mbuf *n;

	SBLASTMBUFCHK(sb);

	/* Remove all packet headers and mbuf tags to get a pure data chain. */
	m_demote(m, 1, 0);

	for (n = m; n != NULL; n = n->m_next)
		n->m_flags |= M_NOTREADY;
	sbcompress_ktls_rx(sb, m, sb->sb_mtlstail);
	ktls_check_rx(sb);
}
#endif

/*
 * This version of sbappend() should only be used when the caller absolutely
 * knows that there will never be more than one record in the socket buffer,
 * that is, a stream protocol (such as TCP).
 */
void
sbappendstream_locked(struct sockbuf *sb, struct mbuf *m, int flags)
{
	SOCKBUF_LOCK_ASSERT(sb);

	KASSERT(m->m_nextpkt == NULL,("sbappendstream 0"));

#ifdef KERN_TLS
	/*
	 * Decrypted TLS records are appended as records via
	 * sbappendrecord().  TCP passes encrypted TLS records to this
	 * function which must be scheduled for decryption.
	 */
	if (sb->sb_flags & SB_TLS_RX) {
		sbappend_ktls_rx(sb, m);
		return;
	}
#endif

	KASSERT(sb->sb_mb == sb->sb_lastrecord,("sbappendstream 1"));

	SBLASTMBUFCHK(sb);

#ifdef KERN_TLS
	if (sb->sb_tls_info != NULL)
		ktls_seq(sb, m);
#endif

	/* Remove all packet headers and mbuf tags to get a pure data chain. */
	m_demote(m, 1, flags & PRUS_NOTREADY ? M_NOTREADY : 0);

	sbcompress(sb, m, sb->sb_mbtail);

	sb->sb_lastrecord = sb->sb_mb;
	SBLASTRECORDCHK(sb);
}

/*
 * This version of sbappend() should only be used when the caller absolutely
 * knows that there will never be more than one record in the socket buffer,
 * that is, a stream protocol (such as TCP).
 */
void
sbappendstream(struct sockbuf *sb, struct mbuf *m, int flags)
{

	SOCKBUF_LOCK(sb);
	sbappendstream_locked(sb, m, flags);
	SOCKBUF_UNLOCK(sb);
}
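
/*
 * Illustrative sketch (assumed): TCP-style input appends in-order
 * segment data and wakes the reader in one locked section:
 *
 *	SOCKBUF_LOCK(&so->so_rcv);
 *	if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
 *		m_freem(m);			(reader is gone)
 *	else
 *		sbappendstream_locked(&so->so_rcv, m, 0);
 *	sorwakeup_locked(so);			(drops the lock)
 */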

#ifdef SOCKBUF_DEBUG
void
sbcheck(struct sockbuf *sb, const char *file, int line)
{
	struct mbuf *m, *n, *fnrdy;
	u_long acc, ccc, mbcnt;
#ifdef KERN_TLS
	u_long tlscc;
#endif

	SOCKBUF_LOCK_ASSERT(sb);

	acc = ccc = mbcnt = 0;
	fnrdy = NULL;

	for (m = sb->sb_mb; m; m = n) {
	    n = m->m_nextpkt;
	    for (; m; m = m->m_next) {
		if (m->m_len == 0) {
			printf("sb %p empty mbuf %p\n", sb, m);
			goto fail;
		}
		if ((m->m_flags & M_NOTREADY) && fnrdy == NULL) {
			if (m != sb->sb_fnrdy) {
				printf("sb %p: fnrdy %p != m %p\n",
				    sb, sb->sb_fnrdy, m);
				goto fail;
			}
			fnrdy = m;
		}
		if (fnrdy) {
			if (!(m->m_flags & M_NOTAVAIL)) {
				printf("sb %p: fnrdy %p, m %p is avail\n",
				    sb, sb->sb_fnrdy, m);
				goto fail;
			}
		} else
			acc += m->m_len;
		ccc += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT) /*XXX*/ /* pretty sure this is bogus */
			mbcnt += m->m_ext.ext_size;
	    }
	}
#ifdef KERN_TLS
	/*
	 * Account for mbufs "detached" by ktls_detach_record() while
	 * they are decrypted by ktls_decrypt().  tlsdcc gives a count
	 * of the detached bytes that are included in ccc.  The mbufs
	 * and clusters are not included in the socket buffer
	 * accounting.
	 */
	ccc += sb->sb_tlsdcc;

	tlscc = 0;
	for (m = sb->sb_mtls; m; m = m->m_next) {
		if (m->m_nextpkt != NULL) {
			printf("sb %p TLS mbuf %p with nextpkt\n", sb, m);
			goto fail;
		}
		if ((m->m_flags & M_NOTREADY) == 0) {
			printf("sb %p TLS mbuf %p ready\n", sb, m);
			goto fail;
		}
		tlscc += m->m_len;
		ccc += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT) /*XXX*/ /* pretty sure this is bogus */
			mbcnt += m->m_ext.ext_size;
	}

	if (sb->sb_tlscc != tlscc) {
		printf("tlscc %ld/%u dcc %u\n", tlscc, sb->sb_tlscc,
		    sb->sb_tlsdcc);
		goto fail;
	}
#endif
	if (acc != sb->sb_acc || ccc != sb->sb_ccc || mbcnt != sb->sb_mbcnt) {
		printf("acc %ld/%u ccc %ld/%u mbcnt %ld/%u\n",
		    acc, sb->sb_acc, ccc, sb->sb_ccc, mbcnt, sb->sb_mbcnt);
#ifdef KERN_TLS
		printf("tlscc %ld/%u dcc %u\n", tlscc, sb->sb_tlscc,
		    sb->sb_tlsdcc);
#endif
		goto fail;
	}
	return;
fail:
	panic("%s from %s:%u", __func__, file, line);
}
#endif

/*
 * As above, except the mbuf chain begins a new record.
 */
void
sbappendrecord_locked(struct sockbuf *sb, struct mbuf *m0)
{
	struct mbuf *m;

	SOCKBUF_LOCK_ASSERT(sb);

	if (m0 == NULL)
		return;
	m_clrprotoflags(m0);
	/*
	 * Put the first mbuf on the queue.  Note this permits zero length
	 * records.
	 */
	sballoc(sb, m0);
	SBLASTRECORDCHK(sb);
	SBLINKRECORD(sb, m0);
	sb->sb_mbtail = m0;
	m = m0->m_next;
	m0->m_next = 0;
	if (m && (m0->m_flags & M_EOR)) {
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	/* always call sbcompress() so it can do SBLASTMBUFCHK() */
	sbcompress(sb, m, m0);
}

/*
 * As above, except the mbuf chain begins a new record.
 */
void
sbappendrecord(struct sockbuf *sb, struct mbuf *m0)
{

	SOCKBUF_LOCK(sb);
	sbappendrecord_locked(sb, m0);
	SOCKBUF_UNLOCK(sb);
}

/* Helper routine that appends data, control, and address to a sockbuf. */
static int
sbappendaddr_locked_internal(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control, struct mbuf *ctrl_last)
{
	struct mbuf *m, *n, *nlast;
#if MSIZE <= 256
	if (asa->sa_len > MLEN)
		return (0);
#endif
	m = m_get(M_NOWAIT, MT_SONAME);
	if (m == NULL)
		return (0);
	m->m_len = asa->sa_len;
	bcopy(asa, mtod(m, caddr_t), asa->sa_len);
	if (m0) {
		M_ASSERT_NO_SND_TAG(m0);
		m_clrprotoflags(m0);
		m_tag_delete_chain(m0, NULL);
		/*
		 * Clear some persistent info from pkthdr.
		 * We don't use m_demote(), because some netgraph consumers
		 * expect M_PKTHDR presence.
		 */
		m0->m_pkthdr.rcvif = NULL;
		m0->m_pkthdr.flowid = 0;
		m0->m_pkthdr.csum_flags = 0;
		m0->m_pkthdr.fibnum = 0;
		m0->m_pkthdr.rsstype = 0;
	}
	if (ctrl_last)
		ctrl_last->m_next = m0;	/* concatenate data to control */
	else
		control = m0;
	m->m_next = control;
	for (n = m; n->m_next != NULL; n = n->m_next)
		sballoc(sb, n);
	sballoc(sb, n);
	nlast = n;
	SBLINKRECORD(sb, m);

	sb->sb_mbtail = nlast;
	SBLASTMBUFCHK(sb);

	SBLASTRECORDCHK(sb);
	return (1);
}

/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if no space in sockbuf or insufficient
 * mbufs.
 */
int
sbappendaddr_locked(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
	struct mbuf *ctrl_last;
	int space = asa->sa_len;

	SOCKBUF_LOCK_ASSERT(sb);

	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
		panic("sbappendaddr_locked");
	if (m0)
		space += m0->m_pkthdr.len;
	space += m_length(control, &ctrl_last);

	if (space > sbspace(sb))
		return (0);
	return (sbappendaddr_locked_internal(sb, asa, m0, control, ctrl_last));
}

/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if insufficient mbufs.  Does not validate space
 * on the receiving sockbuf.
 */
int
sbappendaddr_nospacecheck_locked(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
	struct mbuf *ctrl_last;

	SOCKBUF_LOCK_ASSERT(sb);

	ctrl_last = (control == NULL) ? NULL : m_last(control);
	return (sbappendaddr_locked_internal(sb, asa, m0, control, ctrl_last));
}

/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if no space in sockbuf or insufficient
 * mbufs.
 */
int
sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
	int retval;

	SOCKBUF_LOCK(sb);
	retval = sbappendaddr_locked(sb, asa, m0, control);
	SOCKBUF_UNLOCK(sb);
	return (retval);
}

void
sbappendcontrol_locked(struct sockbuf *sb, struct mbuf *m0,
    struct mbuf *control, int flags)
{
	struct mbuf *m, *mlast;

	sbm_clrprotoflags(m0, flags);
	m_last(control)->m_next = m0;

	SBLASTRECORDCHK(sb);

	for (m = control; m->m_next; m = m->m_next)
		sballoc(sb, m);
	sballoc(sb, m);
	mlast = m;
	SBLINKRECORD(sb, control);

	sb->sb_mbtail = mlast;
	SBLASTMBUFCHK(sb);

	SBLASTRECORDCHK(sb);
}

void
sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control,
    int flags)
{

	SOCKBUF_LOCK(sb);
	sbappendcontrol_locked(sb, m0, control, flags);
	SOCKBUF_UNLOCK(sb);
}

/*
 * Append the data in mbuf chain (m) into the socket buffer sb following mbuf
 * (n).  If (n) is NULL, the buffer is presumed empty.
 *
 * When the data is compressed, mbufs in the chain may be handled in one of
 * three ways:
 *
 * (1) The mbuf may simply be dropped, if it contributes nothing (no data, no
 *     record boundary, and no change in data type).
 *
 * (2) The mbuf may be coalesced -- i.e., data in the mbuf may be copied into
 *     an mbuf already in the socket buffer.  This can occur if an
 *     appropriate mbuf exists, there is room, both mbufs are not marked as
 *     not ready, and no merging of data types will occur.
 *
 * (3) The mbuf may be appended to the end of the existing mbuf chain.
 *
 * If any of the new mbufs is marked as M_EOR, mark the last mbuf appended as
 * end-of-record.
 */
void
sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
{
	int eor = 0;
	struct mbuf *o;

	SOCKBUF_LOCK_ASSERT(sb);

	while (m) {
		eor |= m->m_flags & M_EOR;
		if (m->m_len == 0 &&
		    (eor == 0 ||
		     (((o = m->m_next) || (o = n)) &&
		      o->m_type == m->m_type))) {
			if (sb->sb_lastrecord == m)
				sb->sb_lastrecord = m->m_next;
			m = m_free(m);
			continue;
		}
		if (n && (n->m_flags & M_EOR) == 0 &&
		    M_WRITABLE(n) &&
		    ((sb->sb_flags & SB_NOCOALESCE) == 0) &&
		    !(m->m_flags & M_NOTREADY) &&
		    !(n->m_flags & (M_NOTREADY | M_EXTPG)) &&
		    !mbuf_has_tls_session(m) &&
		    !mbuf_has_tls_session(n) &&
		    m->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
		    m->m_len <= M_TRAILINGSPACE(n) &&
		    n->m_type == m->m_type) {
			m_copydata(m, 0, m->m_len, mtodo(n, n->m_len));
			n->m_len += m->m_len;
			sb->sb_ccc += m->m_len;
			if (sb->sb_fnrdy == NULL)
				sb->sb_acc += m->m_len;
			if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
				/* XXX: Probably don't need.*/
				sb->sb_ctl += m->m_len;
			m = m_free(m);
			continue;
		}
		if (m->m_len <= MLEN && (m->m_flags & M_EXTPG) &&
		    (m->m_flags & M_NOTREADY) == 0 &&
		    !mbuf_has_tls_session(m))
			(void)mb_unmapped_compress(m);
		if (n)
			n->m_next = m;
		else
			sb->sb_mb = m;
		sb->sb_mbtail = m;
		sballoc(sb, m);
		n = m;
		m->m_flags &= ~M_EOR;
		m = m->m_next;
		n->m_next = 0;
	}
	if (eor) {
		KASSERT(n != NULL, ("sbcompress: eor && n == NULL"));
		n->m_flags |= eor;
	}
	SBLASTMBUFCHK(sb);
}
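
/*
 * Worked example (assumed sizes): appending a 40-byte mbuf 'm' behind a
 * writable tail 'n' with 100 bytes of trailing space hits case (2)
 * above: the 40 bytes are copied into 'n', 'm' is freed, and only
 * sb_ccc (and sb_acc, when nothing is pending) grows; sb_mbcnt is
 * unchanged because no mbuf was added to the chain.
 */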

#ifdef KERN_TLS
/*
 * A version of sbcompress() for encrypted TLS RX mbufs.  These mbufs
 * are appended to the 'sb_mtls' chain instead of 'sb_mb' and are also
 * a bit simpler (no EOR markers, always MT_DATA, etc.).
 */
static void
sbcompress_ktls_rx(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
{

	SOCKBUF_LOCK_ASSERT(sb);

	while (m) {
		KASSERT((m->m_flags & M_EOR) == 0,
		    ("TLS RX mbuf %p with EOR", m));
		KASSERT(m->m_type == MT_DATA,
		    ("TLS RX mbuf %p is not MT_DATA", m));
		KASSERT((m->m_flags & M_NOTREADY) != 0,
		    ("TLS RX mbuf %p ready", m));
		KASSERT((m->m_flags & M_EXTPG) == 0,
		    ("TLS RX mbuf %p unmapped", m));

		if (m->m_len == 0) {
			m = m_free(m);
			continue;
		}

		/*
		 * Even though both 'n' and 'm' are NOTREADY, it's ok
		 * to coalesce the data.
		 */
		if (n &&
		    M_WRITABLE(n) &&
		    ((sb->sb_flags & SB_NOCOALESCE) == 0) &&
		    !(n->m_flags & (M_EXTPG)) &&
		    m->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
		    m->m_len <= M_TRAILINGSPACE(n)) {
			m_copydata(m, 0, m->m_len, mtodo(n, n->m_len));
			n->m_len += m->m_len;
			sb->sb_ccc += m->m_len;
			sb->sb_tlscc += m->m_len;
			m = m_free(m);
			continue;
		}
		if (n)
			n->m_next = m;
		else
			sb->sb_mtls = m;
		sb->sb_mtlstail = m;
		sballoc_ktls_rx(sb, m);
		n = m;
		m = m->m_next;
		n->m_next = NULL;
	}
	SBLASTMBUFCHK(sb);
}
#endif

/*
 * Free all mbufs in a sockbuf.  Check that all resources are reclaimed.
 */
static void
sbflush_internal(struct sockbuf *sb)
{

	while (sb->sb_mbcnt || sb->sb_tlsdcc) {
		/*
		 * Don't call sbcut(sb, 0) if the leading mbuf is non-empty:
		 * we would loop forever. Panic instead.
		 */
		if (sb->sb_ccc == 0 && (sb->sb_mb == NULL || sb->sb_mb->m_len))
			break;
		m_freem(sbcut_internal(sb, (int)sb->sb_ccc));
	}
	KASSERT(sb->sb_ccc == 0 && sb->sb_mb == 0 && sb->sb_mbcnt == 0,
	    ("%s: ccc %u mb %p mbcnt %u", __func__,
	    sb->sb_ccc, (void *)sb->sb_mb, sb->sb_mbcnt));
}

void
sbflush_locked(struct sockbuf *sb)
{

	SOCKBUF_LOCK_ASSERT(sb);
	sbflush_internal(sb);
}

void
sbflush(struct sockbuf *sb)
{

	SOCKBUF_LOCK(sb);
	sbflush_locked(sb);
	SOCKBUF_UNLOCK(sb);
}

/*
 * Cut data from (the front of) a sockbuf.
 */
static struct mbuf *
sbcut_internal(struct sockbuf *sb, int len)
{
	struct mbuf *m, *next, *mfree;
	bool is_tls;

	KASSERT(len >= 0, ("%s: len is %d but it is supposed to be >= 0",
	    __func__, len));
	KASSERT(len <= sb->sb_ccc, ("%s: len: %d is > ccc: %u",
	    __func__, len, sb->sb_ccc));

	next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
	is_tls = false;
	mfree = NULL;

	while (len > 0) {
		if (m == NULL) {
#ifdef KERN_TLS
			if (next == NULL && !is_tls) {
				if (sb->sb_tlsdcc != 0) {
					MPASS(len >= sb->sb_tlsdcc);
					len -= sb->sb_tlsdcc;
					sb->sb_ccc -= sb->sb_tlsdcc;
					sb->sb_tlsdcc = 0;
					if (len == 0)
						break;
				}
				next = sb->sb_mtls;
				is_tls = true;
			}
#endif
			KASSERT(next, ("%s: no next, len %d", __func__, len));
			m = next;
			next = m->m_nextpkt;
		}
		if (m->m_len > len) {
			KASSERT(!(m->m_flags & M_NOTAVAIL),
			    ("%s: m %p M_NOTAVAIL", __func__, m));
			m->m_len -= len;
			m->m_data += len;
			sb->sb_ccc -= len;
			sb->sb_acc -= len;
			if (sb->sb_sndptroff != 0)
				sb->sb_sndptroff -= len;
			if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
				sb->sb_ctl -= len;
			break;
		}
		len -= m->m_len;
#ifdef KERN_TLS
		if (is_tls)
			sbfree_ktls_rx(sb, m);
		else
#endif
			sbfree(sb, m);
		/*
		 * Do not put M_NOTREADY buffers to the free list, they
		 * are referenced from outside.
		 */
		if (m->m_flags & M_NOTREADY && !is_tls)
			m = m->m_next;
		else {
			struct mbuf *n;

			n = m->m_next;
			m->m_next = mfree;
			mfree = m;
			m = n;
		}
	}
	/*
	 * Free any zero-length mbufs from the buffer.
	 * For SOCK_DGRAM sockets such mbufs represent empty records.
	 * XXX: For SOCK_STREAM sockets such mbufs can appear in the buffer,
	 * when sosend_generic() needs to send only control data.
	 */
	while (m && m->m_len == 0) {
		struct mbuf *n;

		sbfree(sb, m);
		n = m->m_next;
		m->m_next = mfree;
		mfree = m;
		m = n;
	}
#ifdef KERN_TLS
	if (is_tls) {
		sb->sb_mb = NULL;
		sb->sb_mtls = m;
		if (m == NULL)
			sb->sb_mtlstail = NULL;
	} else
#endif
	if (m) {
		sb->sb_mb = m;
		m->m_nextpkt = next;
	} else
		sb->sb_mb = next;
	/*
	 * First part is an inline SB_EMPTY_FIXUP().  Second part makes sure
	 * sb_lastrecord is up-to-date if we dropped part of the last record.
	 */
	m = sb->sb_mb;
	if (m == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (m->m_nextpkt == NULL) {
		sb->sb_lastrecord = m;
	}

	return (mfree);
}

/*
 * Drop data from (the front of) a sockbuf.
 */
void
sbdrop_locked(struct sockbuf *sb, int len)
{

	SOCKBUF_LOCK_ASSERT(sb);
	m_freem(sbcut_internal(sb, len));
}

/*
 * Drop data from (the front of) a sockbuf,
 * and return it to caller.
 */
struct mbuf *
sbcut_locked(struct sockbuf *sb, int len)
{

	SOCKBUF_LOCK_ASSERT(sb);
	return (sbcut_internal(sb, len));
}

void
sbdrop(struct sockbuf *sb, int len)
{
	struct mbuf *mfree;

	SOCKBUF_LOCK(sb);
	mfree = sbcut_internal(sb, len);
	SOCKBUF_UNLOCK(sb);

	m_freem(mfree);
}
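
/*
 * Illustrative sketch (assumed): a reliable protocol trims acknowledged
 * data from the front of its send buffer while processing an ACK:
 *
 *	SOCK_SENDBUF_LOCK(so);
 *	sbdrop_locked(&so->so_snd, acked);
 *	sowwakeup_locked(so);		(drops the lock)
 */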

struct mbuf *
sbsndptr_noadv(struct sockbuf *sb, uint32_t off, uint32_t *moff)
{
	struct mbuf *m;

	KASSERT(sb->sb_mb != NULL, ("%s: sb_mb is NULL", __func__));
	if (sb->sb_sndptr == NULL || sb->sb_sndptroff > off) {
		*moff = off;
		if (sb->sb_sndptr == NULL) {
			sb->sb_sndptr = sb->sb_mb;
			sb->sb_sndptroff = 0;
		}
		return (sb->sb_mb);
	} else {
		m = sb->sb_sndptr;
		off -= sb->sb_sndptroff;
	}
	*moff = off;
	return (m);
}

void
sbsndptr_adv(struct sockbuf *sb, struct mbuf *mb, uint32_t len)
{
	/*
	 * A small copy was done; advance the sb_sndptr forward to cover
	 * it.
	 */
	struct mbuf *m;

	if (mb != sb->sb_sndptr) {
		/* Did not copyout at the same mbuf */
		return;
	}
	m = mb;
	while (m && (len > 0)) {
		if (len >= m->m_len) {
			len -= m->m_len;
			if (m->m_next) {
				sb->sb_sndptroff += m->m_len;
				sb->sb_sndptr = m->m_next;
			}
			m = m->m_next;
		} else {
			len = 0;
		}
	}
}

/*
 * Return the first mbuf and the mbuf data offset for the provided
 * send offset without changing the "sb_sndptroff" field.
 */
struct mbuf *
sbsndmbuf(struct sockbuf *sb, u_int off, u_int *moff)
{
	struct mbuf *m;

	KASSERT(sb->sb_mb != NULL, ("%s: sb_mb is NULL", __func__));

	/*
	 * If the "off" is below the stored offset, which happens on
	 * retransmits, just use "sb_mb":
	 */
	if (sb->sb_sndptr == NULL || sb->sb_sndptroff > off) {
		m = sb->sb_mb;
	} else {
		m = sb->sb_sndptr;
		off -= sb->sb_sndptroff;
	}
	while (off > 0 && m != NULL) {
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	*moff = off;
	return (m);
}
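
/*
 * Illustrative sketch (assumed): an output path resolving a send offset
 * (e.g. snd_nxt - snd_una in TCP) into an mbuf plus intra-mbuf offset:
 *
 *	u_int moff;
 *	struct mbuf *m = sbsndmbuf(&so->so_snd, off, &moff);
 *	(the copy starts at mtod(m, char *) + moff)
 *
 * Retransmits with a smaller 'off' are safe: the lookup falls back to
 * sb_mb instead of the cached sb_sndptr.
 */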

/*
 * Drop a record off the front of a sockbuf and move the next record to the
 * front.
 */
void
sbdroprecord_locked(struct sockbuf *sb)
{
	struct mbuf *m;

	SOCKBUF_LOCK_ASSERT(sb);

	m = sb->sb_mb;
	if (m) {
		sb->sb_mb = m->m_nextpkt;
		do {
			sbfree(sb, m);
			m = m_free(m);
		} while (m);
	}
	SB_EMPTY_FIXUP(sb);
}

/*
 * Drop a record off the front of a sockbuf and move the next record to the
 * front.
 */
void
sbdroprecord(struct sockbuf *sb)
{

	SOCKBUF_LOCK(sb);
	sbdroprecord_locked(sb);
	SOCKBUF_UNLOCK(sb);
}

/*
 * Create a "control" mbuf containing the specified data with the specified
 * type for presentation on a socket buffer.
 */
struct mbuf *
sbcreatecontrol(const void *p, u_int size, int type, int level, int wait)
{
	struct cmsghdr *cp;
	struct mbuf *m;

	MBUF_CHECKSLEEP(wait);

	if (wait == M_NOWAIT) {
		if (CMSG_SPACE(size) > MCLBYTES)
			return (NULL);
	} else
		KASSERT(CMSG_SPACE(size) <= MCLBYTES,
		    ("%s: passed CMSG_SPACE(%u) > MCLBYTES", __func__, size));

	if (CMSG_SPACE(size) > MLEN)
		m = m_getcl(wait, MT_CONTROL, 0);
	else
		m = m_get(wait, MT_CONTROL);
	if (m == NULL)
		return (NULL);

	KASSERT(CMSG_SPACE(size) <= M_TRAILINGSPACE(m),
	    ("sbcreatecontrol: short mbuf"));
	/*
	 * Don't leave the padding between the msg header and the
	 * cmsg data, nor the padding after the cmsg data, uninitialized.
	 */
	cp = mtod(m, struct cmsghdr *);
	bzero(cp, CMSG_SPACE(size));
	if (p != NULL)
		(void)memcpy(CMSG_DATA(cp), p, size);
	m->m_len = CMSG_SPACE(size);
	cp->cmsg_len = CMSG_LEN(size);
	cp->cmsg_level = level;
	cp->cmsg_type = type;
	return (m);
}
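
/*
 * Illustrative sketch (assumed): building a timestamp control message
 * to deliver alongside a datagram:
 *
 *	struct timeval tv;
 *
 *	microtime(&tv);
 *	control = sbcreatecontrol(&tv, sizeof(tv), SCM_TIMESTAMP,
 *	    SOL_SOCKET, M_NOWAIT);
 *	(control may be NULL under M_NOWAIT; callers must handle that)
 */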

/*
 * This does the same for socket buffers that sotoxsocket does for sockets:
 * generate a user-format data structure describing the socket buffer.  Note
 * that the xsockbuf structure, since it is always embedded in a socket, does
 * not include a self pointer nor a length.  We make this entry point public
 * in case some other mechanism needs it.
 */
void
sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb)
{

	xsb->sb_cc = sb->sb_ccc;
	xsb->sb_hiwat = sb->sb_hiwat;
	xsb->sb_mbcnt = sb->sb_mbcnt;
	xsb->sb_mbmax = sb->sb_mbmax;
	xsb->sb_lowat = sb->sb_lowat;
	xsb->sb_flags = sb->sb_flags;
	xsb->sb_timeo = sb->sb_timeo;
}

/* This takes the place of kern.maxsockbuf, which moved to kern.ipc. */
static int dummy;
SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW | CTLFLAG_SKIP, &dummy, 0, "");
SYSCTL_OID(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf,
    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, &sb_max, 0,
    sysctl_handle_sb_max, "LU",
    "Maximum socket buffer size");
SYSCTL_ULONG(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW,
    &sb_efficiency, 0, "Socket buffer size waste factor");