/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)uipc_socket2.c      8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kern_tls.h"
#include "opt_param.h"

#include <sys/param.h>
#include <sys/aio.h> /* for aio_swake proto */
#include <sys/kernel.h>
#include <sys/ktls.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

/*
 * Function pointer set by the AIO routines so that the socket buffer code
 * can call back into the AIO module if it is loaded.
 */
void    (*aio_swake)(struct socket *, struct sockbuf *);

/*
 * Primitive routines for operating on socket buffers
 */

u_long  sb_max = SB_MAX;
u_long sb_max_adj =
    (quad_t)SB_MAX * MCLBYTES / (MSIZE + MCLBYTES); /* adjusted sb_max */

static  u_long sb_efficiency = 8;       /* parameter for sbreserve() */

#ifdef KERN_TLS
static void     sbcompress_ktls_rx(struct sockbuf *sb, struct mbuf *m,
    struct mbuf *n);
#endif
static struct mbuf      *sbcut_internal(struct sockbuf *sb, int len);
static void     sbflush_internal(struct sockbuf *sb);

/*
 * Our own version of m_clrprotoflags(), that can preserve M_NOTREADY.
 */
static void
sbm_clrprotoflags(struct mbuf *m, int flags)
{
        int mask;

        mask = ~M_PROTOFLAGS;
        if (flags & PRUS_NOTREADY)
                mask |= M_NOTREADY;
        while (m) {
                m->m_flags &= mask;
                m = m->m_next;
        }
}

/*
 * Compress M_NOTREADY mbufs after they have been readied by sbready().
 *
 * sbcompress() skips M_NOTREADY mbufs since the data is not available to
 * be copied at the time of sbcompress().  This function combines small
 * mbufs similar to sbcompress() once mbufs are ready.  'm0' is the first
 * mbuf sbready() marked ready, and 'end' is the first mbuf still not
 * ready.
 */
static void
sbready_compress(struct sockbuf *sb, struct mbuf *m0, struct mbuf *end)
{
        struct mbuf *m, *n;
        int ext_size;

        SOCKBUF_LOCK_ASSERT(sb);

        if ((sb->sb_flags & SB_NOCOALESCE) != 0)
                return;

        for (m = m0; m != end; m = m->m_next) {
                MPASS((m->m_flags & M_NOTREADY) == 0);
                /*
                 * NB: In sbcompress(), 'n' is the last mbuf in the
                 * socket buffer and 'm' is the new mbuf being copied
                 * into the trailing space of 'n'.  Here, the roles
                 * are reversed and 'n' is the next mbuf after 'm'
                 * that is being copied into the trailing space of
                 * 'm'.
                 */
                n = m->m_next;
#ifdef KERN_TLS
                /* Try to coalesce adjacent ktls mbuf hdr/trailers. */
                if ((n != NULL) && (n != end) && (m->m_flags & M_EOR) == 0 &&
                    (m->m_flags & M_EXTPG) &&
                    (n->m_flags & M_EXTPG) &&
                    !mbuf_has_tls_session(m) &&
                    !mbuf_has_tls_session(n)) {
                        int hdr_len, trail_len;

                        hdr_len = n->m_epg_hdrlen;
                        trail_len = m->m_epg_trllen;
                        if (trail_len != 0 && hdr_len != 0 &&
                            trail_len + hdr_len <= MBUF_PEXT_TRAIL_LEN) {
                                /* copy n's header to m's trailer */
                                memcpy(&m->m_epg_trail[trail_len],
                                    n->m_epg_hdr, hdr_len);
                                m->m_epg_trllen += hdr_len;
                                m->m_len += hdr_len;
                                n->m_epg_hdrlen = 0;
                                n->m_len -= hdr_len;
                        }
                }
#endif

                /* Compress small unmapped mbufs into plain mbufs. */
                if ((m->m_flags & M_EXTPG) && m->m_len <= MLEN &&
                    !mbuf_has_tls_session(m)) {
                        ext_size = m->m_ext.ext_size;
                        if (mb_unmapped_compress(m) == 0) {
                                sb->sb_mbcnt -= ext_size;
                                sb->sb_ccnt -= 1;
                        }
                }

                while ((n != NULL) && (n != end) && (m->m_flags & M_EOR) == 0 &&
                    M_WRITABLE(m) &&
                    (m->m_flags & M_EXTPG) == 0 &&
                    !mbuf_has_tls_session(n) &&
                    !mbuf_has_tls_session(m) &&
                    n->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
                    n->m_len <= M_TRAILINGSPACE(m) &&
                    m->m_type == n->m_type) {
                        KASSERT(sb->sb_lastrecord != n,
                            ("%s: merging start of record (%p) into previous mbuf (%p)",
                            __func__, n, m));
                        m_copydata(n, 0, n->m_len, mtodo(m, m->m_len));
                        m->m_len += n->m_len;
                        m->m_next = n->m_next;
                        m->m_flags |= n->m_flags & M_EOR;
                        if (sb->sb_mbtail == n)
                                sb->sb_mbtail = m;

                        sb->sb_mbcnt -= MSIZE;
                        sb->sb_mcnt -= 1;
                        if (n->m_flags & M_EXT) {
                                sb->sb_mbcnt -= n->m_ext.ext_size;
                                sb->sb_ccnt -= 1;
                        }
                        m_free(n);
                        n = m->m_next;
                }
        }
        SBLASTRECORDCHK(sb);
        SBLASTMBUFCHK(sb);
}

/*
 * Mark ready "count" units of I/O starting with "m".  Most mbufs
 * count as a single unit of I/O except for M_EXTPG mbufs which
 * are backed by multiple pages.
 */
int
sbready(struct sockbuf *sb, struct mbuf *m0, int count)
{
        struct mbuf *m;
        u_int blocker;

        SOCKBUF_LOCK_ASSERT(sb);
        KASSERT(sb->sb_fnrdy != NULL, ("%s: sb %p NULL fnrdy", __func__, sb));
        KASSERT(count > 0, ("%s: invalid count %d", __func__, count));

        m = m0;
        blocker = (sb->sb_fnrdy == m) ? M_BLOCKED : 0;

        while (count > 0) {
                KASSERT(m->m_flags & M_NOTREADY,
                    ("%s: m %p !M_NOTREADY", __func__, m));
                if ((m->m_flags & M_EXTPG) != 0 && m->m_epg_npgs != 0) {
                        if (count < m->m_epg_nrdy) {
                                m->m_epg_nrdy -= count;
                                count = 0;
                                break;
                        }
                        count -= m->m_epg_nrdy;
                        m->m_epg_nrdy = 0;
                } else
                        count--;

                m->m_flags &= ~(M_NOTREADY | blocker);
                if (blocker)
                        sb->sb_acc += m->m_len;
                m = m->m_next;
        }

        /*
         * If the first mbuf is still not fully ready because only
         * some of its backing pages were readied, no further progress
         * can be made.
         */
        if (m0 == m) {
                MPASS(m->m_flags & M_NOTREADY);
                return (EINPROGRESS);
        }

        if (!blocker) {
                sbready_compress(sb, m0, m);
                return (EINPROGRESS);
        }

        /* This one was blocking all the queue. */
        for (; m && (m->m_flags & M_NOTREADY) == 0; m = m->m_next) {
                KASSERT(m->m_flags & M_BLOCKED,
                    ("%s: m %p !M_BLOCKED", __func__, m));
                m->m_flags &= ~M_BLOCKED;
                sb->sb_acc += m->m_len;
        }

        sb->sb_fnrdy = m;
        sbready_compress(sb, m0, m);

        return (0);
}
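
/*
 * Example (illustrative sketch): a completion routine that earlier queued
 * not-ready data, e.g. with sbappendstream_locked(sb, m0, PRUS_NOTREADY),
 * would mark it ready along these lines:
 *
 *      SOCKBUF_LOCK(sb);
 *      error = sbready(sb, m0, count);
 *      SOCKBUF_UNLOCK(sb);
 *
 * A return of 0 means the readied mbufs included the head blocker and data
 * became available to the consumer; EINPROGRESS means earlier mbufs are
 * still not ready and sb_acc did not advance.
 */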

/*
 * Adjust sockbuf state reflecting allocation of m.
 */
void
sballoc(struct sockbuf *sb, struct mbuf *m)
{

        SOCKBUF_LOCK_ASSERT(sb);

        sb->sb_ccc += m->m_len;

        if (sb->sb_fnrdy == NULL) {
                if (m->m_flags & M_NOTREADY)
                        sb->sb_fnrdy = m;
                else
                        sb->sb_acc += m->m_len;
        } else
                m->m_flags |= M_BLOCKED;

        if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
                sb->sb_ctl += m->m_len;

        sb->sb_mbcnt += MSIZE;
        sb->sb_mcnt += 1;

        if (m->m_flags & M_EXT) {
                sb->sb_mbcnt += m->m_ext.ext_size;
                sb->sb_ccnt += 1;
        }
}

/*
 * Adjust sockbuf state reflecting freeing of m.
 */
void
sbfree(struct sockbuf *sb, struct mbuf *m)
{

#if 0   /* XXX: not yet: soclose() call path comes here w/o lock. */
        SOCKBUF_LOCK_ASSERT(sb);
#endif

        sb->sb_ccc -= m->m_len;

        if (!(m->m_flags & M_NOTAVAIL))
                sb->sb_acc -= m->m_len;

        if (m == sb->sb_fnrdy) {
                struct mbuf *n;

                KASSERT(m->m_flags & M_NOTREADY,
                    ("%s: m %p !M_NOTREADY", __func__, m));

                n = m->m_next;
                while (n != NULL && !(n->m_flags & M_NOTREADY)) {
                        n->m_flags &= ~M_BLOCKED;
                        sb->sb_acc += n->m_len;
                        n = n->m_next;
                }
                sb->sb_fnrdy = n;
        }

        if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
                sb->sb_ctl -= m->m_len;

        sb->sb_mbcnt -= MSIZE;
        sb->sb_mcnt -= 1;
        if (m->m_flags & M_EXT) {
                sb->sb_mbcnt -= m->m_ext.ext_size;
                sb->sb_ccnt -= 1;
        }

        if (sb->sb_sndptr == m) {
                sb->sb_sndptr = NULL;
                sb->sb_sndptroff = 0;
        }
        if (sb->sb_sndptroff != 0)
                sb->sb_sndptroff -= m->m_len;
}
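
/*
 * Note that sb_ccc counts all bytes queued in the buffer, while sb_acc
 * counts only bytes available to the consumer.  Mbufs queued behind the
 * first not-ready mbuf (sb_fnrdy) are marked M_BLOCKED and excluded from
 * sb_acc until sbready() clears the blocker, so sb_acc <= sb_ccc always
 * holds.
 */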

#ifdef KERN_TLS
/*
 * Similar to sballoc/sbfree but does not adjust state associated with
 * the sb_mb chain such as sb_fnrdy or sb_sndptr*.  Also assumes mbufs
 * are not ready.
 */
void
sballoc_ktls_rx(struct sockbuf *sb, struct mbuf *m)
{

        SOCKBUF_LOCK_ASSERT(sb);

        sb->sb_ccc += m->m_len;
        sb->sb_tlscc += m->m_len;

        sb->sb_mbcnt += MSIZE;
        sb->sb_mcnt += 1;

        if (m->m_flags & M_EXT) {
                sb->sb_mbcnt += m->m_ext.ext_size;
                sb->sb_ccnt += 1;
        }
}

void
sbfree_ktls_rx(struct sockbuf *sb, struct mbuf *m)
{

#if 0   /* XXX: not yet: soclose() call path comes here w/o lock. */
        SOCKBUF_LOCK_ASSERT(sb);
#endif

        sb->sb_ccc -= m->m_len;
        sb->sb_tlscc -= m->m_len;

        sb->sb_mbcnt -= MSIZE;
        sb->sb_mcnt -= 1;

        if (m->m_flags & M_EXT) {
                sb->sb_mbcnt -= m->m_ext.ext_size;
                sb->sb_ccnt -= 1;
        }
}
#endif

/*
 * Socantsendmore indicates that no more data will be sent on the socket; it
 * is normally applied to the socket by the protocol code (in case
 * PRU_SHUTDOWN) when the user informs the system that no more data is to be
 * sent.  Socantrcvmore indicates that no more data will be received, and
 * will normally be applied to the socket by a protocol when it detects that
 * the peer will send no more data.  Data queued for reading in the socket
 * may yet be read.
 */
void
socantsendmore_locked(struct socket *so)
{

        SOCKBUF_LOCK_ASSERT(&so->so_snd);

        so->so_snd.sb_state |= SBS_CANTSENDMORE;
        sowwakeup_locked(so);
        mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
}

void
socantsendmore(struct socket *so)
{

        SOCKBUF_LOCK(&so->so_snd);
        socantsendmore_locked(so);
        mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
}

void
socantrcvmore_locked(struct socket *so)
{

        SOCKBUF_LOCK_ASSERT(&so->so_rcv);

        so->so_rcv.sb_state |= SBS_CANTRCVMORE;
#ifdef KERN_TLS
        if (so->so_rcv.sb_flags & SB_TLS_RX)
                ktls_check_rx(&so->so_rcv);
#endif
        sorwakeup_locked(so);
        mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
}

void
socantrcvmore(struct socket *so)
{

        SOCKBUF_LOCK(&so->so_rcv);
        socantrcvmore_locked(so);
        mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
}

/*
 * Wait for data to arrive at/drain from a socket buffer.
 */
int
sbwait(struct sockbuf *sb)
{

        SOCKBUF_LOCK_ASSERT(sb);

        sb->sb_flags |= SB_WAIT;
        return (msleep_sbt(&sb->sb_acc, SOCKBUF_MTX(sb),
            (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, "sbwait",
            sb->sb_timeo, 0, 0));
}

int
sblock(struct sockbuf *sb, int flags)
{

        KASSERT((flags & SBL_VALID) == flags,
            ("sblock: flags invalid (0x%x)", flags));

        if (flags & SBL_WAIT) {
                if ((sb->sb_flags & SB_NOINTR) ||
                    (flags & SBL_NOINTR)) {
                        sx_xlock(&sb->sb_sx);
                        return (0);
                }
                return (sx_xlock_sig(&sb->sb_sx));
        } else {
                if (sx_try_xlock(&sb->sb_sx) == 0)
                        return (EWOULDBLOCK);
                return (0);
        }
}

void
sbunlock(struct sockbuf *sb)
{

        sx_xunlock(&sb->sb_sx);
}
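
/*
 * Typical consumer pattern (illustrative sketch): serialize with other
 * readers via sblock(), then take the buffer mutex and sleep in sbwait()
 * until enough data is available:
 *
 *      if ((error = sblock(sb, SBL_WAIT)) != 0)
 *              return (error);
 *      SOCKBUF_LOCK(sb);
 *      while (sb->sb_acc < sb->sb_lowat &&
 *          (sb->sb_state & SBS_CANTRCVMORE) == 0 &&
 *          (error = sbwait(sb)) == 0)
 *              continue;
 *      SOCKBUF_UNLOCK(sb);
 *      sbunlock(sb);
 */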

/*
 * Wake up processes waiting on a socket buffer.  Do asynchronous
 * notification via SIGIO if the socket has the SS_ASYNC flag set.
 *
 * Called with the socket buffer lock held; will release the lock by the end
 * of the function.  This allows the caller to acquire the socket buffer lock
 * while testing for the need for various sorts of wakeup and hold it through
 * to the point where it's no longer required.  We currently hold the lock
 * through calls out to other subsystems (with the exception of kqueue), and
 * then release it to avoid lock order issues.  It's not clear that's
 * correct.
 */
void
sowakeup(struct socket *so, struct sockbuf *sb)
{
        int ret;

        SOCKBUF_LOCK_ASSERT(sb);

        selwakeuppri(sb->sb_sel, PSOCK);
        if (!SEL_WAITING(sb->sb_sel))
                sb->sb_flags &= ~SB_SEL;
        if (sb->sb_flags & SB_WAIT) {
                sb->sb_flags &= ~SB_WAIT;
                wakeup(&sb->sb_acc);
        }
        KNOTE_LOCKED(&sb->sb_sel->si_note, 0);
        if (sb->sb_upcall != NULL) {
                ret = sb->sb_upcall(so, sb->sb_upcallarg, M_NOWAIT);
                if (ret == SU_ISCONNECTED) {
                        KASSERT(sb == &so->so_rcv,
                            ("SO_SND upcall returned SU_ISCONNECTED"));
                        soupcall_clear(so, SO_RCV);
                }
        } else
                ret = SU_OK;
        if (sb->sb_flags & SB_AIO)
                sowakeup_aio(so, sb);
        SOCKBUF_UNLOCK(sb);
        if (ret == SU_ISCONNECTED)
                soisconnected(so);
        if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
                pgsigio(&so->so_sigio, SIGIO, 0);
        mtx_assert(SOCKBUF_MTX(sb), MA_NOTOWNED);
}

/*
 * Socket buffer (struct sockbuf) utility routines.
 *
 * Each socket contains two socket buffers: one for sending data and one for
 * receiving data.  Each buffer contains a queue of mbufs, information about
 * the number of mbufs and amount of data in the queue, and other fields
 * allowing select() statements and notification on data availability to be
 * implemented.
 *
 * Data stored in a socket buffer is maintained as a list of records.  Each
 * record is a list of mbufs chained together with the m_next field.  Records
 * are chained together with the m_nextpkt field.  The upper level routine
 * soreceive() expects the following conventions to be observed when placing
 * information in the receive buffer:
 *
 * 1. If the protocol requires each message be preceded by the sender's name,
 *    then a record containing that name must be present before any
 *    associated data (mbufs must be of type MT_SONAME).
 * 2. If the protocol supports the exchange of ``access rights'' (really just
 *    additional data associated with the message), and there are ``rights''
 *    to be received, then a record containing this data should be present
 *    (mbufs must be of type MT_RIGHTS).
 * 3. If a name or rights record exists, then it must be followed by a data
 *    record, perhaps of zero length.
 *
 * Before using a new socket structure it is first necessary to reserve
 * buffer space for the socket, by calling sbreserve().  This should commit
 * some of the available buffer space in the system buffer pool for the
 * socket (currently, it does nothing but enforce limits).  The space should
 * be released by calling sbrelease() when the socket is destroyed.
 */

int
soreserve(struct socket *so, u_long sndcc, u_long rcvcc)
{
        struct thread *td = curthread;

        SOCKBUF_LOCK(&so->so_snd);
        SOCKBUF_LOCK(&so->so_rcv);
        if (sbreserve_locked(&so->so_snd, sndcc, so, td) == 0)
                goto bad;
        if (sbreserve_locked(&so->so_rcv, rcvcc, so, td) == 0)
                goto bad2;
        if (so->so_rcv.sb_lowat == 0)
                so->so_rcv.sb_lowat = 1;
        if (so->so_snd.sb_lowat == 0)
                so->so_snd.sb_lowat = MCLBYTES;
        if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
                so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
        SOCKBUF_UNLOCK(&so->so_rcv);
        SOCKBUF_UNLOCK(&so->so_snd);
        return (0);
bad2:
        sbrelease_locked(&so->so_snd, so);
bad:
        SOCKBUF_UNLOCK(&so->so_rcv);
        SOCKBUF_UNLOCK(&so->so_snd);
        return (ENOBUFS);
}
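
/*
 * Example (illustrative sketch): a protocol attach routine typically
 * reserves default send and receive space before the socket is first used,
 * along the lines of
 *
 *      error = soreserve(so, sendspace, recvspace);
 *      if (error != 0)
 *              return (error);
 *
 * where 'sendspace' and 'recvspace' stand for protocol-chosen defaults.
 */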

static int
sysctl_handle_sb_max(SYSCTL_HANDLER_ARGS)
{
        int error = 0;
        u_long tmp_sb_max = sb_max;

        error = sysctl_handle_long(oidp, &tmp_sb_max, arg2, req);
        if (error || !req->newptr)
                return (error);
        if (tmp_sb_max < MSIZE + MCLBYTES)
                return (EINVAL);
        sb_max = tmp_sb_max;
        sb_max_adj = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
        return (0);
}

/*
 * Allot mbufs to a sockbuf.  Attempt to scale mbmax so that mbcnt doesn't
 * become limiting if buffering efficiency is near the normal case.
 */
int
sbreserve_locked(struct sockbuf *sb, u_long cc, struct socket *so,
    struct thread *td)
{
        rlim_t sbsize_limit;

        SOCKBUF_LOCK_ASSERT(sb);

        /*
         * When a thread is passed, we take into account the thread's socket
         * buffer size limit.  The caller will generally pass curthread, but
         * in the TCP input path, NULL will be passed to indicate that no
         * appropriate thread resource limits are available.  In that case,
         * we don't apply a process limit.
         */
        if (cc > sb_max_adj)
                return (0);
        if (td != NULL) {
                sbsize_limit = lim_cur(td, RLIMIT_SBSIZE);
        } else
                sbsize_limit = RLIM_INFINITY;
        if (!chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, cc,
            sbsize_limit))
                return (0);
        sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
        if (sb->sb_lowat > sb->sb_hiwat)
                sb->sb_lowat = sb->sb_hiwat;
        return (1);
}

int
sbsetopt(struct socket *so, int cmd, u_long cc)
{
        struct sockbuf *sb;
        short *flags;
        u_int *hiwat, *lowat;
        int error;

        sb = NULL;
        SOCK_LOCK(so);
        if (SOLISTENING(so)) {
                switch (cmd) {
                case SO_SNDLOWAT:
                case SO_SNDBUF:
                        lowat = &so->sol_sbsnd_lowat;
                        hiwat = &so->sol_sbsnd_hiwat;
                        flags = &so->sol_sbsnd_flags;
                        break;
                case SO_RCVLOWAT:
                case SO_RCVBUF:
                        lowat = &so->sol_sbrcv_lowat;
                        hiwat = &so->sol_sbrcv_hiwat;
                        flags = &so->sol_sbrcv_flags;
                        break;
                }
        } else {
                switch (cmd) {
                case SO_SNDLOWAT:
                case SO_SNDBUF:
                        sb = &so->so_snd;
                        break;
                case SO_RCVLOWAT:
                case SO_RCVBUF:
                        sb = &so->so_rcv;
                        break;
                }
                flags = &sb->sb_flags;
                hiwat = &sb->sb_hiwat;
                lowat = &sb->sb_lowat;
                SOCKBUF_LOCK(sb);
        }

        error = 0;
        switch (cmd) {
        case SO_SNDBUF:
        case SO_RCVBUF:
                if (SOLISTENING(so)) {
                        if (cc > sb_max_adj) {
                                error = ENOBUFS;
                                break;
                        }
                        *hiwat = cc;
                        if (*lowat > *hiwat)
                                *lowat = *hiwat;
                } else {
                        if (!sbreserve_locked(sb, cc, so, curthread))
                                error = ENOBUFS;
                }
                if (error == 0)
                        *flags &= ~SB_AUTOSIZE;
                break;
        case SO_SNDLOWAT:
        case SO_RCVLOWAT:
                /*
                 * Make sure the low-water is never greater than the
                 * high-water.
                 */
                *lowat = (cc > *hiwat) ? *hiwat : cc;
                break;
        }

        if (!SOLISTENING(so))
                SOCKBUF_UNLOCK(sb);
        SOCK_UNLOCK(so);
        return (error);
}

/*
 * Free mbufs held by a socket, and reserved mbuf space.
 */
void
sbrelease_internal(struct sockbuf *sb, struct socket *so)
{

        sbflush_internal(sb);
        (void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0,
            RLIM_INFINITY);
        sb->sb_mbmax = 0;
}

void
sbrelease_locked(struct sockbuf *sb, struct socket *so)
{

        SOCKBUF_LOCK_ASSERT(sb);

        sbrelease_internal(sb, so);
}

void
sbrelease(struct sockbuf *sb, struct socket *so)
{

        SOCKBUF_LOCK(sb);
        sbrelease_locked(sb, so);
        SOCKBUF_UNLOCK(sb);
}

void
sbdestroy(struct sockbuf *sb, struct socket *so)
{

        sbrelease_internal(sb, so);
#ifdef KERN_TLS
        if (sb->sb_tls_info != NULL)
                ktls_free(sb->sb_tls_info);
        sb->sb_tls_info = NULL;
#endif
}

/*
 * Routines to add and remove data from an mbuf queue.
 *
 * The routines sbappend() or sbappendrecord() are normally called to append
 * new mbufs to a socket buffer, after checking that adequate space is
 * available, comparing the function sbspace() with the amount of data to be
 * added.  sbappendrecord() differs from sbappend() in that data supplied is
 * treated as the beginning of a new record.  To place a sender's address,
 * optional access rights, and data in a socket receive buffer,
 * sbappendaddr() should be used.  To place access rights and data in a
 * socket receive buffer, sbappendrights() should be used.  In either case,
 * the new data begins a new record.  Note that unlike sbappend() and
 * sbappendrecord(), these routines check for the caller that there will be
 * enough space to store the data.  Each fails if there is not enough space,
 * or if it cannot find mbufs to store additional information in.
 *
 * Reliable protocols may use the socket send buffer to hold data awaiting
 * acknowledgement.  Data is normally copied from a socket send buffer with
 * m_copy for output to a peer, and then removed from the socket buffer with
 * sbdrop() or sbdroprecord() when the data is acknowledged by the peer.
 */
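
/*
 * Example (illustrative sketch): a datagram protocol's input path delivers
 * a packet together with its source address roughly as
 *
 *      SOCKBUF_LOCK(&so->so_rcv);
 *      if (sbappendaddr_locked(&so->so_rcv, src, m, opts) == 0) {
 *              SOCKBUF_UNLOCK(&so->so_rcv);
 *              m_freem(m);             (and free 'opts' as well)
 *      } else
 *              sorwakeup_locked(so);
 *
 * sbappendaddr_locked() performs the sbspace() check itself, and
 * sorwakeup_locked() releases the buffer lock.
 */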

#ifdef SOCKBUF_DEBUG
void
sblastrecordchk(struct sockbuf *sb, const char *file, int line)
{
        struct mbuf *m = sb->sb_mb;

        SOCKBUF_LOCK_ASSERT(sb);

        while (m && m->m_nextpkt)
                m = m->m_nextpkt;

        if (m != sb->sb_lastrecord) {
                printf("%s: sb_mb %p sb_lastrecord %p last %p\n",
                    __func__, sb->sb_mb, sb->sb_lastrecord, m);
                printf("packet chain:\n");
                for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt)
                        printf("\t%p\n", m);
                panic("%s from %s:%u", __func__, file, line);
        }
}

void
sblastmbufchk(struct sockbuf *sb, const char *file, int line)
{
        struct mbuf *m = sb->sb_mb;
        struct mbuf *n;

        SOCKBUF_LOCK_ASSERT(sb);

        while (m && m->m_nextpkt)
                m = m->m_nextpkt;

        while (m && m->m_next)
                m = m->m_next;

        if (m != sb->sb_mbtail) {
                printf("%s: sb_mb %p sb_mbtail %p last %p\n",
                    __func__, sb->sb_mb, sb->sb_mbtail, m);
                printf("packet tree:\n");
                for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) {
                        printf("\t");
                        for (n = m; n != NULL; n = n->m_next)
                                printf("%p ", n);
                        printf("\n");
                }
                panic("%s from %s:%u", __func__, file, line);
        }

#ifdef KERN_TLS
        m = sb->sb_mtls;
        while (m && m->m_next)
                m = m->m_next;

        if (m != sb->sb_mtlstail) {
                printf("%s: sb_mtls %p sb_mtlstail %p last %p\n",
                    __func__, sb->sb_mtls, sb->sb_mtlstail, m);
                printf("TLS packet tree:\n");
                printf("\t");
                for (m = sb->sb_mtls; m != NULL; m = m->m_next) {
                        printf("%p ", m);
                }
                printf("\n");
                panic("%s from %s:%u", __func__, file, line);
        }
#endif
}
#endif /* SOCKBUF_DEBUG */

#define SBLINKRECORD(sb, m0) do {                                       \
        SOCKBUF_LOCK_ASSERT(sb);                                        \
        if ((sb)->sb_lastrecord != NULL)                                \
                (sb)->sb_lastrecord->m_nextpkt = (m0);                  \
        else                                                            \
                (sb)->sb_mb = (m0);                                     \
        (sb)->sb_lastrecord = (m0);                                     \
} while (/*CONSTCOND*/0)

/*
 * Append mbuf chain m to the last record in the socket buffer sb.  The
 * additional space associated with the mbuf chain is recorded in sb.  Empty
 * mbufs are discarded and mbufs are compacted where possible.
 */
void
sbappend_locked(struct sockbuf *sb, struct mbuf *m, int flags)
{
        struct mbuf *n;

        SOCKBUF_LOCK_ASSERT(sb);

        if (m == NULL)
                return;
        sbm_clrprotoflags(m, flags);
        SBLASTRECORDCHK(sb);
        n = sb->sb_mb;
        if (n) {
                while (n->m_nextpkt)
                        n = n->m_nextpkt;
                do {
                        if (n->m_flags & M_EOR) {
                                sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
                                return;
                        }
                } while (n->m_next && (n = n->m_next));
        } else {
                /*
                 * XXX Would like to simply use sb_mbtail here, but
                 * XXX I need to verify that I won't miss an EOR that
                 * XXX way.
                 */
                if ((n = sb->sb_lastrecord) != NULL) {
                        do {
                                if (n->m_flags & M_EOR) {
                                        sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
                                        return;
                                }
                        } while (n->m_next && (n = n->m_next));
                } else {
                        /*
                         * If this is the first record in the socket buffer,
                         * it's also the last record.
                         */
                        sb->sb_lastrecord = m;
                }
        }
        sbcompress(sb, m, n);
        SBLASTRECORDCHK(sb);
}

/*
 * Append mbuf chain m to the last record in the socket buffer sb.  The
 * additional space associated with the mbuf chain is recorded in sb.  Empty
 * mbufs are discarded and mbufs are compacted where possible.
 */
void
sbappend(struct sockbuf *sb, struct mbuf *m, int flags)
{

        SOCKBUF_LOCK(sb);
        sbappend_locked(sb, m, flags);
        SOCKBUF_UNLOCK(sb);
}

#ifdef KERN_TLS
/*
 * Append an mbuf containing encrypted TLS data.  The data
 * is marked M_NOTREADY until it has been decrypted and
 * stored as a TLS record.
 */
static void
sbappend_ktls_rx(struct sockbuf *sb, struct mbuf *m)
{
        struct mbuf *n;

        SBLASTMBUFCHK(sb);

        /* Remove all packet headers and mbuf tags to get a pure data chain. */
        m_demote(m, 1, 0);

        for (n = m; n != NULL; n = n->m_next)
                n->m_flags |= M_NOTREADY;
        sbcompress_ktls_rx(sb, m, sb->sb_mtlstail);
        ktls_check_rx(sb);
}
#endif

/*
 * This version of sbappend() should only be used when the caller absolutely
 * knows that there will never be more than one record in the socket buffer,
 * that is, a stream protocol (such as TCP).
 */
void
sbappendstream_locked(struct sockbuf *sb, struct mbuf *m, int flags)
{
        SOCKBUF_LOCK_ASSERT(sb);

        KASSERT(m->m_nextpkt == NULL, ("sbappendstream 0"));

#ifdef KERN_TLS
        /*
         * Decrypted TLS records are appended as records via
         * sbappendrecord().  TCP passes encrypted TLS records to this
         * function which must be scheduled for decryption.
         */
        if (sb->sb_flags & SB_TLS_RX) {
                sbappend_ktls_rx(sb, m);
                return;
        }
#endif

        KASSERT(sb->sb_mb == sb->sb_lastrecord, ("sbappendstream 1"));

        SBLASTMBUFCHK(sb);

#ifdef KERN_TLS
        if (sb->sb_tls_info != NULL)
                ktls_seq(sb, m);
#endif

        /* Remove all packet headers and mbuf tags to get a pure data chain. */
        m_demote(m, 1, flags & PRUS_NOTREADY ? M_NOTREADY : 0);

        sbcompress(sb, m, sb->sb_mbtail);

        sb->sb_lastrecord = sb->sb_mb;
        SBLASTRECORDCHK(sb);
}
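
/*
 * Example (illustrative sketch): a stream protocol's input path appends
 * in-order data and wakes the reader roughly as
 *
 *      SOCKBUF_LOCK(&so->so_rcv);
 *      sbappendstream_locked(&so->so_rcv, m, 0);
 *      sorwakeup_locked(so);
 *
 * with sorwakeup_locked() releasing the buffer lock via sowakeup().
 */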

/*
 * This version of sbappend() should only be used when the caller absolutely
 * knows that there will never be more than one record in the socket buffer,
 * that is, a stream protocol (such as TCP).
 */
void
sbappendstream(struct sockbuf *sb, struct mbuf *m, int flags)
{

        SOCKBUF_LOCK(sb);
        sbappendstream_locked(sb, m, flags);
        SOCKBUF_UNLOCK(sb);
}

#ifdef SOCKBUF_DEBUG
void
sbcheck(struct sockbuf *sb, const char *file, int line)
{
        struct mbuf *m, *n, *fnrdy;
        u_long acc, ccc, mbcnt;
#ifdef KERN_TLS
        u_long tlscc;
#endif

        SOCKBUF_LOCK_ASSERT(sb);

        acc = ccc = mbcnt = 0;
        fnrdy = NULL;

        for (m = sb->sb_mb; m; m = n) {
                n = m->m_nextpkt;
                for (; m; m = m->m_next) {
                        if (m->m_len == 0) {
                                printf("sb %p empty mbuf %p\n", sb, m);
                                goto fail;
                        }
                        if ((m->m_flags & M_NOTREADY) && fnrdy == NULL) {
                                if (m != sb->sb_fnrdy) {
                                        printf("sb %p: fnrdy %p != m %p\n",
                                            sb, sb->sb_fnrdy, m);
                                        goto fail;
                                }
                                fnrdy = m;
                        }
                        if (fnrdy) {
                                if (!(m->m_flags & M_NOTAVAIL)) {
                                        printf("sb %p: fnrdy %p, m %p is avail\n",
                                            sb, sb->sb_fnrdy, m);
                                        goto fail;
                                }
                        } else
                                acc += m->m_len;
                        ccc += m->m_len;
                        mbcnt += MSIZE;
                        if (m->m_flags & M_EXT) /*XXX*/ /* pretty sure this is bogus */
                                mbcnt += m->m_ext.ext_size;
                }
        }
#ifdef KERN_TLS
        /*
         * Account for mbufs "detached" by ktls_detach_record() while
         * they are decrypted by ktls_decrypt().  tlsdcc gives a count
         * of the detached bytes that are included in ccc.  The mbufs
         * and clusters are not included in the socket buffer
         * accounting.
         */
        ccc += sb->sb_tlsdcc;

        tlscc = 0;
        for (m = sb->sb_mtls; m; m = m->m_next) {
                if (m->m_nextpkt != NULL) {
                        printf("sb %p TLS mbuf %p with nextpkt\n", sb, m);
                        goto fail;
                }
                if ((m->m_flags & M_NOTREADY) == 0) {
                        printf("sb %p TLS mbuf %p ready\n", sb, m);
                        goto fail;
                }
                tlscc += m->m_len;
                ccc += m->m_len;
                mbcnt += MSIZE;
                if (m->m_flags & M_EXT) /*XXX*/ /* pretty sure this is bogus */
                        mbcnt += m->m_ext.ext_size;
        }

        if (sb->sb_tlscc != tlscc) {
                printf("tlscc %ld/%u dcc %u\n", tlscc, sb->sb_tlscc,
                    sb->sb_tlsdcc);
                goto fail;
        }
#endif
        if (acc != sb->sb_acc || ccc != sb->sb_ccc || mbcnt != sb->sb_mbcnt) {
                printf("acc %ld/%u ccc %ld/%u mbcnt %ld/%u\n",
                    acc, sb->sb_acc, ccc, sb->sb_ccc, mbcnt, sb->sb_mbcnt);
#ifdef KERN_TLS
                printf("tlscc %ld/%u dcc %u\n", tlscc, sb->sb_tlscc,
                    sb->sb_tlsdcc);
#endif
                goto fail;
        }
        return;
fail:
        panic("%s from %s:%u", __func__, file, line);
}
#endif

/*
 * As above, except the mbuf chain begins a new record.
 */
void
sbappendrecord_locked(struct sockbuf *sb, struct mbuf *m0)
{
        struct mbuf *m;

        SOCKBUF_LOCK_ASSERT(sb);

        if (m0 == NULL)
                return;
        m_clrprotoflags(m0);
        /*
         * Put the first mbuf on the queue.  Note this permits zero length
         * records.
         */
        sballoc(sb, m0);
        SBLASTRECORDCHK(sb);
        SBLINKRECORD(sb, m0);
        sb->sb_mbtail = m0;
        m = m0->m_next;
        m0->m_next = 0;
        if (m && (m0->m_flags & M_EOR)) {
                m0->m_flags &= ~M_EOR;
                m->m_flags |= M_EOR;
        }
        /* always call sbcompress() so it can do SBLASTMBUFCHK() */
        sbcompress(sb, m, m0);
}

/*
 * As above, except the mbuf chain begins a new record.
 */
void
sbappendrecord(struct sockbuf *sb, struct mbuf *m0)
{

        SOCKBUF_LOCK(sb);
        sbappendrecord_locked(sb, m0);
        SOCKBUF_UNLOCK(sb);
}

/* Helper routine that appends data, control, and address to a sockbuf. */
static int
sbappendaddr_locked_internal(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control, struct mbuf *ctrl_last)
{
        struct mbuf *m, *n, *nlast;
#if MSIZE <= 256
        if (asa->sa_len > MLEN)
                return (0);
#endif
        m = m_get(M_NOWAIT, MT_SONAME);
        if (m == NULL)
                return (0);
        m->m_len = asa->sa_len;
        bcopy(asa, mtod(m, caddr_t), asa->sa_len);
        if (m0) {
                m_clrprotoflags(m0);
                m_tag_delete_chain(m0, NULL);
                /*
                 * Clear some persistent info from pkthdr.
                 * We don't use m_demote(), because some netgraph consumers
                 * expect M_PKTHDR presence.
                 */
                m0->m_pkthdr.rcvif = NULL;
                m0->m_pkthdr.flowid = 0;
                m0->m_pkthdr.csum_flags = 0;
                m0->m_pkthdr.fibnum = 0;
                m0->m_pkthdr.rsstype = 0;
        }
        if (ctrl_last)
                ctrl_last->m_next = m0; /* concatenate data to control */
        else
                control = m0;
        m->m_next = control;
        for (n = m; n->m_next != NULL; n = n->m_next)
                sballoc(sb, n);
        sballoc(sb, n);
        nlast = n;
        SBLINKRECORD(sb, m);

        sb->sb_mbtail = nlast;
        SBLASTMBUFCHK(sb);

        SBLASTRECORDCHK(sb);
        return (1);
}

/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if no space in sockbuf or insufficient
 * mbufs.
 */
int
sbappendaddr_locked(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
        struct mbuf *ctrl_last;
        int space = asa->sa_len;

        SOCKBUF_LOCK_ASSERT(sb);

        if (m0 && (m0->m_flags & M_PKTHDR) == 0)
                panic("sbappendaddr_locked");
        if (m0)
                space += m0->m_pkthdr.len;
        space += m_length(control, &ctrl_last);

        if (space > sbspace(sb))
                return (0);
        return (sbappendaddr_locked_internal(sb, asa, m0, control, ctrl_last));
}

/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if insufficient mbufs.  Does not validate
 * space on the receiving sockbuf.
 */
int
sbappendaddr_nospacecheck_locked(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
        struct mbuf *ctrl_last;

        SOCKBUF_LOCK_ASSERT(sb);

        ctrl_last = (control == NULL) ? NULL : m_last(control);
        return (sbappendaddr_locked_internal(sb, asa, m0, control, ctrl_last));
}

/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if no space in sockbuf or insufficient
 * mbufs.
 */
int
sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
        int retval;

        SOCKBUF_LOCK(sb);
        retval = sbappendaddr_locked(sb, asa, m0, control);
        SOCKBUF_UNLOCK(sb);
        return (retval);
}

void
sbappendcontrol_locked(struct sockbuf *sb, struct mbuf *m0,
    struct mbuf *control, int flags)
{
        struct mbuf *m, *mlast;

        sbm_clrprotoflags(m0, flags);
        m_last(control)->m_next = m0;

        SBLASTRECORDCHK(sb);

        for (m = control; m->m_next; m = m->m_next)
                sballoc(sb, m);
        sballoc(sb, m);
        mlast = m;
        SBLINKRECORD(sb, control);

        sb->sb_mbtail = mlast;
        SBLASTMBUFCHK(sb);

        SBLASTRECORDCHK(sb);
}

void
sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control,
    int flags)
{

        SOCKBUF_LOCK(sb);
        sbappendcontrol_locked(sb, m0, control, flags);
        SOCKBUF_UNLOCK(sb);
}

/*
 * Append the data in mbuf chain (m) into the socket buffer sb following mbuf
 * (n).  If (n) is NULL, the buffer is presumed empty.
 *
 * When the data is compressed, mbufs in the chain may be handled in one of
 * three ways:
 *
 * (1) The mbuf may simply be dropped, if it contributes nothing (no data, no
 *     record boundary, and no change in data type).
 *
 * (2) The mbuf may be coalesced -- i.e., data in the mbuf may be copied into
 *     an mbuf already in the socket buffer.  This can occur if an
 *     appropriate mbuf exists, there is room, both mbufs are not marked as
 *     not ready, and no merging of data types will occur.
 *
 * (3) The mbuf may be appended to the end of the existing mbuf chain.
 *
 * If any of the new mbufs is marked as M_EOR, mark the last mbuf appended as
 * end-of-record.
 */
void
sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
{
        int eor = 0;
        struct mbuf *o;

        SOCKBUF_LOCK_ASSERT(sb);

        while (m) {
                eor |= m->m_flags & M_EOR;
                if (m->m_len == 0 &&
                    (eor == 0 ||
                    (((o = m->m_next) || (o = n)) &&
                    o->m_type == m->m_type))) {
                        if (sb->sb_lastrecord == m)
                                sb->sb_lastrecord = m->m_next;
                        m = m_free(m);
                        continue;
                }
                if (n && (n->m_flags & M_EOR) == 0 &&
                    M_WRITABLE(n) &&
                    ((sb->sb_flags & SB_NOCOALESCE) == 0) &&
                    !(m->m_flags & M_NOTREADY) &&
                    !(n->m_flags & (M_NOTREADY | M_EXTPG)) &&
                    !mbuf_has_tls_session(m) &&
                    !mbuf_has_tls_session(n) &&
                    m->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
                    m->m_len <= M_TRAILINGSPACE(n) &&
                    n->m_type == m->m_type) {
                        m_copydata(m, 0, m->m_len, mtodo(n, n->m_len));
                        n->m_len += m->m_len;
                        sb->sb_ccc += m->m_len;
                        if (sb->sb_fnrdy == NULL)
                                sb->sb_acc += m->m_len;
                        if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
                                /* XXX: Probably don't need.*/
                                sb->sb_ctl += m->m_len;
                        m = m_free(m);
                        continue;
                }
                if (m->m_len <= MLEN && (m->m_flags & M_EXTPG) &&
                    (m->m_flags & M_NOTREADY) == 0 &&
                    !mbuf_has_tls_session(m))
                        (void)mb_unmapped_compress(m);
                if (n)
                        n->m_next = m;
                else
                        sb->sb_mb = m;
                sb->sb_mbtail = m;
                sballoc(sb, m);
                n = m;
                m->m_flags &= ~M_EOR;
                m = m->m_next;
                n->m_next = 0;
        }
        if (eor) {
                KASSERT(n != NULL, ("sbcompress: eor && n == NULL"));
                n->m_flags |= eor;
        }
        SBLASTMBUFCHK(sb);
}
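
/*
 * Worked example: a sequence of small writes to a stream socket normally
 * hits case (2) above.  A 1-byte mbuf whose data fits in
 * M_TRAILINGSPACE() of the buffer's tail mbuf is copied into the tail and
 * freed, so many 1-byte appends consume one mbuf's worth of storage rather
 * than one mbuf per byte.  Chunks larger than MCLBYTES / 4, or data that
 * does not fit in the tail's trailing space, are instead linked in as new
 * mbufs (case (3)).
 */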

#ifdef KERN_TLS
/*
 * A version of sbcompress() for encrypted TLS RX mbufs.  These mbufs
 * are appended to the 'sb_mtls' chain instead of 'sb_mb' and are also
 * a bit simpler (no EOR markers, always MT_DATA, etc.).
 */
static void
sbcompress_ktls_rx(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
{

        SOCKBUF_LOCK_ASSERT(sb);

        while (m) {
                KASSERT((m->m_flags & M_EOR) == 0,
                    ("TLS RX mbuf %p with EOR", m));
                KASSERT(m->m_type == MT_DATA,
                    ("TLS RX mbuf %p is not MT_DATA", m));
                KASSERT((m->m_flags & M_NOTREADY) != 0,
                    ("TLS RX mbuf %p ready", m));
                KASSERT((m->m_flags & M_EXTPG) == 0,
                    ("TLS RX mbuf %p unmapped", m));

                if (m->m_len == 0) {
                        m = m_free(m);
                        continue;
                }

                /*
                 * Even though both 'n' and 'm' are NOTREADY, it's ok
                 * to coalesce the data.
                 */
                if (n &&
                    M_WRITABLE(n) &&
                    ((sb->sb_flags & SB_NOCOALESCE) == 0) &&
                    !(n->m_flags & (M_EXTPG)) &&
                    m->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
                    m->m_len <= M_TRAILINGSPACE(n)) {
                        m_copydata(m, 0, m->m_len, mtodo(n, n->m_len));
                        n->m_len += m->m_len;
                        sb->sb_ccc += m->m_len;
                        sb->sb_tlscc += m->m_len;
                        m = m_free(m);
                        continue;
                }
                if (n)
                        n->m_next = m;
                else
                        sb->sb_mtls = m;
                sb->sb_mtlstail = m;
                sballoc_ktls_rx(sb, m);
                n = m;
                m = m->m_next;
                n->m_next = NULL;
        }
        SBLASTMBUFCHK(sb);
}
#endif

/*
 * Free all mbufs in a sockbuf.  Check that all resources are reclaimed.
 */
static void
sbflush_internal(struct sockbuf *sb)
{

        while (sb->sb_mbcnt || sb->sb_tlsdcc) {
                /*
                 * Don't call sbcut(sb, 0) if the leading mbuf is non-empty:
                 * we would loop forever.  Panic instead.
                 */
                if (sb->sb_ccc == 0 && (sb->sb_mb == NULL || sb->sb_mb->m_len))
                        break;
                m_freem(sbcut_internal(sb, (int)sb->sb_ccc));
        }
        KASSERT(sb->sb_ccc == 0 && sb->sb_mb == 0 && sb->sb_mbcnt == 0,
            ("%s: ccc %u mb %p mbcnt %u", __func__,
            sb->sb_ccc, (void *)sb->sb_mb, sb->sb_mbcnt));
}

void
sbflush_locked(struct sockbuf *sb)
{

        SOCKBUF_LOCK_ASSERT(sb);
        sbflush_internal(sb);
}

void
sbflush(struct sockbuf *sb)
{

        SOCKBUF_LOCK(sb);
        sbflush_locked(sb);
        SOCKBUF_UNLOCK(sb);
}

/*
 * Cut data from (the front of) a sockbuf.
 */
static struct mbuf *
sbcut_internal(struct sockbuf *sb, int len)
{
        struct mbuf *m, *next, *mfree;
        bool is_tls;

        KASSERT(len >= 0, ("%s: len is %d but it is supposed to be >= 0",
            __func__, len));
        KASSERT(len <= sb->sb_ccc, ("%s: len: %d is > ccc: %u",
            __func__, len, sb->sb_ccc));

        next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
        is_tls = false;
        mfree = NULL;

        while (len > 0) {
                if (m == NULL) {
#ifdef KERN_TLS
                        if (next == NULL && !is_tls) {
                                if (sb->sb_tlsdcc != 0) {
                                        MPASS(len >= sb->sb_tlsdcc);
                                        len -= sb->sb_tlsdcc;
                                        sb->sb_ccc -= sb->sb_tlsdcc;
                                        sb->sb_tlsdcc = 0;
                                        if (len == 0)
                                                break;
                                }
                                next = sb->sb_mtls;
                                is_tls = true;
                        }
#endif
                        KASSERT(next, ("%s: no next, len %d", __func__, len));
                        m = next;
                        next = m->m_nextpkt;
                }
                if (m->m_len > len) {
                        KASSERT(!(m->m_flags & M_NOTAVAIL),
                            ("%s: m %p M_NOTAVAIL", __func__, m));
                        m->m_len -= len;
                        m->m_data += len;
                        sb->sb_ccc -= len;
                        sb->sb_acc -= len;
                        if (sb->sb_sndptroff != 0)
                                sb->sb_sndptroff -= len;
                        if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
                                sb->sb_ctl -= len;
                        break;
                }
                len -= m->m_len;
#ifdef KERN_TLS
                if (is_tls)
                        sbfree_ktls_rx(sb, m);
                else
#endif
                        sbfree(sb, m);
                /*
                 * Do not put M_NOTREADY buffers to the free list, they
                 * are referenced from outside.
                 */
                if (m->m_flags & M_NOTREADY && !is_tls)
                        m = m->m_next;
                else {
                        struct mbuf *n;

                        n = m->m_next;
                        m->m_next = mfree;
                        mfree = m;
                        m = n;
                }
        }
        /*
         * Free any zero-length mbufs from the buffer.
         * For SOCK_DGRAM sockets such mbufs represent empty records.
         * XXX: For SOCK_STREAM sockets such mbufs can appear in the buffer,
         * when sosend_generic() needs to send only control data.
         */
        while (m && m->m_len == 0) {
                struct mbuf *n;

                sbfree(sb, m);
                n = m->m_next;
                m->m_next = mfree;
                mfree = m;
                m = n;
        }
#ifdef KERN_TLS
        if (is_tls) {
                sb->sb_mb = NULL;
                sb->sb_mtls = m;
                if (m == NULL)
                        sb->sb_mtlstail = NULL;
        } else
#endif
        if (m) {
                sb->sb_mb = m;
                m->m_nextpkt = next;
        } else
                sb->sb_mb = next;
        /*
         * First part is an inline SB_EMPTY_FIXUP().  Second part makes sure
         * sb_lastrecord is up-to-date if we dropped part of the last record.
         */
        m = sb->sb_mb;
        if (m == NULL) {
                sb->sb_mbtail = NULL;
                sb->sb_lastrecord = NULL;
        } else if (m->m_nextpkt == NULL) {
                sb->sb_lastrecord = m;
        }

        return (mfree);
}
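
/*
 * Example (illustrative sketch): a reliable protocol drops acknowledged
 * data from the head of its send buffer roughly as
 *
 *      SOCKBUF_LOCK(&so->so_snd);
 *      sbdrop_locked(&so->so_snd, acked);
 *      sowwakeup_locked(so);
 *
 * where 'acked' stands for the number of bytes the peer has acknowledged
 * and sowwakeup_locked() releases the buffer lock.
 */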

/*
 * Drop data from (the front of) a sockbuf.
 */
void
sbdrop_locked(struct sockbuf *sb, int len)
{

        SOCKBUF_LOCK_ASSERT(sb);
        m_freem(sbcut_internal(sb, len));
}

/*
 * Drop data from (the front of) a sockbuf,
 * and return it to caller.
 */
struct mbuf *
sbcut_locked(struct sockbuf *sb, int len)
{

        SOCKBUF_LOCK_ASSERT(sb);
        return (sbcut_internal(sb, len));
}

void
sbdrop(struct sockbuf *sb, int len)
{
        struct mbuf *mfree;

        SOCKBUF_LOCK(sb);
        mfree = sbcut_internal(sb, len);
        SOCKBUF_UNLOCK(sb);

        m_freem(mfree);
}

struct mbuf *
sbsndptr_noadv(struct sockbuf *sb, uint32_t off, uint32_t *moff)
{
        struct mbuf *m;

        KASSERT(sb->sb_mb != NULL, ("%s: sb_mb is NULL", __func__));
        if (sb->sb_sndptr == NULL || sb->sb_sndptroff > off) {
                *moff = off;
                if (sb->sb_sndptr == NULL) {
                        sb->sb_sndptr = sb->sb_mb;
                        sb->sb_sndptroff = 0;
                }
                return (sb->sb_mb);
        } else {
                m = sb->sb_sndptr;
                off -= sb->sb_sndptroff;
        }
        *moff = off;
        return (m);
}

void
sbsndptr_adv(struct sockbuf *sb, struct mbuf *mb, uint32_t len)
{
        /*
         * A small copy was done, advance the sb_sndptr forward to cover
         * it.
         */
        struct mbuf *m;

        if (mb != sb->sb_sndptr) {
                /* Did not copy out from the same mbuf. */
                return;
        }
        m = mb;
        while (m && (len > 0)) {
                if (len >= m->m_len) {
                        len -= m->m_len;
                        if (m->m_next) {
                                sb->sb_sndptroff += m->m_len;
                                sb->sb_sndptr = m->m_next;
                        }
                        m = m->m_next;
                } else {
                        len = 0;
                }
        }
}

/*
 * Return the first mbuf and the mbuf data offset for the provided
 * send offset without changing the "sb_sndptroff" field.
 */
struct mbuf *
sbsndmbuf(struct sockbuf *sb, u_int off, u_int *moff)
{
        struct mbuf *m;

        KASSERT(sb->sb_mb != NULL, ("%s: sb_mb is NULL", __func__));

        /*
         * If the "off" is below the stored offset, which happens on
         * retransmits, just use "sb_mb":
         */
        if (sb->sb_sndptr == NULL || sb->sb_sndptroff > off) {
                m = sb->sb_mb;
        } else {
                m = sb->sb_sndptr;
                off -= sb->sb_sndptroff;
        }
        while (off > 0 && m != NULL) {
                if (off < m->m_len)
                        break;
                off -= m->m_len;
                m = m->m_next;
        }
        *moff = off;
        return (m);
}

/*
 * Drop a record off the front of a sockbuf and move the next record to the
 * front.
 */
void
sbdroprecord_locked(struct sockbuf *sb)
{
        struct mbuf *m;

        SOCKBUF_LOCK_ASSERT(sb);

        m = sb->sb_mb;
        if (m) {
                sb->sb_mb = m->m_nextpkt;
                do {
                        sbfree(sb, m);
                        m = m_free(m);
                } while (m);
        }
        SB_EMPTY_FIXUP(sb);
}

/*
 * Drop a record off the front of a sockbuf and move the next record to the
 * front.
 */
void
sbdroprecord(struct sockbuf *sb)
{

        SOCKBUF_LOCK(sb);
        sbdroprecord_locked(sb);
        SOCKBUF_UNLOCK(sb);
}

/*
 * Create a "control" mbuf containing the specified data with the specified
 * type for presentation on a socket buffer.
 */
struct mbuf *
sbcreatecontrol_how(void *p, int size, int type, int level, int wait)
{
        struct cmsghdr *cp;
        struct mbuf *m;

        MBUF_CHECKSLEEP(wait);
        if (CMSG_SPACE((u_int)size) > MCLBYTES)
                return ((struct mbuf *) NULL);
        if (CMSG_SPACE((u_int)size) > MLEN)
                m = m_getcl(wait, MT_CONTROL, 0);
        else
                m = m_get(wait, MT_CONTROL);
        if (m == NULL)
                return ((struct mbuf *) NULL);
        cp = mtod(m, struct cmsghdr *);
        m->m_len = 0;
        KASSERT(CMSG_SPACE((u_int)size) <= M_TRAILINGSPACE(m),
            ("sbcreatecontrol: short mbuf"));
        /*
         * Don't leave the padding between the msg header and the
         * cmsg data and the padding after the cmsg data uninitialized.
         */
        bzero(cp, CMSG_SPACE((u_int)size));
        if (p != NULL)
                (void)memcpy(CMSG_DATA(cp), p, size);
        m->m_len = CMSG_SPACE(size);
        cp->cmsg_len = CMSG_LEN(size);
        cp->cmsg_level = level;
        cp->cmsg_type = type;
        return (m);
}

struct mbuf *
sbcreatecontrol(caddr_t p, int size, int type, int level)
{

        return (sbcreatecontrol_how(p, size, type, level, M_NOWAIT));
}
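
/*
 * Example (illustrative sketch): building a control message carrying a
 * timestamp for delivery alongside a received datagram:
 *
 *      struct timeval tv;
 *
 *      microtime(&tv);
 *      control = sbcreatecontrol((caddr_t)&tv, sizeof(tv),
 *          SCM_TIMESTAMP, SOL_SOCKET);
 *
 * A NULL return means no mbuf could be allocated or the payload would not
 * fit in a cluster.
 */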

/*
 * This does the same for socket buffers that sotoxsocket does for sockets:
 * generate a user-format data structure describing the socket buffer.  Note
 * that the xsockbuf structure, since it is always embedded in a socket, does
 * not include a self pointer nor a length.  We make this entry point public
 * in case some other mechanism needs it.
 */
void
sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb)
{

        xsb->sb_cc = sb->sb_ccc;
        xsb->sb_hiwat = sb->sb_hiwat;
        xsb->sb_mbcnt = sb->sb_mbcnt;
        xsb->sb_mcnt = sb->sb_mcnt;
        xsb->sb_ccnt = sb->sb_ccnt;
        xsb->sb_mbmax = sb->sb_mbmax;
        xsb->sb_lowat = sb->sb_lowat;
        xsb->sb_flags = sb->sb_flags;
        xsb->sb_timeo = sb->sb_timeo;
}

/* This takes the place of kern.maxsockbuf, which moved to kern.ipc. */
static int dummy;
SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW | CTLFLAG_SKIP, &dummy, 0, "");
SYSCTL_OID(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf,
    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sb_max, 0,
    sysctl_handle_sb_max, "LU",
    "Maximum socket buffer size");
SYSCTL_ULONG(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW,
    &sb_efficiency, 0, "Socket buffer size waste factor");