/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket2.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kern_tls.h"
#include "opt_param.h"

#include <sys/param.h>
#include <sys/aio.h> /* for aio_swake proto */
#include <sys/kernel.h>
#include <sys/ktls.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <netinet/in.h>

/*
 * Function pointer set by the AIO routines so that the socket buffer code
 * can call back into the AIO module if it is loaded.
 */
void	(*aio_swake)(struct socket *, struct sockbuf *);

/*
 * Primitive routines for operating on socket buffers
 */

u_long	sb_max = SB_MAX;
u_long	sb_max_adj =
    (quad_t)SB_MAX * MCLBYTES / (MSIZE + MCLBYTES); /* adjusted sb_max */

static	u_long sb_efficiency = 8;	/* parameter for sbreserve() */

#ifdef KERN_TLS
static void	sbcompress_ktls_rx(struct sockbuf *sb, struct mbuf *m,
    struct mbuf *n);
#endif
static struct mbuf	*sbcut_internal(struct sockbuf *sb, int len);
static void	sbflush_internal(struct sockbuf *sb);

/*
 * Our own version of m_clrprotoflags(), that can preserve M_NOTREADY.
 */
static void
sbm_clrprotoflags(struct mbuf *m, int flags)
{
	int mask;

	mask = ~M_PROTOFLAGS;
	if (flags & PRUS_NOTREADY)
		mask |= M_NOTREADY;
	while (m) {
		m->m_flags &= mask;
		m = m->m_next;
	}
}

/*
 * Compress M_NOTREADY mbufs after they have been readied by sbready().
 *
 * sbcompress() skips M_NOTREADY mbufs since the data is not available to
 * be copied at the time of sbcompress().  This function combines small
 * mbufs similar to sbcompress() once mbufs are ready.  'm0' is the first
 * mbuf sbready() marked ready, and 'end' is the first mbuf still not
 * ready.
 */
static void
sbready_compress(struct sockbuf *sb, struct mbuf *m0, struct mbuf *end)
{
	struct mbuf *m, *n;
	int ext_size;

	SOCKBUF_LOCK_ASSERT(sb);

	if ((sb->sb_flags & SB_NOCOALESCE) != 0)
		return;

	for (m = m0; m != end; m = m->m_next) {
		MPASS((m->m_flags & M_NOTREADY) == 0);
		/*
		 * NB: In sbcompress(), 'n' is the last mbuf in the
		 * socket buffer and 'm' is the new mbuf being copied
		 * into the trailing space of 'n'.  Here, the roles
		 * are reversed and 'n' is the next mbuf after 'm'
		 * that is being copied into the trailing space of
		 * 'm'.
		 */
		n = m->m_next;
#ifdef KERN_TLS
		/* Try to coalesce adjacent ktls mbuf hdr/trailers. */
		if ((n != NULL) && (n != end) && (m->m_flags & M_EOR) == 0 &&
		    (m->m_flags & M_EXTPG) &&
		    (n->m_flags & M_EXTPG) &&
		    !mbuf_has_tls_session(m) &&
		    !mbuf_has_tls_session(n)) {
			int hdr_len, trail_len;

			hdr_len = n->m_epg_hdrlen;
			trail_len = m->m_epg_trllen;
			if (trail_len != 0 && hdr_len != 0 &&
			    trail_len + hdr_len <= MBUF_PEXT_TRAIL_LEN) {
				/* copy n's header to m's trailer */
				memcpy(&m->m_epg_trail[trail_len],
				    n->m_epg_hdr, hdr_len);
				m->m_epg_trllen += hdr_len;
				m->m_len += hdr_len;
				n->m_epg_hdrlen = 0;
				n->m_len -= hdr_len;
			}
		}
#endif

		/* Compress small unmapped mbufs into plain mbufs. */
		if ((m->m_flags & M_EXTPG) && m->m_len <= MLEN &&
		    !mbuf_has_tls_session(m)) {
			ext_size = m->m_ext.ext_size;
			if (mb_unmapped_compress(m) == 0)
				sb->sb_mbcnt -= ext_size;
		}

		while ((n != NULL) && (n != end) && (m->m_flags & M_EOR) == 0 &&
		    M_WRITABLE(m) &&
		    (m->m_flags & M_EXTPG) == 0 &&
		    !mbuf_has_tls_session(n) &&
		    !mbuf_has_tls_session(m) &&
		    n->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
		    n->m_len <= M_TRAILINGSPACE(m) &&
		    m->m_type == n->m_type) {
			KASSERT(sb->sb_lastrecord != n,
			    ("%s: merging start of record (%p) into previous mbuf (%p)",
			    __func__, n, m));
			m_copydata(n, 0, n->m_len, mtodo(m, m->m_len));
			m->m_len += n->m_len;
			m->m_next = n->m_next;
			m->m_flags |= n->m_flags & M_EOR;
			if (sb->sb_mbtail == n)
				sb->sb_mbtail = m;

			sb->sb_mbcnt -= MSIZE;
			if (n->m_flags & M_EXT)
				sb->sb_mbcnt -= n->m_ext.ext_size;
			m_free(n);
			n = m->m_next;
		}
	}
	SBLASTRECORDCHK(sb);
	SBLASTMBUFCHK(sb);
}

/*
 * Mark ready "count" units of I/O starting with "m".  Most mbufs
 * count as a single unit of I/O except for M_EXTPG mbufs which
 * are backed by multiple pages.
 */
int
sbready(struct sockbuf *sb, struct mbuf *m0, int count)
{
	struct mbuf *m;
	u_int blocker;

	SOCKBUF_LOCK_ASSERT(sb);
	KASSERT(sb->sb_fnrdy != NULL, ("%s: sb %p NULL fnrdy", __func__, sb));
	KASSERT(count > 0, ("%s: invalid count %d", __func__, count));

	m = m0;
	blocker = (sb->sb_fnrdy == m) ? M_BLOCKED : 0;

	while (count > 0) {
		KASSERT(m->m_flags & M_NOTREADY,
		    ("%s: m %p !M_NOTREADY", __func__, m));
		if ((m->m_flags & M_EXTPG) != 0 && m->m_epg_npgs != 0) {
			if (count < m->m_epg_nrdy) {
				m->m_epg_nrdy -= count;
				count = 0;
				break;
			}
			count -= m->m_epg_nrdy;
			m->m_epg_nrdy = 0;
		} else
			count--;

		m->m_flags &= ~(M_NOTREADY | blocker);
		if (blocker)
			sb->sb_acc += m->m_len;
		m = m->m_next;
	}

	/*
	 * If the first mbuf is still not fully ready because only
	 * some of its backing pages were readied, no further progress
	 * can be made.
	 */
	if (m0 == m) {
		MPASS(m->m_flags & M_NOTREADY);
		return (EINPROGRESS);
	}

	if (!blocker) {
		sbready_compress(sb, m0, m);
		return (EINPROGRESS);
	}

	/* This one was blocking all the queue. */
	for (; m && (m->m_flags & M_NOTREADY) == 0; m = m->m_next) {
		KASSERT(m->m_flags & M_BLOCKED,
		    ("%s: m %p !M_BLOCKED", __func__, m));
		m->m_flags &= ~M_BLOCKED;
		sb->sb_acc += m->m_len;
	}

	sb->sb_fnrdy = m;
	sbready_compress(sb, m0, m);

	return (0);
}

/*
 * Adjust sockbuf state reflecting allocation of m.
 */
void
sballoc(struct sockbuf *sb, struct mbuf *m)
{

	SOCKBUF_LOCK_ASSERT(sb);

	sb->sb_ccc += m->m_len;

	if (sb->sb_fnrdy == NULL) {
		if (m->m_flags & M_NOTREADY)
			sb->sb_fnrdy = m;
		else
			sb->sb_acc += m->m_len;
	} else
		m->m_flags |= M_BLOCKED;

	if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
		sb->sb_ctl += m->m_len;

	sb->sb_mbcnt += MSIZE;

	if (m->m_flags & M_EXT)
		sb->sb_mbcnt += m->m_ext.ext_size;
}

/*
 * Adjust sockbuf state reflecting freeing of m.
 */
void
sbfree(struct sockbuf *sb, struct mbuf *m)
{

#if 0	/* XXX: not yet: soclose() call path comes here w/o lock. */
	SOCKBUF_LOCK_ASSERT(sb);
#endif

	sb->sb_ccc -= m->m_len;

	if (!(m->m_flags & M_NOTAVAIL))
		sb->sb_acc -= m->m_len;

	if (m == sb->sb_fnrdy) {
		struct mbuf *n;

		KASSERT(m->m_flags & M_NOTREADY,
		    ("%s: m %p !M_NOTREADY", __func__, m));

		n = m->m_next;
		while (n != NULL && !(n->m_flags & M_NOTREADY)) {
			n->m_flags &= ~M_BLOCKED;
			sb->sb_acc += n->m_len;
			n = n->m_next;
		}
		sb->sb_fnrdy = n;
	}

	if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
		sb->sb_ctl -= m->m_len;

	sb->sb_mbcnt -= MSIZE;
	if (m->m_flags & M_EXT)
		sb->sb_mbcnt -= m->m_ext.ext_size;

	if (sb->sb_sndptr == m) {
		sb->sb_sndptr = NULL;
		sb->sb_sndptroff = 0;
	}
	if (sb->sb_sndptroff != 0)
		sb->sb_sndptroff -= m->m_len;
}

#ifdef KERN_TLS
/*
 * Similar to sballoc/sbfree but does not adjust state associated with
 * the sb_mb chain such as sb_fnrdy or sb_sndptr*.  Also assumes mbufs
 * are not ready.
 */
void
sballoc_ktls_rx(struct sockbuf *sb, struct mbuf *m)
{

	SOCKBUF_LOCK_ASSERT(sb);

	sb->sb_ccc += m->m_len;
	sb->sb_tlscc += m->m_len;

	sb->sb_mbcnt += MSIZE;

	if (m->m_flags & M_EXT)
		sb->sb_mbcnt += m->m_ext.ext_size;
}

void
sbfree_ktls_rx(struct sockbuf *sb, struct mbuf *m)
{

#if 0	/* XXX: not yet: soclose() call path comes here w/o lock. */
	SOCKBUF_LOCK_ASSERT(sb);
#endif

	sb->sb_ccc -= m->m_len;
	sb->sb_tlscc -= m->m_len;

	sb->sb_mbcnt -= MSIZE;

	if (m->m_flags & M_EXT)
		sb->sb_mbcnt -= m->m_ext.ext_size;
}
#endif

/*
 * Socantsendmore indicates that no more data will be sent on the socket; it
 * would normally be applied to a socket when the user informs the system
 * that no more data is to be sent, by the protocol code (in case
 * PRU_SHUTDOWN).  Socantrcvmore indicates that no more data will be
 * received, and will normally be applied to the socket by a protocol when it
 * detects that the peer will send no more data.  Data queued for reading in
 * the socket may yet be read.
 */
void
socantsendmore_locked(struct socket *so)
{

	SOCK_SENDBUF_LOCK_ASSERT(so);

	so->so_snd.sb_state |= SBS_CANTSENDMORE;
	sowwakeup_locked(so);
	SOCK_SENDBUF_UNLOCK_ASSERT(so);
}

void
socantsendmore(struct socket *so)
{

	SOCK_SENDBUF_LOCK(so);
	socantsendmore_locked(so);
	SOCK_SENDBUF_UNLOCK_ASSERT(so);
}

void
socantrcvmore_locked(struct socket *so)
{

	SOCK_RECVBUF_LOCK_ASSERT(so);

	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
#ifdef KERN_TLS
	if (so->so_rcv.sb_flags & SB_TLS_RX)
		ktls_check_rx(&so->so_rcv);
#endif
	sorwakeup_locked(so);
	SOCK_RECVBUF_UNLOCK_ASSERT(so);
}

void
socantrcvmore(struct socket *so)
{

	SOCK_RECVBUF_LOCK(so);
	socantrcvmore_locked(so);
	SOCK_RECVBUF_UNLOCK_ASSERT(so);
}

void
soroverflow_locked(struct socket *so)
{

	SOCK_RECVBUF_LOCK_ASSERT(so);

	if (so->so_options & SO_RERROR) {
		so->so_rerror = ENOBUFS;
		sorwakeup_locked(so);
	} else
		SOCK_RECVBUF_UNLOCK(so);

	SOCK_RECVBUF_UNLOCK_ASSERT(so);
}

void
soroverflow(struct socket *so)
{

	SOCK_RECVBUF_LOCK(so);
	soroverflow_locked(so);
	SOCK_RECVBUF_UNLOCK_ASSERT(so);
}

/*
 * Wait for data to arrive at/drain from a socket buffer.
 */
int
sbwait(struct socket *so, sb_which which)
{
	struct sockbuf *sb;

	SOCK_BUF_LOCK_ASSERT(so, which);

	sb = sobuf(so, which);
	sb->sb_flags |= SB_WAIT;
	return (msleep_sbt(&sb->sb_acc, soeventmtx(so, which),
	    (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, "sbwait",
	    sb->sb_timeo, 0, 0));
}

/*
 * Wake up processes waiting on a socket buffer.  Do asynchronous notification
 * via SIGIO if the socket has the SS_ASYNC flag set.
 *
 * Called with the socket buffer lock held; will release the lock by the end
 * of the function.  This allows the caller to acquire the socket buffer lock
 * while testing for the need for various sorts of wakeup and hold it through
 * to the point where it's no longer required.  We currently hold the lock
 * through calls out to other subsystems (with the exception of kqueue), and
 * then release it to avoid lock order issues.  It's not clear that's
 * correct.
 */
static __always_inline void
sowakeup(struct socket *so, const sb_which which)
{
	struct sockbuf *sb;
	int ret;

	SOCK_BUF_LOCK_ASSERT(so, which);

	sb = sobuf(so, which);
	selwakeuppri(sb->sb_sel, PSOCK);
	if (!SEL_WAITING(sb->sb_sel))
		sb->sb_flags &= ~SB_SEL;
	if (sb->sb_flags & SB_WAIT) {
		sb->sb_flags &= ~SB_WAIT;
		wakeup(&sb->sb_acc);
	}
	KNOTE_LOCKED(&sb->sb_sel->si_note, 0);
	if (sb->sb_upcall != NULL) {
		ret = sb->sb_upcall(so, sb->sb_upcallarg, M_NOWAIT);
		if (ret == SU_ISCONNECTED) {
			KASSERT(sb == &so->so_rcv,
			    ("SO_SND upcall returned SU_ISCONNECTED"));
			soupcall_clear(so, SO_RCV);
		}
	} else
		ret = SU_OK;
	if (sb->sb_flags & SB_AIO)
		sowakeup_aio(so, which);
	SOCK_BUF_UNLOCK(so, which);
	if (ret == SU_ISCONNECTED)
		soisconnected(so);
	if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
		pgsigio(&so->so_sigio, SIGIO, 0);
	SOCK_BUF_UNLOCK_ASSERT(so, which);
}

/*
 * Do we need to notify the other side when I/O is possible?
 */
static __always_inline bool
sb_notify(const struct sockbuf *sb)
{
	return ((sb->sb_flags & (SB_WAIT | SB_SEL | SB_ASYNC |
	    SB_UPCALL | SB_AIO | SB_KNOTE)) != 0);
}

void
sorwakeup_locked(struct socket *so)
{
	SOCK_RECVBUF_LOCK_ASSERT(so);
	if (sb_notify(&so->so_rcv))
		sowakeup(so, SO_RCV);
	else
		SOCK_RECVBUF_UNLOCK(so);
}

void
sowwakeup_locked(struct socket *so)
{
	SOCK_SENDBUF_LOCK_ASSERT(so);
	if (sb_notify(&so->so_snd))
		sowakeup(so, SO_SND);
	else
		SOCK_SENDBUF_UNLOCK(so);
}

/*
 * Socket buffer (struct sockbuf) utility routines.
 *
 * Each socket contains two socket buffers: one for sending data and one for
 * receiving data.  Each buffer contains a queue of mbufs, information about
 * the number of mbufs and amount of data in the queue, and other fields
 * allowing select() statements and notification on data availability to be
 * implemented.
 *
 * Data stored in a socket buffer is maintained as a list of records.  Each
 * record is a list of mbufs chained together with the m_next field.  Records
 * are chained together with the m_nextpkt field.  The upper level routine
 * soreceive() expects the following conventions to be observed when placing
 * information in the receive buffer:
 *
 * 1. If the protocol requires each message be preceded by the sender's name,
 *    then a record containing that name must be present before any
 *    associated data (mbuf's must be of type MT_SONAME).
 * 2. If the protocol supports the exchange of ``access rights'' (really just
 *    additional data associated with the message), and there are ``rights''
 *    to be received, then a record containing this data should be present
 *    (mbuf's must be of type MT_RIGHTS).
 * 3. If a name or rights record exists, then it must be followed by a data
 *    record, perhaps of zero length.
 *
 * Before using a new socket structure it is first necessary to reserve
 * buffer space to the socket, by calling sbreserve().  This should commit
 * some of the available buffer space in the system buffer pool for the
 * socket (currently, it does nothing but enforce limits).  The space should
 * be released by calling sbrelease() when the socket is destroyed.
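 *
 * For example, a protocol attach routine typically reserves space with
 * something like
 *
 *	error = soreserve(so, sendspace, recvspace);
 *
 * where 'sendspace' and 'recvspace' are illustrative names for that
 * protocol's default buffer sizes.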
 */
int
soreserve(struct socket *so, u_long sndcc, u_long rcvcc)
{
	struct thread *td = curthread;

	SOCK_SENDBUF_LOCK(so);
	SOCK_RECVBUF_LOCK(so);
	if (sbreserve_locked(so, SO_SND, sndcc, td) == 0)
		goto bad;
	if (sbreserve_locked(so, SO_RCV, rcvcc, td) == 0)
		goto bad2;
	if (so->so_rcv.sb_lowat == 0)
		so->so_rcv.sb_lowat = 1;
	if (so->so_snd.sb_lowat == 0)
		so->so_snd.sb_lowat = MCLBYTES;
	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
	SOCK_RECVBUF_UNLOCK(so);
	SOCK_SENDBUF_UNLOCK(so);
	return (0);
bad2:
	sbrelease_locked(so, SO_SND);
bad:
	SOCK_RECVBUF_UNLOCK(so);
	SOCK_SENDBUF_UNLOCK(so);
	return (ENOBUFS);
}

static int
sysctl_handle_sb_max(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	u_long tmp_sb_max = sb_max;

	error = sysctl_handle_long(oidp, &tmp_sb_max, arg2, req);
	if (error || !req->newptr)
		return (error);
	if (tmp_sb_max < MSIZE + MCLBYTES)
		return (EINVAL);
	sb_max = tmp_sb_max;
	sb_max_adj = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
	return (0);
}

/*
 * Allot mbufs to a sockbuf.  Attempt to scale mbmax so that mbcnt doesn't
 * become limiting if buffering efficiency is near the normal case.
 */
bool
sbreserve_locked(struct socket *so, sb_which which, u_long cc,
    struct thread *td)
{
	struct sockbuf *sb = sobuf(so, which);
	rlim_t sbsize_limit;

	SOCK_BUF_LOCK_ASSERT(so, which);

	/*
	 * When a thread is passed, we take into account the thread's socket
	 * buffer size limit.  The caller will generally pass curthread, but
	 * in the TCP input path, NULL will be passed to indicate that no
	 * appropriate thread resource limits are available.  In that case,
	 * we don't apply a process limit.
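	 *
	 * Independently of any per-process limit, the request is rejected
	 * below if it exceeds the global sb_max_adj limit, and sb_mbmax is
	 * scaled by sb_efficiency to bound mbuf storage overhead.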
	 */
	if (cc > sb_max_adj)
		return (false);
	if (td != NULL) {
		sbsize_limit = lim_cur(td, RLIMIT_SBSIZE);
	} else
		sbsize_limit = RLIM_INFINITY;
	if (!chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, cc,
	    sbsize_limit))
		return (false);
	sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
	if (sb->sb_lowat > sb->sb_hiwat)
		sb->sb_lowat = sb->sb_hiwat;
	return (true);
}

int
sbsetopt(struct socket *so, int cmd, u_long cc)
{
	struct sockbuf *sb;
	sb_which wh;
	short *flags;
	u_int *hiwat, *lowat;
	int error;

	sb = NULL;
	SOCK_LOCK(so);
	if (SOLISTENING(so)) {
		switch (cmd) {
		case SO_SNDLOWAT:
		case SO_SNDBUF:
			lowat = &so->sol_sbsnd_lowat;
			hiwat = &so->sol_sbsnd_hiwat;
			flags = &so->sol_sbsnd_flags;
			break;
		case SO_RCVLOWAT:
		case SO_RCVBUF:
			lowat = &so->sol_sbrcv_lowat;
			hiwat = &so->sol_sbrcv_hiwat;
			flags = &so->sol_sbrcv_flags;
			break;
		}
	} else {
		switch (cmd) {
		case SO_SNDLOWAT:
		case SO_SNDBUF:
			sb = &so->so_snd;
			wh = SO_SND;
			break;
		case SO_RCVLOWAT:
		case SO_RCVBUF:
			sb = &so->so_rcv;
			wh = SO_RCV;
			break;
		}
		flags = &sb->sb_flags;
		hiwat = &sb->sb_hiwat;
		lowat = &sb->sb_lowat;
		SOCK_BUF_LOCK(so, wh);
	}

	error = 0;
	switch (cmd) {
	case SO_SNDBUF:
	case SO_RCVBUF:
		if (SOLISTENING(so)) {
			if (cc > sb_max_adj) {
				error = ENOBUFS;
				break;
			}
			*hiwat = cc;
			if (*lowat > *hiwat)
				*lowat = *hiwat;
		} else {
			if (!sbreserve_locked(so, wh, cc, curthread))
				error = ENOBUFS;
		}
		if (error == 0)
			*flags &= ~SB_AUTOSIZE;
		break;
	case SO_SNDLOWAT:
	case SO_RCVLOWAT:
		/*
		 * Make sure the low-water is never greater than the
		 * high-water.
		 */
		*lowat = (cc > *hiwat) ? *hiwat : cc;
		break;
	}

	if (!SOLISTENING(so))
		SOCK_BUF_UNLOCK(so, wh);
	SOCK_UNLOCK(so);
	return (error);
}

/*
 * Free mbufs held by a socket, and reserved mbuf space.
 */
static void
sbrelease_internal(struct socket *so, sb_which which)
{
	struct sockbuf *sb = sobuf(so, which);

	sbflush_internal(sb);
	(void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0,
	    RLIM_INFINITY);
	sb->sb_mbmax = 0;
}

void
sbrelease_locked(struct socket *so, sb_which which)
{

	SOCK_BUF_LOCK_ASSERT(so, which);

	sbrelease_internal(so, which);
}

void
sbrelease(struct socket *so, sb_which which)
{

	SOCK_BUF_LOCK(so, which);
	sbrelease_locked(so, which);
	SOCK_BUF_UNLOCK(so, which);
}

void
sbdestroy(struct socket *so, sb_which which)
{
#ifdef KERN_TLS
	struct sockbuf *sb = sobuf(so, which);

	if (sb->sb_tls_info != NULL)
		ktls_free(sb->sb_tls_info);
	sb->sb_tls_info = NULL;
#endif
	sbrelease_internal(so, which);
}

/*
 * Routines to add and remove data from an mbuf queue.
 *
 * The routines sbappend() or sbappendrecord() are normally called to append
 * new mbufs to a socket buffer, after checking that adequate space is
 * available, comparing the function sbspace() with the amount of data to be
 * added.  sbappendrecord() differs from sbappend() in that data supplied is
 * treated as the beginning of a new record.  To place a sender's address,
 * optional access rights, and data in a socket receive buffer,
 * sbappendaddr() should be used.  To place access rights and data in a
 * socket receive buffer, sbappendrights() should be used.  In either case,
 * the new data begins a new record.  Note that unlike sbappend() and
 * sbappendrecord(), these routines check for the caller that there will be
 * enough space to store the data.  Each fails if there is not enough space,
 * or if it cannot find mbufs to store additional information in.
 *
 * Reliable protocols may use the socket send buffer to hold data awaiting
 * acknowledgement.  Data is normally copied from a socket send buffer in a
 * protocol with m_copy for output to a peer, and then removing the data from
 * the socket buffer with sbdrop() or sbdroprecord() when the data is
 * acknowledged by the peer.
 */
#ifdef SOCKBUF_DEBUG
void
sblastrecordchk(struct sockbuf *sb, const char *file, int line)
{
	struct mbuf *m = sb->sb_mb;

	SOCKBUF_LOCK_ASSERT(sb);

	while (m && m->m_nextpkt)
		m = m->m_nextpkt;

	if (m != sb->sb_lastrecord) {
		printf("%s: sb_mb %p sb_lastrecord %p last %p\n",
		    __func__, sb->sb_mb, sb->sb_lastrecord, m);
		printf("packet chain:\n");
		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt)
			printf("\t%p\n", m);
		panic("%s from %s:%u", __func__, file, line);
	}
}

void
sblastmbufchk(struct sockbuf *sb, const char *file, int line)
{
	struct mbuf *m = sb->sb_mb;
	struct mbuf *n;

	SOCKBUF_LOCK_ASSERT(sb);

	while (m && m->m_nextpkt)
		m = m->m_nextpkt;

	while (m && m->m_next)
		m = m->m_next;

	if (m != sb->sb_mbtail) {
		printf("%s: sb_mb %p sb_mbtail %p last %p\n",
		    __func__, sb->sb_mb, sb->sb_mbtail, m);
		printf("packet tree:\n");
		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) {
			printf("\t");
			for (n = m; n != NULL; n = n->m_next)
				printf("%p ", n);
			printf("\n");
		}
		panic("%s from %s:%u", __func__, file, line);
	}

#ifdef KERN_TLS
	m = sb->sb_mtls;
	while (m && m->m_next)
		m = m->m_next;

	if (m != sb->sb_mtlstail) {
		printf("%s: sb_mtls %p sb_mtlstail %p last %p\n",
		    __func__, sb->sb_mtls, sb->sb_mtlstail, m);
		printf("TLS packet tree:\n");
		printf("\t");
		for (m = sb->sb_mtls; m != NULL; m = m->m_next) {
			printf("%p ", m);
		}
		printf("\n");
		panic("%s from %s:%u", __func__, file, line);
	}
#endif
}
#endif /* SOCKBUF_DEBUG */

#define SBLINKRECORD(sb, m0) do {					\
	SOCKBUF_LOCK_ASSERT(sb);					\
	if ((sb)->sb_lastrecord != NULL)				\
		(sb)->sb_lastrecord->m_nextpkt = (m0);			\
	else								\
		(sb)->sb_mb = (m0);					\
	(sb)->sb_lastrecord = (m0);					\
} while (/*CONSTCOND*/0)

/*
 * Append mbuf chain m to the last record in the socket buffer sb.  The
 * additional space associated with the mbuf chain is recorded in sb.  Empty
 * mbufs are discarded and mbufs are compacted where possible.
 */
void
sbappend_locked(struct sockbuf *sb, struct mbuf *m, int flags)
{
	struct mbuf *n;

	SOCKBUF_LOCK_ASSERT(sb);

	if (m == NULL)
		return;
	sbm_clrprotoflags(m, flags);
	SBLASTRECORDCHK(sb);
	n = sb->sb_mb;
	if (n) {
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		do {
			if (n->m_flags & M_EOR) {
				sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
				return;
			}
		} while (n->m_next && (n = n->m_next));
	} else {
		/*
		 * XXX Would like to simply use sb_mbtail here, but
		 * XXX I need to verify that I won't miss an EOR that
		 * XXX way.
		 */
		if ((n = sb->sb_lastrecord) != NULL) {
			do {
				if (n->m_flags & M_EOR) {
					sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
					return;
				}
			} while (n->m_next && (n = n->m_next));
		} else {
			/*
			 * If this is the first record in the socket buffer,
			 * it's also the last record.
			 */
			sb->sb_lastrecord = m;
		}
	}
	sbcompress(sb, m, n);
	SBLASTRECORDCHK(sb);
}

/*
 * Append mbuf chain m to the last record in the socket buffer sb.  The
 * additional space associated with the mbuf chain is recorded in sb.  Empty
 * mbufs are discarded and mbufs are compacted where possible.
 */
void
sbappend(struct sockbuf *sb, struct mbuf *m, int flags)
{

	SOCKBUF_LOCK(sb);
	sbappend_locked(sb, m, flags);
	SOCKBUF_UNLOCK(sb);
}

#ifdef KERN_TLS
/*
 * Append an mbuf containing encrypted TLS data.  The data
 * is marked M_NOTREADY until it has been decrypted and
 * stored as a TLS record.
 */
static void
sbappend_ktls_rx(struct sockbuf *sb, struct mbuf *m)
{
	struct ifnet *ifp;
	struct mbuf *n;
	int flags;

	ifp = NULL;
	flags = M_NOTREADY;

	SBLASTMBUFCHK(sb);

	/* Mbuf chain must start with a packet header. */
	MPASS((m->m_flags & M_PKTHDR) != 0);

	/* Remove all packet headers and mbuf tags to get a pure data chain. */
	for (n = m; n != NULL; n = n->m_next) {
		if (n->m_flags & M_PKTHDR) {
			ifp = m->m_pkthdr.leaf_rcvif;
			if ((n->m_pkthdr.csum_flags & CSUM_TLS_MASK) ==
			    CSUM_TLS_DECRYPTED) {
				/* Mark all mbufs in this packet decrypted. */
				flags = M_NOTREADY | M_DECRYPTED;
			} else {
				flags = M_NOTREADY;
			}
			m_demote_pkthdr(n);
		}

		n->m_flags &= M_DEMOTEFLAGS;
		n->m_flags |= flags;

		MPASS((n->m_flags & M_NOTREADY) != 0);
	}

	sbcompress_ktls_rx(sb, m, sb->sb_mtlstail);
	ktls_check_rx(sb);

	/* Check for incoming packet route changes: */
	if (ifp != NULL && sb->sb_tls_info->rx_ifp != NULL &&
	    sb->sb_tls_info->rx_ifp != ifp)
		ktls_input_ifp_mismatch(sb, ifp);
}
#endif

/*
 * This version of sbappend() should only be used when the caller absolutely
 * knows that there will never be more than one record in the socket buffer,
 * that is, a stream protocol (such as TCP).
 */
void
sbappendstream_locked(struct sockbuf *sb, struct mbuf *m, int flags)
{
	SOCKBUF_LOCK_ASSERT(sb);

	KASSERT(m->m_nextpkt == NULL,("sbappendstream 0"));

#ifdef KERN_TLS
	/*
	 * Decrypted TLS records are appended as records via
	 * sbappendrecord().  TCP passes encrypted TLS records to this
	 * function which must be scheduled for decryption.
	 */
	if (sb->sb_flags & SB_TLS_RX) {
		sbappend_ktls_rx(sb, m);
		return;
	}
#endif

	KASSERT(sb->sb_mb == sb->sb_lastrecord,("sbappendstream 1"));

	SBLASTMBUFCHK(sb);

#ifdef KERN_TLS
	if (sb->sb_tls_info != NULL)
		ktls_seq(sb, m);
#endif

	/* Remove all packet headers and mbuf tags to get a pure data chain. */
	m_demote(m, 1, flags & PRUS_NOTREADY ? M_NOTREADY : 0);

	sbcompress(sb, m, sb->sb_mbtail);

	sb->sb_lastrecord = sb->sb_mb;
	SBLASTRECORDCHK(sb);
}

/*
 * This version of sbappend() should only be used when the caller absolutely
 * knows that there will never be more than one record in the socket buffer,
 * that is, a stream protocol (such as TCP).
 */
void
sbappendstream(struct sockbuf *sb, struct mbuf *m, int flags)
{

	SOCKBUF_LOCK(sb);
	sbappendstream_locked(sb, m, flags);
	SOCKBUF_UNLOCK(sb);
}

#ifdef SOCKBUF_DEBUG
void
sbcheck(struct sockbuf *sb, const char *file, int line)
{
	struct mbuf *m, *n, *fnrdy;
	u_long acc, ccc, mbcnt;
#ifdef KERN_TLS
	u_long tlscc;
#endif

	SOCKBUF_LOCK_ASSERT(sb);

	acc = ccc = mbcnt = 0;
	fnrdy = NULL;

	for (m = sb->sb_mb; m; m = n) {
		n = m->m_nextpkt;
		for (; m; m = m->m_next) {
			if (m->m_len == 0) {
				printf("sb %p empty mbuf %p\n", sb, m);
				goto fail;
			}
			if ((m->m_flags & M_NOTREADY) && fnrdy == NULL) {
				if (m != sb->sb_fnrdy) {
					printf("sb %p: fnrdy %p != m %p\n",
					    sb, sb->sb_fnrdy, m);
					goto fail;
				}
				fnrdy = m;
			}
			if (fnrdy) {
				if (!(m->m_flags & M_NOTAVAIL)) {
					printf("sb %p: fnrdy %p, m %p is avail\n",
					    sb, sb->sb_fnrdy, m);
					goto fail;
				}
			} else
				acc += m->m_len;
			ccc += m->m_len;
			mbcnt += MSIZE;
			if (m->m_flags & M_EXT) /*XXX*/ /* pretty sure this is bogus */
				mbcnt += m->m_ext.ext_size;
		}
	}
#ifdef KERN_TLS
	/*
	 * Account for mbufs "detached" by ktls_detach_record() while
	 * they are decrypted by ktls_decrypt().  tlsdcc gives a count
	 * of the detached bytes that are included in ccc.  The mbufs
	 * and clusters are not included in the socket buffer
	 * accounting.
	 */
	ccc += sb->sb_tlsdcc;

	tlscc = 0;
	for (m = sb->sb_mtls; m; m = m->m_next) {
		if (m->m_nextpkt != NULL) {
			printf("sb %p TLS mbuf %p with nextpkt\n", sb, m);
			goto fail;
		}
		if ((m->m_flags & M_NOTREADY) == 0) {
			printf("sb %p TLS mbuf %p ready\n", sb, m);
			goto fail;
		}
		tlscc += m->m_len;
		ccc += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT) /*XXX*/ /* pretty sure this is bogus */
			mbcnt += m->m_ext.ext_size;
	}

	if (sb->sb_tlscc != tlscc) {
		printf("tlscc %ld/%u dcc %u\n", tlscc, sb->sb_tlscc,
		    sb->sb_tlsdcc);
		goto fail;
	}
#endif
	if (acc != sb->sb_acc || ccc != sb->sb_ccc || mbcnt != sb->sb_mbcnt) {
		printf("acc %ld/%u ccc %ld/%u mbcnt %ld/%u\n",
		    acc, sb->sb_acc, ccc, sb->sb_ccc, mbcnt, sb->sb_mbcnt);
#ifdef KERN_TLS
		printf("tlscc %ld/%u dcc %u\n", tlscc, sb->sb_tlscc,
		    sb->sb_tlsdcc);
#endif
		goto fail;
	}
	return;
fail:
	panic("%s from %s:%u", __func__, file, line);
}
#endif

/*
 * As above, except the mbuf chain begins a new record.
 */
void
sbappendrecord_locked(struct sockbuf *sb, struct mbuf *m0)
{
	struct mbuf *m;

	SOCKBUF_LOCK_ASSERT(sb);

	if (m0 == NULL)
		return;
	m_clrprotoflags(m0);
	/*
	 * Put the first mbuf on the queue.  Note this permits zero length
	 * records.
	 */
	sballoc(sb, m0);
	SBLASTRECORDCHK(sb);
	SBLINKRECORD(sb, m0);
	sb->sb_mbtail = m0;
	m = m0->m_next;
	m0->m_next = 0;
	if (m && (m0->m_flags & M_EOR)) {
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	/* always call sbcompress() so it can do SBLASTMBUFCHK() */
	sbcompress(sb, m, m0);
}

/*
 * As above, except the mbuf chain begins a new record.
 */
void
sbappendrecord(struct sockbuf *sb, struct mbuf *m0)
{

	SOCKBUF_LOCK(sb);
	sbappendrecord_locked(sb, m0);
	SOCKBUF_UNLOCK(sb);
}

/* Helper routine that appends data, control, and address to a sockbuf. */
static int
sbappendaddr_locked_internal(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control, struct mbuf *ctrl_last)
{
	struct mbuf *m, *n, *nlast;
#if MSIZE <= 256
	if (asa->sa_len > MLEN)
		return (0);
#endif
	m = m_get(M_NOWAIT, MT_SONAME);
	if (m == NULL)
		return (0);
	m->m_len = asa->sa_len;
	bcopy(asa, mtod(m, caddr_t), asa->sa_len);
	if (m0) {
		M_ASSERT_NO_SND_TAG(m0);
		m_clrprotoflags(m0);
		m_tag_delete_chain(m0, NULL);
		/*
		 * Clear some persistent info from pkthdr.
		 * We don't use m_demote(), because some netgraph consumers
		 * expect M_PKTHDR presence.
		 */
		m0->m_pkthdr.rcvif = NULL;
		m0->m_pkthdr.flowid = 0;
		m0->m_pkthdr.csum_flags = 0;
		m0->m_pkthdr.fibnum = 0;
		m0->m_pkthdr.rsstype = 0;
	}
	if (ctrl_last)
		ctrl_last->m_next = m0;	/* concatenate data to control */
	else
		control = m0;
	m->m_next = control;
	for (n = m; n->m_next != NULL; n = n->m_next)
		sballoc(sb, n);
	sballoc(sb, n);
	nlast = n;
	SBLINKRECORD(sb, m);

	sb->sb_mbtail = nlast;
	SBLASTMBUFCHK(sb);

	SBLASTRECORDCHK(sb);
	return (1);
}

/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if no space in sockbuf or insufficient
 * mbufs.
 */
int
sbappendaddr_locked(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
	struct mbuf *ctrl_last;
	int space = asa->sa_len;

	SOCKBUF_LOCK_ASSERT(sb);

	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
		panic("sbappendaddr_locked");
	if (m0)
		space += m0->m_pkthdr.len;
	space += m_length(control, &ctrl_last);

	if (space > sbspace(sb))
		return (0);
	return (sbappendaddr_locked_internal(sb, asa, m0, control, ctrl_last));
}

/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if insufficient mbufs.  Does not validate space
 * on the receiving sockbuf.
 */
int
sbappendaddr_nospacecheck_locked(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
	struct mbuf *ctrl_last;

	SOCKBUF_LOCK_ASSERT(sb);

	ctrl_last = (control == NULL) ? NULL : m_last(control);
	return (sbappendaddr_locked_internal(sb, asa, m0, control, ctrl_last));
}

/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if no space in sockbuf or insufficient
 * mbufs.
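 * This is the unlocked wrapper; it takes the sockbuf lock around
 * sbappendaddr_locked().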
 */
int
sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
	int retval;

	SOCKBUF_LOCK(sb);
	retval = sbappendaddr_locked(sb, asa, m0, control);
	SOCKBUF_UNLOCK(sb);
	return (retval);
}

void
sbappendcontrol_locked(struct sockbuf *sb, struct mbuf *m0,
    struct mbuf *control, int flags)
{
	struct mbuf *m, *mlast;

	sbm_clrprotoflags(m0, flags);
	m_last(control)->m_next = m0;

	SBLASTRECORDCHK(sb);

	for (m = control; m->m_next; m = m->m_next)
		sballoc(sb, m);
	sballoc(sb, m);
	mlast = m;
	SBLINKRECORD(sb, control);

	sb->sb_mbtail = mlast;
	SBLASTMBUFCHK(sb);

	SBLASTRECORDCHK(sb);
}

void
sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control,
    int flags)
{

	SOCKBUF_LOCK(sb);
	sbappendcontrol_locked(sb, m0, control, flags);
	SOCKBUF_UNLOCK(sb);
}

/*
 * Append the data in mbuf chain (m) into the socket buffer sb following mbuf
 * (n).  If (n) is NULL, the buffer is presumed empty.
 *
 * When the data is compressed, mbufs in the chain may be handled in one of
 * three ways:
 *
 * (1) The mbuf may simply be dropped, if it contributes nothing (no data, no
 *     record boundary, and no change in data type).
 *
 * (2) The mbuf may be coalesced -- i.e., data in the mbuf may be copied into
 *     an mbuf already in the socket buffer.  This can occur if an
 *     appropriate mbuf exists, there is room, both mbufs are not marked as
 *     not ready, and no merging of data types will occur.
 *
 * (3) The mbuf may be appended to the end of the existing mbuf chain.
 *
 * If any of the new mbufs is marked as M_EOR, mark the last mbuf appended as
 * end-of-record.
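 *
 * Small unmapped (M_EXTPG) mbufs that are already ready may additionally be
 * converted in place to ordinary mbufs via mb_unmapped_compress() before
 * being linked into the chain.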
 */
void
sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
{
	int eor = 0;
	struct mbuf *o;

	SOCKBUF_LOCK_ASSERT(sb);

	while (m) {
		eor |= m->m_flags & M_EOR;
		if (m->m_len == 0 &&
		    (eor == 0 ||
		    (((o = m->m_next) || (o = n)) &&
		    o->m_type == m->m_type))) {
			if (sb->sb_lastrecord == m)
				sb->sb_lastrecord = m->m_next;
			m = m_free(m);
			continue;
		}
		if (n && (n->m_flags & M_EOR) == 0 &&
		    M_WRITABLE(n) &&
		    ((sb->sb_flags & SB_NOCOALESCE) == 0) &&
		    !(m->m_flags & M_NOTREADY) &&
		    !(n->m_flags & (M_NOTREADY | M_EXTPG)) &&
		    !mbuf_has_tls_session(m) &&
		    !mbuf_has_tls_session(n) &&
		    m->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
		    m->m_len <= M_TRAILINGSPACE(n) &&
		    n->m_type == m->m_type) {
			m_copydata(m, 0, m->m_len, mtodo(n, n->m_len));
			n->m_len += m->m_len;
			sb->sb_ccc += m->m_len;
			if (sb->sb_fnrdy == NULL)
				sb->sb_acc += m->m_len;
			if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
				/* XXX: Probably don't need.*/
				sb->sb_ctl += m->m_len;
			m = m_free(m);
			continue;
		}
		if (m->m_len <= MLEN && (m->m_flags & M_EXTPG) &&
		    (m->m_flags & M_NOTREADY) == 0 &&
		    !mbuf_has_tls_session(m))
			(void)mb_unmapped_compress(m);
		if (n)
			n->m_next = m;
		else
			sb->sb_mb = m;
		sb->sb_mbtail = m;
		sballoc(sb, m);
		n = m;
		m->m_flags &= ~M_EOR;
		m = m->m_next;
		n->m_next = 0;
	}
	if (eor) {
		KASSERT(n != NULL, ("sbcompress: eor && n == NULL"));
		n->m_flags |= eor;
	}
	SBLASTMBUFCHK(sb);
}

#ifdef KERN_TLS
/*
 * A version of sbcompress() for encrypted TLS RX mbufs.  These mbufs
 * are appended to the 'sb_mtls' chain instead of 'sb_mb' and are also
 * a bit simpler (no EOR markers, always MT_DATA, etc.).
 */
static void
sbcompress_ktls_rx(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
{

	SOCKBUF_LOCK_ASSERT(sb);

	while (m) {
		KASSERT((m->m_flags & M_EOR) == 0,
		    ("TLS RX mbuf %p with EOR", m));
		KASSERT(m->m_type == MT_DATA,
		    ("TLS RX mbuf %p is not MT_DATA", m));
		KASSERT((m->m_flags & M_NOTREADY) != 0,
		    ("TLS RX mbuf %p ready", m));
		KASSERT((m->m_flags & M_EXTPG) == 0,
		    ("TLS RX mbuf %p unmapped", m));

		if (m->m_len == 0) {
			m = m_free(m);
			continue;
		}

		/*
		 * Even though both 'n' and 'm' are NOTREADY, it's ok
		 * to coalesce the data.
		 */
		if (n &&
		    M_WRITABLE(n) &&
		    ((sb->sb_flags & SB_NOCOALESCE) == 0) &&
		    !((m->m_flags ^ n->m_flags) & M_DECRYPTED) &&
		    !(n->m_flags & M_EXTPG) &&
		    m->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
		    m->m_len <= M_TRAILINGSPACE(n)) {
			m_copydata(m, 0, m->m_len, mtodo(n, n->m_len));
			n->m_len += m->m_len;
			sb->sb_ccc += m->m_len;
			sb->sb_tlscc += m->m_len;
			m = m_free(m);
			continue;
		}
		if (n)
			n->m_next = m;
		else
			sb->sb_mtls = m;
		sb->sb_mtlstail = m;
		sballoc_ktls_rx(sb, m);
		n = m;
		m = m->m_next;
		n->m_next = NULL;
	}
	SBLASTMBUFCHK(sb);
}
#endif

/*
 * Free all mbufs in a sockbuf.  Check that all resources are reclaimed.
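 * Used by sbrelease_internal() and by the sbflush_locked()/sbflush()
 * wrappers below.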
 */
static void
sbflush_internal(struct sockbuf *sb)
{

	while (sb->sb_mbcnt || sb->sb_tlsdcc) {
		/*
		 * Don't call sbcut(sb, 0) if the leading mbuf is non-empty:
		 * we would loop forever. Panic instead.
		 */
		if (sb->sb_ccc == 0 && (sb->sb_mb == NULL || sb->sb_mb->m_len))
			break;
		m_freem(sbcut_internal(sb, (int)sb->sb_ccc));
	}
	KASSERT(sb->sb_ccc == 0 && sb->sb_mb == 0 && sb->sb_mbcnt == 0,
	    ("%s: ccc %u mb %p mbcnt %u", __func__,
	    sb->sb_ccc, (void *)sb->sb_mb, sb->sb_mbcnt));
}

void
sbflush_locked(struct sockbuf *sb)
{

	SOCKBUF_LOCK_ASSERT(sb);
	sbflush_internal(sb);
}

void
sbflush(struct sockbuf *sb)
{

	SOCKBUF_LOCK(sb);
	sbflush_locked(sb);
	SOCKBUF_UNLOCK(sb);
}

/*
 * Cut data from (the front of) a sockbuf.
 */
static struct mbuf *
sbcut_internal(struct sockbuf *sb, int len)
{
	struct mbuf *m, *next, *mfree;
	bool is_tls;

	KASSERT(len >= 0, ("%s: len is %d but it is supposed to be >= 0",
	    __func__, len));
	KASSERT(len <= sb->sb_ccc, ("%s: len: %d is > ccc: %u",
	    __func__, len, sb->sb_ccc));

	next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
	is_tls = false;
	mfree = NULL;

	while (len > 0) {
		if (m == NULL) {
#ifdef KERN_TLS
			if (next == NULL && !is_tls) {
				if (sb->sb_tlsdcc != 0) {
					MPASS(len >= sb->sb_tlsdcc);
					len -= sb->sb_tlsdcc;
					sb->sb_ccc -= sb->sb_tlsdcc;
					sb->sb_tlsdcc = 0;
					if (len == 0)
						break;
				}
				next = sb->sb_mtls;
				is_tls = true;
			}
#endif
			KASSERT(next, ("%s: no next, len %d", __func__, len));
			m = next;
			next = m->m_nextpkt;
		}
		if (m->m_len > len) {
			KASSERT(!(m->m_flags & M_NOTAVAIL),
			    ("%s: m %p M_NOTAVAIL", __func__, m));
			m->m_len -= len;
			m->m_data += len;
			sb->sb_ccc -= len;
			sb->sb_acc -= len;
			if (sb->sb_sndptroff != 0)
				sb->sb_sndptroff -= len;
			if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
				sb->sb_ctl -= len;
			break;
		}
		len -= m->m_len;
#ifdef KERN_TLS
		if (is_tls)
			sbfree_ktls_rx(sb, m);
		else
#endif
			sbfree(sb, m);
		/*
		 * Do not put M_NOTREADY buffers to the free list, they
		 * are referenced from outside.
		 */
		if (m->m_flags & M_NOTREADY && !is_tls)
			m = m->m_next;
		else {
			struct mbuf *n;

			n = m->m_next;
			m->m_next = mfree;
			mfree = m;
			m = n;
		}
	}
	/*
	 * Free any zero-length mbufs from the buffer.
	 * For SOCK_DGRAM sockets such mbufs represent empty records.
	 * XXX: For SOCK_STREAM sockets such mbufs can appear in the buffer,
	 * when sosend_generic() needs to send only control data.
	 */
	while (m && m->m_len == 0) {
		struct mbuf *n;

		sbfree(sb, m);
		n = m->m_next;
		m->m_next = mfree;
		mfree = m;
		m = n;
	}
#ifdef KERN_TLS
	if (is_tls) {
		sb->sb_mb = NULL;
		sb->sb_mtls = m;
		if (m == NULL)
			sb->sb_mtlstail = NULL;
	} else
#endif
	if (m) {
		sb->sb_mb = m;
		m->m_nextpkt = next;
	} else
		sb->sb_mb = next;
	/*
	 * First part is an inline SB_EMPTY_FIXUP().  Second part makes sure
	 * sb_lastrecord is up-to-date if we dropped part of the last record.
	 */
	m = sb->sb_mb;
	if (m == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (m->m_nextpkt == NULL) {
		sb->sb_lastrecord = m;
	}

	return (mfree);
}

/*
 * Drop data from (the front of) a sockbuf.
 */
void
sbdrop_locked(struct sockbuf *sb, int len)
{

	SOCKBUF_LOCK_ASSERT(sb);
	m_freem(sbcut_internal(sb, len));
}

/*
 * Drop data from (the front of) a sockbuf,
 * and return it to caller.
 */
struct mbuf *
sbcut_locked(struct sockbuf *sb, int len)
{

	SOCKBUF_LOCK_ASSERT(sb);
	return (sbcut_internal(sb, len));
}

void
sbdrop(struct sockbuf *sb, int len)
{
	struct mbuf *mfree;

	SOCKBUF_LOCK(sb);
	mfree = sbcut_internal(sb, len);
	SOCKBUF_UNLOCK(sb);

	m_freem(mfree);
}

struct mbuf *
sbsndptr_noadv(struct sockbuf *sb, uint32_t off, uint32_t *moff)
{
	struct mbuf *m;

	KASSERT(sb->sb_mb != NULL, ("%s: sb_mb is NULL", __func__));
	if (sb->sb_sndptr == NULL || sb->sb_sndptroff > off) {
		*moff = off;
		if (sb->sb_sndptr == NULL) {
			sb->sb_sndptr = sb->sb_mb;
			sb->sb_sndptroff = 0;
		}
		return (sb->sb_mb);
	} else {
		m = sb->sb_sndptr;
		off -= sb->sb_sndptroff;
	}
	*moff = off;
	return (m);
}

void
sbsndptr_adv(struct sockbuf *sb, struct mbuf *mb, uint32_t len)
{
	/*
	 * A small copy was done, advance forward the sb_sndptr to cover
	 * it.
	 */
	struct mbuf *m;

	if (mb != sb->sb_sndptr) {
		/* Did not copyout at the same mbuf */
		return;
	}
	m = mb;
	while (m && (len > 0)) {
		if (len >= m->m_len) {
			len -= m->m_len;
			if (m->m_next) {
				sb->sb_sndptroff += m->m_len;
				sb->sb_sndptr = m->m_next;
			}
			m = m->m_next;
		} else {
			len = 0;
		}
	}
}

/*
 * Return the first mbuf and the mbuf data offset for the provided
 * send offset without changing the "sb_sndptroff" field.
 */
struct mbuf *
sbsndmbuf(struct sockbuf *sb, u_int off, u_int *moff)
{
	struct mbuf *m;

	KASSERT(sb->sb_mb != NULL, ("%s: sb_mb is NULL", __func__));

	/*
	 * If the "off" is below the stored offset, which happens on
	 * retransmits, just use "sb_mb":
	 */
	if (sb->sb_sndptr == NULL || sb->sb_sndptroff > off) {
		m = sb->sb_mb;
	} else {
		m = sb->sb_sndptr;
		off -= sb->sb_sndptroff;
	}
	while (off > 0 && m != NULL) {
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	*moff = off;
	return (m);
}

/*
 * Drop a record off the front of a sockbuf and move the next record to the
 * front.
 */
void
sbdroprecord_locked(struct sockbuf *sb)
{
	struct mbuf *m;

	SOCKBUF_LOCK_ASSERT(sb);

	m = sb->sb_mb;
	if (m) {
		sb->sb_mb = m->m_nextpkt;
		do {
			sbfree(sb, m);
			m = m_free(m);
		} while (m);
	}
	SB_EMPTY_FIXUP(sb);
}

/*
 * Drop a record off the front of a sockbuf and move the next record to the
 * front.
 */
void
sbdroprecord(struct sockbuf *sb)
{

	SOCKBUF_LOCK(sb);
	sbdroprecord_locked(sb);
	SOCKBUF_UNLOCK(sb);
}

/*
 * Create a "control" mbuf containing the specified data with the specified
 * type for presentation on a socket buffer.
 */
struct mbuf *
sbcreatecontrol(const void *p, u_int size, int type, int level, int wait)
{
	struct cmsghdr *cp;
	struct mbuf *m;

	MBUF_CHECKSLEEP(wait);

	if (wait == M_NOWAIT) {
		if (CMSG_SPACE(size) > MCLBYTES)
			return (NULL);
	} else
		KASSERT(CMSG_SPACE(size) <= MCLBYTES,
		    ("%s: passed CMSG_SPACE(%u) > MCLBYTES", __func__, size));

	if (CMSG_SPACE(size) > MLEN)
		m = m_getcl(wait, MT_CONTROL, 0);
	else
		m = m_get(wait, MT_CONTROL);
	if (m == NULL)
		return (NULL);

	KASSERT(CMSG_SPACE(size) <= M_TRAILINGSPACE(m),
	    ("sbcreatecontrol: short mbuf"));
	/*
	 * Don't leave the padding between the msg header and the
	 * cmsg data and the padding after the cmsg data un-initialized.
	 */
	cp = mtod(m, struct cmsghdr *);
	bzero(cp, CMSG_SPACE(size));
	if (p != NULL)
		(void)memcpy(CMSG_DATA(cp), p, size);
	m->m_len = CMSG_SPACE(size);
	cp->cmsg_len = CMSG_LEN(size);
	cp->cmsg_level = level;
	cp->cmsg_type = type;
	return (m);
}

/*
 * This does the same for socket buffers that sotoxsocket does for sockets:
 * generate a user-format data structure describing the socket buffer.  Note
 * that the xsockbuf structure, since it is always embedded in a socket, does
 * not include a self pointer nor a length.  We make this entry point public
 * in case some other mechanism needs it.
 */
void
sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb)
{

	xsb->sb_cc = sb->sb_ccc;
	xsb->sb_hiwat = sb->sb_hiwat;
	xsb->sb_mbcnt = sb->sb_mbcnt;
	xsb->sb_mbmax = sb->sb_mbmax;
	xsb->sb_lowat = sb->sb_lowat;
	xsb->sb_flags = sb->sb_flags;
	xsb->sb_timeo = sb->sb_timeo;
}

/* This takes the place of kern.maxsockbuf, which moved to kern.ipc. */
static int dummy;
SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW | CTLFLAG_SKIP, &dummy, 0, "");
SYSCTL_OID(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf,
    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, &sb_max, 0,
    sysctl_handle_sb_max, "LU",
    "Maximum socket buffer size");
SYSCTL_ULONG(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW,
    &sb_efficiency, 0, "Socket buffer size waste factor");