/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket2.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"

#include <sys/param.h>
#include <sys/aio.h> /* for aio_swake proto */
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

/*
 * Function pointer set by the AIO routines so that the socket buffer code
 * can call back into the AIO module if it is loaded.
 */
void	(*aio_swake)(struct socket *, struct sockbuf *);

/*
 * Primitive routines for operating on socket buffers
 */

u_long	sb_max = SB_MAX;
u_long sb_max_adj =
	(quad_t)SB_MAX * MCLBYTES / (MSIZE + MCLBYTES); /* adjusted sb_max */

static	u_long sb_efficiency = 8;	/* parameter for sbreserve() */

static struct mbuf	*sbcut_internal(struct sockbuf *sb, int len);
static void	sbflush_internal(struct sockbuf *sb);
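
/*
 * Illustrative arithmetic, not taken from this file: assuming the common
 * defaults MSIZE == 256 and MCLBYTES == 2048, and sb_max at a stock SB_MAX
 * of 2MB (2097152 bytes), the adjustment above gives
 *
 *	sb_max_adj = 2097152 * 2048 / (256 + 2048) = 1864135 bytes,
 *
 * i.e. the byte limit is scaled down so that a buffer filled with
 * (mbuf header + cluster) pairs cannot exceed sb_max worth of memory.
 */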

/*
 * Mark ready "count" mbufs starting with "m".
 */
int
sbready(struct sockbuf *sb, struct mbuf *m, int count)
{
	u_int blocker;

	SOCKBUF_LOCK_ASSERT(sb);
	KASSERT(sb->sb_fnrdy != NULL, ("%s: sb %p NULL fnrdy", __func__, sb));

	blocker = (sb->sb_fnrdy == m) ? M_BLOCKED : 0;

	for (int i = 0; i < count; i++, m = m->m_next) {
		KASSERT(m->m_flags & M_NOTREADY,
		    ("%s: m %p !M_NOTREADY", __func__, m));
		m->m_flags &= ~(M_NOTREADY | blocker);
		if (blocker)
			sb->sb_acc += m->m_len;
	}

	if (!blocker)
		return (EINPROGRESS);

	/* This one was blocking all the queue. */
	for (; m && (m->m_flags & M_NOTREADY) == 0; m = m->m_next) {
		KASSERT(m->m_flags & M_BLOCKED,
		    ("%s: m %p !M_BLOCKED", __func__, m));
		m->m_flags &= ~M_BLOCKED;
		sb->sb_acc += m->m_len;
	}

	sb->sb_fnrdy = m;

	return (0);
}

/*
 * Adjust sockbuf state reflecting allocation of m.
 */
void
sballoc(struct sockbuf *sb, struct mbuf *m)
{

	SOCKBUF_LOCK_ASSERT(sb);

	sb->sb_ccc += m->m_len;

	if (sb->sb_fnrdy == NULL) {
		if (m->m_flags & M_NOTREADY)
			sb->sb_fnrdy = m;
		else
			sb->sb_acc += m->m_len;
	} else
		m->m_flags |= M_BLOCKED;

	if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
		sb->sb_ctl += m->m_len;

	sb->sb_mbcnt += MSIZE;
	sb->sb_mcnt += 1;

	if (m->m_flags & M_EXT) {
		sb->sb_mbcnt += m->m_ext.ext_size;
		sb->sb_ccnt += 1;
	}
}

/*
 * Adjust sockbuf state reflecting freeing of m.
 */
void
sbfree(struct sockbuf *sb, struct mbuf *m)
{

#if 0	/* XXX: not yet: soclose() call path comes here w/o lock. */
	SOCKBUF_LOCK_ASSERT(sb);
#endif

	sb->sb_ccc -= m->m_len;

	if (!(m->m_flags & M_NOTAVAIL))
		sb->sb_acc -= m->m_len;

	if (m == sb->sb_fnrdy) {
		struct mbuf *n;

		KASSERT(m->m_flags & M_NOTREADY,
		    ("%s: m %p !M_NOTREADY", __func__, m));

		n = m->m_next;
		while (n != NULL && !(n->m_flags & M_NOTREADY)) {
			n->m_flags &= ~M_BLOCKED;
			sb->sb_acc += n->m_len;
			n = n->m_next;
		}
		sb->sb_fnrdy = n;
	}

	if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
		sb->sb_ctl -= m->m_len;

	sb->sb_mbcnt -= MSIZE;
	sb->sb_mcnt -= 1;
	if (m->m_flags & M_EXT) {
		sb->sb_mbcnt -= m->m_ext.ext_size;
		sb->sb_ccnt -= 1;
	}

	if (sb->sb_sndptr == m) {
		sb->sb_sndptr = NULL;
		sb->sb_sndptroff = 0;
	}
	if (sb->sb_sndptroff != 0)
		sb->sb_sndptroff -= m->m_len;
}
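
/*
 * Informal invariant kept by sbready(), sballoc() and sbfree() above:
 * sb_ccc counts all bytes claimed by the buffer, while sb_acc counts only
 * bytes that are available to the consumer, so at any point
 *
 *	sb_ccc == sb_acc + bytes held in M_NOTREADY/M_BLOCKED mbufs.
 *
 * Not-ready mbufs are appended, for example, by sendfile(2) before their
 * backing I/O has completed; once it completes, the protocol's pru_ready
 * path calls sbready() to make the data available.
 */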

/*
 * Socantsendmore indicates that no more data will be sent on the socket; it
 * is normally applied to a socket by the protocol code (in the PRU_SHUTDOWN
 * case) when the user informs the system that no more data is to be sent.
 * Socantrcvmore indicates that no more data will be received, and is
 * normally applied to a socket by a protocol when it detects that the peer
 * will send no more data.  Data queued for reading in the socket may yet be
 * read.
 */
void
socantsendmore_locked(struct socket *so)
{

	SOCKBUF_LOCK_ASSERT(&so->so_snd);

	so->so_snd.sb_state |= SBS_CANTSENDMORE;
	sowwakeup_locked(so);
	mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
}

void
socantsendmore(struct socket *so)
{

	SOCKBUF_LOCK(&so->so_snd);
	socantsendmore_locked(so);
	mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
}

void
socantrcvmore_locked(struct socket *so)
{

	SOCKBUF_LOCK_ASSERT(&so->so_rcv);

	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
	sorwakeup_locked(so);
	mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
}

void
socantrcvmore(struct socket *so)
{

	SOCKBUF_LOCK(&so->so_rcv);
	socantrcvmore_locked(so);
	mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
}

/*
 * Wait for data to arrive at/drain from a socket buffer.
 */
int
sbwait(struct sockbuf *sb)
{

	SOCKBUF_LOCK_ASSERT(sb);

	sb->sb_flags |= SB_WAIT;
	return (msleep_sbt(&sb->sb_acc, &sb->sb_mtx,
	    (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, "sbwait",
	    sb->sb_timeo, 0, 0));
}
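
/*
 * Simplified sketch, not taken from this file, of how a sender pairs
 * sbwait() with the wakeup side: it sleeps until space drains, while the
 * protocol's drain path calls sowwakeup(), which ends up in sowakeup()
 * below.
 *
 *	SOCKBUF_LOCK(&so->so_snd);
 *	while (sbspace(&so->so_snd) < resid &&
 *	    (so->so_snd.sb_state & SBS_CANTSENDMORE) == 0) {
 *		error = sbwait(&so->so_snd);
 *		if (error)
 *			break;
 *	}
 *	SOCKBUF_UNLOCK(&so->so_snd);
 */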

int
sblock(struct sockbuf *sb, int flags)
{

	KASSERT((flags & SBL_VALID) == flags,
	    ("sblock: flags invalid (0x%x)", flags));

	if (flags & SBL_WAIT) {
		if ((sb->sb_flags & SB_NOINTR) ||
		    (flags & SBL_NOINTR)) {
			sx_xlock(&sb->sb_sx);
			return (0);
		}
		return (sx_xlock_sig(&sb->sb_sx));
	} else {
		if (sx_try_xlock(&sb->sb_sx) == 0)
			return (EWOULDBLOCK);
		return (0);
	}
}

void
sbunlock(struct sockbuf *sb)
{

	sx_xunlock(&sb->sb_sx);
}

/*
 * Wakeup processes waiting on a socket buffer.  Do asynchronous notification
 * via SIGIO if the socket has the SS_ASYNC flag set.
 *
 * Called with the socket buffer lock held; will release the lock by the end
 * of the function.  This allows the caller to acquire the socket buffer lock
 * while testing for the need for various sorts of wakeup and hold it through
 * to the point where it's no longer required.  We currently hold the lock
 * through calls out to other subsystems (with the exception of kqueue), and
 * then release it to avoid lock order issues.  It's not clear that's
 * correct.
 */
void
sowakeup(struct socket *so, struct sockbuf *sb)
{
	int ret;

	SOCKBUF_LOCK_ASSERT(sb);

	selwakeuppri(&sb->sb_sel, PSOCK);
	if (!SEL_WAITING(&sb->sb_sel))
		sb->sb_flags &= ~SB_SEL;
	if (sb->sb_flags & SB_WAIT) {
		sb->sb_flags &= ~SB_WAIT;
		wakeup(&sb->sb_acc);
	}
	KNOTE_LOCKED(&sb->sb_sel.si_note, 0);
	if (sb->sb_upcall != NULL) {
		ret = sb->sb_upcall(so, sb->sb_upcallarg, M_NOWAIT);
		if (ret == SU_ISCONNECTED) {
			KASSERT(sb == &so->so_rcv,
			    ("SO_SND upcall returned SU_ISCONNECTED"));
			soupcall_clear(so, SO_RCV);
		}
	} else
		ret = SU_OK;
	if (sb->sb_flags & SB_AIO)
		aio_swake(so, sb);
	SOCKBUF_UNLOCK(sb);
	if (ret == SU_ISCONNECTED)
		soisconnected(so);
	if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
		pgsigio(&so->so_sigio, SIGIO, 0);
	mtx_assert(SOCKBUF_MTX(sb), MA_NOTOWNED);
}

/*
 * Socket buffer (struct sockbuf) utility routines.
 *
 * Each socket contains two socket buffers: one for sending data and one for
 * receiving data.  Each buffer contains a queue of mbufs, information about
 * the number of mbufs and amount of data in the queue, and other fields
 * allowing select() statements and notification on data availability to be
 * implemented.
 *
 * Data stored in a socket buffer is maintained as a list of records.  Each
 * record is a list of mbufs chained together with the m_next field.  Records
 * are chained together with the m_nextpkt field.  The upper level routine
 * soreceive() expects the following conventions to be observed when placing
 * information in the receive buffer:
 *
 * 1. If the protocol requires each message be preceded by the sender's name,
 *    then a record containing that name must be present before any
 *    associated data (mbufs must be of type MT_SONAME).
 * 2. If the protocol supports the exchange of ``access rights'' (really just
 *    additional data associated with the message), and there are ``rights''
 *    to be received, then a record containing this data should be present
 *    (mbufs must be of type MT_RIGHTS).
 * 3. If a name or rights record exists, then it must be followed by a data
 *    record, perhaps of zero length.
 *
 * Before using a new socket structure it is first necessary to reserve
 * buffer space for the socket, by calling sbreserve().  This should commit
 * some of the available buffer space in the system buffer pool for the
 * socket (currently, it does nothing but enforce limits).  The space should
 * be released by calling sbrelease() when the socket is destroyed.
 */
int
soreserve(struct socket *so, u_long sndcc, u_long rcvcc)
{
	struct thread *td = curthread;

	SOCKBUF_LOCK(&so->so_snd);
	SOCKBUF_LOCK(&so->so_rcv);
	if (sbreserve_locked(&so->so_snd, sndcc, so, td) == 0)
		goto bad;
	if (sbreserve_locked(&so->so_rcv, rcvcc, so, td) == 0)
		goto bad2;
	if (so->so_rcv.sb_lowat == 0)
		so->so_rcv.sb_lowat = 1;
	if (so->so_snd.sb_lowat == 0)
		so->so_snd.sb_lowat = MCLBYTES;
	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
	SOCKBUF_UNLOCK(&so->so_rcv);
	SOCKBUF_UNLOCK(&so->so_snd);
	return (0);
bad2:
	sbrelease_locked(&so->so_snd, so);
bad:
	SOCKBUF_UNLOCK(&so->so_rcv);
	SOCKBUF_UNLOCK(&so->so_snd);
	return (ENOBUFS);
}
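
/*
 * Usage sketch, not taken from this file: a protocol's attach routine
 * typically sizes both buffers with soreserve() before the socket becomes
 * usable, using protocol-specific defaults.  "foo_attach", "foo_sendspace"
 * and "foo_recvspace" below are placeholder names.
 *
 *	static int
 *	foo_attach(struct socket *so, int proto, struct thread *td)
 *	{
 *		int error;
 *
 *		error = soreserve(so, foo_sendspace, foo_recvspace);
 *		if (error)
 *			return (error);
 *		...
 *	}
 */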

static int
sysctl_handle_sb_max(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	u_long tmp_sb_max = sb_max;

	error = sysctl_handle_long(oidp, &tmp_sb_max, arg2, req);
	if (error || !req->newptr)
		return (error);
	if (tmp_sb_max < MSIZE + MCLBYTES)
		return (EINVAL);
	sb_max = tmp_sb_max;
	sb_max_adj = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
	return (0);
}

/*
 * Allot mbufs to a sockbuf.  Attempt to scale mbmax so that mbcnt doesn't
 * become limiting if buffering efficiency is near the normal case.
 */
int
sbreserve_locked(struct sockbuf *sb, u_long cc, struct socket *so,
    struct thread *td)
{
	rlim_t sbsize_limit;

	SOCKBUF_LOCK_ASSERT(sb);

	/*
	 * When a thread is passed, we take into account the thread's socket
	 * buffer size limit.  The caller will generally pass curthread, but
	 * in the TCP input path, NULL will be passed to indicate that no
	 * appropriate thread resource limits are available.  In that case,
	 * we don't apply a process limit.
	 */
	if (cc > sb_max_adj)
		return (0);
	if (td != NULL) {
		PROC_LOCK(td->td_proc);
		sbsize_limit = lim_cur(td->td_proc, RLIMIT_SBSIZE);
		PROC_UNLOCK(td->td_proc);
	} else
		sbsize_limit = RLIM_INFINITY;
	if (!chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, cc,
	    sbsize_limit))
		return (0);
	sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
	if (sb->sb_lowat > sb->sb_hiwat)
		sb->sb_lowat = sb->sb_hiwat;
	return (1);
}

int
sbreserve(struct sockbuf *sb, u_long cc, struct socket *so,
    struct thread *td)
{
	int error;

	SOCKBUF_LOCK(sb);
	error = sbreserve_locked(sb, cc, so, td);
	SOCKBUF_UNLOCK(sb);
	return (error);
}

/*
 * Free mbufs held by a socket, and reserved mbuf space.
 */
void
sbrelease_internal(struct sockbuf *sb, struct socket *so)
{

	sbflush_internal(sb);
	(void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0,
	    RLIM_INFINITY);
	sb->sb_mbmax = 0;
}

void
sbrelease_locked(struct sockbuf *sb, struct socket *so)
{

	SOCKBUF_LOCK_ASSERT(sb);

	sbrelease_internal(sb, so);
}

void
sbrelease(struct sockbuf *sb, struct socket *so)
{

	SOCKBUF_LOCK(sb);
	sbrelease_locked(sb, so);
	SOCKBUF_UNLOCK(sb);
}

void
sbdestroy(struct sockbuf *sb, struct socket *so)
{

	sbrelease_internal(sb, so);
}

/*
 * Routines to add and remove data from an mbuf queue.
 *
 * The routines sbappend() or sbappendrecord() are normally called to append
 * new mbufs to a socket buffer, after checking that adequate space is
 * available (by comparing the return value of sbspace() with the amount of
 * data to be added).  sbappendrecord() differs from sbappend() in that the
 * data supplied is treated as the beginning of a new record.  To place a
 * sender's address, optional access rights, and data in a socket receive
 * buffer, sbappendaddr() should be used.  To place access rights and data in
 * a socket receive buffer, sbappendrights() should be used.  In either case,
 * the new data begins a new record.  Note that unlike sbappend() and
 * sbappendrecord(), these routines check for the caller that there will be
 * enough space to store the data.  Each fails if there is not enough space,
 * or if it cannot find mbufs to store additional information in.
 *
 * Reliable protocols may use the socket send buffer to hold data awaiting
 * acknowledgement.  Data is normally copied from a socket send buffer in a
 * protocol with m_copy for output to a peer, and then removed from the
 * socket buffer with sbdrop() or sbdroprecord() when the data is
 * acknowledged by the peer.
 */
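
/*
 * Usage sketch, not taken from this file: a datagram protocol's input path
 * typically queues a packet for the application with sbappendaddr() and
 * then wakes any readers, along these lines, where "from" holds the
 * sender's address and "opts" any control data:
 *
 *	SOCKBUF_LOCK(&so->so_rcv);
 *	if (sbappendaddr_locked(&so->so_rcv,
 *	    (struct sockaddr *)&from, m, opts) == 0) {
 *		SOCKBUF_UNLOCK(&so->so_rcv);
 *		m_freem(m);		-- no space or no mbufs
 *		m_freem(opts);
 *	} else
 *		sorwakeup_locked(so);
 */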

#ifdef SOCKBUF_DEBUG
void
sblastrecordchk(struct sockbuf *sb, const char *file, int line)
{
	struct mbuf *m = sb->sb_mb;

	SOCKBUF_LOCK_ASSERT(sb);

	while (m && m->m_nextpkt)
		m = m->m_nextpkt;

	if (m != sb->sb_lastrecord) {
		printf("%s: sb_mb %p sb_lastrecord %p last %p\n",
			__func__, sb->sb_mb, sb->sb_lastrecord, m);
		printf("packet chain:\n");
		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt)
			printf("\t%p\n", m);
		panic("%s from %s:%u", __func__, file, line);
	}
}

void
sblastmbufchk(struct sockbuf *sb, const char *file, int line)
{
	struct mbuf *m = sb->sb_mb;
	struct mbuf *n;

	SOCKBUF_LOCK_ASSERT(sb);

	while (m && m->m_nextpkt)
		m = m->m_nextpkt;

	while (m && m->m_next)
		m = m->m_next;

	if (m != sb->sb_mbtail) {
		printf("%s: sb_mb %p sb_mbtail %p last %p\n",
			__func__, sb->sb_mb, sb->sb_mbtail, m);
		printf("packet tree:\n");
		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) {
			printf("\t");
			for (n = m; n != NULL; n = n->m_next)
				printf("%p ", n);
			printf("\n");
		}
		panic("%s from %s:%u", __func__, file, line);
	}
}
#endif /* SOCKBUF_DEBUG */

#define SBLINKRECORD(sb, m0) do {					\
	SOCKBUF_LOCK_ASSERT(sb);					\
	if ((sb)->sb_lastrecord != NULL)				\
		(sb)->sb_lastrecord->m_nextpkt = (m0);			\
	else								\
		(sb)->sb_mb = (m0);					\
	(sb)->sb_lastrecord = (m0);					\
} while (/*CONSTCOND*/0)

/*
 * Append mbuf chain m to the last record in the socket buffer sb.  The
 * additional space associated with the mbuf chain is recorded in sb.  Empty
 * mbufs are discarded and mbufs are compacted where possible.
 */
void
sbappend_locked(struct sockbuf *sb, struct mbuf *m)
{
	struct mbuf *n;

	SOCKBUF_LOCK_ASSERT(sb);

	if (m == 0)
		return;

	SBLASTRECORDCHK(sb);
	n = sb->sb_mb;
	if (n) {
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		do {
			if (n->m_flags & M_EOR) {
				sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
				return;
			}
		} while (n->m_next && (n = n->m_next));
	} else {
		/*
		 * XXX Would like to simply use sb_mbtail here, but
		 * XXX I need to verify that I won't miss an EOR that
		 * XXX way.
		 */
		if ((n = sb->sb_lastrecord) != NULL) {
			do {
				if (n->m_flags & M_EOR) {
					sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
					return;
				}
			} while (n->m_next && (n = n->m_next));
		} else {
			/*
			 * If this is the first record in the socket buffer,
			 * it's also the last record.
			 */
			sb->sb_lastrecord = m;
		}
	}
	sbcompress(sb, m, n);
	SBLASTRECORDCHK(sb);
}

/*
 * Append mbuf chain m to the last record in the socket buffer sb.  The
 * additional space associated with the mbuf chain is recorded in sb.  Empty
 * mbufs are discarded and mbufs are compacted where possible.
 */
void
sbappend(struct sockbuf *sb, struct mbuf *m)
{

	SOCKBUF_LOCK(sb);
	sbappend_locked(sb, m);
	SOCKBUF_UNLOCK(sb);
}

/*
 * This version of sbappend() should only be used when the caller absolutely
 * knows that there will never be more than one record in the socket buffer,
 * that is, a stream protocol (such as TCP).
 */
void
sbappendstream_locked(struct sockbuf *sb, struct mbuf *m, int flags)
{
	SOCKBUF_LOCK_ASSERT(sb);

	KASSERT(m->m_nextpkt == NULL, ("sbappendstream 0"));
	KASSERT(sb->sb_mb == sb->sb_lastrecord, ("sbappendstream 1"));

	SBLASTMBUFCHK(sb);

	/* Remove all packet headers and mbuf tags to get a pure data chain. */
	m_demote(m, 1, flags & PRUS_NOTREADY ? M_NOTREADY : 0);

	sbcompress(sb, m, sb->sb_mbtail);

	sb->sb_lastrecord = sb->sb_mb;
	SBLASTRECORDCHK(sb);
}

/*
 * This version of sbappend() should only be used when the caller absolutely
 * knows that there will never be more than one record in the socket buffer,
 * that is, a stream protocol (such as TCP).
 */
void
sbappendstream(struct sockbuf *sb, struct mbuf *m, int flags)
{

	SOCKBUF_LOCK(sb);
	sbappendstream_locked(sb, m, flags);
	SOCKBUF_UNLOCK(sb);
}
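
/*
 * Usage sketch, not taken from this file: a stream protocol's send path
 * typically appends under the buffer lock and then hands the data to its
 * output routine, roughly
 *
 *	SOCKBUF_LOCK(&so->so_snd);
 *	sbappendstream_locked(&so->so_snd, m, flags);
 *	SOCKBUF_UNLOCK(&so->so_snd);
 *	error = tcp_output(tp);
 *
 * TCP is the usual caller; the exact sequence in tcp_usrreq.c differs in
 * its locking and error handling.
 */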

#ifdef SOCKBUF_DEBUG
void
sbcheck(struct sockbuf *sb, const char *file, int line)
{
	struct mbuf *m, *n, *fnrdy;
	u_long acc, ccc, mbcnt;

	SOCKBUF_LOCK_ASSERT(sb);

	acc = ccc = mbcnt = 0;
	fnrdy = NULL;

	for (m = sb->sb_mb; m; m = n) {
		n = m->m_nextpkt;
		for (; m; m = m->m_next) {
			if (m->m_len == 0) {
				printf("sb %p empty mbuf %p\n", sb, m);
				goto fail;
			}
			if ((m->m_flags & M_NOTREADY) && fnrdy == NULL) {
				if (m != sb->sb_fnrdy) {
					printf("sb %p: fnrdy %p != m %p\n",
					    sb, sb->sb_fnrdy, m);
					goto fail;
				}
				fnrdy = m;
			}
			if (fnrdy) {
				if (!(m->m_flags & M_NOTAVAIL)) {
					printf("sb %p: fnrdy %p, m %p is avail\n",
					    sb, sb->sb_fnrdy, m);
					goto fail;
				}
			} else
				acc += m->m_len;
			ccc += m->m_len;
			mbcnt += MSIZE;
			if (m->m_flags & M_EXT) /*XXX*/ /* pretty sure this is bogus */
				mbcnt += m->m_ext.ext_size;
		}
	}
	if (acc != sb->sb_acc || ccc != sb->sb_ccc || mbcnt != sb->sb_mbcnt) {
		printf("acc %ld/%u ccc %ld/%u mbcnt %ld/%u\n",
		    acc, sb->sb_acc, ccc, sb->sb_ccc, mbcnt, sb->sb_mbcnt);
		goto fail;
	}
	return;
fail:
	panic("%s from %s:%u", __func__, file, line);
}
#endif

/*
 * As above, except the mbuf chain begins a new record.
 */
void
sbappendrecord_locked(struct sockbuf *sb, struct mbuf *m0)
{
	struct mbuf *m;

	SOCKBUF_LOCK_ASSERT(sb);

	if (m0 == 0)
		return;
	/*
	 * Put the first mbuf on the queue.  Note this permits zero length
	 * records.
	 */
	sballoc(sb, m0);
	SBLASTRECORDCHK(sb);
	SBLINKRECORD(sb, m0);
	sb->sb_mbtail = m0;
	m = m0->m_next;
	m0->m_next = 0;
	if (m && (m0->m_flags & M_EOR)) {
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	/* always call sbcompress() so it can do SBLASTMBUFCHK() */
	sbcompress(sb, m, m0);
}

/*
 * As above, except the mbuf chain begins a new record.
 */
void
sbappendrecord(struct sockbuf *sb, struct mbuf *m0)
{

	SOCKBUF_LOCK(sb);
	sbappendrecord_locked(sb, m0);
	SOCKBUF_UNLOCK(sb);
}

/* Helper routine that appends data, control, and address to a sockbuf. */
static int
sbappendaddr_locked_internal(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control, struct mbuf *ctrl_last)
{
	struct mbuf *m, *n, *nlast;
#if MSIZE <= 256
	if (asa->sa_len > MLEN)
		return (0);
#endif
	m = m_get(M_NOWAIT, MT_SONAME);
	if (m == NULL)
		return (0);
	m->m_len = asa->sa_len;
	bcopy(asa, mtod(m, caddr_t), asa->sa_len);
	if (ctrl_last)
		ctrl_last->m_next = m0;	/* concatenate data to control */
	else
		control = m0;
	m->m_next = control;
	for (n = m; n->m_next != NULL; n = n->m_next)
		sballoc(sb, n);
	sballoc(sb, n);
	nlast = n;
	SBLINKRECORD(sb, m);

	sb->sb_mbtail = nlast;
	SBLASTMBUFCHK(sb);

	SBLASTRECORDCHK(sb);
	return (1);
}

/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if no space in sockbuf or insufficient
 * mbufs.
 */
int
sbappendaddr_locked(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
	struct mbuf *ctrl_last;
	int space = asa->sa_len;

	SOCKBUF_LOCK_ASSERT(sb);

	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
		panic("sbappendaddr_locked");
	if (m0)
		space += m0->m_pkthdr.len;
	space += m_length(control, &ctrl_last);

	if (space > sbspace(sb))
		return (0);
	return (sbappendaddr_locked_internal(sb, asa, m0, control, ctrl_last));
}

/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if insufficient mbufs.  Does not validate
 * space on the receiving sockbuf.
 */
int
sbappendaddr_nospacecheck_locked(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
	struct mbuf *ctrl_last;

	SOCKBUF_LOCK_ASSERT(sb);

	ctrl_last = (control == NULL) ? NULL : m_last(control);
	return (sbappendaddr_locked_internal(sb, asa, m0, control, ctrl_last));
}

/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if no space in sockbuf or insufficient
 * mbufs.
 */
int
sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
	int retval;

	SOCKBUF_LOCK(sb);
	retval = sbappendaddr_locked(sb, asa, m0, control);
	SOCKBUF_UNLOCK(sb);
	return (retval);
}

int
sbappendcontrol_locked(struct sockbuf *sb, struct mbuf *m0,
    struct mbuf *control)
{
	struct mbuf *m, *n, *mlast;
	int space;

	SOCKBUF_LOCK_ASSERT(sb);

	if (control == 0)
		panic("sbappendcontrol_locked");
	space = m_length(control, &n) + m_length(m0, NULL);

	if (space > sbspace(sb))
		return (0);
	n->m_next = m0;			/* concatenate data to control */

	SBLASTRECORDCHK(sb);

	for (m = control; m->m_next; m = m->m_next)
		sballoc(sb, m);
	sballoc(sb, m);
	mlast = m;
	SBLINKRECORD(sb, control);

	sb->sb_mbtail = mlast;
	SBLASTMBUFCHK(sb);

	SBLASTRECORDCHK(sb);
	return (1);
}

int
sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control)
{
	int retval;

	SOCKBUF_LOCK(sb);
	retval = sbappendcontrol_locked(sb, m0, control);
	SOCKBUF_UNLOCK(sb);
	return (retval);
}

/*
 * Append the data in mbuf chain (m) into the socket buffer sb following mbuf
 * (n).  If (n) is NULL, the buffer is presumed empty.
 *
 * When the data is compressed, mbufs in the chain may be handled in one of
 * three ways:
 *
 * (1) The mbuf may simply be dropped, if it contributes nothing (no data, no
 *     record boundary, and no change in data type).
 *
 * (2) The mbuf may be coalesced -- i.e., data in the mbuf may be copied into
 *     an mbuf already in the socket buffer.  This can occur if an
 *     appropriate mbuf exists, there is room, neither mbuf is marked as
 *     not-ready, and no merging of data types will occur.
 *
 * (3) The mbuf may be appended to the end of the existing mbuf chain.
 *
 * If any of the new mbufs is marked as M_EOR, mark the last mbuf appended as
 * end-of-record.
 */
void
sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
{
	int eor = 0;
	struct mbuf *o;

	SOCKBUF_LOCK_ASSERT(sb);

	while (m) {
		eor |= m->m_flags & M_EOR;
		if (m->m_len == 0 &&
		    (eor == 0 ||
		     (((o = m->m_next) || (o = n)) &&
		      o->m_type == m->m_type))) {
			if (sb->sb_lastrecord == m)
				sb->sb_lastrecord = m->m_next;
			m = m_free(m);
			continue;
		}
		if (n && (n->m_flags & M_EOR) == 0 &&
		    M_WRITABLE(n) &&
		    ((sb->sb_flags & SB_NOCOALESCE) == 0) &&
		    !(m->m_flags & M_NOTREADY) &&
		    !(n->m_flags & M_NOTREADY) &&
		    m->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
		    m->m_len <= M_TRAILINGSPACE(n) &&
		    n->m_type == m->m_type) {
			bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len,
			    (unsigned)m->m_len);
			n->m_len += m->m_len;
			sb->sb_ccc += m->m_len;
			if (sb->sb_fnrdy == NULL)
				sb->sb_acc += m->m_len;
			if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
				/* XXX: Probably don't need.*/
				sb->sb_ctl += m->m_len;
			m = m_free(m);
			continue;
		}
		if (n)
			n->m_next = m;
		else
			sb->sb_mb = m;
		sb->sb_mbtail = m;
		sballoc(sb, m);
		n = m;
		m->m_flags &= ~M_EOR;
		m = m->m_next;
		n->m_next = 0;
	}
	if (eor) {
		KASSERT(n != NULL, ("sbcompress: eor && n == NULL"));
		n->m_flags |= eor;
	}
	SBLASTMBUFCHK(sb);
}

/*
 * Free all mbufs in a sockbuf.  Check that all resources are reclaimed.
 */
static void
sbflush_internal(struct sockbuf *sb)
{

	while (sb->sb_mbcnt) {
		/*
		 * Don't call sbcut(sb, 0) if the leading mbuf is non-empty:
		 * we would loop forever.  Panic instead.
		 */
		if (sb->sb_ccc == 0 && (sb->sb_mb == NULL || sb->sb_mb->m_len))
			break;
		m_freem(sbcut_internal(sb, (int)sb->sb_ccc));
	}
	KASSERT(sb->sb_ccc == 0 && sb->sb_mb == 0 && sb->sb_mbcnt == 0,
	    ("%s: ccc %u mb %p mbcnt %u", __func__,
	    sb->sb_ccc, (void *)sb->sb_mb, sb->sb_mbcnt));
}

void
sbflush_locked(struct sockbuf *sb)
{

	SOCKBUF_LOCK_ASSERT(sb);
	sbflush_internal(sb);
}

void
sbflush(struct sockbuf *sb)
{

	SOCKBUF_LOCK(sb);
	sbflush_locked(sb);
	SOCKBUF_UNLOCK(sb);
}

/*
 * Cut data from (the front of) a sockbuf.
 */
static struct mbuf *
sbcut_internal(struct sockbuf *sb, int len)
{
	struct mbuf *m, *next, *mfree;

	next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
	mfree = NULL;

	while (len > 0) {
		if (m == NULL) {
			KASSERT(next, ("%s: no next, len %d", __func__, len));
			m = next;
			next = m->m_nextpkt;
		}
		if (m->m_len > len) {
			KASSERT(!(m->m_flags & M_NOTAVAIL),
			    ("%s: m %p M_NOTAVAIL", __func__, m));
			m->m_len -= len;
			m->m_data += len;
			sb->sb_ccc -= len;
			sb->sb_acc -= len;
			if (sb->sb_sndptroff != 0)
				sb->sb_sndptroff -= len;
			if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
				sb->sb_ctl -= len;
			break;
		}
		len -= m->m_len;
		sbfree(sb, m);
		/*
		 * Do not put M_NOTREADY buffers to the free list, they
		 * are referenced from outside.
		 */
		if (m->m_flags & M_NOTREADY)
			m = m->m_next;
		else {
			struct mbuf *n;

			n = m->m_next;
			m->m_next = mfree;
			mfree = m;
			m = n;
		}
	}
	if (m) {
		sb->sb_mb = m;
		m->m_nextpkt = next;
	} else
		sb->sb_mb = next;
	/*
	 * First part is an inline SB_EMPTY_FIXUP().  Second part makes sure
	 * sb_lastrecord is up-to-date if we dropped part of the last record.
	 */
	m = sb->sb_mb;
	if (m == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (m->m_nextpkt == NULL) {
		sb->sb_lastrecord = m;
	}

	return (mfree);
}

/*
 * Drop data from (the front of) a sockbuf.
 */
void
sbdrop_locked(struct sockbuf *sb, int len)
{

	SOCKBUF_LOCK_ASSERT(sb);
	m_freem(sbcut_internal(sb, len));
}

/*
 * Drop data from (the front of) a sockbuf,
 * and return it to caller.
 */
struct mbuf *
sbcut_locked(struct sockbuf *sb, int len)
{

	SOCKBUF_LOCK_ASSERT(sb);
	return (sbcut_internal(sb, len));
}

void
sbdrop(struct sockbuf *sb, int len)
{
	struct mbuf *mfree;

	SOCKBUF_LOCK(sb);
	mfree = sbcut_internal(sb, len);
	SOCKBUF_UNLOCK(sb);

	m_freem(mfree);
}
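
/*
 * Usage sketch, not taken from this file: reliable protocols trim
 * acknowledged data from the front of the send buffer with sbdrop(); for
 * TCP the ACK-processing path does, approximately,
 *
 *	SOCKBUF_LOCK(&so->so_snd);
 *	sbdrop_locked(&so->so_snd, acked);
 *	SOCKBUF_UNLOCK(&so->so_snd);
 *
 * where "acked" is the number of newly acknowledged bytes.
 */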

/*
 * Maintain a pointer and offset pair into the socket buffer mbuf chain to
 * avoid traversal of the entire socket buffer for larger offsets.
 */
struct mbuf *
sbsndptr(struct sockbuf *sb, u_int off, u_int len, u_int *moff)
{
	struct mbuf *m, *ret;

	KASSERT(sb->sb_mb != NULL, ("%s: sb_mb is NULL", __func__));
	KASSERT(off + len <= sb->sb_acc, ("%s: beyond sb", __func__));
	KASSERT(sb->sb_sndptroff <= sb->sb_acc, ("%s: sndptroff broken", __func__));

	/*
	 * Is off below stored offset? Happens on retransmits.
	 * Just return, we can't help here.
	 */
	if (sb->sb_sndptroff > off) {
		*moff = off;
		return (sb->sb_mb);
	}

	/* Return closest mbuf in chain for current offset. */
	*moff = off - sb->sb_sndptroff;
	m = ret = sb->sb_sndptr ? sb->sb_sndptr : sb->sb_mb;
	if (*moff == m->m_len) {
		*moff = 0;
		sb->sb_sndptroff += m->m_len;
		m = ret = m->m_next;
		KASSERT(ret->m_len > 0,
		    ("mbuf %p in sockbuf %p chain has no valid data", ret, sb));
	}

	/* Advance by len to be as close as possible for the next transmit. */
	for (off = off - sb->sb_sndptroff + len - 1;
	     off > 0 && m != NULL && off >= m->m_len;
	     m = m->m_next) {
		sb->sb_sndptroff += m->m_len;
		off -= m->m_len;
	}
	if (off > 0 && m == NULL)
		panic("%s: sockbuf %p and mbuf %p clashing", __func__, sb, ret);
	sb->sb_sndptr = m;

	return (ret);
}

/*
 * Return the first mbuf and the mbuf data offset for the provided
 * send offset without changing the "sb_sndptroff" field.
 */
struct mbuf *
sbsndmbuf(struct sockbuf *sb, u_int off, u_int *moff)
{
	struct mbuf *m;

	KASSERT(sb->sb_mb != NULL, ("%s: sb_mb is NULL", __func__));

	/*
	 * If the "off" is below the stored offset, which happens on
	 * retransmits, just use "sb_mb":
	 */
	if (sb->sb_sndptr == NULL || sb->sb_sndptroff > off) {
		m = sb->sb_mb;
	} else {
		m = sb->sb_sndptr;
		off -= sb->sb_sndptroff;
	}
	while (off > 0 && m != NULL) {
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	*moff = off;
	return (m);
}
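
/*
 * Usage sketch, not taken from this file: a stream protocol's output
 * routine can use sbsndptr() to locate the mbuf holding the next byte to
 * transmit without walking the chain from the start, roughly
 *
 *	mb = sbsndptr(&so->so_snd, off, len, &moff);
 *	m = m_copym(mb, moff, len, M_NOWAIT);
 *
 * where "off" is the offset of the data into the send buffer and "len" is
 * the amount to be sent; tcp_output() uses this pattern.
 */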

/*
 * Drop a record off the front of a sockbuf and move the next record to the
 * front.
 */
void
sbdroprecord_locked(struct sockbuf *sb)
{
	struct mbuf *m;

	SOCKBUF_LOCK_ASSERT(sb);

	m = sb->sb_mb;
	if (m) {
		sb->sb_mb = m->m_nextpkt;
		do {
			sbfree(sb, m);
			m = m_free(m);
		} while (m);
	}
	SB_EMPTY_FIXUP(sb);
}

/*
 * Drop a record off the front of a sockbuf and move the next record to the
 * front.
 */
void
sbdroprecord(struct sockbuf *sb)
{

	SOCKBUF_LOCK(sb);
	sbdroprecord_locked(sb);
	SOCKBUF_UNLOCK(sb);
}

/*
 * Create a "control" mbuf containing the specified data with the specified
 * type for presentation on a socket buffer.
 */
struct mbuf *
sbcreatecontrol(caddr_t p, int size, int type, int level)
{
	struct cmsghdr *cp;
	struct mbuf *m;

	if (CMSG_SPACE((u_int)size) > MCLBYTES)
		return ((struct mbuf *) NULL);
	if (CMSG_SPACE((u_int)size) > MLEN)
		m = m_getcl(M_NOWAIT, MT_CONTROL, 0);
	else
		m = m_get(M_NOWAIT, MT_CONTROL);
	if (m == NULL)
		return ((struct mbuf *) NULL);
	cp = mtod(m, struct cmsghdr *);
	m->m_len = 0;
	KASSERT(CMSG_SPACE((u_int)size) <= M_TRAILINGSPACE(m),
	    ("sbcreatecontrol: short mbuf"));
	/*
	 * Don't leave the padding between the msg header and the cmsg data,
	 * or the padding after the cmsg data, uninitialized.
	 */
	bzero(cp, CMSG_SPACE((u_int)size));
	if (p != NULL)
		(void)memcpy(CMSG_DATA(cp), p, size);
	m->m_len = CMSG_SPACE(size);
	cp->cmsg_len = CMSG_LEN(size);
	cp->cmsg_level = level;
	cp->cmsg_type = type;
	return (m);
}
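
/*
 * Usage sketch, not taken from this file: protocols build ancillary-data
 * mbufs with sbcreatecontrol() before queueing them with the sbappend*()
 * routines above, e.g. attaching the destination address of a received UDP
 * datagram when IP_RECVDSTADDR is enabled:
 *
 *	control = sbcreatecontrol((caddr_t)&ip->ip_dst,
 *	    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
 *	if (control != NULL)
 *		... append it along with the data via sbappendaddr_locked()
 */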

/*
 * This does the same for socket buffers that sotoxsocket does for sockets:
 * generate a user-format data structure describing the socket buffer.  Note
 * that the xsockbuf structure, since it is always embedded in a socket, does
 * not include a self pointer nor a length.  We make this entry point public
 * in case some other mechanism needs it.
 */
void
sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb)
{

	xsb->sb_cc = sb->sb_ccc;
	xsb->sb_hiwat = sb->sb_hiwat;
	xsb->sb_mbcnt = sb->sb_mbcnt;
	xsb->sb_mcnt = sb->sb_mcnt;
	xsb->sb_ccnt = sb->sb_ccnt;
	xsb->sb_mbmax = sb->sb_mbmax;
	xsb->sb_lowat = sb->sb_lowat;
	xsb->sb_flags = sb->sb_flags;
	xsb->sb_timeo = sb->sb_timeo;
}

/* This takes the place of kern.maxsockbuf, which moved to kern.ipc. */
static int dummy;
SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, "");
SYSCTL_OID(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLTYPE_ULONG|CTLFLAG_RW,
    &sb_max, 0, sysctl_handle_sb_max, "LU", "Maximum socket buffer size");
SYSCTL_ULONG(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW,
    &sb_efficiency, 0, "Socket buffer size waste factor");