1 /*- 2 * Copyright (c) 1982, 1986, 1988, 1990, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. Neither the name of the University nor the names of its contributors 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * @(#)uipc_socket2.c 8.1 (Berkeley) 6/10/93 30 */ 31 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 #include "opt_param.h" 36 37 #include <sys/param.h> 38 #include <sys/aio.h> /* for aio_swake proto */ 39 #include <sys/kernel.h> 40 #include <sys/lock.h> 41 #include <sys/malloc.h> 42 #include <sys/mbuf.h> 43 #include <sys/mutex.h> 44 #include <sys/proc.h> 45 #include <sys/protosw.h> 46 #include <sys/resourcevar.h> 47 #include <sys/signalvar.h> 48 #include <sys/socket.h> 49 #include <sys/socketvar.h> 50 #include <sys/sx.h> 51 #include <sys/sysctl.h> 52 53 /* 54 * Function pointer set by the AIO routines so that the socket buffer code 55 * can call back into the AIO module if it is loaded. 56 */ 57 void (*aio_swake)(struct socket *, struct sockbuf *); 58 59 /* 60 * Primitive routines for operating on socket buffers 61 */ 62 63 u_long sb_max = SB_MAX; 64 u_long sb_max_adj = 65 (quad_t)SB_MAX * MCLBYTES / (MSIZE + MCLBYTES); /* adjusted sb_max */ 66 67 static u_long sb_efficiency = 8; /* parameter for sbreserve() */ 68 69 static struct mbuf *sbcut_internal(struct sockbuf *sb, int len); 70 static void sbflush_internal(struct sockbuf *sb); 71 72 /* 73 * Our own version of m_clrprotoflags(), that can preserve M_NOTREADY. 74 */ 75 static void 76 sbm_clrprotoflags(struct mbuf *m, int flags) 77 { 78 int mask; 79 80 mask = ~M_PROTOFLAGS; 81 if (flags & PRUS_NOTREADY) 82 mask |= M_NOTREADY; 83 while (m) { 84 m->m_flags &= mask; 85 m = m->m_next; 86 } 87 } 88 89 /* 90 * Mark ready "count" mbufs starting with "m". 91 */ 92 int 93 sbready(struct sockbuf *sb, struct mbuf *m, int count) 94 { 95 u_int blocker; 96 97 SOCKBUF_LOCK_ASSERT(sb); 98 KASSERT(sb->sb_fnrdy != NULL, ("%s: sb %p NULL fnrdy", __func__, sb)); 99 100 blocker = (sb->sb_fnrdy == m) ? 
M_BLOCKED : 0; 101 102 for (int i = 0; i < count; i++, m = m->m_next) { 103 KASSERT(m->m_flags & M_NOTREADY, 104 ("%s: m %p !M_NOTREADY", __func__, m)); 105 m->m_flags &= ~(M_NOTREADY | blocker); 106 if (blocker) 107 sb->sb_acc += m->m_len; 108 } 109 110 if (!blocker) 111 return (EINPROGRESS); 112 113 /* This one was blocking all the queue. */ 114 for (; m && (m->m_flags & M_NOTREADY) == 0; m = m->m_next) { 115 KASSERT(m->m_flags & M_BLOCKED, 116 ("%s: m %p !M_BLOCKED", __func__, m)); 117 m->m_flags &= ~M_BLOCKED; 118 sb->sb_acc += m->m_len; 119 } 120 121 sb->sb_fnrdy = m; 122 123 return (0); 124 } 125 126 /* 127 * Adjust sockbuf state reflecting allocation of m. 128 */ 129 void 130 sballoc(struct sockbuf *sb, struct mbuf *m) 131 { 132 133 SOCKBUF_LOCK_ASSERT(sb); 134 135 sb->sb_ccc += m->m_len; 136 137 if (sb->sb_fnrdy == NULL) { 138 if (m->m_flags & M_NOTREADY) 139 sb->sb_fnrdy = m; 140 else 141 sb->sb_acc += m->m_len; 142 } else 143 m->m_flags |= M_BLOCKED; 144 145 if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA) 146 sb->sb_ctl += m->m_len; 147 148 sb->sb_mbcnt += MSIZE; 149 sb->sb_mcnt += 1; 150 151 if (m->m_flags & M_EXT) { 152 sb->sb_mbcnt += m->m_ext.ext_size; 153 sb->sb_ccnt += 1; 154 } 155 } 156 157 /* 158 * Adjust sockbuf state reflecting freeing of m. 159 */ 160 void 161 sbfree(struct sockbuf *sb, struct mbuf *m) 162 { 163 164 #if 0 /* XXX: not yet: soclose() call path comes here w/o lock. */ 165 SOCKBUF_LOCK_ASSERT(sb); 166 #endif 167 168 sb->sb_ccc -= m->m_len; 169 170 if (!(m->m_flags & M_NOTAVAIL)) 171 sb->sb_acc -= m->m_len; 172 173 if (m == sb->sb_fnrdy) { 174 struct mbuf *n; 175 176 KASSERT(m->m_flags & M_NOTREADY, 177 ("%s: m %p !M_NOTREADY", __func__, m)); 178 179 n = m->m_next; 180 while (n != NULL && !(n->m_flags & M_NOTREADY)) { 181 n->m_flags &= ~M_BLOCKED; 182 sb->sb_acc += n->m_len; 183 n = n->m_next; 184 } 185 sb->sb_fnrdy = n; 186 } 187 188 if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA) 189 sb->sb_ctl -= m->m_len; 190 191 sb->sb_mbcnt -= MSIZE; 192 sb->sb_mcnt -= 1; 193 if (m->m_flags & M_EXT) { 194 sb->sb_mbcnt -= m->m_ext.ext_size; 195 sb->sb_ccnt -= 1; 196 } 197 198 if (sb->sb_sndptr == m) { 199 sb->sb_sndptr = NULL; 200 sb->sb_sndptroff = 0; 201 } 202 if (sb->sb_sndptroff != 0) 203 sb->sb_sndptroff -= m->m_len; 204 } 205 206 /* 207 * Socantsendmore indicates that no more data will be sent on the socket; it 208 * would normally be applied to a socket when the user informs the system 209 * that no more data is to be sent, by the protocol code (in case 210 * PRU_SHUTDOWN). Socantrcvmore indicates that no more data will be 211 * received, and will normally be applied to the socket by a protocol when it 212 * detects that the peer will send no more data. Data queued for reading in 213 * the socket may yet be read. 
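 *
 * Hedged illustration (not code from this file): a stream protocol that
 * has just consumed a FIN from its peer would typically do no more than
 *
 *	socantrcvmore(so);
 *
 * which marks so_rcv with SBS_CANTRCVMORE and wakes up sleeping readers,
 * while any data already queued in the receive buffer remains readable.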
214 */ 215 void 216 socantsendmore_locked(struct socket *so) 217 { 218 219 SOCKBUF_LOCK_ASSERT(&so->so_snd); 220 221 so->so_snd.sb_state |= SBS_CANTSENDMORE; 222 sowwakeup_locked(so); 223 mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED); 224 } 225 226 void 227 socantsendmore(struct socket *so) 228 { 229 230 SOCKBUF_LOCK(&so->so_snd); 231 socantsendmore_locked(so); 232 mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED); 233 } 234 235 void 236 socantrcvmore_locked(struct socket *so) 237 { 238 239 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 240 241 so->so_rcv.sb_state |= SBS_CANTRCVMORE; 242 sorwakeup_locked(so); 243 mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED); 244 } 245 246 void 247 socantrcvmore(struct socket *so) 248 { 249 250 SOCKBUF_LOCK(&so->so_rcv); 251 socantrcvmore_locked(so); 252 mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED); 253 } 254 255 /* 256 * Wait for data to arrive at/drain from a socket buffer. 257 */ 258 int 259 sbwait(struct sockbuf *sb) 260 { 261 262 SOCKBUF_LOCK_ASSERT(sb); 263 264 sb->sb_flags |= SB_WAIT; 265 return (msleep_sbt(&sb->sb_acc, &sb->sb_mtx, 266 (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, "sbwait", 267 sb->sb_timeo, 0, 0)); 268 } 269 270 int 271 sblock(struct sockbuf *sb, int flags) 272 { 273 274 KASSERT((flags & SBL_VALID) == flags, 275 ("sblock: flags invalid (0x%x)", flags)); 276 277 if (flags & SBL_WAIT) { 278 if ((sb->sb_flags & SB_NOINTR) || 279 (flags & SBL_NOINTR)) { 280 sx_xlock(&sb->sb_sx); 281 return (0); 282 } 283 return (sx_xlock_sig(&sb->sb_sx)); 284 } else { 285 if (sx_try_xlock(&sb->sb_sx) == 0) 286 return (EWOULDBLOCK); 287 return (0); 288 } 289 } 290 291 void 292 sbunlock(struct sockbuf *sb) 293 { 294 295 sx_xunlock(&sb->sb_sx); 296 } 297 298 /* 299 * Wakeup processes waiting on a socket buffer. Do asynchronous notification 300 * via SIGIO if the socket has the SS_ASYNC flag set. 301 * 302 * Called with the socket buffer lock held; will release the lock by the end 303 * of the function. This allows the caller to acquire the socket buffer lock 304 * while testing for the need for various sorts of wakeup and hold it through 305 * to the point where it's no longer required. We currently hold the lock 306 * through calls out to other subsystems (with the exception of kqueue), and 307 * then release it to avoid lock order issues. It's not clear that's 308 * correct. 309 */ 310 void 311 sowakeup(struct socket *so, struct sockbuf *sb) 312 { 313 int ret; 314 315 SOCKBUF_LOCK_ASSERT(sb); 316 317 selwakeuppri(sb->sb_sel, PSOCK); 318 if (!SEL_WAITING(sb->sb_sel)) 319 sb->sb_flags &= ~SB_SEL; 320 if (sb->sb_flags & SB_WAIT) { 321 sb->sb_flags &= ~SB_WAIT; 322 wakeup(&sb->sb_acc); 323 } 324 KNOTE_LOCKED(&sb->sb_sel->si_note, 0); 325 if (sb->sb_upcall != NULL) { 326 ret = sb->sb_upcall(so, sb->sb_upcallarg, M_NOWAIT); 327 if (ret == SU_ISCONNECTED) { 328 KASSERT(sb == &so->so_rcv, 329 ("SO_SND upcall returned SU_ISCONNECTED")); 330 soupcall_clear(so, SO_RCV); 331 } 332 } else 333 ret = SU_OK; 334 if (sb->sb_flags & SB_AIO) 335 sowakeup_aio(so, sb); 336 SOCKBUF_UNLOCK(sb); 337 if (ret == SU_ISCONNECTED) 338 soisconnected(so); 339 if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL) 340 pgsigio(&so->so_sigio, SIGIO, 0); 341 mtx_assert(SOCKBUF_MTX(sb), MA_NOTOWNED); 342 } 343 344 /* 345 * Socket buffer (struct sockbuf) utility routines. 346 * 347 * Each socket contains two socket buffers: one for sending data and one for 348 * receiving data. 
Each buffer contains a queue of mbufs, information about 349 * the number of mbufs and amount of data in the queue, and other fields 350 * allowing select() statements and notification on data availability to be 351 * implemented. 352 * 353 * Data stored in a socket buffer is maintained as a list of records. Each 354 * record is a list of mbufs chained together with the m_next field. Records 355 * are chained together with the m_nextpkt field. The upper level routine 356 * soreceive() expects the following conventions to be observed when placing 357 * information in the receive buffer: 358 * 359 * 1. If the protocol requires each message be preceded by the sender's name, 360 * then a record containing that name must be present before any 361 * associated data (mbuf's must be of type MT_SONAME). 362 * 2. If the protocol supports the exchange of ``access rights'' (really just 363 * additional data associated with the message), and there are ``rights'' 364 * to be received, then a record containing this data should be present 365 * (mbuf's must be of type MT_RIGHTS). 366 * 3. If a name or rights record exists, then it must be followed by a data 367 * record, perhaps of zero length. 368 * 369 * Before using a new socket structure it is first necessary to reserve 370 * buffer space to the socket, by calling sbreserve(). This should commit 371 * some of the available buffer space in the system buffer pool for the 372 * socket (currently, it does nothing but enforce limits). The space should 373 * be released by calling sbrelease() when the socket is destroyed. 374 */ 375 int 376 soreserve(struct socket *so, u_long sndcc, u_long rcvcc) 377 { 378 struct thread *td = curthread; 379 380 SOCKBUF_LOCK(&so->so_snd); 381 SOCKBUF_LOCK(&so->so_rcv); 382 if (sbreserve_locked(&so->so_snd, sndcc, so, td) == 0) 383 goto bad; 384 if (sbreserve_locked(&so->so_rcv, rcvcc, so, td) == 0) 385 goto bad2; 386 if (so->so_rcv.sb_lowat == 0) 387 so->so_rcv.sb_lowat = 1; 388 if (so->so_snd.sb_lowat == 0) 389 so->so_snd.sb_lowat = MCLBYTES; 390 if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat) 391 so->so_snd.sb_lowat = so->so_snd.sb_hiwat; 392 SOCKBUF_UNLOCK(&so->so_rcv); 393 SOCKBUF_UNLOCK(&so->so_snd); 394 return (0); 395 bad2: 396 sbrelease_locked(&so->so_snd, so); 397 bad: 398 SOCKBUF_UNLOCK(&so->so_rcv); 399 SOCKBUF_UNLOCK(&so->so_snd); 400 return (ENOBUFS); 401 } 402 403 static int 404 sysctl_handle_sb_max(SYSCTL_HANDLER_ARGS) 405 { 406 int error = 0; 407 u_long tmp_sb_max = sb_max; 408 409 error = sysctl_handle_long(oidp, &tmp_sb_max, arg2, req); 410 if (error || !req->newptr) 411 return (error); 412 if (tmp_sb_max < MSIZE + MCLBYTES) 413 return (EINVAL); 414 sb_max = tmp_sb_max; 415 sb_max_adj = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES); 416 return (0); 417 } 418 419 /* 420 * Allot mbufs to a sockbuf. Attempt to scale mbmax so that mbcnt doesn't 421 * become limiting if buffering efficiency is near the normal case. 422 */ 423 int 424 sbreserve_locked(struct sockbuf *sb, u_long cc, struct socket *so, 425 struct thread *td) 426 { 427 rlim_t sbsize_limit; 428 429 SOCKBUF_LOCK_ASSERT(sb); 430 431 /* 432 * When a thread is passed, we take into account the thread's socket 433 * buffer size limit. The caller will generally pass curthread, but 434 * in the TCP input path, NULL will be passed to indicate that no 435 * appropriate thread resource limits are available. In that case, 436 * we don't apply a process limit. 
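 *
 * Call-site sketch (assumed, for illustration only; "sendspace",
 * "recvspace" and "newsize" are placeholders): a protocol attach routine
 * reserves space with the caller's limits applied,
 *
 *	error = soreserve(so, sendspace, recvspace);
 *
 * whereas a resize performed from protocol input, where no user thread
 * context is available, would pass a NULL thread,
 *
 *	sbreserve_locked(&so->so_rcv, newsize, so, NULL);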
437 */ 438 if (cc > sb_max_adj) 439 return (0); 440 if (td != NULL) { 441 sbsize_limit = lim_cur(td, RLIMIT_SBSIZE); 442 } else 443 sbsize_limit = RLIM_INFINITY; 444 if (!chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, cc, 445 sbsize_limit)) 446 return (0); 447 sb->sb_mbmax = min(cc * sb_efficiency, sb_max); 448 if (sb->sb_lowat > sb->sb_hiwat) 449 sb->sb_lowat = sb->sb_hiwat; 450 return (1); 451 } 452 453 int 454 sbsetopt(struct socket *so, int cmd, u_long cc) 455 { 456 struct sockbuf *sb; 457 short *flags; 458 u_int *hiwat, *lowat; 459 int error; 460 461 SOCK_LOCK(so); 462 if (SOLISTENING(so)) { 463 switch (cmd) { 464 case SO_SNDLOWAT: 465 case SO_SNDBUF: 466 lowat = &so->sol_sbsnd_lowat; 467 hiwat = &so->sol_sbsnd_hiwat; 468 flags = &so->sol_sbsnd_flags; 469 break; 470 case SO_RCVLOWAT: 471 case SO_RCVBUF: 472 lowat = &so->sol_sbrcv_lowat; 473 hiwat = &so->sol_sbrcv_hiwat; 474 flags = &so->sol_sbrcv_flags; 475 break; 476 } 477 } else { 478 switch (cmd) { 479 case SO_SNDLOWAT: 480 case SO_SNDBUF: 481 sb = &so->so_snd; 482 break; 483 case SO_RCVLOWAT: 484 case SO_RCVBUF: 485 sb = &so->so_rcv; 486 break; 487 } 488 flags = &sb->sb_flags; 489 hiwat = &sb->sb_hiwat; 490 lowat = &sb->sb_lowat; 491 SOCKBUF_LOCK(sb); 492 } 493 494 error = 0; 495 switch (cmd) { 496 case SO_SNDBUF: 497 case SO_RCVBUF: 498 if (SOLISTENING(so)) { 499 if (cc > sb_max_adj) { 500 error = ENOBUFS; 501 break; 502 } 503 *hiwat = cc; 504 if (*lowat > *hiwat) 505 *lowat = *hiwat; 506 } else { 507 if (!sbreserve_locked(sb, cc, so, curthread)) 508 error = ENOBUFS; 509 } 510 if (error == 0) 511 *flags &= ~SB_AUTOSIZE; 512 break; 513 case SO_SNDLOWAT: 514 case SO_RCVLOWAT: 515 /* 516 * Make sure the low-water is never greater than the 517 * high-water. 518 */ 519 *lowat = (cc > *hiwat) ? *hiwat : cc; 520 break; 521 } 522 523 if (!SOLISTENING(so)) 524 SOCKBUF_UNLOCK(sb); 525 SOCK_UNLOCK(so); 526 return (error); 527 } 528 529 /* 530 * Free mbufs held by a socket, and reserved mbuf space. 531 */ 532 void 533 sbrelease_internal(struct sockbuf *sb, struct socket *so) 534 { 535 536 sbflush_internal(sb); 537 (void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0, 538 RLIM_INFINITY); 539 sb->sb_mbmax = 0; 540 } 541 542 void 543 sbrelease_locked(struct sockbuf *sb, struct socket *so) 544 { 545 546 SOCKBUF_LOCK_ASSERT(sb); 547 548 sbrelease_internal(sb, so); 549 } 550 551 void 552 sbrelease(struct sockbuf *sb, struct socket *so) 553 { 554 555 SOCKBUF_LOCK(sb); 556 sbrelease_locked(sb, so); 557 SOCKBUF_UNLOCK(sb); 558 } 559 560 void 561 sbdestroy(struct sockbuf *sb, struct socket *so) 562 { 563 564 sbrelease_internal(sb, so); 565 } 566 567 /* 568 * Routines to add and remove data from an mbuf queue. 569 * 570 * The routines sbappend() or sbappendrecord() are normally called to append 571 * new mbufs to a socket buffer, after checking that adequate space is 572 * available, comparing the function sbspace() with the amount of data to be 573 * added. sbappendrecord() differs from sbappend() in that data supplied is 574 * treated as the beginning of a new record. To place a sender's address, 575 * optional access rights, and data in a socket receive buffer, 576 * sbappendaddr() should be used. To place access rights and data in a 577 * socket receive buffer, sbappendrights() should be used. In either case, 578 * the new data begins a new record. Note that unlike sbappend() and 579 * sbappendrecord(), these routines check for the caller that there will be 580 * enough space to store the data. 
Each fails if there is not enough space, 581 * or if it cannot find mbufs to store additional information in. 582 * 583 * Reliable protocols may use the socket send buffer to hold data awaiting 584 * acknowledgement. Data is normally copied from a socket send buffer in a 585 * protocol with m_copy for output to a peer, and then removing the data from 586 * the socket buffer with sbdrop() or sbdroprecord() when the data is 587 * acknowledged by the peer. 588 */ 589 #ifdef SOCKBUF_DEBUG 590 void 591 sblastrecordchk(struct sockbuf *sb, const char *file, int line) 592 { 593 struct mbuf *m = sb->sb_mb; 594 595 SOCKBUF_LOCK_ASSERT(sb); 596 597 while (m && m->m_nextpkt) 598 m = m->m_nextpkt; 599 600 if (m != sb->sb_lastrecord) { 601 printf("%s: sb_mb %p sb_lastrecord %p last %p\n", 602 __func__, sb->sb_mb, sb->sb_lastrecord, m); 603 printf("packet chain:\n"); 604 for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) 605 printf("\t%p\n", m); 606 panic("%s from %s:%u", __func__, file, line); 607 } 608 } 609 610 void 611 sblastmbufchk(struct sockbuf *sb, const char *file, int line) 612 { 613 struct mbuf *m = sb->sb_mb; 614 struct mbuf *n; 615 616 SOCKBUF_LOCK_ASSERT(sb); 617 618 while (m && m->m_nextpkt) 619 m = m->m_nextpkt; 620 621 while (m && m->m_next) 622 m = m->m_next; 623 624 if (m != sb->sb_mbtail) { 625 printf("%s: sb_mb %p sb_mbtail %p last %p\n", 626 __func__, sb->sb_mb, sb->sb_mbtail, m); 627 printf("packet tree:\n"); 628 for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) { 629 printf("\t"); 630 for (n = m; n != NULL; n = n->m_next) 631 printf("%p ", n); 632 printf("\n"); 633 } 634 panic("%s from %s:%u", __func__, file, line); 635 } 636 } 637 #endif /* SOCKBUF_DEBUG */ 638 639 #define SBLINKRECORD(sb, m0) do { \ 640 SOCKBUF_LOCK_ASSERT(sb); \ 641 if ((sb)->sb_lastrecord != NULL) \ 642 (sb)->sb_lastrecord->m_nextpkt = (m0); \ 643 else \ 644 (sb)->sb_mb = (m0); \ 645 (sb)->sb_lastrecord = (m0); \ 646 } while (/*CONSTCOND*/0) 647 648 /* 649 * Append mbuf chain m to the last record in the socket buffer sb. The 650 * additional space associated the mbuf chain is recorded in sb. Empty mbufs 651 * are discarded and mbufs are compacted where possible. 652 */ 653 void 654 sbappend_locked(struct sockbuf *sb, struct mbuf *m, int flags) 655 { 656 struct mbuf *n; 657 658 SOCKBUF_LOCK_ASSERT(sb); 659 660 if (m == NULL) 661 return; 662 sbm_clrprotoflags(m, flags); 663 SBLASTRECORDCHK(sb); 664 n = sb->sb_mb; 665 if (n) { 666 while (n->m_nextpkt) 667 n = n->m_nextpkt; 668 do { 669 if (n->m_flags & M_EOR) { 670 sbappendrecord_locked(sb, m); /* XXXXXX!!!! */ 671 return; 672 } 673 } while (n->m_next && (n = n->m_next)); 674 } else { 675 /* 676 * XXX Would like to simply use sb_mbtail here, but 677 * XXX I need to verify that I won't miss an EOR that 678 * XXX way. 679 */ 680 if ((n = sb->sb_lastrecord) != NULL) { 681 do { 682 if (n->m_flags & M_EOR) { 683 sbappendrecord_locked(sb, m); /* XXXXXX!!!! */ 684 return; 685 } 686 } while (n->m_next && (n = n->m_next)); 687 } else { 688 /* 689 * If this is the first record in the socket buffer, 690 * it's also the last record. 691 */ 692 sb->sb_lastrecord = m; 693 } 694 } 695 sbcompress(sb, m, n); 696 SBLASTRECORDCHK(sb); 697 } 698 699 /* 700 * Append mbuf chain m to the last record in the socket buffer sb. The 701 * additional space associated the mbuf chain is recorded in sb. Empty mbufs 702 * are discarded and mbufs are compacted where possible. 
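 *
 * Usage sketch (illustration only): a protocol delivering a chain to the
 * receive buffer typically checks sbspace() first and wakes readers
 * afterwards,
 *
 *	SOCKBUF_LOCK(&so->so_rcv);
 *	if (sbspace(&so->so_rcv) < m->m_pkthdr.len) {
 *		SOCKBUF_UNLOCK(&so->so_rcv);
 *		m_freem(m);
 *	} else {
 *		sbappend_locked(&so->so_rcv, m, 0);
 *		sorwakeup_locked(so);
 *	}
 *
 * Note that sorwakeup_locked() returns with the buffer lock released, as
 * sowakeup() above documents.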
703 */ 704 void 705 sbappend(struct sockbuf *sb, struct mbuf *m, int flags) 706 { 707 708 SOCKBUF_LOCK(sb); 709 sbappend_locked(sb, m, flags); 710 SOCKBUF_UNLOCK(sb); 711 } 712 713 /* 714 * This version of sbappend() should only be used when the caller absolutely 715 * knows that there will never be more than one record in the socket buffer, 716 * that is, a stream protocol (such as TCP). 717 */ 718 void 719 sbappendstream_locked(struct sockbuf *sb, struct mbuf *m, int flags) 720 { 721 SOCKBUF_LOCK_ASSERT(sb); 722 723 KASSERT(m->m_nextpkt == NULL,("sbappendstream 0")); 724 KASSERT(sb->sb_mb == sb->sb_lastrecord,("sbappendstream 1")); 725 726 SBLASTMBUFCHK(sb); 727 728 /* Remove all packet headers and mbuf tags to get a pure data chain. */ 729 m_demote(m, 1, flags & PRUS_NOTREADY ? M_NOTREADY : 0); 730 731 sbcompress(sb, m, sb->sb_mbtail); 732 733 sb->sb_lastrecord = sb->sb_mb; 734 SBLASTRECORDCHK(sb); 735 } 736 737 /* 738 * This version of sbappend() should only be used when the caller absolutely 739 * knows that there will never be more than one record in the socket buffer, 740 * that is, a stream protocol (such as TCP). 741 */ 742 void 743 sbappendstream(struct sockbuf *sb, struct mbuf *m, int flags) 744 { 745 746 SOCKBUF_LOCK(sb); 747 sbappendstream_locked(sb, m, flags); 748 SOCKBUF_UNLOCK(sb); 749 } 750 751 #ifdef SOCKBUF_DEBUG 752 void 753 sbcheck(struct sockbuf *sb, const char *file, int line) 754 { 755 struct mbuf *m, *n, *fnrdy; 756 u_long acc, ccc, mbcnt; 757 758 SOCKBUF_LOCK_ASSERT(sb); 759 760 acc = ccc = mbcnt = 0; 761 fnrdy = NULL; 762 763 for (m = sb->sb_mb; m; m = n) { 764 n = m->m_nextpkt; 765 for (; m; m = m->m_next) { 766 if (m->m_len == 0) { 767 printf("sb %p empty mbuf %p\n", sb, m); 768 goto fail; 769 } 770 if ((m->m_flags & M_NOTREADY) && fnrdy == NULL) { 771 if (m != sb->sb_fnrdy) { 772 printf("sb %p: fnrdy %p != m %p\n", 773 sb, sb->sb_fnrdy, m); 774 goto fail; 775 } 776 fnrdy = m; 777 } 778 if (fnrdy) { 779 if (!(m->m_flags & M_NOTAVAIL)) { 780 printf("sb %p: fnrdy %p, m %p is avail\n", 781 sb, sb->sb_fnrdy, m); 782 goto fail; 783 } 784 } else 785 acc += m->m_len; 786 ccc += m->m_len; 787 mbcnt += MSIZE; 788 if (m->m_flags & M_EXT) /*XXX*/ /* pretty sure this is bogus */ 789 mbcnt += m->m_ext.ext_size; 790 } 791 } 792 if (acc != sb->sb_acc || ccc != sb->sb_ccc || mbcnt != sb->sb_mbcnt) { 793 printf("acc %ld/%u ccc %ld/%u mbcnt %ld/%u\n", 794 acc, sb->sb_acc, ccc, sb->sb_ccc, mbcnt, sb->sb_mbcnt); 795 goto fail; 796 } 797 return; 798 fail: 799 panic("%s from %s:%u", __func__, file, line); 800 } 801 #endif 802 803 /* 804 * As above, except the mbuf chain begins a new record. 805 */ 806 void 807 sbappendrecord_locked(struct sockbuf *sb, struct mbuf *m0) 808 { 809 struct mbuf *m; 810 811 SOCKBUF_LOCK_ASSERT(sb); 812 813 if (m0 == NULL) 814 return; 815 m_clrprotoflags(m0); 816 /* 817 * Put the first mbuf on the queue. Note this permits zero length 818 * records. 819 */ 820 sballoc(sb, m0); 821 SBLASTRECORDCHK(sb); 822 SBLINKRECORD(sb, m0); 823 sb->sb_mbtail = m0; 824 m = m0->m_next; 825 m0->m_next = 0; 826 if (m && (m0->m_flags & M_EOR)) { 827 m0->m_flags &= ~M_EOR; 828 m->m_flags |= M_EOR; 829 } 830 /* always call sbcompress() so it can do SBLASTMBUFCHK() */ 831 sbcompress(sb, m, m0); 832 } 833 834 /* 835 * As above, except the mbuf chain begins a new record. 
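 *
 * Illustration only: a record-oriented protocol with no address or
 * control data to prepend could queue each message as its own record,
 *
 *	SOCKBUF_LOCK(&so->so_rcv);
 *	if (sbspace(&so->so_rcv) >= m->m_pkthdr.len) {
 *		sbappendrecord_locked(&so->so_rcv, m);
 *		sorwakeup_locked(so);
 *	} else {
 *		SOCKBUF_UNLOCK(&so->so_rcv);
 *		m_freem(m);
 *	}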
836 */ 837 void 838 sbappendrecord(struct sockbuf *sb, struct mbuf *m0) 839 { 840 841 SOCKBUF_LOCK(sb); 842 sbappendrecord_locked(sb, m0); 843 SOCKBUF_UNLOCK(sb); 844 } 845 846 /* Helper routine that appends data, control, and address to a sockbuf. */ 847 static int 848 sbappendaddr_locked_internal(struct sockbuf *sb, const struct sockaddr *asa, 849 struct mbuf *m0, struct mbuf *control, struct mbuf *ctrl_last) 850 { 851 struct mbuf *m, *n, *nlast; 852 #if MSIZE <= 256 853 if (asa->sa_len > MLEN) 854 return (0); 855 #endif 856 m = m_get(M_NOWAIT, MT_SONAME); 857 if (m == NULL) 858 return (0); 859 m->m_len = asa->sa_len; 860 bcopy(asa, mtod(m, caddr_t), asa->sa_len); 861 if (m0) { 862 m_clrprotoflags(m0); 863 m_tag_delete_chain(m0, NULL); 864 /* 865 * Clear some persistent info from pkthdr. 866 * We don't use m_demote(), because some netgraph consumers 867 * expect M_PKTHDR presence. 868 */ 869 m0->m_pkthdr.rcvif = NULL; 870 m0->m_pkthdr.flowid = 0; 871 m0->m_pkthdr.csum_flags = 0; 872 m0->m_pkthdr.fibnum = 0; 873 m0->m_pkthdr.rsstype = 0; 874 } 875 if (ctrl_last) 876 ctrl_last->m_next = m0; /* concatenate data to control */ 877 else 878 control = m0; 879 m->m_next = control; 880 for (n = m; n->m_next != NULL; n = n->m_next) 881 sballoc(sb, n); 882 sballoc(sb, n); 883 nlast = n; 884 SBLINKRECORD(sb, m); 885 886 sb->sb_mbtail = nlast; 887 SBLASTMBUFCHK(sb); 888 889 SBLASTRECORDCHK(sb); 890 return (1); 891 } 892 893 /* 894 * Append address and data, and optionally, control (ancillary) data to the 895 * receive queue of a socket. If present, m0 must include a packet header 896 * with total length. Returns 0 if no space in sockbuf or insufficient 897 * mbufs. 898 */ 899 int 900 sbappendaddr_locked(struct sockbuf *sb, const struct sockaddr *asa, 901 struct mbuf *m0, struct mbuf *control) 902 { 903 struct mbuf *ctrl_last; 904 int space = asa->sa_len; 905 906 SOCKBUF_LOCK_ASSERT(sb); 907 908 if (m0 && (m0->m_flags & M_PKTHDR) == 0) 909 panic("sbappendaddr_locked"); 910 if (m0) 911 space += m0->m_pkthdr.len; 912 space += m_length(control, &ctrl_last); 913 914 if (space > sbspace(sb)) 915 return (0); 916 return (sbappendaddr_locked_internal(sb, asa, m0, control, ctrl_last)); 917 } 918 919 /* 920 * Append address and data, and optionally, control (ancillary) data to the 921 * receive queue of a socket. If present, m0 must include a packet header 922 * with total length. Returns 0 if insufficient mbufs. Does not validate space 923 * on the receiving sockbuf. 924 */ 925 int 926 sbappendaddr_nospacecheck_locked(struct sockbuf *sb, const struct sockaddr *asa, 927 struct mbuf *m0, struct mbuf *control) 928 { 929 struct mbuf *ctrl_last; 930 931 SOCKBUF_LOCK_ASSERT(sb); 932 933 ctrl_last = (control == NULL) ? NULL : m_last(control); 934 return (sbappendaddr_locked_internal(sb, asa, m0, control, ctrl_last)); 935 } 936 937 /* 938 * Append address and data, and optionally, control (ancillary) data to the 939 * receive queue of a socket. If present, m0 must include a packet header 940 * with total length. Returns 0 if no space in sockbuf or insufficient 941 * mbufs. 
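 *
 * Hedged sketch, roughly what a datagram protocol's input path does,
 * simplified ("from" and "opts" are placeholders for the source address
 * and any control mbufs):
 *
 *	SOCKBUF_LOCK(&so->so_rcv);
 *	if (sbappendaddr_locked(&so->so_rcv,
 *	    (struct sockaddr *)&from, m, opts) == 0) {
 *		SOCKBUF_UNLOCK(&so->so_rcv);
 *		m_freem(m);
 *	} else
 *		sorwakeup_locked(so);
 *
 * On failure the caller also frees any control mbufs it allocated.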
942 */ 943 int 944 sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa, 945 struct mbuf *m0, struct mbuf *control) 946 { 947 int retval; 948 949 SOCKBUF_LOCK(sb); 950 retval = sbappendaddr_locked(sb, asa, m0, control); 951 SOCKBUF_UNLOCK(sb); 952 return (retval); 953 } 954 955 int 956 sbappendcontrol_locked(struct sockbuf *sb, struct mbuf *m0, 957 struct mbuf *control) 958 { 959 struct mbuf *m, *n, *mlast; 960 int space; 961 962 SOCKBUF_LOCK_ASSERT(sb); 963 964 if (control == NULL) 965 panic("sbappendcontrol_locked"); 966 space = m_length(control, &n) + m_length(m0, NULL); 967 968 if (space > sbspace(sb)) 969 return (0); 970 m_clrprotoflags(m0); 971 n->m_next = m0; /* concatenate data to control */ 972 973 SBLASTRECORDCHK(sb); 974 975 for (m = control; m->m_next; m = m->m_next) 976 sballoc(sb, m); 977 sballoc(sb, m); 978 mlast = m; 979 SBLINKRECORD(sb, control); 980 981 sb->sb_mbtail = mlast; 982 SBLASTMBUFCHK(sb); 983 984 SBLASTRECORDCHK(sb); 985 return (1); 986 } 987 988 int 989 sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control) 990 { 991 int retval; 992 993 SOCKBUF_LOCK(sb); 994 retval = sbappendcontrol_locked(sb, m0, control); 995 SOCKBUF_UNLOCK(sb); 996 return (retval); 997 } 998 999 /* 1000 * Append the data in mbuf chain (m) into the socket buffer sb following mbuf 1001 * (n). If (n) is NULL, the buffer is presumed empty. 1002 * 1003 * When the data is compressed, mbufs in the chain may be handled in one of 1004 * three ways: 1005 * 1006 * (1) The mbuf may simply be dropped, if it contributes nothing (no data, no 1007 * record boundary, and no change in data type). 1008 * 1009 * (2) The mbuf may be coalesced -- i.e., data in the mbuf may be copied into 1010 * an mbuf already in the socket buffer. This can occur if an 1011 * appropriate mbuf exists, there is room, both mbufs are not marked as 1012 * not ready, and no merging of data types will occur. 1013 * 1014 * (3) The mbuf may be appended to the end of the existing mbuf chain. 1015 * 1016 * If any of the new mbufs is marked as M_EOR, mark the last mbuf appended as 1017 * end-of-record. 
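 *
 * As a concrete illustration of case (2): a burst of small write(2)
 * calls on a stream socket is normally merged into the trailing mbuf or
 * cluster of so_snd, so sb_mbcnt stays close to what the queued bytes
 * (sb_ccc) actually require instead of growing by one mbuf per write.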
1018 */ 1019 void 1020 sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n) 1021 { 1022 int eor = 0; 1023 struct mbuf *o; 1024 1025 SOCKBUF_LOCK_ASSERT(sb); 1026 1027 while (m) { 1028 eor |= m->m_flags & M_EOR; 1029 if (m->m_len == 0 && 1030 (eor == 0 || 1031 (((o = m->m_next) || (o = n)) && 1032 o->m_type == m->m_type))) { 1033 if (sb->sb_lastrecord == m) 1034 sb->sb_lastrecord = m->m_next; 1035 m = m_free(m); 1036 continue; 1037 } 1038 if (n && (n->m_flags & M_EOR) == 0 && 1039 M_WRITABLE(n) && 1040 ((sb->sb_flags & SB_NOCOALESCE) == 0) && 1041 !(m->m_flags & M_NOTREADY) && 1042 !(n->m_flags & M_NOTREADY) && 1043 m->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */ 1044 m->m_len <= M_TRAILINGSPACE(n) && 1045 n->m_type == m->m_type) { 1046 bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len, 1047 (unsigned)m->m_len); 1048 n->m_len += m->m_len; 1049 sb->sb_ccc += m->m_len; 1050 if (sb->sb_fnrdy == NULL) 1051 sb->sb_acc += m->m_len; 1052 if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA) 1053 /* XXX: Probably don't need.*/ 1054 sb->sb_ctl += m->m_len; 1055 m = m_free(m); 1056 continue; 1057 } 1058 if (n) 1059 n->m_next = m; 1060 else 1061 sb->sb_mb = m; 1062 sb->sb_mbtail = m; 1063 sballoc(sb, m); 1064 n = m; 1065 m->m_flags &= ~M_EOR; 1066 m = m->m_next; 1067 n->m_next = 0; 1068 } 1069 if (eor) { 1070 KASSERT(n != NULL, ("sbcompress: eor && n == NULL")); 1071 n->m_flags |= eor; 1072 } 1073 SBLASTMBUFCHK(sb); 1074 } 1075 1076 /* 1077 * Free all mbufs in a sockbuf. Check that all resources are reclaimed. 1078 */ 1079 static void 1080 sbflush_internal(struct sockbuf *sb) 1081 { 1082 1083 while (sb->sb_mbcnt) { 1084 /* 1085 * Don't call sbcut(sb, 0) if the leading mbuf is non-empty: 1086 * we would loop forever. Panic instead. 1087 */ 1088 if (sb->sb_ccc == 0 && (sb->sb_mb == NULL || sb->sb_mb->m_len)) 1089 break; 1090 m_freem(sbcut_internal(sb, (int)sb->sb_ccc)); 1091 } 1092 KASSERT(sb->sb_ccc == 0 && sb->sb_mb == 0 && sb->sb_mbcnt == 0, 1093 ("%s: ccc %u mb %p mbcnt %u", __func__, 1094 sb->sb_ccc, (void *)sb->sb_mb, sb->sb_mbcnt)); 1095 } 1096 1097 void 1098 sbflush_locked(struct sockbuf *sb) 1099 { 1100 1101 SOCKBUF_LOCK_ASSERT(sb); 1102 sbflush_internal(sb); 1103 } 1104 1105 void 1106 sbflush(struct sockbuf *sb) 1107 { 1108 1109 SOCKBUF_LOCK(sb); 1110 sbflush_locked(sb); 1111 SOCKBUF_UNLOCK(sb); 1112 } 1113 1114 /* 1115 * Cut data from (the front of) a sockbuf. 1116 */ 1117 static struct mbuf * 1118 sbcut_internal(struct sockbuf *sb, int len) 1119 { 1120 struct mbuf *m, *next, *mfree; 1121 1122 KASSERT(len >= 0, ("%s: len is %d but it is supposed to be >= 0", 1123 __func__, len)); 1124 KASSERT(len <= sb->sb_ccc, ("%s: len: %d is > ccc: %u", 1125 __func__, len, sb->sb_ccc)); 1126 1127 next = (m = sb->sb_mb) ? m->m_nextpkt : 0; 1128 mfree = NULL; 1129 1130 while (len > 0) { 1131 if (m == NULL) { 1132 KASSERT(next, ("%s: no next, len %d", __func__, len)); 1133 m = next; 1134 next = m->m_nextpkt; 1135 } 1136 if (m->m_len > len) { 1137 KASSERT(!(m->m_flags & M_NOTAVAIL), 1138 ("%s: m %p M_NOTAVAIL", __func__, m)); 1139 m->m_len -= len; 1140 m->m_data += len; 1141 sb->sb_ccc -= len; 1142 sb->sb_acc -= len; 1143 if (sb->sb_sndptroff != 0) 1144 sb->sb_sndptroff -= len; 1145 if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA) 1146 sb->sb_ctl -= len; 1147 break; 1148 } 1149 len -= m->m_len; 1150 sbfree(sb, m); 1151 /* 1152 * Do not put M_NOTREADY buffers to the free list, they 1153 * are referenced from outside. 
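 * For instance, a pending asynchronous sendfile(2) I/O may still hold
 * pointers to such mbufs and will mark them ready via sbready() once
 * their data is in place.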
1154 */ 1155 if (m->m_flags & M_NOTREADY) 1156 m = m->m_next; 1157 else { 1158 struct mbuf *n; 1159 1160 n = m->m_next; 1161 m->m_next = mfree; 1162 mfree = m; 1163 m = n; 1164 } 1165 } 1166 /* 1167 * Free any zero-length mbufs from the buffer. 1168 * For SOCK_DGRAM sockets such mbufs represent empty records. 1169 * XXX: For SOCK_STREAM sockets such mbufs can appear in the buffer, 1170 * when sosend_generic() needs to send only control data. 1171 */ 1172 while (m && m->m_len == 0) { 1173 struct mbuf *n; 1174 1175 sbfree(sb, m); 1176 n = m->m_next; 1177 m->m_next = mfree; 1178 mfree = m; 1179 m = n; 1180 } 1181 if (m) { 1182 sb->sb_mb = m; 1183 m->m_nextpkt = next; 1184 } else 1185 sb->sb_mb = next; 1186 /* 1187 * First part is an inline SB_EMPTY_FIXUP(). Second part makes sure 1188 * sb_lastrecord is up-to-date if we dropped part of the last record. 1189 */ 1190 m = sb->sb_mb; 1191 if (m == NULL) { 1192 sb->sb_mbtail = NULL; 1193 sb->sb_lastrecord = NULL; 1194 } else if (m->m_nextpkt == NULL) { 1195 sb->sb_lastrecord = m; 1196 } 1197 1198 return (mfree); 1199 } 1200 1201 /* 1202 * Drop data from (the front of) a sockbuf. 1203 */ 1204 void 1205 sbdrop_locked(struct sockbuf *sb, int len) 1206 { 1207 1208 SOCKBUF_LOCK_ASSERT(sb); 1209 m_freem(sbcut_internal(sb, len)); 1210 } 1211 1212 /* 1213 * Drop data from (the front of) a sockbuf, 1214 * and return it to caller. 1215 */ 1216 struct mbuf * 1217 sbcut_locked(struct sockbuf *sb, int len) 1218 { 1219 1220 SOCKBUF_LOCK_ASSERT(sb); 1221 return (sbcut_internal(sb, len)); 1222 } 1223 1224 void 1225 sbdrop(struct sockbuf *sb, int len) 1226 { 1227 struct mbuf *mfree; 1228 1229 SOCKBUF_LOCK(sb); 1230 mfree = sbcut_internal(sb, len); 1231 SOCKBUF_UNLOCK(sb); 1232 1233 m_freem(mfree); 1234 } 1235 1236 /* 1237 * Maintain a pointer and offset pair into the socket buffer mbuf chain to 1238 * avoid traversal of the entire socket buffer for larger offsets. 1239 */ 1240 struct mbuf * 1241 sbsndptr(struct sockbuf *sb, u_int off, u_int len, u_int *moff) 1242 { 1243 struct mbuf *m, *ret; 1244 1245 KASSERT(sb->sb_mb != NULL, ("%s: sb_mb is NULL", __func__)); 1246 KASSERT(off + len <= sb->sb_acc, ("%s: beyond sb", __func__)); 1247 KASSERT(sb->sb_sndptroff <= sb->sb_acc, ("%s: sndptroff broken", __func__)); 1248 1249 /* 1250 * Is off below stored offset? Happens on retransmits. 1251 * Just return, we can't help here. 1252 */ 1253 if (sb->sb_sndptroff > off) { 1254 *moff = off; 1255 return (sb->sb_mb); 1256 } 1257 1258 /* Return closest mbuf in chain for current offset. */ 1259 *moff = off - sb->sb_sndptroff; 1260 m = ret = sb->sb_sndptr ? sb->sb_sndptr : sb->sb_mb; 1261 if (*moff == m->m_len) { 1262 *moff = 0; 1263 sb->sb_sndptroff += m->m_len; 1264 m = ret = m->m_next; 1265 KASSERT(ret->m_len > 0, 1266 ("mbuf %p in sockbuf %p chain has no valid data", ret, sb)); 1267 } 1268 1269 /* Advance by len to be as close as possible for the next transmit. */ 1270 for (off = off - sb->sb_sndptroff + len - 1; 1271 off > 0 && m != NULL && off >= m->m_len; 1272 m = m->m_next) { 1273 sb->sb_sndptroff += m->m_len; 1274 off -= m->m_len; 1275 } 1276 if (off > 0 && m == NULL) 1277 panic("%s: sockbuf %p and mbuf %p clashing", __func__, sb, ret); 1278 sb->sb_sndptr = m; 1279 1280 return (ret); 1281 } 1282 1283 /* 1284 * Return the first mbuf and the mbuf data offset for the provided 1285 * send offset without changing the "sb_sndptroff" field. 
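 *
 * Illustration only ("buf", "off" and "len" are placeholders): a sender
 * that must not disturb the cached (sb_sndptr, sb_sndptroff) pair could
 * locate and copy already-queued data with
 *
 *	m = sbsndmbuf(&so->so_snd, off, &moff);
 *	m_copydata(m, moff, len, buf);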
1286 */ 1287 struct mbuf * 1288 sbsndmbuf(struct sockbuf *sb, u_int off, u_int *moff) 1289 { 1290 struct mbuf *m; 1291 1292 KASSERT(sb->sb_mb != NULL, ("%s: sb_mb is NULL", __func__)); 1293 1294 /* 1295 * If the "off" is below the stored offset, which happens on 1296 * retransmits, just use "sb_mb": 1297 */ 1298 if (sb->sb_sndptr == NULL || sb->sb_sndptroff > off) { 1299 m = sb->sb_mb; 1300 } else { 1301 m = sb->sb_sndptr; 1302 off -= sb->sb_sndptroff; 1303 } 1304 while (off > 0 && m != NULL) { 1305 if (off < m->m_len) 1306 break; 1307 off -= m->m_len; 1308 m = m->m_next; 1309 } 1310 *moff = off; 1311 return (m); 1312 } 1313 1314 /* 1315 * Drop a record off the front of a sockbuf and move the next record to the 1316 * front. 1317 */ 1318 void 1319 sbdroprecord_locked(struct sockbuf *sb) 1320 { 1321 struct mbuf *m; 1322 1323 SOCKBUF_LOCK_ASSERT(sb); 1324 1325 m = sb->sb_mb; 1326 if (m) { 1327 sb->sb_mb = m->m_nextpkt; 1328 do { 1329 sbfree(sb, m); 1330 m = m_free(m); 1331 } while (m); 1332 } 1333 SB_EMPTY_FIXUP(sb); 1334 } 1335 1336 /* 1337 * Drop a record off the front of a sockbuf and move the next record to the 1338 * front. 1339 */ 1340 void 1341 sbdroprecord(struct sockbuf *sb) 1342 { 1343 1344 SOCKBUF_LOCK(sb); 1345 sbdroprecord_locked(sb); 1346 SOCKBUF_UNLOCK(sb); 1347 } 1348 1349 /* 1350 * Create a "control" mbuf containing the specified data with the specified 1351 * type for presentation on a socket buffer. 1352 */ 1353 struct mbuf * 1354 sbcreatecontrol(caddr_t p, int size, int type, int level) 1355 { 1356 struct cmsghdr *cp; 1357 struct mbuf *m; 1358 1359 if (CMSG_SPACE((u_int)size) > MCLBYTES) 1360 return ((struct mbuf *) NULL); 1361 if (CMSG_SPACE((u_int)size) > MLEN) 1362 m = m_getcl(M_NOWAIT, MT_CONTROL, 0); 1363 else 1364 m = m_get(M_NOWAIT, MT_CONTROL); 1365 if (m == NULL) 1366 return ((struct mbuf *) NULL); 1367 cp = mtod(m, struct cmsghdr *); 1368 m->m_len = 0; 1369 KASSERT(CMSG_SPACE((u_int)size) <= M_TRAILINGSPACE(m), 1370 ("sbcreatecontrol: short mbuf")); 1371 /* 1372 * Don't leave the padding between the msg header and the 1373 * cmsg data and the padding after the cmsg data un-initialized. 1374 */ 1375 bzero(cp, CMSG_SPACE((u_int)size)); 1376 if (p != NULL) 1377 (void)memcpy(CMSG_DATA(cp), p, size); 1378 m->m_len = CMSG_SPACE(size); 1379 cp->cmsg_len = CMSG_LEN(size); 1380 cp->cmsg_level = level; 1381 cp->cmsg_type = type; 1382 return (m); 1383 } 1384 1385 /* 1386 * This does the same for socket buffers that sotoxsocket does for sockets: 1387 * generate an user-format data structure describing the socket buffer. Note 1388 * that the xsockbuf structure, since it is always embedded in a socket, does 1389 * not include a self pointer nor a length. We make this entry point public 1390 * in case some other mechanism needs it. 1391 */ 1392 void 1393 sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb) 1394 { 1395 1396 xsb->sb_cc = sb->sb_ccc; 1397 xsb->sb_hiwat = sb->sb_hiwat; 1398 xsb->sb_mbcnt = sb->sb_mbcnt; 1399 xsb->sb_mcnt = sb->sb_mcnt; 1400 xsb->sb_ccnt = sb->sb_ccnt; 1401 xsb->sb_mbmax = sb->sb_mbmax; 1402 xsb->sb_lowat = sb->sb_lowat; 1403 xsb->sb_flags = sb->sb_flags; 1404 xsb->sb_timeo = sb->sb_timeo; 1405 } 1406 1407 /* This takes the place of kern.maxsockbuf, which moved to kern.ipc. 
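 * The limit enforced through sysctl_handle_sb_max() above is the value
 * an administrator tunes, e.g. "sysctl kern.ipc.maxsockbuf=16777216".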
*/ 1408 static int dummy; 1409 SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, ""); 1410 SYSCTL_OID(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLTYPE_ULONG|CTLFLAG_RW, 1411 &sb_max, 0, sysctl_handle_sb_max, "LU", "Maximum socket buffer size"); 1412 SYSCTL_ULONG(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW, 1413 &sb_efficiency, 0, "Socket buffer size waste factor"); 1414
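
/*
 * Usage sketch for sbcreatecontrol(), illustration only (not part of the
 * original file): a protocol passing a receive timestamp to the
 * application as ancillary data builds the control mbuf and queues it
 * together with the datagram ("src" is a placeholder sockaddr pointer):
 *
 *	struct timeval tv;
 *
 *	microtime(&tv);
 *	control = sbcreatecontrol((caddr_t)&tv, sizeof(tv),
 *	    SCM_TIMESTAMP, SOL_SOCKET);
 *	if (control != NULL)
 *		(void)sbappendaddr(&so->so_rcv, src, m, control);
 */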