/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket2.c	8.1 (Berkeley) 6/10/93
 * $Id: uipc_socket2.c,v 1.7 1995/12/14 22:51:02 bde Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/buf.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>

/*
 * Primitive routines for operating on sockets and socket buffers
 */

u_long	sb_max = SB_MAX;		/* XXX should be static */
SYSCTL_INT(_kern, KERN_MAXSOCKBUF, maxsockbuf, CTLFLAG_RW, &sb_max, 0, "")

static u_long sb_efficiency = 8;	/* parameter for sbreserve() */
SYSCTL_INT(_kern, OID_AUTO, sockbuf_waste_factor, CTLFLAG_RW, &sb_efficiency,
	0, "");

/*
 * Procedures to manipulate state flags of a socket
 * and do appropriate wakeups.  Normal sequence from the
 * active (originating) side is that soisconnecting() is
 * called during processing of a connect() call,
 * resulting in an eventual call to soisconnected() if/when the
 * connection is established.  When the connection is torn down
 * soisdisconnecting() is called during processing of a disconnect() call,
 * and soisdisconnected() is called when the connection to the peer
 * is totally severed.  The semantics of these routines are such that
 * connectionless protocols can call soisconnected() and soisdisconnected()
 * only, bypassing the in-progress calls when setting up a ``connection''
 * takes no time.
 *
 * From the passive side, a socket is created with
 * two queues of sockets: so_q0 for connections in progress
 * and so_q for connections already made and awaiting user acceptance.
 * As a protocol is preparing incoming connections, it creates a socket
 * structure queued on so_q0 by calling sonewconn().  When the connection
 * is established, soisconnected() is called, which transfers the
 * socket structure to so_q, making it available to accept().
 *
 * If a socket is closed with sockets on either
 * so_q0 or so_q, these sockets are dropped.
 *
 * If higher level protocols are implemented in
 * the kernel, the wakeups done here will sometimes
 * cause software-interrupt process scheduling.
 */

void
soisconnecting(so)
	register struct socket *so;
{

	so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= SS_ISCONNECTING;
}

void
soisconnected(so)
	register struct socket *so;
{
	register struct socket *head = so->so_head;

	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
	so->so_state |= SS_ISCONNECTED;
	if (head && soqremque(so, 0)) {
		soqinsque(head, so, 1);
		sorwakeup(head);
		wakeup((caddr_t)&head->so_timeo);
	} else {
		wakeup((caddr_t)&so->so_timeo);
		sorwakeup(so);
		sowwakeup(so);
	}
}

void
soisdisconnecting(so)
	register struct socket *so;
{

	so->so_state &= ~SS_ISCONNECTING;
	so->so_state |= (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE);
	wakeup((caddr_t)&so->so_timeo);
	sowwakeup(so);
	sorwakeup(so);
}

void
soisdisconnected(so)
	register struct socket *so;
{

	so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE);
	wakeup((caddr_t)&so->so_timeo);
	sowwakeup(so);
	sorwakeup(so);
}
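
/*
 * Illustrative sketch only (compiled out, never built): roughly how a
 * connection-oriented protocol might drive the transitions described
 * above for an active open.  The guard macro and the function name are
 * hypothetical; real callers are the protocol request routines.
 */
#ifdef SOCKET_USAGE_EXAMPLES
static void
example_active_open(so)
	struct socket *so;
{

	soisconnecting(so);	/* connect() will sleep on so->so_timeo */
	/*
	 * ... the protocol exchanges its connection handshake with the
	 * peer; once the handshake completes it calls ...
	 */
	soisconnected(so);	/* wakes the sleeper; connect() returns */
}
#endif /* SOCKET_USAGE_EXAMPLES */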

/*
 * When an attempt at a new connection is noted on a socket
 * which accepts connections, sonewconn is called.  If the
 * connection is possible (subject to space constraints, etc.)
 * then we allocate a new structure, properly linked into the
 * data structure of the original socket, and return this.
 * Connstatus may be 0, or SS_ISCONFIRMING, or SS_ISCONNECTED.
 *
 * Currently, sonewconn() is defined as sonewconn1() in socketvar.h
 * to catch calls that are missing the (new) second parameter.
 */
struct socket *
sonewconn1(head, connstatus)
	register struct socket *head;
	int connstatus;
{
	register struct socket *so;
	int soqueue = connstatus ? 1 : 0;

	if (head->so_qlen + head->so_q0len > 3 * head->so_qlimit / 2)
		return ((struct socket *)0);
	MALLOC(so, struct socket *, sizeof(*so), M_SOCKET, M_DONTWAIT);
	if (so == NULL)
		return ((struct socket *)0);
	bzero((caddr_t)so, sizeof(*so));
	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;
	so->so_state = head->so_state | SS_NOFDREF;
	so->so_proto = head->so_proto;
	so->so_timeo = head->so_timeo;
	so->so_pgid = head->so_pgid;
	(void) soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat);
	soqinsque(head, so, soqueue);
	if ((*so->so_proto->pr_usrreq)(so, PRU_ATTACH,
	    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0)) {
		(void) soqremque(so, soqueue);
		(void) free((caddr_t)so, M_SOCKET);
		return ((struct socket *)0);
	}
	if (connstatus) {
		sorwakeup(head);
		wakeup((caddr_t)&head->so_timeo);
		so->so_state |= connstatus;
	}
	return (so);
}
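
/*
 * Illustrative sketch only (compiled out): how a protocol's input path
 * might use sonewconn() when a connection request arrives for a
 * listening socket.  The guard macro and the function name are
 * hypothetical.
 */
#ifdef SOCKET_USAGE_EXAMPLES
static struct socket *
example_passive_open(head)
	struct socket *head;		/* socket with SO_ACCEPTCONN set */
{
	register struct socket *so;

	so = sonewconn(head, 0);	/* embryonic socket goes on so_q0 */
	if (so == NULL)
		return (NULL);		/* queue full or out of memory */
	/*
	 * ... the protocol completes its handshake on so, then ...
	 */
	soisconnected(so);	/* moves so to so_q; accept() can take it */
	return (so);
}
#endif /* SOCKET_USAGE_EXAMPLES */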

void
soqinsque(head, so, q)
	register struct socket *head, *so;
	int q;
{
	register struct socket **prev;

	so->so_head = head;
	if (q == 0) {
		head->so_q0len++;
		so->so_q0 = 0;
		for (prev = &(head->so_q0); *prev; )
			prev = &((*prev)->so_q0);
	} else {
		head->so_qlen++;
		so->so_q = 0;
		for (prev = &(head->so_q); *prev; )
			prev = &((*prev)->so_q);
	}
	*prev = so;
}

int
soqremque(so, q)
	register struct socket *so;
	int q;
{
	register struct socket *head, *prev, *next;

	head = so->so_head;
	prev = head;
	for (;;) {
		next = q ? prev->so_q : prev->so_q0;
		if (next == so)
			break;
		if (next == 0)
			return (0);
		prev = next;
	}
	if (q == 0) {
		prev->so_q0 = next->so_q0;
		head->so_q0len--;
	} else {
		prev->so_q = next->so_q;
		head->so_qlen--;
	}
	next->so_q0 = next->so_q = 0;
	next->so_head = 0;
	return (1);
}

/*
 * Socantsendmore indicates that no more data will be sent on the
 * socket; it is normally applied to a socket by the protocol code
 * when the user informs the system that no more data is to be sent
 * (the PRU_SHUTDOWN case).  Socantrcvmore indicates that no more data
 * will be received, and will normally be applied to the socket by a
 * protocol when it detects that the peer will send no more data.
 * Data queued for reading in the socket may yet be read.
 */

void
socantsendmore(so)
	struct socket *so;
{

	so->so_state |= SS_CANTSENDMORE;
	sowwakeup(so);
}

void
socantrcvmore(so)
	struct socket *so;
{

	so->so_state |= SS_CANTRCVMORE;
	sorwakeup(so);
}
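
/*
 * Illustrative sketch only (compiled out): the two independent events
 * that typically trigger the routines above.  A protocol calls
 * socantrcvmore() when the peer signals it has finished sending, and
 * socantsendmore() from its PRU_SHUTDOWN handling.  The guard macro
 * and the function name are hypothetical.
 */
#ifdef SOCKET_USAGE_EXAMPLES
static void
example_shutdown_events(so)
	struct socket *so;
{

	/* Peer indicated it will send no more data. */
	socantrcvmore(so);	/* readers see EOF once the queue drains */

	/* User shut down the sending side (PRU_SHUTDOWN). */
	socantsendmore(so);	/* further writes fail with EPIPE */
}
#endif /* SOCKET_USAGE_EXAMPLES */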

/*
 * Wait for data to arrive at/drain from a socket buffer.
 */
int
sbwait(sb)
	struct sockbuf *sb;
{

	sb->sb_flags |= SB_WAIT;
	return (tsleep((caddr_t)&sb->sb_cc,
	    (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, "sbwait",
	    sb->sb_timeo));
}

/*
 * Lock a sockbuf already known to be locked;
 * wait for the lock to be released, then acquire it.
 * Return any error returned from the sleep (EINTR).
 */
int
sb_lock(sb)
	register struct sockbuf *sb;
{
	int error;

	while (sb->sb_flags & SB_LOCK) {
		sb->sb_flags |= SB_WANT;
		error = tsleep((caddr_t)&sb->sb_flags,
		    (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK|PCATCH,
		    "sblock", 0);
		if (error)
			return (error);
	}
	sb->sb_flags |= SB_LOCK;
	return (0);
}

/*
 * Wakeup processes waiting on a socket buffer.
 * Do asynchronous notification via SIGIO
 * if the socket has the SS_ASYNC flag set.
 */
void
sowakeup(so, sb)
	register struct socket *so;
	register struct sockbuf *sb;
{
	struct proc *p;

	selwakeup(&sb->sb_sel);
	sb->sb_flags &= ~SB_SEL;
	if (sb->sb_flags & SB_WAIT) {
		sb->sb_flags &= ~SB_WAIT;
		wakeup((caddr_t)&sb->sb_cc);
	}
	if (so->so_state & SS_ASYNC) {
		if (so->so_pgid < 0)
			gsignal(-so->so_pgid, SIGIO);
		else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
			psignal(p, SIGIO);
	}
}

/*
 * Socket buffer (struct sockbuf) utility routines.
 *
 * Each socket contains two socket buffers: one for sending data and
 * one for receiving data.  Each buffer contains a queue of mbufs,
 * information about the number of mbufs and amount of data in the
 * queue, and other fields allowing select() statements and notification
 * on data availability to be implemented.
 *
 * Data stored in a socket buffer is maintained as a list of records.
 * Each record is a list of mbufs chained together with the m_next
 * field.  Records are chained together with the m_nextpkt field.  The
 * upper level routine soreceive() expects the following conventions to
 * be observed when placing information in the receive buffer:
 *
 * 1. If the protocol requires each message be preceded by the sender's
 *    name, then a record containing that name must be present before
 *    any associated data (mbuf's must be of type MT_SONAME).
 * 2. If the protocol supports the exchange of ``access rights'' (really
 *    just additional data associated with the message), and there are
 *    ``rights'' to be received, then a record containing this data
 *    should be present (mbuf's must be of type MT_RIGHTS).
 * 3. If a name or rights record exists, then it must be followed by
 *    a data record, perhaps of zero length.
 *
 * Before using a new socket structure it is first necessary to reserve
 * buffer space to the socket, by calling sbreserve().  This should commit
 * some of the available buffer space in the system buffer pool for the
 * socket (currently, it does nothing but enforce limits).  The space
 * should be released by calling sbrelease() when the socket is destroyed.
 */

int
soreserve(so, sndcc, rcvcc)
	register struct socket *so;
	u_long sndcc, rcvcc;
{

	if (sbreserve(&so->so_snd, sndcc) == 0)
		goto bad;
	if (sbreserve(&so->so_rcv, rcvcc) == 0)
		goto bad2;
	if (so->so_rcv.sb_lowat == 0)
		so->so_rcv.sb_lowat = 1;
	if (so->so_snd.sb_lowat == 0)
		so->so_snd.sb_lowat = MCLBYTES;
	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
	return (0);
bad2:
	sbrelease(&so->so_snd);
bad:
	return (ENOBUFS);
}
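
/*
 * Illustrative sketch only (compiled out): a protocol's attach routine
 * typically reserves its default buffer space with soreserve() before
 * the socket is used.  The guard macro, the function name and the
 * buffer sizes are hypothetical.
 */
#ifdef SOCKET_USAGE_EXAMPLES
static int
example_attach(so)
	struct socket *so;
{
	int error;

	error = soreserve(so, 8192, 8192);	/* send and receive hiwat */
	if (error)
		return (error);		/* ENOBUFS if the limits are exceeded */
	return (0);
}
#endif /* SOCKET_USAGE_EXAMPLES */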

/*
 * Allot mbufs to a sockbuf.
 * Attempt to scale mbmax so that mbcnt doesn't become limiting
 * if buffering efficiency is near the normal case.
 * For example, with the default sb_efficiency of 8, a 16k reservation
 * allows up to 128k of mbuf storage (capped at sb_max).
 */
int
sbreserve(sb, cc)
	struct sockbuf *sb;
	u_long cc;
{

	if (cc > sb_max * MCLBYTES / (MSIZE + MCLBYTES))
		return (0);
	sb->sb_hiwat = cc;
	sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
	if (sb->sb_lowat > sb->sb_hiwat)
		sb->sb_lowat = sb->sb_hiwat;
	return (1);
}

/*
 * Free mbufs held by a socket, and reserved mbuf space.
 */
void
sbrelease(sb)
	struct sockbuf *sb;
{

	sbflush(sb);
	sb->sb_hiwat = sb->sb_mbmax = 0;
}

/*
 * Routines to add and remove
 * data from an mbuf queue.
 *
 * The routines sbappend() or sbappendrecord() are normally called to
 * append new mbufs to a socket buffer, after checking that adequate
 * space is available by comparing the result of sbspace() with the
 * amount of data to be added.  sbappendrecord() differs from sbappend()
 * in that the data supplied is treated as the beginning of a new record.
 * To place a sender's address, optional access rights, and data in a
 * socket receive buffer, sbappendaddr() should be used.  To place
 * access rights and data in a socket receive buffer, sbappendcontrol()
 * should be used.  In either case, the new data begins a new record.
 * Note that unlike sbappend() and sbappendrecord(), these routines check
 * for the caller that there will be enough space to store the data.
 * Each fails if there is not enough space, or if it cannot find mbufs
 * to store additional information in.
 *
 * Reliable protocols may use the socket send buffer to hold data
 * awaiting acknowledgement.  Data is normally copied from a socket
 * send buffer with m_copy() for output to a peer, and then removed
 * from the socket buffer with sbdrop() or sbdroprecord() when the
 * data is acknowledged by the peer.
 */

/*
 * Append mbuf chain m to the last record in the
 * socket buffer sb.  The additional space associated with
 * the mbuf chain is recorded in sb.  Empty mbufs are
 * discarded and mbufs are compacted where possible.
 */
void
sbappend(sb, m)
	struct sockbuf *sb;
	struct mbuf *m;
{
	register struct mbuf *n;

	if (m == 0)
		return;
	n = sb->sb_mb;
	if (n) {
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		do {
			if (n->m_flags & M_EOR) {
				sbappendrecord(sb, m); /* XXXXXX!!!! */
				return;
			}
		} while (n->m_next && (n = n->m_next));
	}
	sbcompress(sb, m, n);
}
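
/*
 * Illustrative sketch only (compiled out): the usual receive-side idiom
 * for a stream protocol delivering data to the user.  The caller is
 * assumed to have already verified, e.g. via sbspace(), that the data
 * fits.  The guard macro and the function name are hypothetical.
 */
#ifdef SOCKET_USAGE_EXAMPLES
static void
example_stream_input(so, m)
	struct socket *so;
	struct mbuf *m;			/* chain of data mbufs */
{

	sbappend(&so->so_rcv, m);	/* queue onto the last record */
	sorwakeup(so);			/* wake readers, post select/SIGIO */
}
#endif /* SOCKET_USAGE_EXAMPLES */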

#ifdef SOCKBUF_DEBUG
void
sbcheck(sb)
	register struct sockbuf *sb;
{
	register struct mbuf *m;
	register int len = 0, mbcnt = 0;

	for (m = sb->sb_mb; m; m = m->m_next) {
		len += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT)
			mbcnt += m->m_ext.ext_size;
		if (m->m_nextpkt)
			panic("sbcheck nextpkt");
	}
	if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
		printf("cc %d != %d || mbcnt %d != %d\n", len, sb->sb_cc,
		    mbcnt, sb->sb_mbcnt);
		panic("sbcheck");
	}
}
#endif

/*
 * As above, except the mbuf chain
 * begins a new record.
 */
void
sbappendrecord(sb, m0)
	register struct sockbuf *sb;
	register struct mbuf *m0;
{
	register struct mbuf *m;

	if (m0 == 0)
		return;
	m = sb->sb_mb;
	if (m)
		while (m->m_nextpkt)
			m = m->m_nextpkt;
	/*
	 * Put the first mbuf on the queue.
	 * Note this permits zero length records.
	 */
	sballoc(sb, m0);
	if (m)
		m->m_nextpkt = m0;
	else
		sb->sb_mb = m0;
	m = m0->m_next;
	m0->m_next = 0;
	if (m && (m0->m_flags & M_EOR)) {
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	sbcompress(sb, m, m0);
}

/*
 * As above except that OOB data
 * is inserted at the beginning of the sockbuf,
 * but after any other OOB data.
 */
void
sbinsertoob(sb, m0)
	register struct sockbuf *sb;
	register struct mbuf *m0;
{
	register struct mbuf *m;
	register struct mbuf **mp;

	if (m0 == 0)
		return;
	for (mp = &sb->sb_mb; *mp ; mp = &((*mp)->m_nextpkt)) {
	    m = *mp;
	    again:
		switch (m->m_type) {

		case MT_OOBDATA:
			continue;		/* WANT next train */

		case MT_CONTROL:
			m = m->m_next;
			if (m)
				goto again;	/* inspect THIS train further */
		}
		break;
	}
	/*
	 * Put the first mbuf on the queue.
	 * Note this permits zero length records.
	 */
	sballoc(sb, m0);
	m0->m_nextpkt = *mp;
	*mp = m0;
	m = m0->m_next;
	m0->m_next = 0;
	if (m && (m0->m_flags & M_EOR)) {
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	sbcompress(sb, m, m0);
}

/*
 * Append address and data, and optionally, control (ancillary) data
 * to the receive queue of a socket.  If present,
 * m0 must include a packet header with total length.
 * Returns 0 if no space in sockbuf or insufficient mbufs.
 */
int
sbappendaddr(sb, asa, m0, control)
	register struct sockbuf *sb;
	struct sockaddr *asa;
	struct mbuf *m0, *control;
{
	register struct mbuf *m, *n;
	int space = asa->sa_len;

	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
		panic("sbappendaddr");
	if (m0)
		space += m0->m_pkthdr.len;
	for (n = control; n; n = n->m_next) {
		space += n->m_len;
		if (n->m_next == 0)	/* keep pointer to last control buf */
			break;
	}
	if (space > sbspace(sb))
		return (0);
	if (asa->sa_len > MLEN)
		return (0);
	MGET(m, M_DONTWAIT, MT_SONAME);
	if (m == 0)
		return (0);
	m->m_len = asa->sa_len;
	bcopy((caddr_t)asa, mtod(m, caddr_t), asa->sa_len);
	if (n)
		n->m_next = m0;		/* concatenate data to control */
	else
		control = m0;
	m->m_next = control;
	for (n = m; n; n = n->m_next)
		sballoc(sb, n);
	n = sb->sb_mb;
	if (n) {
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		n->m_nextpkt = m;
	} else
		sb->sb_mb = m;
	return (1);
}

int
sbappendcontrol(sb, m0, control)
	struct sockbuf *sb;
	struct mbuf *control, *m0;
{
	register struct mbuf *m, *n;
	int space = 0;

	if (control == 0)
		panic("sbappendcontrol");
	for (m = control; ; m = m->m_next) {
		space += m->m_len;
		if (m->m_next == 0)
			break;
	}
	n = m;			/* save pointer to last control buffer */
	for (m = m0; m; m = m->m_next)
		space += m->m_len;
	if (space > sbspace(sb))
		return (0);
	n->m_next = m0;			/* concatenate data to control */
	for (m = control; m; m = m->m_next)
		sballoc(sb, m);
	n = sb->sb_mb;
	if (n) {
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		n->m_nextpkt = control;
	} else
		sb->sb_mb = control;
	return (1);
}
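
/*
 * Illustrative sketch only (compiled out): the usual idiom for a
 * datagram protocol delivering a packet together with the sender's
 * address.  sbappendaddr() checks for space itself and returns 0 on
 * failure, in which case the caller still owns the mbufs.  The guard
 * macro and the names are hypothetical.
 */
#ifdef SOCKET_USAGE_EXAMPLES
static void
example_dgram_input(so, from, m)
	struct socket *so;
	struct sockaddr *from;		/* sender's address */
	struct mbuf *m;			/* packet, with M_PKTHDR set */
{

	if (sbappendaddr(&so->so_rcv, from, m, (struct mbuf *)0) == 0) {
		m_freem(m);		/* no room; drop the datagram */
		return;
	}
	sorwakeup(so);			/* a new record is ready for readers */
}
#endif /* SOCKET_USAGE_EXAMPLES */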

/*
 * Compress mbuf chain m into the socket
 * buffer sb following mbuf n.  If n
 * is null, the buffer is presumed empty.
 */
void
sbcompress(sb, m, n)
	register struct sockbuf *sb;
	register struct mbuf *m, *n;
{
	register int eor = 0;
	register struct mbuf *o;

	while (m) {
		eor |= m->m_flags & M_EOR;
		if (m->m_len == 0 &&
		    (eor == 0 ||
		     (((o = m->m_next) || (o = n)) &&
		      o->m_type == m->m_type))) {
			m = m_free(m);
			continue;
		}
		if (n && (n->m_flags & (M_EXT | M_EOR)) == 0 &&
		    (n->m_data + n->m_len + m->m_len) < &n->m_dat[MLEN] &&
		    n->m_type == m->m_type) {
			bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len,
			    (unsigned)m->m_len);
			n->m_len += m->m_len;
			sb->sb_cc += m->m_len;
			m = m_free(m);
			continue;
		}
		if (n)
			n->m_next = m;
		else
			sb->sb_mb = m;
		sballoc(sb, m);
		n = m;
		m->m_flags &= ~M_EOR;
		m = m->m_next;
		n->m_next = 0;
	}
	if (eor) {
		if (n)
			n->m_flags |= eor;
		else
			printf("semi-panic: sbcompress\n");
	}
}

/*
 * Free all mbufs in a sockbuf.
 * Check that all resources are reclaimed.
 */
void
sbflush(sb)
	register struct sockbuf *sb;
{

	if (sb->sb_flags & SB_LOCK)
		panic("sbflush");
	while (sb->sb_mbcnt)
		sbdrop(sb, (int)sb->sb_cc);
	if (sb->sb_cc || sb->sb_mb)
		panic("sbflush 2");
}

/*
 * Drop data from (the front of) a sockbuf.
 */
void
sbdrop(sb, len)
	register struct sockbuf *sb;
	register int len;
{
	register struct mbuf *m, *mn;
	struct mbuf *next;

	next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
	while (len > 0) {
		if (m == 0) {
			if (next == 0)
				panic("sbdrop");
			m = next;
			next = m->m_nextpkt;
			continue;
		}
		if (m->m_len > len) {
			m->m_len -= len;
			m->m_data += len;
			sb->sb_cc -= len;
			break;
		}
		len -= m->m_len;
		sbfree(sb, m);
		MFREE(m, mn);
		m = mn;
	}
	while (m && m->m_len == 0) {
		sbfree(sb, m);
		MFREE(m, mn);
		m = mn;
	}
	if (m) {
		sb->sb_mb = m;
		m->m_nextpkt = next;
	} else
		sb->sb_mb = next;
}

/*
 * Drop a record off the front of a sockbuf
 * and move the next record to the front.
 */
void
sbdroprecord(sb)
	register struct sockbuf *sb;
{
	register struct mbuf *m, *mn;

	m = sb->sb_mb;
	if (m) {
		sb->sb_mb = m->m_nextpkt;
		do {
			sbfree(sb, m);
			MFREE(m, mn);
			m = mn;
		} while (m);
	}
}
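
/*
 * Illustrative sketch only (compiled out): how a reliable protocol might
 * use the send buffer as its retransmission store, as described in the
 * comment above the append routines.  Data is copied out with m_copy()
 * for transmission and only dropped once the peer acknowledges it.  The
 * guard macro, the function name and the parameters are hypothetical.
 */
#ifdef SOCKET_USAGE_EXAMPLES
static void
example_ack_received(so, acked)
	struct socket *so;
	int acked;			/* bytes newly acknowledged by peer */
{

	sbdrop(&so->so_snd, acked);	/* release acknowledged data */
	sowwakeup(so);			/* writers may have room again */
}
#endif /* SOCKET_USAGE_EXAMPLES */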