1 /*- 2 * Copyright (c) 2001-2007, Cisco Systems, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * a) Redistributions of source code must retain the above copyright notice, 8 * this list of conditions and the following disclaimer. 9 * 10 * b) Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the distribution. 13 * 14 * c) Neither the name of Cisco Systems, Inc. nor the names of its 15 * contributors may be used to endorse or promote products derived 16 * from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 
29 */ 30 31 /* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $ */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include <netinet/sctp_os.h> 37 #include <sys/proc.h> 38 #include <netinet/sctp_var.h> 39 #include <netinet/sctp_sysctl.h> 40 #include <netinet/sctp_header.h> 41 #include <netinet/sctp_pcb.h> 42 #include <netinet/sctputil.h> 43 #include <netinet/sctp_output.h> 44 #include <netinet/sctp_uio.h> 45 #include <netinet/sctputil.h> 46 #include <netinet/sctp_auth.h> 47 #include <netinet/sctp_timer.h> 48 #include <netinet/sctp_asconf.h> 49 #include <netinet/sctp_indata.h> 50 #include <netinet/sctp_bsd_addr.h> 51 52 53 54 #define SCTP_MAX_GAPS_INARRAY 4 55 struct sack_track { 56 uint8_t right_edge; /* mergable on the right edge */ 57 uint8_t left_edge; /* mergable on the left edge */ 58 uint8_t num_entries; 59 uint8_t spare; 60 struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY]; 61 }; 62 63 struct sack_track sack_array[256] = { 64 {0, 0, 0, 0, /* 0x00 */ 65 {{0, 0}, 66 {0, 0}, 67 {0, 0}, 68 {0, 0} 69 } 70 }, 71 {1, 0, 1, 0, /* 0x01 */ 72 {{0, 0}, 73 {0, 0}, 74 {0, 0}, 75 {0, 0} 76 } 77 }, 78 {0, 0, 1, 0, /* 0x02 */ 79 {{1, 1}, 80 {0, 0}, 81 {0, 0}, 82 {0, 0} 83 } 84 }, 85 {1, 0, 1, 0, /* 0x03 */ 86 {{0, 1}, 87 {0, 0}, 88 {0, 0}, 89 {0, 0} 90 } 91 }, 92 {0, 0, 1, 0, /* 0x04 */ 93 {{2, 2}, 94 {0, 0}, 95 {0, 0}, 96 {0, 0} 97 } 98 }, 99 {1, 0, 2, 0, /* 0x05 */ 100 {{0, 0}, 101 {2, 2}, 102 {0, 0}, 103 {0, 0} 104 } 105 }, 106 {0, 0, 1, 0, /* 0x06 */ 107 {{1, 2}, 108 {0, 0}, 109 {0, 0}, 110 {0, 0} 111 } 112 }, 113 {1, 0, 1, 0, /* 0x07 */ 114 {{0, 2}, 115 {0, 0}, 116 {0, 0}, 117 {0, 0} 118 } 119 }, 120 {0, 0, 1, 0, /* 0x08 */ 121 {{3, 3}, 122 {0, 0}, 123 {0, 0}, 124 {0, 0} 125 } 126 }, 127 {1, 0, 2, 0, /* 0x09 */ 128 {{0, 0}, 129 {3, 3}, 130 {0, 0}, 131 {0, 0} 132 } 133 }, 134 {0, 0, 2, 0, /* 0x0a */ 135 {{1, 1}, 136 {3, 3}, 137 {0, 0}, 138 {0, 0} 139 } 140 }, 141 {1, 0, 2, 0, /* 0x0b */ 142 {{0, 1}, 143 {3, 3}, 144 {0, 0}, 145 {0, 0} 146 } 
147 }, 148 {0, 0, 1, 0, /* 0x0c */ 149 {{2, 3}, 150 {0, 0}, 151 {0, 0}, 152 {0, 0} 153 } 154 }, 155 {1, 0, 2, 0, /* 0x0d */ 156 {{0, 0}, 157 {2, 3}, 158 {0, 0}, 159 {0, 0} 160 } 161 }, 162 {0, 0, 1, 0, /* 0x0e */ 163 {{1, 3}, 164 {0, 0}, 165 {0, 0}, 166 {0, 0} 167 } 168 }, 169 {1, 0, 1, 0, /* 0x0f */ 170 {{0, 3}, 171 {0, 0}, 172 {0, 0}, 173 {0, 0} 174 } 175 }, 176 {0, 0, 1, 0, /* 0x10 */ 177 {{4, 4}, 178 {0, 0}, 179 {0, 0}, 180 {0, 0} 181 } 182 }, 183 {1, 0, 2, 0, /* 0x11 */ 184 {{0, 0}, 185 {4, 4}, 186 {0, 0}, 187 {0, 0} 188 } 189 }, 190 {0, 0, 2, 0, /* 0x12 */ 191 {{1, 1}, 192 {4, 4}, 193 {0, 0}, 194 {0, 0} 195 } 196 }, 197 {1, 0, 2, 0, /* 0x13 */ 198 {{0, 1}, 199 {4, 4}, 200 {0, 0}, 201 {0, 0} 202 } 203 }, 204 {0, 0, 2, 0, /* 0x14 */ 205 {{2, 2}, 206 {4, 4}, 207 {0, 0}, 208 {0, 0} 209 } 210 }, 211 {1, 0, 3, 0, /* 0x15 */ 212 {{0, 0}, 213 {2, 2}, 214 {4, 4}, 215 {0, 0} 216 } 217 }, 218 {0, 0, 2, 0, /* 0x16 */ 219 {{1, 2}, 220 {4, 4}, 221 {0, 0}, 222 {0, 0} 223 } 224 }, 225 {1, 0, 2, 0, /* 0x17 */ 226 {{0, 2}, 227 {4, 4}, 228 {0, 0}, 229 {0, 0} 230 } 231 }, 232 {0, 0, 1, 0, /* 0x18 */ 233 {{3, 4}, 234 {0, 0}, 235 {0, 0}, 236 {0, 0} 237 } 238 }, 239 {1, 0, 2, 0, /* 0x19 */ 240 {{0, 0}, 241 {3, 4}, 242 {0, 0}, 243 {0, 0} 244 } 245 }, 246 {0, 0, 2, 0, /* 0x1a */ 247 {{1, 1}, 248 {3, 4}, 249 {0, 0}, 250 {0, 0} 251 } 252 }, 253 {1, 0, 2, 0, /* 0x1b */ 254 {{0, 1}, 255 {3, 4}, 256 {0, 0}, 257 {0, 0} 258 } 259 }, 260 {0, 0, 1, 0, /* 0x1c */ 261 {{2, 4}, 262 {0, 0}, 263 {0, 0}, 264 {0, 0} 265 } 266 }, 267 {1, 0, 2, 0, /* 0x1d */ 268 {{0, 0}, 269 {2, 4}, 270 {0, 0}, 271 {0, 0} 272 } 273 }, 274 {0, 0, 1, 0, /* 0x1e */ 275 {{1, 4}, 276 {0, 0}, 277 {0, 0}, 278 {0, 0} 279 } 280 }, 281 {1, 0, 1, 0, /* 0x1f */ 282 {{0, 4}, 283 {0, 0}, 284 {0, 0}, 285 {0, 0} 286 } 287 }, 288 {0, 0, 1, 0, /* 0x20 */ 289 {{5, 5}, 290 {0, 0}, 291 {0, 0}, 292 {0, 0} 293 } 294 }, 295 {1, 0, 2, 0, /* 0x21 */ 296 {{0, 0}, 297 {5, 5}, 298 {0, 0}, 299 {0, 0} 300 } 301 }, 302 {0, 0, 2, 0, /* 0x22 */ 303 
{{1, 1}, 304 {5, 5}, 305 {0, 0}, 306 {0, 0} 307 } 308 }, 309 {1, 0, 2, 0, /* 0x23 */ 310 {{0, 1}, 311 {5, 5}, 312 {0, 0}, 313 {0, 0} 314 } 315 }, 316 {0, 0, 2, 0, /* 0x24 */ 317 {{2, 2}, 318 {5, 5}, 319 {0, 0}, 320 {0, 0} 321 } 322 }, 323 {1, 0, 3, 0, /* 0x25 */ 324 {{0, 0}, 325 {2, 2}, 326 {5, 5}, 327 {0, 0} 328 } 329 }, 330 {0, 0, 2, 0, /* 0x26 */ 331 {{1, 2}, 332 {5, 5}, 333 {0, 0}, 334 {0, 0} 335 } 336 }, 337 {1, 0, 2, 0, /* 0x27 */ 338 {{0, 2}, 339 {5, 5}, 340 {0, 0}, 341 {0, 0} 342 } 343 }, 344 {0, 0, 2, 0, /* 0x28 */ 345 {{3, 3}, 346 {5, 5}, 347 {0, 0}, 348 {0, 0} 349 } 350 }, 351 {1, 0, 3, 0, /* 0x29 */ 352 {{0, 0}, 353 {3, 3}, 354 {5, 5}, 355 {0, 0} 356 } 357 }, 358 {0, 0, 3, 0, /* 0x2a */ 359 {{1, 1}, 360 {3, 3}, 361 {5, 5}, 362 {0, 0} 363 } 364 }, 365 {1, 0, 3, 0, /* 0x2b */ 366 {{0, 1}, 367 {3, 3}, 368 {5, 5}, 369 {0, 0} 370 } 371 }, 372 {0, 0, 2, 0, /* 0x2c */ 373 {{2, 3}, 374 {5, 5}, 375 {0, 0}, 376 {0, 0} 377 } 378 }, 379 {1, 0, 3, 0, /* 0x2d */ 380 {{0, 0}, 381 {2, 3}, 382 {5, 5}, 383 {0, 0} 384 } 385 }, 386 {0, 0, 2, 0, /* 0x2e */ 387 {{1, 3}, 388 {5, 5}, 389 {0, 0}, 390 {0, 0} 391 } 392 }, 393 {1, 0, 2, 0, /* 0x2f */ 394 {{0, 3}, 395 {5, 5}, 396 {0, 0}, 397 {0, 0} 398 } 399 }, 400 {0, 0, 1, 0, /* 0x30 */ 401 {{4, 5}, 402 {0, 0}, 403 {0, 0}, 404 {0, 0} 405 } 406 }, 407 {1, 0, 2, 0, /* 0x31 */ 408 {{0, 0}, 409 {4, 5}, 410 {0, 0}, 411 {0, 0} 412 } 413 }, 414 {0, 0, 2, 0, /* 0x32 */ 415 {{1, 1}, 416 {4, 5}, 417 {0, 0}, 418 {0, 0} 419 } 420 }, 421 {1, 0, 2, 0, /* 0x33 */ 422 {{0, 1}, 423 {4, 5}, 424 {0, 0}, 425 {0, 0} 426 } 427 }, 428 {0, 0, 2, 0, /* 0x34 */ 429 {{2, 2}, 430 {4, 5}, 431 {0, 0}, 432 {0, 0} 433 } 434 }, 435 {1, 0, 3, 0, /* 0x35 */ 436 {{0, 0}, 437 {2, 2}, 438 {4, 5}, 439 {0, 0} 440 } 441 }, 442 {0, 0, 2, 0, /* 0x36 */ 443 {{1, 2}, 444 {4, 5}, 445 {0, 0}, 446 {0, 0} 447 } 448 }, 449 {1, 0, 2, 0, /* 0x37 */ 450 {{0, 2}, 451 {4, 5}, 452 {0, 0}, 453 {0, 0} 454 } 455 }, 456 {0, 0, 1, 0, /* 0x38 */ 457 {{3, 5}, 458 {0, 0}, 459 {0, 0}, 460 {0, 
0} 461 } 462 }, 463 {1, 0, 2, 0, /* 0x39 */ 464 {{0, 0}, 465 {3, 5}, 466 {0, 0}, 467 {0, 0} 468 } 469 }, 470 {0, 0, 2, 0, /* 0x3a */ 471 {{1, 1}, 472 {3, 5}, 473 {0, 0}, 474 {0, 0} 475 } 476 }, 477 {1, 0, 2, 0, /* 0x3b */ 478 {{0, 1}, 479 {3, 5}, 480 {0, 0}, 481 {0, 0} 482 } 483 }, 484 {0, 0, 1, 0, /* 0x3c */ 485 {{2, 5}, 486 {0, 0}, 487 {0, 0}, 488 {0, 0} 489 } 490 }, 491 {1, 0, 2, 0, /* 0x3d */ 492 {{0, 0}, 493 {2, 5}, 494 {0, 0}, 495 {0, 0} 496 } 497 }, 498 {0, 0, 1, 0, /* 0x3e */ 499 {{1, 5}, 500 {0, 0}, 501 {0, 0}, 502 {0, 0} 503 } 504 }, 505 {1, 0, 1, 0, /* 0x3f */ 506 {{0, 5}, 507 {0, 0}, 508 {0, 0}, 509 {0, 0} 510 } 511 }, 512 {0, 0, 1, 0, /* 0x40 */ 513 {{6, 6}, 514 {0, 0}, 515 {0, 0}, 516 {0, 0} 517 } 518 }, 519 {1, 0, 2, 0, /* 0x41 */ 520 {{0, 0}, 521 {6, 6}, 522 {0, 0}, 523 {0, 0} 524 } 525 }, 526 {0, 0, 2, 0, /* 0x42 */ 527 {{1, 1}, 528 {6, 6}, 529 {0, 0}, 530 {0, 0} 531 } 532 }, 533 {1, 0, 2, 0, /* 0x43 */ 534 {{0, 1}, 535 {6, 6}, 536 {0, 0}, 537 {0, 0} 538 } 539 }, 540 {0, 0, 2, 0, /* 0x44 */ 541 {{2, 2}, 542 {6, 6}, 543 {0, 0}, 544 {0, 0} 545 } 546 }, 547 {1, 0, 3, 0, /* 0x45 */ 548 {{0, 0}, 549 {2, 2}, 550 {6, 6}, 551 {0, 0} 552 } 553 }, 554 {0, 0, 2, 0, /* 0x46 */ 555 {{1, 2}, 556 {6, 6}, 557 {0, 0}, 558 {0, 0} 559 } 560 }, 561 {1, 0, 2, 0, /* 0x47 */ 562 {{0, 2}, 563 {6, 6}, 564 {0, 0}, 565 {0, 0} 566 } 567 }, 568 {0, 0, 2, 0, /* 0x48 */ 569 {{3, 3}, 570 {6, 6}, 571 {0, 0}, 572 {0, 0} 573 } 574 }, 575 {1, 0, 3, 0, /* 0x49 */ 576 {{0, 0}, 577 {3, 3}, 578 {6, 6}, 579 {0, 0} 580 } 581 }, 582 {0, 0, 3, 0, /* 0x4a */ 583 {{1, 1}, 584 {3, 3}, 585 {6, 6}, 586 {0, 0} 587 } 588 }, 589 {1, 0, 3, 0, /* 0x4b */ 590 {{0, 1}, 591 {3, 3}, 592 {6, 6}, 593 {0, 0} 594 } 595 }, 596 {0, 0, 2, 0, /* 0x4c */ 597 {{2, 3}, 598 {6, 6}, 599 {0, 0}, 600 {0, 0} 601 } 602 }, 603 {1, 0, 3, 0, /* 0x4d */ 604 {{0, 0}, 605 {2, 3}, 606 {6, 6}, 607 {0, 0} 608 } 609 }, 610 {0, 0, 2, 0, /* 0x4e */ 611 {{1, 3}, 612 {6, 6}, 613 {0, 0}, 614 {0, 0} 615 } 616 }, 617 {1, 0, 2, 0, /* 0x4f 
*/ 618 {{0, 3}, 619 {6, 6}, 620 {0, 0}, 621 {0, 0} 622 } 623 }, 624 {0, 0, 2, 0, /* 0x50 */ 625 {{4, 4}, 626 {6, 6}, 627 {0, 0}, 628 {0, 0} 629 } 630 }, 631 {1, 0, 3, 0, /* 0x51 */ 632 {{0, 0}, 633 {4, 4}, 634 {6, 6}, 635 {0, 0} 636 } 637 }, 638 {0, 0, 3, 0, /* 0x52 */ 639 {{1, 1}, 640 {4, 4}, 641 {6, 6}, 642 {0, 0} 643 } 644 }, 645 {1, 0, 3, 0, /* 0x53 */ 646 {{0, 1}, 647 {4, 4}, 648 {6, 6}, 649 {0, 0} 650 } 651 }, 652 {0, 0, 3, 0, /* 0x54 */ 653 {{2, 2}, 654 {4, 4}, 655 {6, 6}, 656 {0, 0} 657 } 658 }, 659 {1, 0, 4, 0, /* 0x55 */ 660 {{0, 0}, 661 {2, 2}, 662 {4, 4}, 663 {6, 6} 664 } 665 }, 666 {0, 0, 3, 0, /* 0x56 */ 667 {{1, 2}, 668 {4, 4}, 669 {6, 6}, 670 {0, 0} 671 } 672 }, 673 {1, 0, 3, 0, /* 0x57 */ 674 {{0, 2}, 675 {4, 4}, 676 {6, 6}, 677 {0, 0} 678 } 679 }, 680 {0, 0, 2, 0, /* 0x58 */ 681 {{3, 4}, 682 {6, 6}, 683 {0, 0}, 684 {0, 0} 685 } 686 }, 687 {1, 0, 3, 0, /* 0x59 */ 688 {{0, 0}, 689 {3, 4}, 690 {6, 6}, 691 {0, 0} 692 } 693 }, 694 {0, 0, 3, 0, /* 0x5a */ 695 {{1, 1}, 696 {3, 4}, 697 {6, 6}, 698 {0, 0} 699 } 700 }, 701 {1, 0, 3, 0, /* 0x5b */ 702 {{0, 1}, 703 {3, 4}, 704 {6, 6}, 705 {0, 0} 706 } 707 }, 708 {0, 0, 2, 0, /* 0x5c */ 709 {{2, 4}, 710 {6, 6}, 711 {0, 0}, 712 {0, 0} 713 } 714 }, 715 {1, 0, 3, 0, /* 0x5d */ 716 {{0, 0}, 717 {2, 4}, 718 {6, 6}, 719 {0, 0} 720 } 721 }, 722 {0, 0, 2, 0, /* 0x5e */ 723 {{1, 4}, 724 {6, 6}, 725 {0, 0}, 726 {0, 0} 727 } 728 }, 729 {1, 0, 2, 0, /* 0x5f */ 730 {{0, 4}, 731 {6, 6}, 732 {0, 0}, 733 {0, 0} 734 } 735 }, 736 {0, 0, 1, 0, /* 0x60 */ 737 {{5, 6}, 738 {0, 0}, 739 {0, 0}, 740 {0, 0} 741 } 742 }, 743 {1, 0, 2, 0, /* 0x61 */ 744 {{0, 0}, 745 {5, 6}, 746 {0, 0}, 747 {0, 0} 748 } 749 }, 750 {0, 0, 2, 0, /* 0x62 */ 751 {{1, 1}, 752 {5, 6}, 753 {0, 0}, 754 {0, 0} 755 } 756 }, 757 {1, 0, 2, 0, /* 0x63 */ 758 {{0, 1}, 759 {5, 6}, 760 {0, 0}, 761 {0, 0} 762 } 763 }, 764 {0, 0, 2, 0, /* 0x64 */ 765 {{2, 2}, 766 {5, 6}, 767 {0, 0}, 768 {0, 0} 769 } 770 }, 771 {1, 0, 3, 0, /* 0x65 */ 772 {{0, 0}, 773 {2, 2}, 774 {5, 6}, 
775 {0, 0} 776 } 777 }, 778 {0, 0, 2, 0, /* 0x66 */ 779 {{1, 2}, 780 {5, 6}, 781 {0, 0}, 782 {0, 0} 783 } 784 }, 785 {1, 0, 2, 0, /* 0x67 */ 786 {{0, 2}, 787 {5, 6}, 788 {0, 0}, 789 {0, 0} 790 } 791 }, 792 {0, 0, 2, 0, /* 0x68 */ 793 {{3, 3}, 794 {5, 6}, 795 {0, 0}, 796 {0, 0} 797 } 798 }, 799 {1, 0, 3, 0, /* 0x69 */ 800 {{0, 0}, 801 {3, 3}, 802 {5, 6}, 803 {0, 0} 804 } 805 }, 806 {0, 0, 3, 0, /* 0x6a */ 807 {{1, 1}, 808 {3, 3}, 809 {5, 6}, 810 {0, 0} 811 } 812 }, 813 {1, 0, 3, 0, /* 0x6b */ 814 {{0, 1}, 815 {3, 3}, 816 {5, 6}, 817 {0, 0} 818 } 819 }, 820 {0, 0, 2, 0, /* 0x6c */ 821 {{2, 3}, 822 {5, 6}, 823 {0, 0}, 824 {0, 0} 825 } 826 }, 827 {1, 0, 3, 0, /* 0x6d */ 828 {{0, 0}, 829 {2, 3}, 830 {5, 6}, 831 {0, 0} 832 } 833 }, 834 {0, 0, 2, 0, /* 0x6e */ 835 {{1, 3}, 836 {5, 6}, 837 {0, 0}, 838 {0, 0} 839 } 840 }, 841 {1, 0, 2, 0, /* 0x6f */ 842 {{0, 3}, 843 {5, 6}, 844 {0, 0}, 845 {0, 0} 846 } 847 }, 848 {0, 0, 1, 0, /* 0x70 */ 849 {{4, 6}, 850 {0, 0}, 851 {0, 0}, 852 {0, 0} 853 } 854 }, 855 {1, 0, 2, 0, /* 0x71 */ 856 {{0, 0}, 857 {4, 6}, 858 {0, 0}, 859 {0, 0} 860 } 861 }, 862 {0, 0, 2, 0, /* 0x72 */ 863 {{1, 1}, 864 {4, 6}, 865 {0, 0}, 866 {0, 0} 867 } 868 }, 869 {1, 0, 2, 0, /* 0x73 */ 870 {{0, 1}, 871 {4, 6}, 872 {0, 0}, 873 {0, 0} 874 } 875 }, 876 {0, 0, 2, 0, /* 0x74 */ 877 {{2, 2}, 878 {4, 6}, 879 {0, 0}, 880 {0, 0} 881 } 882 }, 883 {1, 0, 3, 0, /* 0x75 */ 884 {{0, 0}, 885 {2, 2}, 886 {4, 6}, 887 {0, 0} 888 } 889 }, 890 {0, 0, 2, 0, /* 0x76 */ 891 {{1, 2}, 892 {4, 6}, 893 {0, 0}, 894 {0, 0} 895 } 896 }, 897 {1, 0, 2, 0, /* 0x77 */ 898 {{0, 2}, 899 {4, 6}, 900 {0, 0}, 901 {0, 0} 902 } 903 }, 904 {0, 0, 1, 0, /* 0x78 */ 905 {{3, 6}, 906 {0, 0}, 907 {0, 0}, 908 {0, 0} 909 } 910 }, 911 {1, 0, 2, 0, /* 0x79 */ 912 {{0, 0}, 913 {3, 6}, 914 {0, 0}, 915 {0, 0} 916 } 917 }, 918 {0, 0, 2, 0, /* 0x7a */ 919 {{1, 1}, 920 {3, 6}, 921 {0, 0}, 922 {0, 0} 923 } 924 }, 925 {1, 0, 2, 0, /* 0x7b */ 926 {{0, 1}, 927 {3, 6}, 928 {0, 0}, 929 {0, 0} 930 } 931 }, 932 {0, 0, 1, 0, 
/* 0x7c */ 933 {{2, 6}, 934 {0, 0}, 935 {0, 0}, 936 {0, 0} 937 } 938 }, 939 {1, 0, 2, 0, /* 0x7d */ 940 {{0, 0}, 941 {2, 6}, 942 {0, 0}, 943 {0, 0} 944 } 945 }, 946 {0, 0, 1, 0, /* 0x7e */ 947 {{1, 6}, 948 {0, 0}, 949 {0, 0}, 950 {0, 0} 951 } 952 }, 953 {1, 0, 1, 0, /* 0x7f */ 954 {{0, 6}, 955 {0, 0}, 956 {0, 0}, 957 {0, 0} 958 } 959 }, 960 {0, 1, 1, 0, /* 0x80 */ 961 {{7, 7}, 962 {0, 0}, 963 {0, 0}, 964 {0, 0} 965 } 966 }, 967 {1, 1, 2, 0, /* 0x81 */ 968 {{0, 0}, 969 {7, 7}, 970 {0, 0}, 971 {0, 0} 972 } 973 }, 974 {0, 1, 2, 0, /* 0x82 */ 975 {{1, 1}, 976 {7, 7}, 977 {0, 0}, 978 {0, 0} 979 } 980 }, 981 {1, 1, 2, 0, /* 0x83 */ 982 {{0, 1}, 983 {7, 7}, 984 {0, 0}, 985 {0, 0} 986 } 987 }, 988 {0, 1, 2, 0, /* 0x84 */ 989 {{2, 2}, 990 {7, 7}, 991 {0, 0}, 992 {0, 0} 993 } 994 }, 995 {1, 1, 3, 0, /* 0x85 */ 996 {{0, 0}, 997 {2, 2}, 998 {7, 7}, 999 {0, 0} 1000 } 1001 }, 1002 {0, 1, 2, 0, /* 0x86 */ 1003 {{1, 2}, 1004 {7, 7}, 1005 {0, 0}, 1006 {0, 0} 1007 } 1008 }, 1009 {1, 1, 2, 0, /* 0x87 */ 1010 {{0, 2}, 1011 {7, 7}, 1012 {0, 0}, 1013 {0, 0} 1014 } 1015 }, 1016 {0, 1, 2, 0, /* 0x88 */ 1017 {{3, 3}, 1018 {7, 7}, 1019 {0, 0}, 1020 {0, 0} 1021 } 1022 }, 1023 {1, 1, 3, 0, /* 0x89 */ 1024 {{0, 0}, 1025 {3, 3}, 1026 {7, 7}, 1027 {0, 0} 1028 } 1029 }, 1030 {0, 1, 3, 0, /* 0x8a */ 1031 {{1, 1}, 1032 {3, 3}, 1033 {7, 7}, 1034 {0, 0} 1035 } 1036 }, 1037 {1, 1, 3, 0, /* 0x8b */ 1038 {{0, 1}, 1039 {3, 3}, 1040 {7, 7}, 1041 {0, 0} 1042 } 1043 }, 1044 {0, 1, 2, 0, /* 0x8c */ 1045 {{2, 3}, 1046 {7, 7}, 1047 {0, 0}, 1048 {0, 0} 1049 } 1050 }, 1051 {1, 1, 3, 0, /* 0x8d */ 1052 {{0, 0}, 1053 {2, 3}, 1054 {7, 7}, 1055 {0, 0} 1056 } 1057 }, 1058 {0, 1, 2, 0, /* 0x8e */ 1059 {{1, 3}, 1060 {7, 7}, 1061 {0, 0}, 1062 {0, 0} 1063 } 1064 }, 1065 {1, 1, 2, 0, /* 0x8f */ 1066 {{0, 3}, 1067 {7, 7}, 1068 {0, 0}, 1069 {0, 0} 1070 } 1071 }, 1072 {0, 1, 2, 0, /* 0x90 */ 1073 {{4, 4}, 1074 {7, 7}, 1075 {0, 0}, 1076 {0, 0} 1077 } 1078 }, 1079 {1, 1, 3, 0, /* 0x91 */ 1080 {{0, 0}, 1081 {4, 4}, 1082 {7, 7}, 
1083 {0, 0} 1084 } 1085 }, 1086 {0, 1, 3, 0, /* 0x92 */ 1087 {{1, 1}, 1088 {4, 4}, 1089 {7, 7}, 1090 {0, 0} 1091 } 1092 }, 1093 {1, 1, 3, 0, /* 0x93 */ 1094 {{0, 1}, 1095 {4, 4}, 1096 {7, 7}, 1097 {0, 0} 1098 } 1099 }, 1100 {0, 1, 3, 0, /* 0x94 */ 1101 {{2, 2}, 1102 {4, 4}, 1103 {7, 7}, 1104 {0, 0} 1105 } 1106 }, 1107 {1, 1, 4, 0, /* 0x95 */ 1108 {{0, 0}, 1109 {2, 2}, 1110 {4, 4}, 1111 {7, 7} 1112 } 1113 }, 1114 {0, 1, 3, 0, /* 0x96 */ 1115 {{1, 2}, 1116 {4, 4}, 1117 {7, 7}, 1118 {0, 0} 1119 } 1120 }, 1121 {1, 1, 3, 0, /* 0x97 */ 1122 {{0, 2}, 1123 {4, 4}, 1124 {7, 7}, 1125 {0, 0} 1126 } 1127 }, 1128 {0, 1, 2, 0, /* 0x98 */ 1129 {{3, 4}, 1130 {7, 7}, 1131 {0, 0}, 1132 {0, 0} 1133 } 1134 }, 1135 {1, 1, 3, 0, /* 0x99 */ 1136 {{0, 0}, 1137 {3, 4}, 1138 {7, 7}, 1139 {0, 0} 1140 } 1141 }, 1142 {0, 1, 3, 0, /* 0x9a */ 1143 {{1, 1}, 1144 {3, 4}, 1145 {7, 7}, 1146 {0, 0} 1147 } 1148 }, 1149 {1, 1, 3, 0, /* 0x9b */ 1150 {{0, 1}, 1151 {3, 4}, 1152 {7, 7}, 1153 {0, 0} 1154 } 1155 }, 1156 {0, 1, 2, 0, /* 0x9c */ 1157 {{2, 4}, 1158 {7, 7}, 1159 {0, 0}, 1160 {0, 0} 1161 } 1162 }, 1163 {1, 1, 3, 0, /* 0x9d */ 1164 {{0, 0}, 1165 {2, 4}, 1166 {7, 7}, 1167 {0, 0} 1168 } 1169 }, 1170 {0, 1, 2, 0, /* 0x9e */ 1171 {{1, 4}, 1172 {7, 7}, 1173 {0, 0}, 1174 {0, 0} 1175 } 1176 }, 1177 {1, 1, 2, 0, /* 0x9f */ 1178 {{0, 4}, 1179 {7, 7}, 1180 {0, 0}, 1181 {0, 0} 1182 } 1183 }, 1184 {0, 1, 2, 0, /* 0xa0 */ 1185 {{5, 5}, 1186 {7, 7}, 1187 {0, 0}, 1188 {0, 0} 1189 } 1190 }, 1191 {1, 1, 3, 0, /* 0xa1 */ 1192 {{0, 0}, 1193 {5, 5}, 1194 {7, 7}, 1195 {0, 0} 1196 } 1197 }, 1198 {0, 1, 3, 0, /* 0xa2 */ 1199 {{1, 1}, 1200 {5, 5}, 1201 {7, 7}, 1202 {0, 0} 1203 } 1204 }, 1205 {1, 1, 3, 0, /* 0xa3 */ 1206 {{0, 1}, 1207 {5, 5}, 1208 {7, 7}, 1209 {0, 0} 1210 } 1211 }, 1212 {0, 1, 3, 0, /* 0xa4 */ 1213 {{2, 2}, 1214 {5, 5}, 1215 {7, 7}, 1216 {0, 0} 1217 } 1218 }, 1219 {1, 1, 4, 0, /* 0xa5 */ 1220 {{0, 0}, 1221 {2, 2}, 1222 {5, 5}, 1223 {7, 7} 1224 } 1225 }, 1226 {0, 1, 3, 0, /* 0xa6 */ 1227 {{1, 2}, 1228 {5, 
5}, 1229 {7, 7}, 1230 {0, 0} 1231 } 1232 }, 1233 {1, 1, 3, 0, /* 0xa7 */ 1234 {{0, 2}, 1235 {5, 5}, 1236 {7, 7}, 1237 {0, 0} 1238 } 1239 }, 1240 {0, 1, 3, 0, /* 0xa8 */ 1241 {{3, 3}, 1242 {5, 5}, 1243 {7, 7}, 1244 {0, 0} 1245 } 1246 }, 1247 {1, 1, 4, 0, /* 0xa9 */ 1248 {{0, 0}, 1249 {3, 3}, 1250 {5, 5}, 1251 {7, 7} 1252 } 1253 }, 1254 {0, 1, 4, 0, /* 0xaa */ 1255 {{1, 1}, 1256 {3, 3}, 1257 {5, 5}, 1258 {7, 7} 1259 } 1260 }, 1261 {1, 1, 4, 0, /* 0xab */ 1262 {{0, 1}, 1263 {3, 3}, 1264 {5, 5}, 1265 {7, 7} 1266 } 1267 }, 1268 {0, 1, 3, 0, /* 0xac */ 1269 {{2, 3}, 1270 {5, 5}, 1271 {7, 7}, 1272 {0, 0} 1273 } 1274 }, 1275 {1, 1, 4, 0, /* 0xad */ 1276 {{0, 0}, 1277 {2, 3}, 1278 {5, 5}, 1279 {7, 7} 1280 } 1281 }, 1282 {0, 1, 3, 0, /* 0xae */ 1283 {{1, 3}, 1284 {5, 5}, 1285 {7, 7}, 1286 {0, 0} 1287 } 1288 }, 1289 {1, 1, 3, 0, /* 0xaf */ 1290 {{0, 3}, 1291 {5, 5}, 1292 {7, 7}, 1293 {0, 0} 1294 } 1295 }, 1296 {0, 1, 2, 0, /* 0xb0 */ 1297 {{4, 5}, 1298 {7, 7}, 1299 {0, 0}, 1300 {0, 0} 1301 } 1302 }, 1303 {1, 1, 3, 0, /* 0xb1 */ 1304 {{0, 0}, 1305 {4, 5}, 1306 {7, 7}, 1307 {0, 0} 1308 } 1309 }, 1310 {0, 1, 3, 0, /* 0xb2 */ 1311 {{1, 1}, 1312 {4, 5}, 1313 {7, 7}, 1314 {0, 0} 1315 } 1316 }, 1317 {1, 1, 3, 0, /* 0xb3 */ 1318 {{0, 1}, 1319 {4, 5}, 1320 {7, 7}, 1321 {0, 0} 1322 } 1323 }, 1324 {0, 1, 3, 0, /* 0xb4 */ 1325 {{2, 2}, 1326 {4, 5}, 1327 {7, 7}, 1328 {0, 0} 1329 } 1330 }, 1331 {1, 1, 4, 0, /* 0xb5 */ 1332 {{0, 0}, 1333 {2, 2}, 1334 {4, 5}, 1335 {7, 7} 1336 } 1337 }, 1338 {0, 1, 3, 0, /* 0xb6 */ 1339 {{1, 2}, 1340 {4, 5}, 1341 {7, 7}, 1342 {0, 0} 1343 } 1344 }, 1345 {1, 1, 3, 0, /* 0xb7 */ 1346 {{0, 2}, 1347 {4, 5}, 1348 {7, 7}, 1349 {0, 0} 1350 } 1351 }, 1352 {0, 1, 2, 0, /* 0xb8 */ 1353 {{3, 5}, 1354 {7, 7}, 1355 {0, 0}, 1356 {0, 0} 1357 } 1358 }, 1359 {1, 1, 3, 0, /* 0xb9 */ 1360 {{0, 0}, 1361 {3, 5}, 1362 {7, 7}, 1363 {0, 0} 1364 } 1365 }, 1366 {0, 1, 3, 0, /* 0xba */ 1367 {{1, 1}, 1368 {3, 5}, 1369 {7, 7}, 1370 {0, 0} 1371 } 1372 }, 1373 {1, 1, 3, 0, /* 0xbb */ 1374 
{{0, 1}, 1375 {3, 5}, 1376 {7, 7}, 1377 {0, 0} 1378 } 1379 }, 1380 {0, 1, 2, 0, /* 0xbc */ 1381 {{2, 5}, 1382 {7, 7}, 1383 {0, 0}, 1384 {0, 0} 1385 } 1386 }, 1387 {1, 1, 3, 0, /* 0xbd */ 1388 {{0, 0}, 1389 {2, 5}, 1390 {7, 7}, 1391 {0, 0} 1392 } 1393 }, 1394 {0, 1, 2, 0, /* 0xbe */ 1395 {{1, 5}, 1396 {7, 7}, 1397 {0, 0}, 1398 {0, 0} 1399 } 1400 }, 1401 {1, 1, 2, 0, /* 0xbf */ 1402 {{0, 5}, 1403 {7, 7}, 1404 {0, 0}, 1405 {0, 0} 1406 } 1407 }, 1408 {0, 1, 1, 0, /* 0xc0 */ 1409 {{6, 7}, 1410 {0, 0}, 1411 {0, 0}, 1412 {0, 0} 1413 } 1414 }, 1415 {1, 1, 2, 0, /* 0xc1 */ 1416 {{0, 0}, 1417 {6, 7}, 1418 {0, 0}, 1419 {0, 0} 1420 } 1421 }, 1422 {0, 1, 2, 0, /* 0xc2 */ 1423 {{1, 1}, 1424 {6, 7}, 1425 {0, 0}, 1426 {0, 0} 1427 } 1428 }, 1429 {1, 1, 2, 0, /* 0xc3 */ 1430 {{0, 1}, 1431 {6, 7}, 1432 {0, 0}, 1433 {0, 0} 1434 } 1435 }, 1436 {0, 1, 2, 0, /* 0xc4 */ 1437 {{2, 2}, 1438 {6, 7}, 1439 {0, 0}, 1440 {0, 0} 1441 } 1442 }, 1443 {1, 1, 3, 0, /* 0xc5 */ 1444 {{0, 0}, 1445 {2, 2}, 1446 {6, 7}, 1447 {0, 0} 1448 } 1449 }, 1450 {0, 1, 2, 0, /* 0xc6 */ 1451 {{1, 2}, 1452 {6, 7}, 1453 {0, 0}, 1454 {0, 0} 1455 } 1456 }, 1457 {1, 1, 2, 0, /* 0xc7 */ 1458 {{0, 2}, 1459 {6, 7}, 1460 {0, 0}, 1461 {0, 0} 1462 } 1463 }, 1464 {0, 1, 2, 0, /* 0xc8 */ 1465 {{3, 3}, 1466 {6, 7}, 1467 {0, 0}, 1468 {0, 0} 1469 } 1470 }, 1471 {1, 1, 3, 0, /* 0xc9 */ 1472 {{0, 0}, 1473 {3, 3}, 1474 {6, 7}, 1475 {0, 0} 1476 } 1477 }, 1478 {0, 1, 3, 0, /* 0xca */ 1479 {{1, 1}, 1480 {3, 3}, 1481 {6, 7}, 1482 {0, 0} 1483 } 1484 }, 1485 {1, 1, 3, 0, /* 0xcb */ 1486 {{0, 1}, 1487 {3, 3}, 1488 {6, 7}, 1489 {0, 0} 1490 } 1491 }, 1492 {0, 1, 2, 0, /* 0xcc */ 1493 {{2, 3}, 1494 {6, 7}, 1495 {0, 0}, 1496 {0, 0} 1497 } 1498 }, 1499 {1, 1, 3, 0, /* 0xcd */ 1500 {{0, 0}, 1501 {2, 3}, 1502 {6, 7}, 1503 {0, 0} 1504 } 1505 }, 1506 {0, 1, 2, 0, /* 0xce */ 1507 {{1, 3}, 1508 {6, 7}, 1509 {0, 0}, 1510 {0, 0} 1511 } 1512 }, 1513 {1, 1, 2, 0, /* 0xcf */ 1514 {{0, 3}, 1515 {6, 7}, 1516 {0, 0}, 1517 {0, 0} 1518 } 1519 }, 1520 {0, 1, 2, 0, 
/* 0xd0 */ 1521 {{4, 4}, 1522 {6, 7}, 1523 {0, 0}, 1524 {0, 0} 1525 } 1526 }, 1527 {1, 1, 3, 0, /* 0xd1 */ 1528 {{0, 0}, 1529 {4, 4}, 1530 {6, 7}, 1531 {0, 0} 1532 } 1533 }, 1534 {0, 1, 3, 0, /* 0xd2 */ 1535 {{1, 1}, 1536 {4, 4}, 1537 {6, 7}, 1538 {0, 0} 1539 } 1540 }, 1541 {1, 1, 3, 0, /* 0xd3 */ 1542 {{0, 1}, 1543 {4, 4}, 1544 {6, 7}, 1545 {0, 0} 1546 } 1547 }, 1548 {0, 1, 3, 0, /* 0xd4 */ 1549 {{2, 2}, 1550 {4, 4}, 1551 {6, 7}, 1552 {0, 0} 1553 } 1554 }, 1555 {1, 1, 4, 0, /* 0xd5 */ 1556 {{0, 0}, 1557 {2, 2}, 1558 {4, 4}, 1559 {6, 7} 1560 } 1561 }, 1562 {0, 1, 3, 0, /* 0xd6 */ 1563 {{1, 2}, 1564 {4, 4}, 1565 {6, 7}, 1566 {0, 0} 1567 } 1568 }, 1569 {1, 1, 3, 0, /* 0xd7 */ 1570 {{0, 2}, 1571 {4, 4}, 1572 {6, 7}, 1573 {0, 0} 1574 } 1575 }, 1576 {0, 1, 2, 0, /* 0xd8 */ 1577 {{3, 4}, 1578 {6, 7}, 1579 {0, 0}, 1580 {0, 0} 1581 } 1582 }, 1583 {1, 1, 3, 0, /* 0xd9 */ 1584 {{0, 0}, 1585 {3, 4}, 1586 {6, 7}, 1587 {0, 0} 1588 } 1589 }, 1590 {0, 1, 3, 0, /* 0xda */ 1591 {{1, 1}, 1592 {3, 4}, 1593 {6, 7}, 1594 {0, 0} 1595 } 1596 }, 1597 {1, 1, 3, 0, /* 0xdb */ 1598 {{0, 1}, 1599 {3, 4}, 1600 {6, 7}, 1601 {0, 0} 1602 } 1603 }, 1604 {0, 1, 2, 0, /* 0xdc */ 1605 {{2, 4}, 1606 {6, 7}, 1607 {0, 0}, 1608 {0, 0} 1609 } 1610 }, 1611 {1, 1, 3, 0, /* 0xdd */ 1612 {{0, 0}, 1613 {2, 4}, 1614 {6, 7}, 1615 {0, 0} 1616 } 1617 }, 1618 {0, 1, 2, 0, /* 0xde */ 1619 {{1, 4}, 1620 {6, 7}, 1621 {0, 0}, 1622 {0, 0} 1623 } 1624 }, 1625 {1, 1, 2, 0, /* 0xdf */ 1626 {{0, 4}, 1627 {6, 7}, 1628 {0, 0}, 1629 {0, 0} 1630 } 1631 }, 1632 {0, 1, 1, 0, /* 0xe0 */ 1633 {{5, 7}, 1634 {0, 0}, 1635 {0, 0}, 1636 {0, 0} 1637 } 1638 }, 1639 {1, 1, 2, 0, /* 0xe1 */ 1640 {{0, 0}, 1641 {5, 7}, 1642 {0, 0}, 1643 {0, 0} 1644 } 1645 }, 1646 {0, 1, 2, 0, /* 0xe2 */ 1647 {{1, 1}, 1648 {5, 7}, 1649 {0, 0}, 1650 {0, 0} 1651 } 1652 }, 1653 {1, 1, 2, 0, /* 0xe3 */ 1654 {{0, 1}, 1655 {5, 7}, 1656 {0, 0}, 1657 {0, 0} 1658 } 1659 }, 1660 {0, 1, 2, 0, /* 0xe4 */ 1661 {{2, 2}, 1662 {5, 7}, 1663 {0, 0}, 1664 {0, 0} 1665 } 1666 }, 
1667 {1, 1, 3, 0, /* 0xe5 */ 1668 {{0, 0}, 1669 {2, 2}, 1670 {5, 7}, 1671 {0, 0} 1672 } 1673 }, 1674 {0, 1, 2, 0, /* 0xe6 */ 1675 {{1, 2}, 1676 {5, 7}, 1677 {0, 0}, 1678 {0, 0} 1679 } 1680 }, 1681 {1, 1, 2, 0, /* 0xe7 */ 1682 {{0, 2}, 1683 {5, 7}, 1684 {0, 0}, 1685 {0, 0} 1686 } 1687 }, 1688 {0, 1, 2, 0, /* 0xe8 */ 1689 {{3, 3}, 1690 {5, 7}, 1691 {0, 0}, 1692 {0, 0} 1693 } 1694 }, 1695 {1, 1, 3, 0, /* 0xe9 */ 1696 {{0, 0}, 1697 {3, 3}, 1698 {5, 7}, 1699 {0, 0} 1700 } 1701 }, 1702 {0, 1, 3, 0, /* 0xea */ 1703 {{1, 1}, 1704 {3, 3}, 1705 {5, 7}, 1706 {0, 0} 1707 } 1708 }, 1709 {1, 1, 3, 0, /* 0xeb */ 1710 {{0, 1}, 1711 {3, 3}, 1712 {5, 7}, 1713 {0, 0} 1714 } 1715 }, 1716 {0, 1, 2, 0, /* 0xec */ 1717 {{2, 3}, 1718 {5, 7}, 1719 {0, 0}, 1720 {0, 0} 1721 } 1722 }, 1723 {1, 1, 3, 0, /* 0xed */ 1724 {{0, 0}, 1725 {2, 3}, 1726 {5, 7}, 1727 {0, 0} 1728 } 1729 }, 1730 {0, 1, 2, 0, /* 0xee */ 1731 {{1, 3}, 1732 {5, 7}, 1733 {0, 0}, 1734 {0, 0} 1735 } 1736 }, 1737 {1, 1, 2, 0, /* 0xef */ 1738 {{0, 3}, 1739 {5, 7}, 1740 {0, 0}, 1741 {0, 0} 1742 } 1743 }, 1744 {0, 1, 1, 0, /* 0xf0 */ 1745 {{4, 7}, 1746 {0, 0}, 1747 {0, 0}, 1748 {0, 0} 1749 } 1750 }, 1751 {1, 1, 2, 0, /* 0xf1 */ 1752 {{0, 0}, 1753 {4, 7}, 1754 {0, 0}, 1755 {0, 0} 1756 } 1757 }, 1758 {0, 1, 2, 0, /* 0xf2 */ 1759 {{1, 1}, 1760 {4, 7}, 1761 {0, 0}, 1762 {0, 0} 1763 } 1764 }, 1765 {1, 1, 2, 0, /* 0xf3 */ 1766 {{0, 1}, 1767 {4, 7}, 1768 {0, 0}, 1769 {0, 0} 1770 } 1771 }, 1772 {0, 1, 2, 0, /* 0xf4 */ 1773 {{2, 2}, 1774 {4, 7}, 1775 {0, 0}, 1776 {0, 0} 1777 } 1778 }, 1779 {1, 1, 3, 0, /* 0xf5 */ 1780 {{0, 0}, 1781 {2, 2}, 1782 {4, 7}, 1783 {0, 0} 1784 } 1785 }, 1786 {0, 1, 2, 0, /* 0xf6 */ 1787 {{1, 2}, 1788 {4, 7}, 1789 {0, 0}, 1790 {0, 0} 1791 } 1792 }, 1793 {1, 1, 2, 0, /* 0xf7 */ 1794 {{0, 2}, 1795 {4, 7}, 1796 {0, 0}, 1797 {0, 0} 1798 } 1799 }, 1800 {0, 1, 1, 0, /* 0xf8 */ 1801 {{3, 7}, 1802 {0, 0}, 1803 {0, 0}, 1804 {0, 0} 1805 } 1806 }, 1807 {1, 1, 2, 0, /* 0xf9 */ 1808 {{0, 0}, 1809 {3, 7}, 1810 {0, 0}, 1811 {0, 
0} 1812 } 1813 }, 1814 {0, 1, 2, 0, /* 0xfa */ 1815 {{1, 1}, 1816 {3, 7}, 1817 {0, 0}, 1818 {0, 0} 1819 } 1820 }, 1821 {1, 1, 2, 0, /* 0xfb */ 1822 {{0, 1}, 1823 {3, 7}, 1824 {0, 0}, 1825 {0, 0} 1826 } 1827 }, 1828 {0, 1, 1, 0, /* 0xfc */ 1829 {{2, 7}, 1830 {0, 0}, 1831 {0, 0}, 1832 {0, 0} 1833 } 1834 }, 1835 {1, 1, 2, 0, /* 0xfd */ 1836 {{0, 0}, 1837 {2, 7}, 1838 {0, 0}, 1839 {0, 0} 1840 } 1841 }, 1842 {0, 1, 1, 0, /* 0xfe */ 1843 {{1, 7}, 1844 {0, 0}, 1845 {0, 0}, 1846 {0, 0} 1847 } 1848 }, 1849 {1, 1, 1, 0, /* 0xff */ 1850 {{0, 7}, 1851 {0, 0}, 1852 {0, 0}, 1853 {0, 0} 1854 } 1855 } 1856 }; 1857 1858 1859 int 1860 sctp_is_address_in_scope(struct sctp_ifa *ifa, 1861 int ipv4_addr_legal, 1862 int ipv6_addr_legal, 1863 int loopback_scope, 1864 int ipv4_local_scope, 1865 int local_scope, 1866 int site_scope, 1867 int do_update) 1868 { 1869 if ((loopback_scope == 0) && 1870 (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) { 1871 /* 1872 * skip loopback if not in scope * 1873 */ 1874 return (0); 1875 } 1876 if ((ifa->address.sa.sa_family == AF_INET) && ipv4_addr_legal) { 1877 struct sockaddr_in *sin; 1878 1879 sin = (struct sockaddr_in *)&ifa->address.sin; 1880 if (sin->sin_addr.s_addr == 0) { 1881 /* not in scope , unspecified */ 1882 return (0); 1883 } 1884 if ((ipv4_local_scope == 0) && 1885 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 1886 /* private address not in scope */ 1887 return (0); 1888 } 1889 } else if ((ifa->address.sa.sa_family == AF_INET6) && ipv6_addr_legal) { 1890 struct sockaddr_in6 *sin6; 1891 1892 /* 1893 * Must update the flags, bummer, which means any IFA locks 1894 * must now be applied HERE <-> 1895 */ 1896 if (do_update) { 1897 sctp_gather_internal_ifa_flags(ifa); 1898 } 1899 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) { 1900 return (0); 1901 } 1902 /* ok to use deprecated addresses? 
	 */
		sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
			/* skip unspecified addresses */
			return (0);
		}
		if ( /* (local_scope == 0) && */
		    (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
			/*
			 * Link-local is always rejected here; the
			 * local_scope check is commented out in the
			 * original, so the flag is currently ignored.
			 */
			return (0);
		}
		if ((site_scope == 0) &&
		    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
			/* site-local not in scope */
			return (0);
		}
	} else {
		/* unknown/disallowed address family */
		return (0);
	}
	/* address passed every scope filter */
	return (1);
}

/*
 * Append an SCTP address parameter (TLV) describing 'ifa' to the mbuf
 * chain rooted at 'm'.  The parameter is written into the trailing space
 * of 'm' when it fits; otherwise a fresh mbuf is allocated and linked to
 * the end of the chain.  Returns the mbuf the parameter was written into,
 * or 'm' unchanged when the address family is unknown or the allocation
 * fails (callers keep chaining from the returned mbuf).
 */
static struct mbuf *
sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa)
{
	struct sctp_paramhdr *parmh;
	struct mbuf *mret;
	int len;

	/* parameter length depends on the address family */
	if (ifa->address.sa.sa_family == AF_INET) {
		len = sizeof(struct sctp_ipv4addr_param);
	} else if (ifa->address.sa.sa_family == AF_INET6) {
		len = sizeof(struct sctp_ipv6addr_param);
	} else {
		/* unknown type */
		return (m);
	}
	if (M_TRAILINGSPACE(m) >= len) {
		/* easy side we just drop it on the end */
		parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
		mret = m;
	} else {
		/* Need more space: walk to the end of the chain and append */
		mret = m;
		while (SCTP_BUF_NEXT(mret) != NULL) {
			mret = SCTP_BUF_NEXT(mret);
		}
		SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
		if (SCTP_BUF_NEXT(mret) == NULL) {
			/* We are hosed, can't add more addresses */
			return (m);
		}
		mret = SCTP_BUF_NEXT(mret);
		parmh = mtod(mret, struct sctp_paramhdr *);
	}
	/* now add the parameter */
	if (ifa->address.sa.sa_family == AF_INET) {
		struct sctp_ipv4addr_param *ipv4p;
		struct sockaddr_in *sin;

		sin = (struct sockaddr_in *)&ifa->address.sin;
		ipv4p = (struct sctp_ipv4addr_param *)parmh;
		parmh->param_type = htons(SCTP_IPV4_ADDRESS);
		parmh->param_length = htons(len);
		ipv4p->addr = sin->sin_addr.s_addr;
		SCTP_BUF_LEN(mret) += len;
	} else if (ifa->address.sa.sa_family == AF_INET6) {
		struct sctp_ipv6addr_param *ipv6p;
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
		ipv6p = (struct sctp_ipv6addr_param *)parmh;
		parmh->param_type = htons(SCTP_IPV6_ADDRESS);
		parmh->param_length = htons(len);
		memcpy(ipv6p->addr, &sin6->sin6_addr,
		    sizeof(ipv6p->addr));
		/* clear embedded scope in the address */
		in6_clearscope((struct in6_addr *)ipv6p->addr);
		SCTP_BUF_LEN(mret) += len;
	} else {
		/* can't happen: family was validated above */
		return (m);
	}
	return (mret);
}

/*
 * Walk the endpoint's usable local addresses and append the in-scope ones
 * to the mbuf chain at 'm_at' (one address parameter each, via
 * sctp_add_addr_to_mbuf).  For a bound-all endpoint every interface in the
 * default VRF is scanned; otherwise the endpoint's explicit address list
 * is used.  'cnt_inits_to' seeds the address count so pre-existing
 * parameters count toward the listing limit.  Returns the (possibly
 * extended) tail mbuf of the chain.
 * NOTE(review): "i_ia" presumably means INIT/INIT-ACK — confirm against
 * callers.
 */
struct mbuf *
sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_scoping *scope,
    struct mbuf *m_at, int cnt_inits_to)
{
	struct sctp_vrf *vrf = NULL;
	int cnt, limit_out = 0, total_count;
	uint32_t vrf_id;

	vrf_id = SCTP_DEFAULT_VRFID;
	SCTP_IPI_ADDR_LOCK();
	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
		/* no VRF: nothing we can list */
		SCTP_IPI_ADDR_UNLOCK();
		return (m_at);
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		struct sctp_ifa *sctp_ifap;
		struct sctp_ifn *sctp_ifnp;

		cnt = cnt_inits_to;
		if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
			/*
			 * Too many interface addresses to even count;
			 * force limited listing mode and skip the census.
			 */
			limit_out = 1;
			cnt = SCTP_ADDRESS_LIMIT;
			goto skip_count;
		}
		/* First pass: count the addresses that pass the scope test */
		LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
			if ((scope->loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
				/*
				 * Skip loopback devices if loopback_scope
				 * not set
				 */
				continue;
			}
			LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
				if (sctp_is_address_in_scope(sctp_ifap,
				    scope->ipv4_addr_legal,
				    scope->ipv6_addr_legal,
				    scope->loopback_scope,
				    scope->ipv4_local_scope,
				    scope->local_scope,
				    scope->site_scope, 1) == 0) {
					continue;
				}
				cnt++;
				if (cnt > SCTP_ADDRESS_LIMIT) {
					break;
				}
			}
			if (cnt > SCTP_ADDRESS_LIMIT) {
				break;
			}
		}
skip_count:
		if (cnt > 1) {
			/* Second pass: actually emit the parameters */
			total_count = 0;
			LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
				/* per-interface emitted-address counter */
				cnt = 0;
				if ((scope->loopback_scope == 0) &&
				    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
					/*
					 * Skip loopback devices if
					 * loopback_scope not set
					 */
					continue;
				}
				LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
					if (sctp_is_address_in_scope(sctp_ifap,
					    scope->ipv4_addr_legal,
					    scope->ipv6_addr_legal,
					    scope->loopback_scope,
					    scope->ipv4_local_scope,
					    scope->local_scope,
					    scope->site_scope, 0) == 0) {
						continue;
					}
					m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap);
					if (limit_out) {
						cnt++;
						total_count++;
						if (cnt >= 2) {
							/*
							 * two from each
							 * address
							 */
							break;
						}
						if (total_count > SCTP_ADDRESS_LIMIT) {
							/* No more addresses */
							break;
						}
					}
				}
			}
		}
	} else {
		/* Endpoint bound to specific addresses: use its own list */
		struct sctp_laddr *laddr;

		cnt = cnt_inits_to;
		/* First, how many ? */
		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
			if (laddr->ifa == NULL) {
				continue;
			}
			if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
				/*
				 * Address being deleted by the system, dont
				 * list.
				 */
				continue;
			if (laddr->action == SCTP_DEL_IP_ADDRESS) {
				/*
				 * Address being deleted on this ep don't
				 * list.
				 */
				continue;
			}
			if (sctp_is_address_in_scope(laddr->ifa,
			    scope->ipv4_addr_legal,
			    scope->ipv6_addr_legal,
			    scope->loopback_scope,
			    scope->ipv4_local_scope,
			    scope->local_scope,
			    scope->site_scope, 1) == 0) {
				continue;
			}
			cnt++;
		}
		if (cnt > SCTP_ADDRESS_LIMIT) {
			limit_out = 1;
		}
		/*
		 * To get through a NAT we only list addresses if we have
		 * more than one. That way if you just bind a single address
		 * we let the source of the init dictate our address.
		 */
		if (cnt > 1) {
			/* Second pass: emit the parameters */
			LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
				cnt = 0;
				if (laddr->ifa == NULL) {
					continue;
				}
				if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
					continue;

				if (sctp_is_address_in_scope(laddr->ifa,
				    scope->ipv4_addr_legal,
				    scope->ipv6_addr_legal,
				    scope->loopback_scope,
				    scope->ipv4_local_scope,
				    scope->local_scope,
				    scope->site_scope, 0) == 0) {
					continue;
				}
				m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa);
				cnt++;
				if (cnt >= SCTP_ADDRESS_LIMIT) {
					break;
				}
			}
		}
	}
	SCTP_IPI_ADDR_UNLOCK();
	return (m_at);
}

/*
 * Decide whether 'ifa' is a "preferred" source for a destination whose
 * scope is described by dest_is_loop/dest_is_priv and whose family is
 * 'fam' (see the scope table in the comment below).  Returns the ifa when
 * acceptable, NULL when the family mismatches or the scope is too narrow.
 * (Spelling "prefered" is historical; the identifier cannot change.)
 */
static struct sctp_ifa *
sctp_is_ifa_addr_prefered(struct sctp_ifa *ifa,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
	uint8_t dest_is_global = 0;

	/*
	 * is_scope -> dest_is_priv is true if destination is a private
	 * address
	 */
	/* dest_is_loop is true if destination is a loopback addresses */

	/*
	 * Here we determine if its a prefered address. A prefered address
	 * means it is the same scope or higher scope then the destination.
2169 * L = loopback, P = private, G = global 2170 * ----------------------------------------- src | dest | result 2171 * ---------------------------------------- L | L | yes 2172 * ----------------------------------------- P | L | 2173 * yes-v4 no-v6 ----------------------------------------- G | 2174 * L | yes-v4 no-v6 ----------------------------------------- L 2175 * | P | no ----------------------------------------- P | 2176 * P | yes ----------------------------------------- G | 2177 * P | no ----------------------------------------- L | G 2178 * | no ----------------------------------------- P | G | 2179 * no ----------------------------------------- G | G | 2180 * yes ----------------------------------------- 2181 */ 2182 2183 if (ifa->address.sa.sa_family != fam) { 2184 /* forget mis-matched family */ 2185 return (NULL); 2186 } 2187 if ((dest_is_priv == 0) && (dest_is_loop == 0)) { 2188 dest_is_global = 1; 2189 } 2190 #ifdef SCTP_DEBUG 2191 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) { 2192 printf("Is destination prefered:"); 2193 sctp_print_address(&ifa->address.sa); 2194 } 2195 #endif 2196 2197 /* Ok the address may be ok */ 2198 if (fam == AF_INET6) { 2199 /* ok to use deprecated addresses? 
*/ 2200 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) { 2201 #ifdef SCTP_DEBUG 2202 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) { 2203 printf("NO:1\n"); 2204 } 2205 #endif 2206 return (NULL); 2207 } 2208 if (ifa->src_is_priv) { 2209 if (dest_is_loop) { 2210 #ifdef SCTP_DEBUG 2211 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) { 2212 printf("NO:2\n"); 2213 } 2214 #endif 2215 return (NULL); 2216 } 2217 } 2218 if (ifa->src_is_glob) { 2219 2220 if (dest_is_loop) { 2221 #ifdef SCTP_DEBUG 2222 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) { 2223 printf("NO:3\n"); 2224 } 2225 #endif 2226 return (NULL); 2227 } 2228 } 2229 } 2230 /* 2231 * Now that we know what is what, implement or table this could in 2232 * theory be done slicker (it used to be), but this is 2233 * straightforward and easier to validate :-) 2234 */ 2235 #ifdef SCTP_DEBUG 2236 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) { 2237 printf("src_loop:%d src_priv:%d src_glob:%d\n", 2238 ifa->src_is_loop, ifa->src_is_priv, 2239 ifa->src_is_glob); 2240 printf("dest_loop:%d dest_priv:%d dest_glob:%d\n", 2241 dest_is_loop, dest_is_priv, 2242 dest_is_global); 2243 } 2244 #endif 2245 2246 if ((ifa->src_is_loop) && (dest_is_priv)) { 2247 #ifdef SCTP_DEBUG 2248 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) { 2249 printf("NO:4\n"); 2250 } 2251 #endif 2252 return (NULL); 2253 } 2254 if ((ifa->src_is_glob) && (dest_is_priv)) { 2255 #ifdef SCTP_DEBUG 2256 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) { 2257 printf("NO:5\n"); 2258 } 2259 #endif 2260 return (NULL); 2261 } 2262 if ((ifa->src_is_loop) && (dest_is_global)) { 2263 #ifdef SCTP_DEBUG 2264 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) { 2265 printf("NO:6\n"); 2266 } 2267 #endif 2268 return (NULL); 2269 } 2270 if ((ifa->src_is_priv) && (dest_is_global)) { 2271 #ifdef SCTP_DEBUG 2272 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) { 2273 printf("NO:7\n"); 2274 } 2275 #endif 2276 return (NULL); 2277 } 2278 #ifdef SCTP_DEBUG 2279 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) { 2280 printf("YES\n"); 2281 } 2282 #endif 
2283 /* its a prefered address */ 2284 return (ifa); 2285 } 2286 2287 static struct sctp_ifa * 2288 sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa, 2289 uint8_t dest_is_loop, 2290 uint8_t dest_is_priv, 2291 sa_family_t fam) 2292 { 2293 uint8_t dest_is_global = 0; 2294 2295 2296 /* 2297 * Here we determine if its a acceptable address. A acceptable 2298 * address means it is the same scope or higher scope but we can 2299 * allow for NAT which means its ok to have a global dest and a 2300 * private src. 2301 * 2302 * L = loopback, P = private, G = global 2303 * ----------------------------------------- src | dest | result 2304 * ----------------------------------------- L | L | yes 2305 * ----------------------------------------- P | L | 2306 * yes-v4 no-v6 ----------------------------------------- G | 2307 * L | yes ----------------------------------------- L | 2308 * P | no ----------------------------------------- P | P 2309 * | yes ----------------------------------------- G | P 2310 * | yes - May not work ----------------------------------------- 2311 * L | G | no ----------------------------------------- P 2312 * | G | yes - May not work 2313 * ----------------------------------------- G | G | yes 2314 * ----------------------------------------- 2315 */ 2316 2317 if (ifa->address.sa.sa_family != fam) { 2318 /* forget non matching family */ 2319 return (NULL); 2320 } 2321 /* Ok the address may be ok */ 2322 if ((dest_is_loop == 0) && (dest_is_priv == 0)) { 2323 dest_is_global = 1; 2324 } 2325 if (fam == AF_INET6) { 2326 /* ok to use deprecated addresses? 
*/ 2327 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) { 2328 return (NULL); 2329 } 2330 if (ifa->src_is_priv) { 2331 /* Special case, linklocal to loop */ 2332 if (dest_is_loop) 2333 return (NULL); 2334 } 2335 } 2336 /* 2337 * Now that we know what is what, implement or table this could in 2338 * theory be done slicker (it used to be), but this is 2339 * straightforward and easier to validate :-) 2340 */ 2341 2342 if ((ifa->src_is_loop == 0) && (dest_is_priv)) { 2343 return (NULL); 2344 } 2345 if ((ifa->src_is_loop == 0) && (dest_is_global)) { 2346 return (NULL); 2347 } 2348 /* its an acceptable address */ 2349 return (ifa); 2350 } 2351 2352 int 2353 sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa) 2354 { 2355 struct sctp_laddr *laddr; 2356 2357 if (stcb == NULL) { 2358 /* There are no restrictions, no TCB :-) */ 2359 return (0); 2360 } 2361 LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) { 2362 if (laddr->ifa == NULL) { 2363 #ifdef SCTP_DEBUG 2364 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) { 2365 printf("Help I have fallen and I can't get up!\n"); 2366 } 2367 #endif 2368 continue; 2369 } 2370 if (laddr->ifa == ifa) { 2371 /* Yes it is on the list */ 2372 return (1); 2373 } 2374 } 2375 return (0); 2376 } 2377 2378 2379 int 2380 sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa) 2381 { 2382 struct sctp_laddr *laddr; 2383 2384 if (ifa == NULL) 2385 return (0); 2386 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 2387 if (laddr->ifa == NULL) { 2388 #ifdef SCTP_DEBUG 2389 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) { 2390 printf("Help I have fallen and I can't get up!\n"); 2391 } 2392 #endif 2393 continue; 2394 } 2395 if ((laddr->ifa == ifa) && laddr->action == 0) 2396 /* same pointer */ 2397 return (1); 2398 } 2399 return (0); 2400 } 2401 2402 2403 2404 static struct sctp_ifa * 2405 sctp_choose_boundspecific_inp(struct sctp_inpcb *inp, 2406 struct route *ro, 2407 uint32_t vrf_id, 2408 int 
non_asoc_addr_ok, 2409 uint8_t dest_is_priv, 2410 uint8_t dest_is_loop, 2411 sa_family_t fam) 2412 { 2413 struct sctp_laddr *laddr, *starting_point; 2414 void *ifn; 2415 int resettotop = 0; 2416 struct sctp_ifn *sctp_ifn; 2417 struct sctp_ifa *sctp_ifa, *pass; 2418 struct sctp_vrf *vrf; 2419 uint32_t ifn_index; 2420 2421 vrf = sctp_find_vrf(vrf_id); 2422 if (vrf == NULL) 2423 return (NULL); 2424 2425 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro); 2426 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro); 2427 sctp_ifn = sctp_find_ifn(vrf, ifn, ifn_index); 2428 /* 2429 * first question, is the ifn we will emit on in our list, if so, we 2430 * want such an address. Note that we first looked for a prefered 2431 * address. 2432 */ 2433 if (sctp_ifn) { 2434 /* is a prefered one on the interface we route out? */ 2435 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 2436 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0)) 2437 continue; 2438 pass = sctp_is_ifa_addr_prefered(sctp_ifa, dest_is_loop, dest_is_priv, fam); 2439 if (pass == NULL) 2440 continue; 2441 if (sctp_is_addr_in_ep(inp, pass)) { 2442 atomic_add_int(&pass->refcount, 1); 2443 return (pass); 2444 } 2445 } 2446 } 2447 /* 2448 * ok, now we now need to find one on the list of the addresses. We 2449 * can't get one on the emitting interface so lets find first a 2450 * prefered one. If not that a acceptable one otherwise... we return 2451 * NULL. 
2452 */ 2453 starting_point = inp->next_addr_touse; 2454 once_again: 2455 if (inp->next_addr_touse == NULL) { 2456 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list); 2457 resettotop = 1; 2458 } 2459 for (laddr = inp->next_addr_touse; laddr; laddr = LIST_NEXT(laddr, sctp_nxt_addr)) { 2460 if (laddr->ifa == NULL) { 2461 /* address has been removed */ 2462 continue; 2463 } 2464 pass = sctp_is_ifa_addr_prefered(laddr->ifa, dest_is_loop, dest_is_priv, fam); 2465 if (pass == NULL) 2466 continue; 2467 atomic_add_int(&pass->refcount, 1); 2468 return (pass); 2469 } 2470 if (resettotop == 0) { 2471 inp->next_addr_touse = NULL; 2472 goto once_again; 2473 } 2474 inp->next_addr_touse = starting_point; 2475 resettotop = 0; 2476 once_again_too: 2477 if (inp->next_addr_touse == NULL) { 2478 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list); 2479 resettotop = 1; 2480 } 2481 /* ok, what about an acceptable address in the inp */ 2482 for (laddr = inp->next_addr_touse; laddr; laddr = LIST_NEXT(laddr, sctp_nxt_addr)) { 2483 if (laddr->ifa == NULL) { 2484 /* address has been removed */ 2485 continue; 2486 } 2487 pass = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop, dest_is_priv, fam); 2488 if (pass == NULL) 2489 continue; 2490 atomic_add_int(&pass->refcount, 1); 2491 return (pass); 2492 } 2493 if (resettotop == 0) { 2494 inp->next_addr_touse = NULL; 2495 goto once_again_too; 2496 } 2497 /* 2498 * no address bound can be a source for the destination we are in 2499 * trouble 2500 */ 2501 return (NULL); 2502 } 2503 2504 2505 2506 static struct sctp_ifa * 2507 sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp, 2508 struct sctp_tcb *stcb, 2509 struct sctp_nets *net, 2510 struct route *ro, 2511 uint32_t vrf_id, 2512 uint8_t dest_is_priv, 2513 uint8_t dest_is_loop, 2514 int non_asoc_addr_ok, 2515 sa_family_t fam) 2516 { 2517 struct sctp_laddr *laddr, *starting_point; 2518 void *ifn; 2519 struct sctp_ifn *sctp_ifn; 2520 struct sctp_ifa *sctp_ifa, *pass; 2521 uint8_t 
start_at_beginning = 0; 2522 struct sctp_vrf *vrf; 2523 uint32_t ifn_index; 2524 2525 /* 2526 * first question, is the ifn we will emit on in our list, if so, we 2527 * want that one. 2528 */ 2529 vrf = sctp_find_vrf(vrf_id); 2530 if (vrf == NULL) 2531 return (NULL); 2532 2533 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro); 2534 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro); 2535 sctp_ifn = sctp_find_ifn(vrf, ifn, ifn_index); 2536 2537 /* 2538 * first question, is the ifn we will emit on in our list, if so, we 2539 * want that one.. First we look for a prefered. Second we go for an 2540 * acceptable. 2541 */ 2542 if (sctp_ifn) { 2543 /* first try for an prefered address on the ep */ 2544 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 2545 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0)) 2546 continue; 2547 if (sctp_is_addr_in_ep(inp, sctp_ifa)) { 2548 pass = sctp_is_ifa_addr_prefered(sctp_ifa, dest_is_loop, dest_is_priv, fam); 2549 if (pass == NULL) 2550 continue; 2551 if ((non_asoc_addr_ok == 0) && 2552 (sctp_is_addr_restricted(stcb, pass))) { 2553 /* on the no-no list */ 2554 continue; 2555 } 2556 atomic_add_int(&pass->refcount, 1); 2557 return (pass); 2558 } 2559 } 2560 /* next try for an acceptable address on the ep */ 2561 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 2562 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0)) 2563 continue; 2564 if (sctp_is_addr_in_ep(inp, sctp_ifa)) { 2565 pass = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam); 2566 if (pass == NULL) 2567 continue; 2568 if ((non_asoc_addr_ok == 0) && 2569 (sctp_is_addr_restricted(stcb, pass))) { 2570 /* on the no-no list */ 2571 continue; 2572 } 2573 atomic_add_int(&pass->refcount, 1); 2574 return (pass); 2575 } 2576 } 2577 2578 } 2579 /* 2580 * if we can't find one like that then we must look at all addresses 2581 * bound to pick one at first prefereable then secondly acceptable. 
2582 */ 2583 starting_point = stcb->asoc.last_used_address; 2584 sctp_from_the_top: 2585 if (stcb->asoc.last_used_address == NULL) { 2586 start_at_beginning = 1; 2587 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list); 2588 } 2589 /* search beginning with the last used address */ 2590 for (laddr = stcb->asoc.last_used_address; laddr; 2591 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) { 2592 if (laddr->ifa == NULL) { 2593 /* address has been removed */ 2594 continue; 2595 } 2596 pass = sctp_is_ifa_addr_prefered(laddr->ifa, dest_is_loop, dest_is_priv, fam); 2597 if (pass == NULL) 2598 continue; 2599 if ((non_asoc_addr_ok == 0) && 2600 (sctp_is_addr_restricted(stcb, pass))) { 2601 /* on the no-no list */ 2602 continue; 2603 } 2604 stcb->asoc.last_used_address = laddr; 2605 atomic_add_int(&pass->refcount, 1); 2606 return (pass); 2607 2608 } 2609 if (start_at_beginning == 0) { 2610 stcb->asoc.last_used_address = NULL; 2611 goto sctp_from_the_top; 2612 } 2613 /* now try for any higher scope than the destination */ 2614 stcb->asoc.last_used_address = starting_point; 2615 start_at_beginning = 0; 2616 sctp_from_the_top2: 2617 if (stcb->asoc.last_used_address == NULL) { 2618 start_at_beginning = 1; 2619 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list); 2620 } 2621 /* search beginning with the last used address */ 2622 for (laddr = stcb->asoc.last_used_address; laddr; 2623 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) { 2624 if (laddr->ifa == NULL) { 2625 /* address has been removed */ 2626 continue; 2627 } 2628 pass = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop, dest_is_priv, fam); 2629 if (pass == NULL) 2630 continue; 2631 if ((non_asoc_addr_ok == 0) && 2632 (sctp_is_addr_restricted(stcb, pass))) { 2633 /* on the no-no list */ 2634 continue; 2635 } 2636 stcb->asoc.last_used_address = laddr; 2637 atomic_add_int(&pass->refcount, 1); 2638 return (pass); 2639 } 2640 if (start_at_beginning == 0) { 2641 stcb->asoc.last_used_address = NULL; 2642 goto 
sctp_from_the_top2; 2643 } 2644 return (NULL); 2645 } 2646 2647 static struct sctp_ifa * 2648 sctp_select_nth_prefered_addr_from_ifn_boundall(struct sctp_ifn *ifn, 2649 struct sctp_tcb *stcb, 2650 int non_asoc_addr_ok, 2651 uint8_t dest_is_loop, 2652 uint8_t dest_is_priv, 2653 int addr_wanted, 2654 sa_family_t fam) 2655 { 2656 struct sctp_ifa *ifa, *pass; 2657 int num_eligible_addr = 0; 2658 2659 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) { 2660 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0)) 2661 continue; 2662 pass = sctp_is_ifa_addr_prefered(ifa, dest_is_loop, dest_is_priv, fam); 2663 if (pass == NULL) 2664 continue; 2665 if (stcb) { 2666 if ((non_asoc_addr_ok == 0) && sctp_is_addr_restricted(stcb, pass)) { 2667 /* 2668 * It is restricted for some reason.. 2669 * probably not yet added. 2670 */ 2671 continue; 2672 } 2673 } 2674 if (num_eligible_addr >= addr_wanted) { 2675 return (pass); 2676 } 2677 num_eligible_addr++; 2678 } 2679 return (NULL); 2680 } 2681 2682 2683 static int 2684 sctp_count_num_prefered_boundall(struct sctp_ifn *ifn, 2685 struct sctp_tcb *stcb, 2686 int non_asoc_addr_ok, 2687 uint8_t dest_is_loop, 2688 uint8_t dest_is_priv, 2689 sa_family_t fam) 2690 { 2691 struct sctp_ifa *ifa, *pass; 2692 int num_eligible_addr = 0; 2693 2694 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) { 2695 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0)) { 2696 continue; 2697 } 2698 pass = sctp_is_ifa_addr_prefered(ifa, dest_is_loop, dest_is_priv, fam); 2699 if (pass == NULL) { 2700 continue; 2701 } 2702 if (stcb) { 2703 if ((non_asoc_addr_ok == 0) && sctp_is_addr_restricted(stcb, pass)) { 2704 /* 2705 * It is restricted for some reason.. 2706 * probably not yet added. 
2707 */ 2708 continue; 2709 } 2710 } 2711 num_eligible_addr++; 2712 } 2713 return (num_eligible_addr); 2714 } 2715 2716 static struct sctp_ifa * 2717 sctp_choose_boundall(struct sctp_inpcb *inp, 2718 struct sctp_tcb *stcb, 2719 struct sctp_nets *net, 2720 struct route *ro, 2721 uint32_t vrf_id, 2722 uint8_t dest_is_priv, 2723 uint8_t dest_is_loop, 2724 int non_asoc_addr_ok, 2725 sa_family_t fam) 2726 { 2727 int cur_addr_num = 0, num_prefered = 0; 2728 void *ifn; 2729 struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn; 2730 struct sctp_ifa *sctp_ifa, *pass; 2731 uint32_t ifn_index; 2732 struct sctp_vrf *vrf; 2733 2734 /* 2735 * For boundall we can use any address in the association. If 2736 * non_asoc_addr_ok is set we can use any address (at least in 2737 * theory). So we look for prefered addresses first. If we find one, 2738 * we use it. Otherwise we next try to get an address on the 2739 * interface, which we should be able to do (unless non_asoc_addr_ok 2740 * is false and we are routed out that way). In these cases where we 2741 * can't use the address of the interface we go through all the 2742 * ifn's looking for an address we can use and fill that in. Punting 2743 * means we send back address 0, which will probably cause problems 2744 * actually since then IP will fill in the address of the route ifn, 2745 * which means we probably already rejected it.. i.e. here comes an 2746 * abort :-<. 2747 */ 2748 vrf = sctp_find_vrf(vrf_id); 2749 if (vrf == NULL) 2750 return (NULL); 2751 2752 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro); 2753 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro); 2754 2755 emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(vrf, ifn, ifn_index); 2756 if (sctp_ifn == NULL) { 2757 /* ?? We don't have this guy ?? 
*/ 2758 goto bound_all_plan_b; 2759 } 2760 if (net) { 2761 cur_addr_num = net->indx_of_eligible_next_to_use; 2762 } 2763 num_prefered = sctp_count_num_prefered_boundall(sctp_ifn, 2764 stcb, 2765 non_asoc_addr_ok, 2766 dest_is_loop, 2767 dest_is_priv, fam); 2768 #ifdef SCTP_DEBUG 2769 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) { 2770 printf("Found %d prefered source addresses\n", num_prefered); 2771 } 2772 #endif 2773 if (num_prefered == 0) { 2774 /* 2775 * no eligible addresses, we must use some other interface 2776 * address if we can find one. 2777 */ 2778 goto bound_all_plan_b; 2779 } 2780 /* 2781 * Ok we have num_eligible_addr set with how many we can use, this 2782 * may vary from call to call due to addresses being deprecated 2783 * etc.. 2784 */ 2785 if (cur_addr_num >= num_prefered) { 2786 cur_addr_num = 0; 2787 } 2788 /* 2789 * select the nth address from the list (where cur_addr_num is the 2790 * nth) and 0 is the first one, 1 is the second one etc... 2791 */ 2792 #ifdef SCTP_DEBUG 2793 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) { 2794 printf("cur_addr_num:%d\n", cur_addr_num); 2795 } 2796 #endif 2797 sctp_ifa = sctp_select_nth_prefered_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop, 2798 dest_is_priv, cur_addr_num, fam); 2799 2800 /* if sctp_ifa is NULL something changed??, fall to plan b. */ 2801 if (sctp_ifa) { 2802 atomic_add_int(&sctp_ifa->refcount, 1); 2803 if (net) { 2804 /* save off where the next one we will want */ 2805 net->indx_of_eligible_next_to_use = cur_addr_num + 1; 2806 } 2807 return (sctp_ifa); 2808 } 2809 /* 2810 * plan_b: Look at all interfaces and find a prefered address. If no 2811 * prefered fall through to plan_c. 
2812 */ 2813 bound_all_plan_b: 2814 #ifdef SCTP_DEBUG 2815 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) { 2816 printf("Plan B?\n"); 2817 } 2818 #endif 2819 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 2820 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 2821 /* wrong base scope */ 2822 continue; 2823 } 2824 if ((sctp_ifn == looked_at) && looked_at) 2825 /* already looked at this guy */ 2826 continue; 2827 num_prefered = sctp_count_num_prefered_boundall(sctp_ifn, stcb, non_asoc_addr_ok, 2828 dest_is_loop, dest_is_priv, fam); 2829 #ifdef SCTP_DEBUG 2830 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) { 2831 printf("Found ifn:%p %d prefered source addresses\n", ifn, num_prefered); 2832 } 2833 #endif 2834 if (num_prefered == 0) { 2835 /* 2836 * None on this interface. 2837 */ 2838 continue; 2839 } 2840 #ifdef SCTP_DEBUG 2841 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) { 2842 printf("num prefered:%d on interface:%p cur_addr_num:%d\n", 2843 num_prefered, 2844 sctp_ifn, 2845 cur_addr_num); 2846 } 2847 #endif 2848 2849 /* 2850 * Ok we have num_eligible_addr set with how many we can 2851 * use, this may vary from call to call due to addresses 2852 * being deprecated etc.. 
2853 */ 2854 if (cur_addr_num >= num_prefered) { 2855 cur_addr_num = 0; 2856 } 2857 pass = sctp_select_nth_prefered_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop, 2858 dest_is_priv, cur_addr_num, fam); 2859 if (pass == NULL) 2860 continue; 2861 if (net) { 2862 net->indx_of_eligible_next_to_use = cur_addr_num + 1; 2863 #ifdef SCTP_DEBUG 2864 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) { 2865 printf("we selected %d\n", cur_addr_num); 2866 printf("Source:"); 2867 sctp_print_address(&pass->address.sa); 2868 printf("Dest:"); 2869 sctp_print_address(&net->ro._l_addr.sa); 2870 } 2871 #endif 2872 } 2873 atomic_add_int(&pass->refcount, 1); 2874 return (pass); 2875 2876 } 2877 2878 /* 2879 * plan_c: See if we have an acceptable address on the emit 2880 * interface 2881 */ 2882 #ifdef SCTP_DEBUG 2883 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) { 2884 printf("Plan C no prefered for Dest, acceptable for?\n"); 2885 } 2886 #endif 2887 2888 LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) { 2889 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0)) 2890 continue; 2891 pass = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam); 2892 if (pass == NULL) 2893 continue; 2894 if (stcb) { 2895 if ((non_asoc_addr_ok == 0) && sctp_is_addr_restricted(stcb, pass)) { 2896 /* 2897 * It is restricted for some reason.. 2898 * probably not yet added. 2899 */ 2900 continue; 2901 } 2902 } 2903 atomic_add_int(&pass->refcount, 1); 2904 return (pass); 2905 } 2906 2907 /* 2908 * plan_d: We are in trouble. No prefered address on the emit 2909 * interface. And not even a perfered address on all interfaces. Go 2910 * out and see if we can find an acceptable address somewhere 2911 * amongst all interfaces. 
2912 */ 2913 #ifdef SCTP_DEBUG 2914 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) { 2915 printf("Plan C fails plan D?\n"); 2916 } 2917 #endif 2918 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 2919 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 2920 /* wrong base scope */ 2921 continue; 2922 } 2923 if ((sctp_ifn == looked_at) && looked_at) 2924 /* already looked at this guy */ 2925 continue; 2926 2927 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 2928 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0)) 2929 continue; 2930 pass = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam); 2931 if (pass == NULL) 2932 continue; 2933 if (stcb) { 2934 if ((non_asoc_addr_ok == 0) && sctp_is_addr_restricted(stcb, pass)) { 2935 /* 2936 * It is restricted for some 2937 * reason.. probably not yet added. 2938 */ 2939 continue; 2940 } 2941 } 2942 atomic_add_int(&pass->refcount, 1); 2943 return (pass); 2944 } 2945 } 2946 /* 2947 * Ok we can find NO address to source from that is not on our 2948 * negative list and non_asoc_address is NOT ok, or its on our 2949 * negative list. We cant source to it :-( 2950 */ 2951 return (NULL); 2952 } 2953 2954 2955 2956 /* tcb may be NULL */ 2957 struct sctp_ifa * 2958 sctp_source_address_selection(struct sctp_inpcb *inp, 2959 struct sctp_tcb *stcb, 2960 struct route *ro, 2961 struct sctp_nets *net, 2962 int non_asoc_addr_ok, uint32_t vrf_id) 2963 { 2964 2965 struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst; 2966 struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst; 2967 struct sctp_ifa *answer; 2968 uint8_t dest_is_priv, dest_is_loop; 2969 int did_rtalloc = 0; 2970 sa_family_t fam; 2971 2972 /* 2973 * Rules: - Find the route if needed, cache if I can. - Look at 2974 * interface address in route, Is it in the bound list. If so we 2975 * have the best source. - If not we must rotate amongst the 2976 * addresses. 
2977 * 2978 * Cavets and issues 2979 * 2980 * Do we need to pay attention to scope. We can have a private address 2981 * or a global address we are sourcing or sending to. So if we draw 2982 * it out zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz 2983 * For V4 ------------------------------------------ source * 2984 * dest * result ----------------------------------------- <a> 2985 * Private * Global * NAT 2986 * ----------------------------------------- <b> Private * 2987 * Private * No problem ----------------------------------------- 2988 * <c> Global * Private * Huh, How will this work? 2989 * ----------------------------------------- <d> Global * 2990 * Global * No Problem ------------------------------------------ 2991 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz For V6 2992 * ------------------------------------------ source * dest * 2993 * result ----------------------------------------- <a> Linklocal * 2994 * Global * ----------------------------------------- <b> 2995 * Linklocal * Linklocal * No problem 2996 * ----------------------------------------- <c> Global * 2997 * Linklocal * Huh, How will this work? 2998 * ----------------------------------------- <d> Global * 2999 * Global * No Problem ------------------------------------------ 3000 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz 3001 * 3002 * And then we add to that what happens if there are multiple addresses 3003 * assigned to an interface. Remember the ifa on a ifn is a linked 3004 * list of addresses. So one interface can have more than one IP 3005 * address. What happens if we have both a private and a global 3006 * address? Do we then use context of destination to sort out which 3007 * one is best? And what about NAT's sending P->G may get you a NAT 3008 * translation, or should you select the G thats on the interface in 3009 * preference. 3010 * 3011 * Decisions: 3012 * 3013 * - count the number of addresses on the interface. 
- if its one, no 3014 * problem except case <c>. For <a> we will assume a NAT out there. 3015 * - if there are more than one, then we need to worry about scope P 3016 * or G. We should prefer G -> G and P -> P if possible. Then as a 3017 * secondary fall back to mixed types G->P being a last ditch one. - 3018 * The above all works for bound all, but bound specific we need to 3019 * use the same concept but instead only consider the bound 3020 * addresses. If the bound set is NOT assigned to the interface then 3021 * we must use rotation amongst the bound addresses.. 3022 * 3023 */ 3024 if (ro->ro_rt == NULL) { 3025 /* 3026 * Need a route to cache. 3027 * 3028 */ 3029 rtalloc_ign(ro, 0UL); 3030 did_rtalloc = 1; 3031 } 3032 if (ro->ro_rt == NULL) { 3033 return (NULL); 3034 } 3035 fam = to->sin_family; 3036 dest_is_priv = dest_is_loop = 0; 3037 /* Setup our scopes for the destination */ 3038 if (fam == AF_INET) { 3039 /* Scope based on outbound address */ 3040 if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) { 3041 dest_is_priv = 1; 3042 } else if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) { 3043 dest_is_loop = 1; 3044 if (net != NULL) { 3045 /* mark it as local */ 3046 net->addr_is_local = 1; 3047 } 3048 } 3049 } else if (fam == AF_INET6) { 3050 /* Scope based on outbound address */ 3051 if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) { 3052 /* 3053 * If the route goes to the loopback address OR the 3054 * address is a loopback address, we are loopback 3055 * scope. But we don't use dest_is_priv (link local 3056 * addresses). 
3057 */ 3058 dest_is_loop = 1; 3059 if (net != NULL) { 3060 /* mark it as local */ 3061 net->addr_is_local = 1; 3062 } 3063 } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) { 3064 dest_is_priv = 1; 3065 } 3066 } 3067 #ifdef SCTP_DEBUG 3068 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) { 3069 printf("Select source for:"); 3070 sctp_print_address((struct sockaddr *)to); 3071 } 3072 #endif 3073 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 3074 /* 3075 * When bound to all if the address list is set it is a 3076 * negative list. Addresses being added by asconf. 3077 */ 3078 answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id, 3079 dest_is_priv, 3080 dest_is_loop, 3081 non_asoc_addr_ok, 3082 fam); 3083 return (answer); 3084 } 3085 /* 3086 * Three possiblities here: 3087 * 3088 * a) stcb is NULL, which means we operate only from the list of 3089 * addresses (ifa's) bound to the endpoint and we care not about the 3090 * list. b) stcb is NOT-NULL, which means we have an assoc structure 3091 * and auto-asconf is on. This means that the list of addresses is a 3092 * NOT list. We use the list from the inp, but any listed address in 3093 * our list is NOT yet added. However if the non_asoc_addr_ok is set 3094 * we CAN use an address NOT available (i.e. being added). Its a 3095 * negative list. c) stcb is NOT-NULL, which means we have an assoc 3096 * structure and auto-asconf is off. This means that the list of 3097 * addresses is the ONLY addresses I can use.. its positive. 3098 * 3099 * Note we collapse b & c into the same function just like in the v6 3100 * address selection. 
3101 */ 3102 if (stcb) { 3103 answer = sctp_choose_boundspecific_stcb(inp, stcb, net, ro, vrf_id, 3104 dest_is_priv, dest_is_loop, non_asoc_addr_ok, fam); 3105 3106 } else { 3107 answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id, non_asoc_addr_ok, dest_is_priv, dest_is_loop, fam); 3108 3109 } 3110 return (answer); 3111 } 3112 3113 static int 3114 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, int cpsize) 3115 { 3116 struct cmsghdr cmh; 3117 int tlen, at; 3118 3119 tlen = SCTP_BUF_LEN(control); 3120 at = 0; 3121 /* 3122 * Independent of how many mbufs, find the c_type inside the control 3123 * structure and copy out the data. 3124 */ 3125 while (at < tlen) { 3126 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) { 3127 /* not enough room for one more we are done. */ 3128 return (0); 3129 } 3130 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh); 3131 if ((cmh.cmsg_len + at) > tlen) { 3132 /* 3133 * this is real messed up since there is not enough 3134 * data here to cover the cmsg header. We are done. 
3135 */ 3136 return (0); 3137 } 3138 if ((cmh.cmsg_level == IPPROTO_SCTP) && 3139 (c_type == cmh.cmsg_type)) { 3140 /* found the one we want, copy it out */ 3141 at += CMSG_ALIGN(sizeof(struct cmsghdr)); 3142 if ((int)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < cpsize) { 3143 /* 3144 * space of cmsg_len after header not big 3145 * enough 3146 */ 3147 return (0); 3148 } 3149 m_copydata(control, at, cpsize, data); 3150 return (1); 3151 } else { 3152 at += CMSG_ALIGN(cmh.cmsg_len); 3153 if (cmh.cmsg_len == 0) { 3154 break; 3155 } 3156 } 3157 } 3158 /* not found */ 3159 return (0); 3160 } 3161 3162 3163 struct mbuf * 3164 sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header, 3165 int how, int allonebuf, int type) 3166 { 3167 struct mbuf *m = NULL; 3168 int aloc_size; 3169 int index = 0; 3170 int mbuf_threshold; 3171 3172 if (want_header) { 3173 MGETHDR(m, how, type); 3174 } else { 3175 MGET(m, how, type); 3176 } 3177 if (m == NULL) { 3178 return (NULL); 3179 } 3180 if (allonebuf == 0) 3181 mbuf_threshold = sctp_mbuf_threshold_count; 3182 else 3183 mbuf_threshold = 1; 3184 3185 3186 if (space_needed > (((mbuf_threshold - 1) * MLEN) + MHLEN)) { 3187 try_again: 3188 index = 4; 3189 if (space_needed <= MCLBYTES) { 3190 aloc_size = MCLBYTES; 3191 } else if (space_needed <= MJUMPAGESIZE) { 3192 aloc_size = MJUMPAGESIZE; 3193 index = 5; 3194 } else if (space_needed <= MJUM9BYTES) { 3195 aloc_size = MJUM9BYTES; 3196 index = 6; 3197 } else { 3198 aloc_size = MJUM16BYTES; 3199 index = 7; 3200 } 3201 m_cljget(m, how, aloc_size); 3202 if (m == NULL) { 3203 return (NULL); 3204 } 3205 if (SCTP_BUF_IS_EXTENDED(m) == 0) { 3206 if ((aloc_size != MCLBYTES) && 3207 (allonebuf == 0)) { 3208 aloc_size -= 10; 3209 goto try_again; 3210 } 3211 sctp_m_freem(m); 3212 return (NULL); 3213 } 3214 } 3215 SCTP_BUF_LEN(m) = 0; 3216 SCTP_BUF_NEXT(m) = SCTP_BUF_NEXT_PKT(m) = NULL; 3217 #ifdef SCTP_MBUF_LOGGING 3218 if (SCTP_BUF_IS_EXTENDED(m)) { 3219 sctp_log_mb(m, 
		    SCTP_MBUF_IALLOC);
	}
#endif
	return (m);
}


/*
 * Build the STATE-COOKIE parameter for an INIT-ACK: a parameter header,
 * the state cookie *stc_in, copies of the received INIT (from init_offset)
 * and of the INIT-ACK being built (from initack_offset), terminated by an
 * HMAC signature computed over the chain with the endpoint's current
 * secret.  Returns the head of the new chain, or NULL on any allocation
 * failure (everything built so far is freed).
 */
static struct mbuf *
sctp_add_cookie(struct sctp_inpcb *inp, struct mbuf *init, int init_offset,
    struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in)
{
	struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
	struct sctp_state_cookie *stc;
	struct sctp_paramhdr *ph;
	uint8_t *signature;
	int sig_offset;
	uint16_t cookie_sz;	/* NOTE(review): 16-bit total; presumably the
				 * INIT+INIT-ACK copies always fit — confirm */

	mret = NULL;		/* dead store; reassigned just below */


	mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
	    sizeof(struct sctp_paramhdr)), 0, M_DONTWAIT, 1, MT_DATA);
	if (mret == NULL) {
		return (NULL);
	}
	copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_DONTWAIT);
	if (copy_init == NULL) {
		sctp_m_freem(mret);
		return (NULL);
	}
	copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
	    M_DONTWAIT);
	if (copy_initack == NULL) {
		sctp_m_freem(mret);
		sctp_m_freem(copy_init);
		return (NULL);
	}
	/* easy side we just drop it on the end */
	ph = mtod(mret, struct sctp_paramhdr *);
	SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
	    sizeof(struct sctp_paramhdr);
	stc = (struct sctp_state_cookie *)((caddr_t)ph +
	    sizeof(struct sctp_paramhdr));
	ph->param_type = htons(SCTP_STATE_COOKIE);
	ph->param_length = 0;	/* fill in at the end */
	/* Fill in the stc cookie data */
	*stc = *stc_in;

	/* tack the INIT and then the INIT-ACK onto the chain */
	cookie_sz = 0;
	m_at = mret;
	for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			SCTP_BUF_NEXT(m_at) = copy_init;
			break;
		}
	}

	for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			SCTP_BUF_NEXT(m_at) = copy_initack;
			break;
		}
	}

	/* walk to the end of the chain, accumulating the cookie size */
	for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			break;
		}
	}
	sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_DONTWAIT, 1, MT_DATA);
	if (sig == NULL) {
		/* no space, so free the entire chain */
		sctp_m_freem(mret);
		return (NULL);
	}
	SCTP_BUF_LEN(sig) = 0;
	/* m_at still points at the last mbuf of the chain from the loop above */
	SCTP_BUF_NEXT(m_at) = sig;
	sig_offset = 0;
	signature = (uint8_t *) (mtod(sig, caddr_t)+sig_offset);
	/* Time to sign the cookie */
	sctp_hmac_m(SCTP_HMAC,
	    (uint8_t *) inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
	    SCTP_SECRET_SIZE, mret, sizeof(struct sctp_paramhdr),
	    (uint8_t *) signature);
	SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
	cookie_sz += SCTP_SIGNATURE_SIZE;

	ph->param_length = htons(cookie_sz);
	return (mret);
}


/*
 * Pick the ECT codepoint (ECT0/ECT1) for an outgoing chunk.  When ECN
 * nonces are in use the bit is drawn from a per-association random pool
 * (hb_random_values), refreshed from sctp_select_initial_TSN() when
 * exhausted; otherwise ECT0 is always used.
 */
static __inline uint8_t
sctp_get_ect(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	uint8_t this_random;

	/* Huh?
	 */
	if (sctp_ecn_enable == 0)
		return (0);

	if (sctp_ecn_nonce == 0)
		/* no nonce, always return ECT0 */
		return (SCTP_ECT0_BIT);

	if (stcb->asoc.peer_supports_ecn_nonce == 0) {
		/* Peer does NOT support it, so we send a ECT0 only */
		return (SCTP_ECT0_BIT);
	}
	if (chk == NULL)
		return (SCTP_ECT0_BIT);

	/* random pool exhausted (all 4 bytes, 8 bits each consumed)? */
	if (((stcb->asoc.hb_random_idx == 3) &&
	    (stcb->asoc.hb_ect_randombit > 7)) ||
	    (stcb->asoc.hb_random_idx > 3)) {
		uint32_t rndval;

		/* refresh the 4-byte random pool and restart the cursor */
		rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
		memcpy(stcb->asoc.hb_random_values, &rndval,
		    sizeof(stcb->asoc.hb_random_values));
		this_random = stcb->asoc.hb_random_values[0];
		stcb->asoc.hb_random_idx = 0;
		stcb->asoc.hb_ect_randombit = 0;
	} else {
		/* move to the next byte when this one's bits are used up */
		if (stcb->asoc.hb_ect_randombit > 7) {
			stcb->asoc.hb_ect_randombit = 0;
			stcb->asoc.hb_random_idx++;
		}
		this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
	}
	if ((this_random >> stcb->asoc.hb_ect_randombit) & 0x01) {
		if (chk != NULL)
			/* ECN Nonce stuff */
			chk->rec.data.ect_nonce = SCTP_ECT1_BIT;
		stcb->asoc.hb_ect_randombit++;
		return (SCTP_ECT1_BIT);
	} else {
		stcb->asoc.hb_ect_randombit++;
		return (SCTP_ECT0_BIT);
	}
}

/*
 * Low-level packet emitter: prepend the IP (v4 or v6) header to an mbuf
 * chain that already carries an SCTP common header, fill in the AUTH HMAC
 * and checksum, select/cached the source address, and hand the packet to
 * ip_output()/ip6_output().  Returns 0 or an errno; on error the mbuf
 * chain has been freed.
 */
static int
sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,	/* may be NULL */
    struct sctp_nets *net,
    struct sockaddr *to,
    struct mbuf *m,
    uint32_t auth_offset,
    struct sctp_auth_chunk *auth,
    int nofragment_flag,
    int ecn_ok,
    struct sctp_tmit_chunk *chk,
    int out_of_asoc_ok)
/* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
{
	/*
	 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet
	 * header WITH a SCTPHDR but no IP header, endpoint inp and sa
	 * structure. - fill in the HMAC digest of any AUTH chunk in the
	 * packet - calculate SCTP checksum and fill in - prepend a IP
	 * address header - if boundall use INADDR_ANY - if boundspecific do
	 * source address selection - set fragmentation option for ipV4 - On
	 * return from IP output, check/adjust mtu size - of output
	 * interface and smallest_mtu size as well.
	 */
	/* Will need ifdefs around this */
	struct mbuf *o_pak;

	struct sctphdr *sctphdr;
	int packet_length;
	int o_flgs;
	uint32_t csum;
	int ret;
	unsigned int have_mtu;
	uint32_t vrf_id;
	struct route *ro;


	if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
		sctp_m_freem(m);
		return (EFAULT);
	}
	if (stcb == NULL) {
		vrf_id = SCTP_DEFAULT_VRFID;
	} else {
		vrf_id = stcb->asoc.vrf_id;
	}

	/* fill in the HMAC digest for any AUTH chunk in the packet */
	if ((auth != NULL) && (stcb != NULL)) {
		sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb);
	}
	/* Calculate the csum and fill in the length of the packet */
	sctphdr = mtod(m, struct sctphdr *);
	have_mtu = 0;
	/* checksum may be skipped for loopback-only associations */
	if (sctp_no_csum_on_loopback &&
	    (stcb) &&
	    (stcb->asoc.loopback_scope)) {
		sctphdr->checksum = 0;
		/*
		 * This can probably now be taken out since my audit shows
		 * no more bad pktlen's coming in. But we will wait a while
		 * yet.
		 */
		packet_length = sctp_calculate_len(m);
	} else {
		sctphdr->checksum = 0;
		csum = sctp_calculate_sum(m, &packet_length, 0);
		sctphdr->checksum = csum;
	}

	/* ---- IPv4 send path ---- */
	if (to->sa_family == AF_INET) {
		struct ip *ip = NULL;
		struct route iproute;
		uint8_t tos_value;

		o_pak = SCTP_GET_HEADER_FOR_OUTPUT(sizeof(struct ip));
		if (o_pak == NULL) {
			/* failed to prepend data, give up */
			sctp_m_freem(m);
			return (ENOMEM);
		}
		SCTP_BUF_LEN(SCTP_HEADER_TO_CHAIN(o_pak)) = sizeof(struct ip);
		packet_length += sizeof(struct ip);
		SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
		ip = mtod(SCTP_HEADER_TO_CHAIN(o_pak), struct ip *);
		ip->ip_v = IPVERSION;
		ip->ip_hl = (sizeof(struct ip) >> 2);
		if (net) {
			tos_value = net->tos_flowlabel & 0x000000ff;
		} else {
			tos_value = inp->ip_inp.inp.inp_ip_tos;
		}
		if (nofragment_flag) {
#if defined(WITH_CONVERT_IP_OFF) || defined(__FreeBSD__) || defined(__APPLE__)
			/* these stacks keep ip_off in host order until output */
			ip->ip_off = IP_DF;
#else
			ip->ip_off = htons(IP_DF);
#endif
		} else
			ip->ip_off = 0;


		/* FreeBSD has a function for ip_id's */
		ip->ip_id = ip_newid();

		ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
		ip->ip_len = SCTP_HEADER_LEN(o_pak);
		if (stcb) {
			if ((stcb->asoc.ecn_allowed) && ecn_ok) {
				/* Enable ECN */
				ip->ip_tos = ((u_char)(tos_value & 0xfc) | sctp_get_ect(stcb, chk));
			} else {
				/* No ECN */
				ip->ip_tos = (u_char)(tos_value & 0xfc);
			}
		} else {
			/* no association at all */
			ip->ip_tos = (tos_value & 0xfc);
		}
		ip->ip_p = IPPROTO_SCTP;
		ip->ip_sum = 0;
		if (net == NULL) {
			/* no cached route; use a temporary one on the stack */
			ro = &iproute;
			memset(&iproute, 0, sizeof(iproute));
			memcpy(&ro->ro_dst, to, to->sa_len);
		} else {
			ro = (struct route *)&net->ro;
		}
		/* Now the address selection part */
		ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;

		/* call the routine to select the src address */
		if (net) {
			if (net->src_addr_selected == 0) {
				/* Cache the source address */
				net->ro._s_addr = sctp_source_address_selection(inp, stcb,
				    ro, net, out_of_asoc_ok, vrf_id);
				if (net->ro._s_addr == NULL) {
					/* No route to host */
					goto no_route;
				}
				net->src_addr_selected = 1;
			}
			ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
		} else {
			struct sctp_ifa *_lsrc;

			_lsrc = sctp_source_address_selection(inp,
			    stcb, ro, net, out_of_asoc_ok, vrf_id);
			if (_lsrc == NULL) {
				goto no_route;
			}
			ip->ip_src = _lsrc->address.sin.sin_addr;
			sctp_free_ifa(_lsrc);
		}

		/*
		 * If source address selection fails and we find no route
		 * then the ip_output should fail as well with a
		 * NO_ROUTE_TO_HOST type error. We probably should catch
		 * that somewhere and abort the association right away
		 * (assuming this is an INIT being sent).
		 */
		if ((ro->ro_rt == NULL)) {
			/*
			 * src addr selection failed to find a route (or
			 * valid source addr), so we can't get there from
			 * here (yet)!
			 */
			/* the IPv6 arm below also jumps to this label */
	no_route:
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
				printf("low_level_output: dropped packet - no valid source addr\n");
				if (net) {
					printf("Destination was ");
					sctp_print_address(&net->ro._l_addr.sa);
				}
			}
#endif				/* SCTP_DEBUG */
			if (net) {
				/* mark the destination unreachable and notify the ULP */
				if (net->dest_state & SCTP_ADDR_CONFIRMED) {
					if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
						printf("no route takes interface %p down\n", net);
						sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
						    stcb,
						    SCTP_FAILED_THRESHOLD,
						    (void *)net);
						net->dest_state &= ~SCTP_ADDR_REACHABLE;
						net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
					}
				}
				if (stcb) {
					if (net == stcb->asoc.primary_destination) {
						/* need a new primary */
						struct sctp_nets *alt;

						alt = sctp_find_alternate_net(stcb, net, 0);
						if (alt != net) {
							if (sctp_set_primary_addr(stcb,
							    (struct sockaddr *)NULL,
							    alt) == 0) {
								net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
								if (net->ro._s_addr) {
									sctp_free_ifa(net->ro._s_addr);
									net->ro._s_addr = NULL;
								}
								net->src_addr_selected = 0;
							}
						}
					}
				}
			}
			sctp_m_freem(o_pak);
			return (EHOSTUNREACH);
		} else {
			have_mtu = ro->ro_rt->rt_ifp->if_mtu;
		}
		if (inp->sctp_socket) {
			o_flgs = (IP_RAWOUTPUT | (inp->sctp_socket->so_options & (SO_DONTROUTE | SO_BROADCAST)));
		} else {
			o_flgs = IP_RAWOUTPUT;
		}
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
			printf("Calling ipv4 output routine from low level src addr:%x\n",
			    (uint32_t) (ntohl(ip->ip_src.s_addr)));
			printf("Destination is %x\n", (uint32_t) (ntohl(ip->ip_dst.s_addr)));
			printf("RTP route is %p through\n", ro->ro_rt);
		}
#endif

		/* temporarily clamp the interface MTU to the path's MTU */
		if ((have_mtu) && (net) && (have_mtu > net->mtu)) {
			ro->ro_rt->rt_ifp->if_mtu = net->mtu;
		}
		/*
		 * NOTE(review): this snapshots the cached route into the
		 * on-stack iproute, but iproute is never read afterwards
		 * when ro != &iproute — purpose unclear, confirm.
		 */
		if (ro != &iproute) {
			memcpy(&iproute, ro,
			    sizeof(*ro));
		}
		ret = ip_output(o_pak, inp->ip_inp.inp.inp_options,
		    ro, o_flgs, inp->ip_inp.inp.inp_moptions
		    ,(struct inpcb *)NULL
		    );
		/* restore the interface MTU clamped above */
		if ((ro->ro_rt) && (have_mtu) && (net) && (have_mtu > net->mtu)) {
			ro->ro_rt->rt_ifp->if_mtu = have_mtu;
		}
		SCTP_STAT_INCR(sctps_sendpackets);
		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
		if (ret)
			SCTP_STAT_INCR(sctps_senderrors);
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
			printf("Ip output returns %d\n", ret);
		}
#endif
		if (net == NULL) {
			/* free tempy routes */
			if (ro->ro_rt) {
				RTFREE(ro->ro_rt);
				ro->ro_rt = NULL;
			}
		} else {
			/* PMTU check versus smallest asoc MTU goes here */
			if (ro->ro_rt != NULL) {
				/*
				 * NOTE(review): stcb is dereferenced here
				 * without a NULL check; presumably net !=
				 * NULL implies stcb != NULL for all callers
				 * — confirm.
				 */
				if (ro->ro_rt->rt_rmx.rmx_mtu &&
				    (stcb->asoc.smallest_mtu > ro->ro_rt->rt_rmx.rmx_mtu)) {
					sctp_mtu_size_reset(inp, &stcb->asoc,
					    ro->ro_rt->rt_rmx.rmx_mtu);
				}
			} else {
				/* route was freed */
				if (net->ro._s_addr &&
				    net->src_addr_selected) {
					sctp_free_ifa(net->ro._s_addr);
					net->ro._s_addr = NULL;
				}
				net->src_addr_selected = 0;
			}
		}
		return (ret);
	}
#ifdef INET6
	/* ---- IPv6 send path ---- */
	else if (to->sa_family == AF_INET6) {
		uint32_t flowlabel;
		struct ip6_hdr *ip6h;

		struct route_in6 ip6route;
		struct ifnet *ifp;
		u_char flowTop;
		uint16_t flowBottom;
		u_char tosBottom, tosTop;
		struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
		struct sockaddr_in6 lsa6_storage;
		int prev_scope = 0;
		int error;
		u_short prev_port = 0;

		if (net != NULL) {
			flowlabel = net->tos_flowlabel;
		} else {
			flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo;
		}
		o_pak = SCTP_GET_HEADER_FOR_OUTPUT(sizeof(struct ip6_hdr));
		if (o_pak == NULL) {
			/* failed to prepend data, give up */
			sctp_m_freem(m);
			return (ENOMEM);
		}

		SCTP_BUF_LEN(SCTP_HEADER_TO_CHAIN(o_pak)) = sizeof(struct ip6_hdr);
		packet_length += sizeof(struct ip6_hdr);
		SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
		ip6h = mtod(SCTP_HEADER_TO_CHAIN(o_pak), struct ip6_hdr *);
		/*
		 * We assume here that inp_flow is in host byte order within
		 * the TCB!
		 */
		flowBottom = flowlabel & 0x0000ffff;
		flowTop = ((flowlabel & 0x000f0000) >> 16);
		tosTop = (((flowlabel & 0xf0) >> 4) | IPV6_VERSION);
		/* protect *sin6 from overwrite */
		sin6 = (struct sockaddr_in6 *)to;
		tmp = *sin6;
		sin6 = &tmp;

		/* KAME hack: embed scopeid */
		/*
		 * NOTE(review): this error return does not free o_pak (the
		 * chain m was already attached above) — looks like an mbuf
		 * leak on scope-embedding failure; confirm.
		 */
		if (sa6_embedscope(sin6, ip6_use_defzone) != 0)
			return (EINVAL);
		if (net == NULL) {
			/* no cached route; use a temporary one on the stack */
			memset(&ip6route, 0, sizeof(ip6route));
			ro = (struct route *)&ip6route;
			memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
		} else {
			ro = (struct route *)&net->ro;
		}
		if (stcb != NULL) {
			if ((stcb->asoc.ecn_allowed) && ecn_ok) {
				/* Enable ECN */
				tosBottom = (((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) | sctp_get_ect(stcb, chk)) << 4);
			} else {
				/* No ECN */
				tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4);
			}
		} else {
			/* we could get no asoc if it is a O-O-T-B packet */
			tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4);
		}
		ip6h->ip6_flow = htonl(((tosTop << 24) | ((tosBottom | flowTop) << 16) | flowBottom));
		ip6h->ip6_nxt = IPPROTO_SCTP;
		ip6h->ip6_plen = (SCTP_HEADER_LEN(o_pak) - sizeof(struct ip6_hdr));
		ip6h->ip6_dst = sin6->sin6_addr;

		/*
		 * Add SRC address selection here: we can only reuse to a
		 * limited degree the kame src-addr-sel, since we can try
		 * their selection but it may not be bound.
		 */
		bzero(&lsa6_tmp, sizeof(lsa6_tmp));
		lsa6_tmp.sin6_family = AF_INET6;
		lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
		lsa6 = &lsa6_tmp;
		if (net) {
			if (net->src_addr_selected == 0) {
				/* Cache the source address */
				net->ro._s_addr = sctp_source_address_selection(inp,
				    stcb,
				    ro,
				    net,
				    out_of_asoc_ok,
				    vrf_id);
				if (net->ro._s_addr == NULL) {
#ifdef SCTP_DEBUG
					printf("V6:No route to host\n");
#endif
					goto no_route;
				}
				net->src_addr_selected = 1;
			}
			lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
		} else {
			struct sctp_ifa *_lsrc;

			_lsrc = sctp_source_address_selection(inp, stcb, ro, net, out_of_asoc_ok, vrf_id);
			if (_lsrc == NULL) {
				goto no_route;
			}
			lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
			sctp_free_ifa(_lsrc);
		}
		lsa6->sin6_port = inp->sctp_lport;

		if ((ro->ro_rt == NULL)) {
			/*
			 * src addr selection failed to find a route (or
			 * valid source addr), so we can't get there from
			 * here!
			 */
			goto no_route;
		}
		/*
		 * XXX: sa6 may not have a valid sin6_scope_id in the
		 * non-SCOPEDROUTING case.
		 */
		bzero(&lsa6_storage, sizeof(lsa6_storage));
		lsa6_storage.sin6_family = AF_INET6;
		lsa6_storage.sin6_len = sizeof(lsa6_storage);
		/*
		 * NOTE(review): sa6_recoverscope() is invoked on
		 * lsa6_storage before its sin6_addr is filled in below —
		 * the ordering looks suspicious; confirm intent.
		 */
		if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
			sctp_m_freem(o_pak);
			return (error);
		}
		/* XXX */
		lsa6_storage.sin6_addr = lsa6->sin6_addr;
		lsa6_storage.sin6_port = inp->sctp_lport;
		lsa6 = &lsa6_storage;
		ip6h->ip6_src = lsa6->sin6_addr;

		/*
		 * We set the hop limit now since there is a good chance
		 * that our ro pointer is now filled
		 */
		ip6h->ip6_hlim = in6_selecthlim((struct in6pcb *)&inp->ip_inp.inp,
		    (ro ?
		    (ro->ro_rt ?
		    (ro->ro_rt->rt_ifp) : (NULL)) :
		    (NULL)));
		o_flgs = 0;
		/* ro->ro_rt is known non-NULL here (checked above) */
		ifp = ro->ro_rt->rt_ifp;
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
			/* Copy to be sure something bad is not happening */
			sin6->sin6_addr = ip6h->ip6_dst;
			lsa6->sin6_addr = ip6h->ip6_src;

			printf("Calling ipv6 output routine from low level\n");
			printf("src: ");
			sctp_print_address((struct sockaddr *)lsa6);
			printf("dst: ");
			sctp_print_address((struct sockaddr *)sin6);
		}
#endif				/* SCTP_DEBUG */
		if (net) {
			sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
			/* preserve the port and scope for link local send */
			prev_scope = sin6->sin6_scope_id;
			prev_port = sin6->sin6_port;
		}
		ret = ip6_output(o_pak, ((struct in6pcb *)inp)->in6p_outputopts,
		    (struct route_in6 *)ro,
		    o_flgs,
		    ((struct in6pcb *)inp)->in6p_moptions,
		    &ifp
		    ,NULL
		    );
		if (net) {
			/* for link local this must be done */
			sin6->sin6_scope_id = prev_scope;
			sin6->sin6_port = prev_port;
		}
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
			printf("return from send is %d\n", ret);
		}
#endif				/* SCTP_DEBUG_OUTPUT */
		SCTP_STAT_INCR(sctps_sendpackets);
		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
		if (ret) {
			SCTP_STAT_INCR(sctps_senderrors);
		}
		if (net == NULL) {
			/* Now if we had a temp route free it */
			if (ro->ro_rt) {
				RTFREE(ro->ro_rt);
			}
		} else {
			/* PMTU check versus smallest asoc MTU goes here */
			if (ro->ro_rt == NULL) {
				/* Route was freed */

				if (net->ro._s_addr &&
				    net->src_addr_selected) {
					sctp_free_ifa(net->ro._s_addr);
					net->ro._s_addr = NULL;
				}
				net->src_addr_selected = 0;
			}
			if (ro->ro_rt != NULL) {
				if (ro->ro_rt->rt_rmx.rmx_mtu &&
				    (stcb->asoc.smallest_mtu > ro->ro_rt->rt_rmx.rmx_mtu)) {
					sctp_mtu_size_reset(inp,
					    &stcb->asoc,
					    ro->ro_rt->rt_rmx.rmx_mtu);
				}
			} else if (ifp) {
				/* no route metric; fall back to link MTU */
				if (ND_IFINFO(ifp)->linkmtu &&
				    (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
					sctp_mtu_size_reset(inp,
					    &stcb->asoc,
					    ND_IFINFO(ifp)->linkmtu);
				}
			}
		}
		return (ret);
	}
#endif
	else {
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
			printf("Unknown protocol (TSNH) type %d\n", ((struct sockaddr *)to)->sa_family);
		}
#endif
		sctp_m_freem(m);
		return (EFAULT);
	}
}


/*
 * Build and transmit an INIT chunk for the association "stcb" to its
 * primary destination: SCTP common header, INIT chunk with the mandatory
 * fields, then the optional parameters (supported address types,
 * adaptation layer, cookie preservative, ECN, PR-SCTP, supported chunk
 * extensions, ECN nonce, AUTH random/HMAC/chunk lists, and the local
 * address list), padded to a 4-byte boundary.  Also (re)starts the INIT
 * timer.
 */
void
sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
{
	struct mbuf *m, *m_at, *m_last;
	struct sctp_nets *net;
	struct sctp_init_msg *initm;
	struct sctp_supported_addr_param *sup_addr;
	struct sctp_ecn_supported_param *ecn;
	struct sctp_prsctp_supported_param *prsctp;
	struct sctp_ecn_nonce_supported_param *ecn_nonce;
	struct sctp_supported_chunk_types_param *pr_supported;
	int cnt_inits_to = 0;
	int padval, ret;
	int num_ext;
	int p_len;

	/* INIT's always go to the primary (and usually ONLY address) */
	m_last = NULL;
	net = stcb->asoc.primary_destination;
	if (net == NULL) {
		net = TAILQ_FIRST(&stcb->asoc.nets);
		if (net == NULL) {
			/* TSNH */
			return;
		}
		/* we confirm any address we send an INIT to */
		net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
		sctp_set_primary_addr(stcb, NULL, net);
	} else {
		/* we confirm any address we send an INIT to */
		net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
	}
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
		printf("Sending INIT\n");
	}
#endif
	if (((struct sockaddr *)&(net->ro._l_addr))->sa_family == AF_INET6) {
		/*
		 * special hook, if we are sending to link local it will not
		 * show up in our private address count.
		 */
		struct sockaddr_in6 *sin6l;

		sin6l = &net->ro._l_addr.sin6;
		if (IN6_IS_ADDR_LINKLOCAL(&sin6l->sin6_addr))
			cnt_inits_to = 1;
	}
	if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
		/* This case should not happen */
		return;
	}
	/* start the INIT timer */
	if (sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net)) {
		/* we are hosed since I can't start the INIT timer? */
		return;
	}
	m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA);
	if (m == NULL) {
		/* No memory, INIT timer will re-attempt. */
		return;
	}
	SCTP_BUF_LEN(m) = sizeof(struct sctp_init_msg);
	/* Now lets put the SCTP header in place */
	initm = mtod(m, struct sctp_init_msg *);
	initm->sh.src_port = inp->sctp_lport;
	initm->sh.dest_port = stcb->rport;
	initm->sh.v_tag = 0;	/* INIT always carries verification tag 0 */
	initm->sh.checksum = 0;	/* calculate later */
	/* now the chunk header */
	initm->msg.ch.chunk_type = SCTP_INITIATION;
	initm->msg.ch.chunk_flags = 0;
	/* fill in later from mbuf we build */
	initm->msg.ch.chunk_length = 0;
	/* place in my tag */
	initm->msg.init.initiate_tag = htonl(stcb->asoc.my_vtag);
	/* set up some of the credits. */
	initm->msg.init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(inp->sctp_socket),
	    SCTP_MINIMAL_RWND));

	initm->msg.init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
	initm->msg.init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
	initm->msg.init.initial_tsn = htonl(stcb->asoc.init_seq_number);
	/* now the address restriction */
	sup_addr = (struct sctp_supported_addr_param *)((caddr_t)initm +
	    sizeof(*initm));
	sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
	/* we support 2 types IPv6/IPv4 */
	sup_addr->ph.param_length = htons(sizeof(*sup_addr) +
	    sizeof(uint16_t));
	sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
	sup_addr->addr_type[1] = htons(SCTP_IPV6_ADDRESS);
	SCTP_BUF_LEN(m) += sizeof(*sup_addr) + sizeof(uint16_t);

	/* optional adaptation-layer indication parameter */
	if (inp->sctp_ep.adaptation_layer_indicator) {
		struct sctp_adaptation_layer_indication *ali;

		ali = (struct sctp_adaptation_layer_indication *)(
		    (caddr_t)sup_addr + sizeof(*sup_addr) + sizeof(uint16_t));
		ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
		ali->ph.param_length = htons(sizeof(*ali));
		/*
		 * NOTE(review): ntohl() on a host-order value where htonl()
		 * is the conventional direction — the result is identical
		 * on both byte orders, but confirm the intent.
		 */
		ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
		SCTP_BUF_LEN(m) += sizeof(*ali);
		ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali +
		    sizeof(*ali));
	} else {
		ecn = (struct sctp_ecn_supported_param *)((caddr_t)sup_addr +
		    sizeof(*sup_addr) + sizeof(uint16_t));
	}

	/* now any cookie time extensions */
	if (stcb->asoc.cookie_preserve_req) {
		struct sctp_cookie_perserve_param *cookie_preserve;

		cookie_preserve = (struct sctp_cookie_perserve_param *)(ecn);
		cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
		cookie_preserve->ph.param_length = htons(
		    sizeof(*cookie_preserve));
		cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
		SCTP_BUF_LEN(m) += sizeof(*cookie_preserve);
		ecn = (struct
parameters */ 4044 if (!sctp_auth_disable) { 4045 struct sctp_auth_random *random; 4046 struct sctp_auth_hmac_algo *hmacs; 4047 struct sctp_auth_chunk_list *chunks; 4048 4049 /* attach RANDOM parameter, if available */ 4050 if (stcb->asoc.authinfo.random != NULL) { 4051 random = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 4052 p_len = sizeof(*random) + stcb->asoc.authinfo.random_len; 4053 #ifdef SCTP_AUTH_DRAFT_04 4054 random->ph.param_type = htons(SCTP_RANDOM); 4055 random->ph.param_length = htons(p_len); 4056 bcopy(stcb->asoc.authinfo.random->key, 4057 random->random_data, 4058 stcb->asoc.authinfo.random_len); 4059 #else 4060 /* random key already contains the header */ 4061 bcopy(stcb->asoc.authinfo.random->key, random, p_len); 4062 #endif 4063 /* zero out any padding required */ 4064 bzero((caddr_t)random + p_len, SCTP_SIZE32(p_len) - p_len); 4065 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 4066 } 4067 /* add HMAC_ALGO parameter */ 4068 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 4069 p_len = sctp_serialize_hmaclist(stcb->asoc.local_hmacs, 4070 (uint8_t *) hmacs->hmac_ids); 4071 if (p_len > 0) { 4072 p_len += sizeof(*hmacs); 4073 hmacs->ph.param_type = htons(SCTP_HMAC_LIST); 4074 hmacs->ph.param_length = htons(p_len); 4075 /* zero out any padding required */ 4076 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len); 4077 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 4078 } 4079 /* add CHUNKS parameter */ 4080 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 4081 p_len = sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, 4082 chunks->chunk_types); 4083 if (p_len > 0) { 4084 p_len += sizeof(*chunks); 4085 chunks->ph.param_type = htons(SCTP_CHUNK_LIST); 4086 chunks->ph.param_length = htons(p_len); 4087 /* zero out any padding required */ 4088 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len); 4089 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 4090 } 4091 } 4092 m_at = m; 4093 /* now the 
addresses */ 4094 { 4095 struct sctp_scoping scp; 4096 4097 /* 4098 * To optimize this we could put the scoping stuff into a 4099 * structure and remove the individual uint8's from the 4100 * assoc structure. Then we could just pass in the address 4101 * within the stcb.. but for now this is a quick hack to get 4102 * the address stuff teased apart. 4103 */ 4104 scp.ipv4_addr_legal = stcb->asoc.ipv4_addr_legal; 4105 scp.ipv6_addr_legal = stcb->asoc.ipv6_addr_legal; 4106 scp.loopback_scope = stcb->asoc.loopback_scope; 4107 scp.ipv4_local_scope = stcb->asoc.ipv4_local_scope; 4108 scp.local_scope = stcb->asoc.local_scope; 4109 scp.site_scope = stcb->asoc.site_scope; 4110 4111 m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to); 4112 } 4113 4114 4115 /* calulate the size and update pkt header and chunk header */ 4116 p_len = 0; 4117 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 4118 if (SCTP_BUF_NEXT(m_at) == NULL) 4119 m_last = m_at; 4120 p_len += SCTP_BUF_LEN(m_at); 4121 } 4122 initm->msg.ch.chunk_length = htons((p_len - sizeof(struct sctphdr))); 4123 /* 4124 * We pass 0 here to NOT set IP_DF if its IPv4, we ignore the return 4125 * here since the timer will drive a retranmission. 4126 */ 4127 4128 /* I don't expect this to execute but we will be safe here */ 4129 padval = p_len % 4; 4130 if ((padval) && (m_last)) { 4131 /* 4132 * The compiler worries that m_last may not be set even 4133 * though I think it is impossible :-> however we add m_last 4134 * here just in case. 
4135 */ 4136 int ret; 4137 4138 ret = sctp_add_pad_tombuf(m_last, (4 - padval)); 4139 if (ret) { 4140 /* Houston we have a problem, no space */ 4141 sctp_m_freem(m); 4142 return; 4143 } 4144 p_len += padval; 4145 } 4146 ret = sctp_lowlevel_chunk_output(inp, stcb, net, 4147 (struct sockaddr *)&net->ro._l_addr, 4148 m, 0, NULL, 0, 0, NULL, 0); 4149 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 4150 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net); 4151 SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 4152 } 4153 4154 struct mbuf * 4155 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt, 4156 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp) 4157 { 4158 /* 4159 * Given a mbuf containing an INIT or INIT-ACK with the param_offset 4160 * being equal to the beginning of the params i.e. (iphlen + 4161 * sizeof(struct sctp_init_msg) parse through the parameters to the 4162 * end of the mbuf verifying that all parameters are known. 4163 * 4164 * For unknown parameters build and return a mbuf with 4165 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop 4166 * processing this chunk stop, and set *abort_processing to 1. 4167 * 4168 * By having param_offset be pre-set to where parameters begin it is 4169 * hoped that this routine may be reused in the future by new 4170 * features. 
4171 */ 4172 struct sctp_paramhdr *phdr, params; 4173 4174 struct mbuf *mat, *op_err; 4175 char tempbuf[SCTP_CHUNK_BUFFER_SIZE]; 4176 int at, limit, pad_needed; 4177 uint16_t ptype, plen; 4178 int err_at; 4179 4180 *abort_processing = 0; 4181 mat = in_initpkt; 4182 err_at = 0; 4183 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk); 4184 at = param_offset; 4185 op_err = NULL; 4186 4187 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params)); 4188 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) { 4189 ptype = ntohs(phdr->param_type); 4190 plen = ntohs(phdr->param_length); 4191 limit -= SCTP_SIZE32(plen); 4192 if (plen < sizeof(struct sctp_paramhdr)) { 4193 #ifdef SCTP_DEBUG 4194 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) { 4195 printf("sctp_output.c:Impossible length in parameter < %d\n", plen); 4196 } 4197 #endif 4198 *abort_processing = 1; 4199 break; 4200 } 4201 /* 4202 * All parameters for all chunks that we know/understand are 4203 * listed here. We process them other places and make 4204 * appropriate stop actions per the upper bits. However this 4205 * is the generic routine processor's can call to get back 4206 * an operr.. to either incorporate (init-ack) or send. 
4207 */ 4208 if ((ptype == SCTP_HEARTBEAT_INFO) || 4209 (ptype == SCTP_IPV4_ADDRESS) || 4210 (ptype == SCTP_IPV6_ADDRESS) || 4211 (ptype == SCTP_STATE_COOKIE) || 4212 (ptype == SCTP_UNRECOG_PARAM) || 4213 (ptype == SCTP_COOKIE_PRESERVE) || 4214 (ptype == SCTP_SUPPORTED_ADDRTYPE) || 4215 (ptype == SCTP_PRSCTP_SUPPORTED) || 4216 (ptype == SCTP_ADD_IP_ADDRESS) || 4217 (ptype == SCTP_DEL_IP_ADDRESS) || 4218 (ptype == SCTP_ECN_CAPABLE) || 4219 (ptype == SCTP_ULP_ADAPTATION) || 4220 (ptype == SCTP_ERROR_CAUSE_IND) || 4221 (ptype == SCTP_RANDOM) || 4222 (ptype == SCTP_CHUNK_LIST) || 4223 (ptype == SCTP_CHUNK_LIST) || 4224 (ptype == SCTP_SET_PRIM_ADDR) || 4225 (ptype == SCTP_SUCCESS_REPORT) || 4226 (ptype == SCTP_ULP_ADAPTATION) || 4227 (ptype == SCTP_SUPPORTED_CHUNK_EXT) || 4228 (ptype == SCTP_ECN_NONCE_SUPPORTED) 4229 ) { 4230 /* no skip it */ 4231 at += SCTP_SIZE32(plen); 4232 } else if (ptype == SCTP_HOSTNAME_ADDRESS) { 4233 /* We can NOT handle HOST NAME addresses!! */ 4234 int l_len; 4235 4236 #ifdef SCTP_DEBUG 4237 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) { 4238 printf("Can't handle hostname addresses.. 
abort processing\n"); 4239 } 4240 #endif 4241 *abort_processing = 1; 4242 if (op_err == NULL) { 4243 /* Ok need to try to get a mbuf */ 4244 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 4245 l_len += plen; 4246 l_len += sizeof(struct sctp_paramhdr); 4247 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA); 4248 if (op_err) { 4249 SCTP_BUF_LEN(op_err) = 0; 4250 /* 4251 * pre-reserve space for ip and sctp 4252 * header and chunk hdr 4253 */ 4254 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 4255 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 4256 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 4257 } 4258 } 4259 if (op_err) { 4260 /* If we have space */ 4261 struct sctp_paramhdr s; 4262 4263 if (err_at % 4) { 4264 uint32_t cpthis = 0; 4265 4266 pad_needed = 4 - (err_at % 4); 4267 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis); 4268 err_at += pad_needed; 4269 } 4270 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR); 4271 s.param_length = htons(sizeof(s) + plen); 4272 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s); 4273 err_at += sizeof(s); 4274 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, plen); 4275 if (phdr == NULL) { 4276 sctp_m_freem(op_err); 4277 /* 4278 * we are out of memory but we still 4279 * need to have a look at what to do 4280 * (the system is in trouble 4281 * though). 4282 */ 4283 return (NULL); 4284 } 4285 m_copyback(op_err, err_at, plen, (caddr_t)phdr); 4286 err_at += plen; 4287 } 4288 return (op_err); 4289 } else { 4290 /* 4291 * we do not recognize the parameter figure out what 4292 * we do. 4293 */ 4294 if ((ptype & 0x4000) == 0x4000) { 4295 /* Report bit is set?? 
*/ 4296 if (op_err == NULL) { 4297 int l_len; 4298 4299 /* Ok need to try to get an mbuf */ 4300 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 4301 l_len += plen; 4302 l_len += sizeof(struct sctp_paramhdr); 4303 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA); 4304 if (op_err) { 4305 SCTP_BUF_LEN(op_err) = 0; 4306 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 4307 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 4308 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 4309 } 4310 } 4311 if (op_err) { 4312 /* If we have space */ 4313 struct sctp_paramhdr s; 4314 4315 if (err_at % 4) { 4316 uint32_t cpthis = 0; 4317 4318 pad_needed = 4 - (err_at % 4); 4319 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis); 4320 err_at += pad_needed; 4321 } 4322 s.param_type = htons(SCTP_UNRECOG_PARAM); 4323 s.param_length = htons(sizeof(s) + plen); 4324 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s); 4325 err_at += sizeof(s); 4326 if (plen > sizeof(tempbuf)) { 4327 plen = sizeof(tempbuf); 4328 } 4329 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, plen); 4330 if (phdr == NULL) { 4331 sctp_m_freem(op_err); 4332 /* 4333 * we are out of memory but 4334 * we still need to have a 4335 * look at what to do (the 4336 * system is in trouble 4337 * though). 
4338 */ 4339 goto more_processing; 4340 } 4341 m_copyback(op_err, err_at, plen, (caddr_t)phdr); 4342 err_at += plen; 4343 } 4344 } 4345 more_processing: 4346 if ((ptype & 0x8000) == 0x0000) { 4347 return (op_err); 4348 } else { 4349 /* skip this chunk and continue processing */ 4350 at += SCTP_SIZE32(plen); 4351 } 4352 4353 } 4354 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params)); 4355 } 4356 return (op_err); 4357 } 4358 4359 static int 4360 sctp_are_there_new_addresses(struct sctp_association *asoc, 4361 struct mbuf *in_initpkt, int iphlen, int offset) 4362 { 4363 /* 4364 * Given a INIT packet, look through the packet to verify that there 4365 * are NO new addresses. As we go through the parameters add reports 4366 * of any un-understood parameters that require an error. Also we 4367 * must return (1) to drop the packet if we see a un-understood 4368 * parameter that tells us to drop the chunk. 4369 */ 4370 struct sockaddr_in sin4, *sa4; 4371 struct sockaddr_in6 sin6, *sa6; 4372 struct sockaddr *sa_touse; 4373 struct sockaddr *sa; 4374 struct sctp_paramhdr *phdr, params; 4375 struct ip *iph; 4376 struct mbuf *mat; 4377 uint16_t ptype, plen; 4378 int err_at; 4379 uint8_t fnd; 4380 struct sctp_nets *net; 4381 4382 memset(&sin4, 0, sizeof(sin4)); 4383 memset(&sin6, 0, sizeof(sin6)); 4384 sin4.sin_family = AF_INET; 4385 sin4.sin_len = sizeof(sin4); 4386 sin6.sin6_family = AF_INET6; 4387 sin6.sin6_len = sizeof(sin6); 4388 4389 sa_touse = NULL; 4390 /* First what about the src address of the pkt ? 
*/ 4391 iph = mtod(in_initpkt, struct ip *); 4392 if (iph->ip_v == IPVERSION) { 4393 /* source addr is IPv4 */ 4394 sin4.sin_addr = iph->ip_src; 4395 sa_touse = (struct sockaddr *)&sin4; 4396 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 4397 /* source addr is IPv6 */ 4398 struct ip6_hdr *ip6h; 4399 4400 ip6h = mtod(in_initpkt, struct ip6_hdr *); 4401 sin6.sin6_addr = ip6h->ip6_src; 4402 sa_touse = (struct sockaddr *)&sin6; 4403 } else { 4404 return (1); 4405 } 4406 4407 fnd = 0; 4408 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4409 sa = (struct sockaddr *)&net->ro._l_addr; 4410 if (sa->sa_family == sa_touse->sa_family) { 4411 if (sa->sa_family == AF_INET) { 4412 sa4 = (struct sockaddr_in *)sa; 4413 if (sa4->sin_addr.s_addr == 4414 sin4.sin_addr.s_addr) { 4415 fnd = 1; 4416 break; 4417 } 4418 } else if (sa->sa_family == AF_INET6) { 4419 sa6 = (struct sockaddr_in6 *)sa; 4420 if (SCTP6_ARE_ADDR_EQUAL(&sa6->sin6_addr, 4421 &sin6.sin6_addr)) { 4422 fnd = 1; 4423 break; 4424 } 4425 } 4426 } 4427 } 4428 if (fnd == 0) { 4429 /* New address added! no need to look futher. 
*/ 4430 return (1); 4431 } 4432 /* Ok so far lets munge through the rest of the packet */ 4433 mat = in_initpkt; 4434 err_at = 0; 4435 sa_touse = NULL; 4436 offset += sizeof(struct sctp_init_chunk); 4437 phdr = sctp_get_next_param(mat, offset, ¶ms, sizeof(params)); 4438 while (phdr) { 4439 ptype = ntohs(phdr->param_type); 4440 plen = ntohs(phdr->param_length); 4441 if (ptype == SCTP_IPV4_ADDRESS) { 4442 struct sctp_ipv4addr_param *p4, p4_buf; 4443 4444 phdr = sctp_get_next_param(mat, offset, 4445 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf)); 4446 if (plen != sizeof(struct sctp_ipv4addr_param) || 4447 phdr == NULL) { 4448 return (1); 4449 } 4450 p4 = (struct sctp_ipv4addr_param *)phdr; 4451 sin4.sin_addr.s_addr = p4->addr; 4452 sa_touse = (struct sockaddr *)&sin4; 4453 } else if (ptype == SCTP_IPV6_ADDRESS) { 4454 struct sctp_ipv6addr_param *p6, p6_buf; 4455 4456 phdr = sctp_get_next_param(mat, offset, 4457 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf)); 4458 if (plen != sizeof(struct sctp_ipv6addr_param) || 4459 phdr == NULL) { 4460 return (1); 4461 } 4462 p6 = (struct sctp_ipv6addr_param *)phdr; 4463 memcpy((caddr_t)&sin6.sin6_addr, p6->addr, 4464 sizeof(p6->addr)); 4465 sa_touse = (struct sockaddr *)&sin4; 4466 } 4467 if (sa_touse) { 4468 /* ok, sa_touse points to one to check */ 4469 fnd = 0; 4470 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4471 sa = (struct sockaddr *)&net->ro._l_addr; 4472 if (sa->sa_family != sa_touse->sa_family) { 4473 continue; 4474 } 4475 if (sa->sa_family == AF_INET) { 4476 sa4 = (struct sockaddr_in *)sa; 4477 if (sa4->sin_addr.s_addr == 4478 sin4.sin_addr.s_addr) { 4479 fnd = 1; 4480 break; 4481 } 4482 } else if (sa->sa_family == AF_INET6) { 4483 sa6 = (struct sockaddr_in6 *)sa; 4484 if (SCTP6_ARE_ADDR_EQUAL( 4485 &sa6->sin6_addr, &sin6.sin6_addr)) { 4486 fnd = 1; 4487 break; 4488 } 4489 } 4490 } 4491 if (!fnd) { 4492 /* New addr added! 
no need to look further */ 4493 return (1); 4494 } 4495 } 4496 offset += SCTP_SIZE32(plen); 4497 phdr = sctp_get_next_param(mat, offset, ¶ms, sizeof(params)); 4498 } 4499 return (0); 4500 } 4501 4502 /* 4503 * Given a MBUF chain that was sent into us containing an INIT. Build a 4504 * INIT-ACK with COOKIE and send back. We assume that the in_initpkt has done 4505 * a pullup to include IPv6/4header, SCTP header and initial part of INIT 4506 * message (i.e. the struct sctp_init_msg). 4507 */ 4508 void 4509 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4510 struct mbuf *init_pkt, int iphlen, int offset, struct sctphdr *sh, 4511 struct sctp_init_chunk *init_chk) 4512 { 4513 struct sctp_association *asoc; 4514 struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *m_last; 4515 struct sctp_init_msg *initackm_out; 4516 struct sctp_ecn_supported_param *ecn; 4517 struct sctp_prsctp_supported_param *prsctp; 4518 struct sctp_ecn_nonce_supported_param *ecn_nonce; 4519 struct sctp_supported_chunk_types_param *pr_supported; 4520 struct sockaddr_storage store; 4521 struct sockaddr_in *sin; 4522 struct sockaddr_in6 *sin6; 4523 struct route *ro; 4524 struct ip *iph; 4525 struct ip6_hdr *ip6; 4526 struct sockaddr *to; 4527 struct sctp_state_cookie stc; 4528 struct sctp_nets *net = NULL; 4529 int cnt_inits_to = 0; 4530 uint16_t his_limit, i_want; 4531 int abort_flag, padval, sz_of; 4532 int num_ext; 4533 int p_len; 4534 uint32_t vrf_id; 4535 4536 vrf_id = SCTP_DEFAULT_VRFID; 4537 if (stcb) { 4538 asoc = &stcb->asoc; 4539 } else { 4540 asoc = NULL; 4541 } 4542 m_last = NULL; 4543 if ((asoc != NULL) && 4544 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) && 4545 (sctp_are_there_new_addresses(asoc, init_pkt, iphlen, offset))) { 4546 /* new addresses, out of here in non-cookie-wait states */ 4547 /* 4548 * Send a ABORT, we don't add the new address error clause 4549 * though we even set the T bit and copy in the 0 tag.. 
this 4550 * looks no different than if no listener was present. 4551 */ 4552 sctp_send_abort(init_pkt, iphlen, sh, 0, NULL); 4553 return; 4554 } 4555 abort_flag = 0; 4556 op_err = sctp_arethere_unrecognized_parameters(init_pkt, 4557 (offset + sizeof(struct sctp_init_chunk)), 4558 &abort_flag, (struct sctp_chunkhdr *)init_chk); 4559 if (abort_flag) { 4560 sctp_send_abort(init_pkt, iphlen, sh, init_chk->init.initiate_tag, op_err); 4561 return; 4562 } 4563 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 4564 if (m == NULL) { 4565 /* No memory, INIT timer will re-attempt. */ 4566 if (op_err) 4567 sctp_m_freem(op_err); 4568 return; 4569 } 4570 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_msg); 4571 4572 /* the time I built cookie */ 4573 SCTP_GETTIME_TIMEVAL(&stc.time_entered); 4574 4575 /* populate any tie tags */ 4576 if (asoc != NULL) { 4577 /* unlock before tag selections */ 4578 stc.tie_tag_my_vtag = asoc->my_vtag_nonce; 4579 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce; 4580 stc.cookie_life = asoc->cookie_life; 4581 net = asoc->primary_destination; 4582 } else { 4583 stc.tie_tag_my_vtag = 0; 4584 stc.tie_tag_peer_vtag = 0; 4585 /* life I will award this cookie */ 4586 stc.cookie_life = inp->sctp_ep.def_cookie_life; 4587 } 4588 4589 /* copy in the ports for later check */ 4590 stc.myport = sh->dest_port; 4591 stc.peerport = sh->src_port; 4592 4593 /* 4594 * If we wanted to honor cookie life extentions, we would add to 4595 * stc.cookie_life. 
For now we should NOT honor any extension 4596 */ 4597 stc.site_scope = stc.local_scope = stc.loopback_scope = 0; 4598 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 4599 struct inpcb *in_inp; 4600 4601 /* Its a V6 socket */ 4602 in_inp = (struct inpcb *)inp; 4603 stc.ipv6_addr_legal = 1; 4604 /* Now look at the binding flag to see if V4 will be legal */ 4605 if (SCTP_IPV6_V6ONLY(in_inp) == 0) { 4606 stc.ipv4_addr_legal = 1; 4607 } else { 4608 /* V4 addresses are NOT legal on the association */ 4609 stc.ipv4_addr_legal = 0; 4610 } 4611 } else { 4612 /* Its a V4 socket, no - V6 */ 4613 stc.ipv4_addr_legal = 1; 4614 stc.ipv6_addr_legal = 0; 4615 } 4616 4617 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE 4618 stc.ipv4_scope = 1; 4619 #else 4620 stc.ipv4_scope = 0; 4621 #endif 4622 /* now for scope setup */ 4623 memset((caddr_t)&store, 0, sizeof(store)); 4624 sin = (struct sockaddr_in *)&store; 4625 sin6 = (struct sockaddr_in6 *)&store; 4626 if (net == NULL) { 4627 to = (struct sockaddr *)&store; 4628 iph = mtod(init_pkt, struct ip *); 4629 if (iph->ip_v == IPVERSION) { 4630 struct sctp_ifa *addr; 4631 struct route iproute; 4632 4633 sin->sin_family = AF_INET; 4634 sin->sin_len = sizeof(struct sockaddr_in); 4635 sin->sin_port = sh->src_port; 4636 sin->sin_addr = iph->ip_src; 4637 /* lookup address */ 4638 stc.address[0] = sin->sin_addr.s_addr; 4639 stc.address[1] = 0; 4640 stc.address[2] = 0; 4641 stc.address[3] = 0; 4642 stc.addr_type = SCTP_IPV4_ADDRESS; 4643 /* local from address */ 4644 memset(&iproute, 0, sizeof(iproute)); 4645 ro = &iproute; 4646 memcpy(&ro->ro_dst, sin, sizeof(*sin)); 4647 addr = sctp_source_address_selection(inp, NULL, 4648 ro, NULL, 0, vrf_id); 4649 if (addr == NULL) 4650 return; 4651 4652 if (ro->ro_rt) { 4653 RTFREE(ro->ro_rt); 4654 ro->ro_rt = NULL; 4655 } 4656 stc.laddress[0] = addr->address.sin.sin_addr.s_addr; 4657 stc.laddress[1] = 0; 4658 stc.laddress[2] = 0; 4659 stc.laddress[3] = 0; 4660 stc.laddr_type = SCTP_IPV4_ADDRESS; 4661 /* scope_id is 
only for v6 */ 4662 stc.scope_id = 0; 4663 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE 4664 if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) { 4665 stc.ipv4_scope = 1; 4666 } 4667 #else 4668 stc.ipv4_scope = 1; 4669 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */ 4670 /* Must use the address in this case */ 4671 if (sctp_is_address_on_local_host((struct sockaddr *)sin, vrf_id)) { 4672 stc.loopback_scope = 1; 4673 stc.ipv4_scope = 1; 4674 stc.site_scope = 1; 4675 stc.local_scope = 0; 4676 } 4677 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 4678 struct sctp_ifa *addr; 4679 4680 struct route_in6 iproute6; 4681 4682 ip6 = mtod(init_pkt, struct ip6_hdr *); 4683 sin6->sin6_family = AF_INET6; 4684 sin6->sin6_len = sizeof(struct sockaddr_in6); 4685 sin6->sin6_port = sh->src_port; 4686 sin6->sin6_addr = ip6->ip6_src; 4687 /* lookup address */ 4688 memcpy(&stc.address, &sin6->sin6_addr, 4689 sizeof(struct in6_addr)); 4690 sin6->sin6_scope_id = 0; 4691 stc.addr_type = SCTP_IPV6_ADDRESS; 4692 stc.scope_id = 0; 4693 if (sctp_is_address_on_local_host((struct sockaddr *)sin6, vrf_id)) { 4694 stc.loopback_scope = 1; 4695 stc.local_scope = 0; 4696 stc.site_scope = 1; 4697 stc.ipv4_scope = 1; 4698 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 4699 /* 4700 * If the new destination is a LINK_LOCAL we 4701 * must have common both site and local 4702 * scope. Don't set local scope though since 4703 * we must depend on the source to be added 4704 * implicitly. We cannot assure just because 4705 * we share one link that all links are 4706 * common. 4707 */ 4708 stc.local_scope = 0; 4709 stc.site_scope = 1; 4710 stc.ipv4_scope = 1; 4711 /* 4712 * we start counting for the private address 4713 * stuff at 1. since the link local we 4714 * source from won't show up in our scoped 4715 * count. 4716 */ 4717 cnt_inits_to = 1; 4718 /* pull out the scope_id from incoming pkt */ 4719 /* FIX ME: does this have scope from rcvif? 
*/ 4720 (void)sa6_recoverscope(sin6); 4721 4722 sa6_embedscope(sin6, ip6_use_defzone); 4723 stc.scope_id = sin6->sin6_scope_id; 4724 } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) { 4725 /* 4726 * If the new destination is SITE_LOCAL then 4727 * we must have site scope in common. 4728 */ 4729 stc.site_scope = 1; 4730 } 4731 /* local from address */ 4732 memset(&iproute6, 0, sizeof(iproute6)); 4733 ro = (struct route *)&iproute6; 4734 memcpy(&ro->ro_dst, sin6, sizeof(*sin6)); 4735 addr = sctp_source_address_selection(inp, NULL, 4736 ro, NULL, 0, vrf_id); 4737 if (addr == NULL) 4738 return; 4739 4740 if (ro->ro_rt) { 4741 RTFREE(ro->ro_rt); 4742 ro->ro_rt = NULL; 4743 } 4744 memcpy(&stc.laddress, &addr->address.sin6.sin6_addr, sizeof(struct in6_addr)); 4745 stc.laddr_type = SCTP_IPV6_ADDRESS; 4746 } 4747 } else { 4748 /* set the scope per the existing tcb */ 4749 struct sctp_nets *lnet; 4750 4751 stc.loopback_scope = asoc->loopback_scope; 4752 stc.ipv4_scope = asoc->ipv4_local_scope; 4753 stc.site_scope = asoc->site_scope; 4754 stc.local_scope = asoc->local_scope; 4755 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) { 4756 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) { 4757 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) { 4758 /* 4759 * if we have a LL address, start 4760 * counting at 1. 4761 */ 4762 cnt_inits_to = 1; 4763 } 4764 } 4765 } 4766 4767 /* use the net pointer */ 4768 to = (struct sockaddr *)&net->ro._l_addr; 4769 if (to->sa_family == AF_INET) { 4770 sin = (struct sockaddr_in *)to; 4771 stc.address[0] = sin->sin_addr.s_addr; 4772 stc.address[1] = 0; 4773 stc.address[2] = 0; 4774 stc.address[3] = 0; 4775 stc.addr_type = SCTP_IPV4_ADDRESS; 4776 if (net->src_addr_selected == 0) { 4777 /* 4778 * strange case here, the INIT should have 4779 * did the selection. 
4780 */ 4781 net->ro._s_addr = sctp_source_address_selection(inp, 4782 stcb, (struct route *)&net->ro, 4783 net, 0, vrf_id); 4784 if (net->ro._s_addr == NULL) 4785 return; 4786 4787 net->src_addr_selected = 1; 4788 4789 } 4790 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr; 4791 stc.laddress[1] = 0; 4792 stc.laddress[2] = 0; 4793 stc.laddress[3] = 0; 4794 stc.laddr_type = SCTP_IPV4_ADDRESS; 4795 } else if (to->sa_family == AF_INET6) { 4796 sin6 = (struct sockaddr_in6 *)to; 4797 memcpy(&stc.address, &sin6->sin6_addr, 4798 sizeof(struct in6_addr)); 4799 stc.addr_type = SCTP_IPV6_ADDRESS; 4800 if (net->src_addr_selected == 0) { 4801 /* 4802 * strange case here, the INIT should have 4803 * did the selection. 4804 */ 4805 net->ro._s_addr = sctp_source_address_selection(inp, 4806 stcb, (struct route *)&net->ro, 4807 net, 0, vrf_id); 4808 if (net->ro._s_addr == NULL) 4809 return; 4810 4811 net->src_addr_selected = 1; 4812 } 4813 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr, 4814 sizeof(struct in6_addr)); 4815 stc.laddr_type = SCTP_IPV6_ADDRESS; 4816 } 4817 } 4818 /* Now lets put the SCTP header in place */ 4819 initackm_out = mtod(m, struct sctp_init_msg *); 4820 initackm_out->sh.src_port = inp->sctp_lport; 4821 initackm_out->sh.dest_port = sh->src_port; 4822 initackm_out->sh.v_tag = init_chk->init.initiate_tag; 4823 /* Save it off for quick ref */ 4824 stc.peers_vtag = init_chk->init.initiate_tag; 4825 initackm_out->sh.checksum = 0; /* calculate later */ 4826 /* who are we */ 4827 memcpy(stc.identification, SCTP_VERSION_STRING, 4828 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification))); 4829 /* now the chunk header */ 4830 initackm_out->msg.ch.chunk_type = SCTP_INITIATION_ACK; 4831 initackm_out->msg.ch.chunk_flags = 0; 4832 /* fill in later from mbuf we build */ 4833 initackm_out->msg.ch.chunk_length = 0; 4834 /* place in my tag */ 4835 if ((asoc != NULL) && 4836 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 4837 
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
		/* re-use the v-tags and init-seq here */
		initackm_out->msg.init.initiate_tag = htonl(asoc->my_vtag);
		initackm_out->msg.init.initial_tsn = htonl(asoc->init_seq_number);
	} else {
		uint32_t vtag;

		if (asoc) {
			/* hold a ref across the unlocked tag selection */
			atomic_add_int(&asoc->refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			vtag = sctp_select_a_tag(inp);
			initackm_out->msg.init.initiate_tag = htonl(vtag);
			/* get a TSN to use too */
			initackm_out->msg.init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
			SCTP_TCB_LOCK(stcb);
			atomic_add_int(&asoc->refcnt, -1);
		} else {
			vtag = sctp_select_a_tag(inp);
			initackm_out->msg.init.initiate_tag = htonl(vtag);
			/* get a TSN to use too */
			initackm_out->msg.init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
		}
	}
	/* save away my tag to */
	stc.my_vtag = initackm_out->msg.init.initiate_tag;

	/* set up some of the credits. */
	initackm_out->msg.init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND));
	/* set what I want */
	his_limit = ntohs(init_chk->init.num_inbound_streams);
	/* choose what I want */
	if (asoc != NULL) {
		if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) {
			i_want = asoc->streamoutcnt;
		} else {
			i_want = inp->sctp_ep.pre_open_stream_count;
		}
	} else {
		i_want = inp->sctp_ep.pre_open_stream_count;
	}
	if (his_limit < i_want) {
		/* I Want more :< */
		/* peer's value is already network order, no htons needed */
		initackm_out->msg.init.num_outbound_streams = init_chk->init.num_inbound_streams;
	} else {
		/* I can have what I want :> */
		initackm_out->msg.init.num_outbound_streams = htons(i_want);
	}
	/* tell him his limt. */
	initackm_out->msg.init.num_inbound_streams =
	    htons(inp->sctp_ep.max_open_streams_intome);
	/* setup the ECN pointer */

	if (inp->sctp_ep.adaptation_layer_indicator) {
		struct sctp_adaptation_layer_indication *ali;

		ali = (struct sctp_adaptation_layer_indication *)(
		    (caddr_t)initackm_out + sizeof(*initackm_out));
		ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
		ali->ph.param_length = htons(sizeof(*ali));
		/*
		 * NOTE(review): ntohl() on a host-order value; htonl() was
		 * presumably meant.  Harmless in practice since both are
		 * the same byte-swap transform, but worth confirming.
		 */
		ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
		SCTP_BUF_LEN(m) += sizeof(*ali);
		ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali +
		    sizeof(*ali));
	} else {
		ecn = (struct sctp_ecn_supported_param *)(
		    (caddr_t)initackm_out + sizeof(*initackm_out));
	}

	/* ECN parameter */
	if (sctp_ecn_enable == 1) {
		ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
		ecn->ph.param_length = htons(sizeof(*ecn));
		SCTP_BUF_LEN(m) += sizeof(*ecn);

		prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
		    sizeof(*ecn));
	} else {
		prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
	}
	/* And now tell the peer we do pr-sctp */
	prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
	prsctp->ph.param_length = htons(sizeof(*prsctp));
	SCTP_BUF_LEN(m) += sizeof(*prsctp);

	/* And now tell the peer we do all the extensions */
	pr_supported = (struct sctp_supported_chunk_types_param *)
	    ((caddr_t)prsctp + sizeof(*prsctp));

	pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
	num_ext = 0;
	pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
	pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
	pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
	pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
	pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
	if (!sctp_auth_disable)
		pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
	p_len = sizeof(*pr_supported) + num_ext;
	pr_supported->ph.param_length = htons(p_len);
	/* zero the 32-bit-alignment padding after the parameter */
	bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
	SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);

	/* ECN nonce: And now tell the peer we support ECN nonce */
	if (sctp_ecn_nonce) {
		ecn_nonce = (struct sctp_ecn_nonce_supported_param *)
		    ((caddr_t)pr_supported + SCTP_SIZE32(p_len));
		ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED);
		ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce));
		SCTP_BUF_LEN(m) += sizeof(*ecn_nonce);
	}
	/* add authentication parameters */
	if (!sctp_auth_disable) {
		struct sctp_auth_random *random;
		struct sctp_auth_hmac_algo *hmacs;
		struct sctp_auth_chunk_list *chunks;
		uint16_t random_len;

		/* generate and add RANDOM parameter */
		random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT;
		random = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
		random->ph.param_type = htons(SCTP_RANDOM);
		p_len = sizeof(*random) + random_len;
		random->ph.param_length = htons(p_len);
		SCTP_READ_RANDOM(random->random_data, random_len);
		/* zero out any padding required */
		bzero((caddr_t)random + p_len, SCTP_SIZE32(p_len) - p_len);
		SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);

		/* add HMAC_ALGO parameter */
		hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
		p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
		    (uint8_t *) hmacs->hmac_ids);
		if (p_len > 0) {
			p_len += sizeof(*hmacs);
			hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
			hmacs->ph.param_length = htons(p_len);
			/* zero out any padding required */
			bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
			SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
		}
		/* add CHUNKS parameter */
		chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
		p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
		    chunks->chunk_types);
		if (p_len > 0) {
			p_len += sizeof(*chunks);
			chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
			chunks->ph.param_length = htons(p_len);
			/* zero out any padding required */
			bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
			SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
		}
	}
	m_at = m;
	/* now the addresses */
	{
		struct sctp_scoping scp;

		/*
		 * To optimize this we could put the scoping stuff into a
		 * structure and remove the individual uint8's from the stc
		 * structure. Then we could just pass in the address within
		 * the stc.. but for now this is a quick hack to get the
		 * address stuff teased apart.
		 */
		scp.ipv4_addr_legal = stc.ipv4_addr_legal;
		scp.ipv6_addr_legal = stc.ipv6_addr_legal;
		scp.loopback_scope = stc.loopback_scope;
		scp.ipv4_local_scope = stc.ipv4_scope;
		scp.local_scope = stc.local_scope;
		scp.site_scope = stc.site_scope;
		m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to);
	}

	/* tack on the operational error if present */
	if (op_err) {
		struct mbuf *ol;
		int llen;

		/* total length of the error chain */
		llen = 0;
		ol = op_err;
		while (ol) {
			llen += SCTP_BUF_LEN(ol);
			ol = SCTP_BUF_NEXT(ol);
		}
		if (llen % 4) {
			/* must add a pad to the param */
			uint32_t cpthis = 0;
			int padlen;

			padlen = 4 - (llen % 4);
			m_copyback(op_err, llen, padlen, (caddr_t)&cpthis);
		}
		/* append the error chain to the end of the INIT-ACK chain */
		while (SCTP_BUF_NEXT(m_at) != NULL) {
			m_at = SCTP_BUF_NEXT(m_at);
		}
		SCTP_BUF_NEXT(m_at) = op_err;
		while (SCTP_BUF_NEXT(m_at) != NULL) {
			m_at = SCTP_BUF_NEXT(m_at);
		}
	}
	/* Get total size of init packet */
	sz_of = SCTP_SIZE32(ntohs(init_chk->ch.chunk_length));
	/* pre-calulate the size and update pkt header and chunk header */
	p_len = 0;
	for (m_tmp = m; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
		p_len += SCTP_BUF_LEN(m_tmp);
		if (SCTP_BUF_NEXT(m_tmp) == NULL) {
			/* m_tmp should now point to last one */
			break;
		}
	}
	/*
	 * Figure now the size of the cookie. We know the size of the
	 * INIT-ACK. The Cookie is going to be the size of INIT, INIT-ACK,
	 * COOKIE-STRUCTURE and SIGNATURE.
	 */

	/*
	 * take our earlier INIT calc and add in the sz we just calculated
	 * minus the size of the sctphdr (its not included in chunk size
	 */

	/* add once for the INIT-ACK */
	sz_of += (p_len - sizeof(struct sctphdr));

	/* add a second time for the INIT-ACK in the cookie */
	sz_of += (p_len - sizeof(struct sctphdr));

	/* Now add the cookie header and cookie message struct */
	sz_of += sizeof(struct sctp_state_cookie_param);
	/* ...and add the size of our signature */
	sz_of += SCTP_SIGNATURE_SIZE;
	initackm_out->msg.ch.chunk_length = htons(sz_of);

	/* Now we must build a cookie */
	m_cookie = sctp_add_cookie(inp, init_pkt, offset, m,
	    sizeof(struct sctphdr), &stc);
	if (m_cookie == NULL) {
		/* memory problem */
		/* op_err is already chained onto m, so this frees it too */
		sctp_m_freem(m);
		return;
	}
	/* Now append the cookie to the end and update the space/size */
	SCTP_BUF_NEXT(m_tmp) = m_cookie;
	for (; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
		p_len += SCTP_BUF_LEN(m_tmp);
		if (SCTP_BUF_NEXT(m_tmp) == NULL) {
			/* m_tmp should now point to last one */
			m_last = m_tmp;
			break;
		}
	}

	/*
	 * We pass 0 here to NOT set IP_DF if its IPv4, we ignore the return
	 * here since the timer will drive a retranmission.
	 */
	padval = p_len % 4;
	if ((padval) && (m_last)) {
		/* see my previous comments on m_last */
		int ret;

		ret = sctp_add_pad_tombuf(m_last, (4 - padval));
		if (ret) {
			/* Houston we have a problem, no space */
			sctp_m_freem(m);
			return;
		}
		/*
		 * NOTE(review): (4 - padval) bytes were appended, yet only
		 * padval is added here.  p_len is not read again, so the
		 * inconsistency is currently harmless — but confirm before
		 * reusing p_len below.
		 */
		p_len += padval;
	}
	sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
	    NULL, 0);
	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
}


/*
 * Insert a stream onto the output "wheel" in ascending stream_no order.
 * No-op if the stream is already linked.  holds_lock tells us whether the
 * caller already owns the TCB send lock.
 */
void
sctp_insert_on_wheel(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_out *strq, int holds_lock)
{
	struct sctp_stream_out *stre, *strn;

	if (holds_lock == 0)
		SCTP_TCB_SEND_LOCK(stcb);
	if ((strq->next_spoke.tqe_next) ||
	    (strq->next_spoke.tqe_prev)) {
		/* already on wheel */
		goto outof_here;
	}
	stre = TAILQ_FIRST(&asoc->out_wheel);
	if (stre == NULL) {
		/* only one on wheel */
		TAILQ_INSERT_HEAD(&asoc->out_wheel, strq, next_spoke);
		goto outof_here;
	}
	for (; stre; stre = strn) {
		strn = TAILQ_NEXT(stre, next_spoke);
		if (stre->stream_no > strq->stream_no) {
			TAILQ_INSERT_BEFORE(stre, strq, next_spoke);
			goto outof_here;
		} else if (stre->stream_no == strq->stream_no) {
			/* huh, should not happen */
			goto outof_here;
		} else if (strn == NULL) {
			/* next one is null */
			TAILQ_INSERT_AFTER(&asoc->out_wheel, stre, strq,
			    next_spoke);
		}
	}
outof_here:
	if (holds_lock == 0)
		SCTP_TCB_SEND_UNLOCK(stcb);


}

/*
 * Unlink a stream from the output wheel — unless data was queued on it in
 * the meantime, in which case it must stay scheduled.
 */
static void
sctp_remove_from_wheel(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_out *strq)
{
	/* take off and then setup so we know it is not on the wheel */
	SCTP_TCB_SEND_LOCK(stcb);
	if (TAILQ_FIRST(&strq->outqueue)) {
		/* more was added */
		SCTP_TCB_SEND_UNLOCK(stcb);
		return;
	}
	TAILQ_REMOVE(&asoc->out_wheel, strq, next_spoke);
	/* clear the link fields so "on wheel" checks see it as unlinked */
	strq->next_spoke.tqe_next = NULL;
	strq->next_spoke.tqe_prev = NULL;
	SCTP_TCB_SEND_UNLOCK(stcb);
}

/*
 * Free already-queued PR-SCTP chunks of equal or lower priority to make
 * room for 'dataout' bytes of new data.  First scans the sent queue for
 * buffer-space-policy chunks, then the send queue for TTL-policy chunks,
 * stopping as soon as enough space has been reclaimed.
 */
static void
sctp_prune_prsctp(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_sndrcvinfo *srcv,
    int dataout)
{
	int freed_spc = 0;
	struct sctp_tmit_chunk *chk, *nchk;

	SCTP_TCB_LOCK_ASSERT(stcb);
	if ((asoc->peer_supports_prsctp) &&
	    (asoc->sent_queue_cnt_removeable > 0)) {
		TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
			/*
			 * Look for chunks marked with the PR_SCTP flag AND
			 * the buffer space flag. If the one being sent is
			 * equal or greater priority then purge the old one
			 * and free some space.
			 */
			if (PR_SCTP_BUF_ENABLED(chk->flags)) {
				/*
				 * This one is PR-SCTP AND buffer space
				 * limited type
				 */
				if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
					/*
					 * Lower numbers equates to higher
					 * priority so if the one we are
					 * looking at has a larger or equal
					 * priority we want to drop the data
					 * and NOT retransmit it.
					 */
					if (chk->data) {
						/*
						 * We release the book_size
						 * if the mbuf is here
						 */
						int ret_spc;
						int cause;

						if (chk->sent > SCTP_DATAGRAM_UNSENT)
							cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT;
						else
							cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT;
						ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
						    cause,
						    &asoc->sent_queue);
						freed_spc += ret_spc;
						if (freed_spc >= dataout) {
							/* reclaimed enough */
							return;
						}
					}	/* if chunk was present */
				}	/* if of sufficent priority */
			}	/* if chunk has enabled */
		}		/* tailqforeach */

		chk = TAILQ_FIRST(&asoc->send_queue);
		while (chk) {
			/* grab next first: chk may be released below */
			nchk = TAILQ_NEXT(chk, sctp_next);
			/* Here we must move to the sent queue and mark */
			if (PR_SCTP_TTL_ENABLED(chk->flags)) {
				if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
					if (chk->data) {
						/*
						 * We release the book_size
						 * if the mbuf is here
						 */
						int ret_spc;

						ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
						    SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT,
						    &asoc->send_queue);

						freed_spc += ret_spc;
						if (freed_spc >= dataout) {
							return;
						}
					}	/* end if chk->data */
				}	/* end if right class */
			}	/* end if chk pr-sctp */
			chk = nchk;
		}		/* end while (chk) */
	}			/* if enabled in asoc */
}

/*
 * Compute the largest DATA chunk payload that fits the association: the
 * smaller of the endpoint's configured frag point and the path MTU, minus
 * protocol overhead and any AUTH chunk, rounded down to a 4-byte boundary.
 */
__inline int
sctp_get_frag_point(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	int siz, ovh;

	/*
	 * For endpoints that have both v6 and v4 addresses we must reserve
	 * room for the ipv6 header, for those that are only dealing with V4
	 * we use a larger frag point.
	 */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ovh = SCTP_MED_OVERHEAD;
	} else {
		ovh = SCTP_MED_V4_OVERHEAD;
	}

	if (stcb->sctp_ep->sctp_frag_point > asoc->smallest_mtu)
		siz = asoc->smallest_mtu - ovh;
	else
		siz = (stcb->sctp_ep->sctp_frag_point - ovh);
	/*
	 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
	 */
	/* A data chunk MUST fit in a cluster */
	/* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
	/* } */

	/* adjust for an AUTH chunk if DATA requires auth */
	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
		siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);

	if (siz % 4) {
		/* make it an even word boundary please */
		siz -= (siz % 4);
	}
	return (siz);
}

/*
 * Derive the PR-SCTP policy flags for a pending stream-queue entry from
 * its sndrcvinfo flags; pr_sctp_on is set only when the peer supports
 * PR-SCTP and the user selected a policy.
 */
static void
sctp_set_prsctp_policy(struct sctp_tcb *stcb,
    struct sctp_stream_queue_pending *sp)
{
	sp->pr_sctp_on = 0;
	if (stcb->asoc.peer_supports_prsctp) {
		/*
		 * We assume that the user wants PR_SCTP_TTL if the user
		 * provides a positive lifetime but does not specify any
		 * PR_SCTP policy. This is a BAD assumption and causes
		 * problems at least with the U-Vancovers MPI folks. I will
		 * change this to be no policy means NO PR-SCTP.
		 */
		if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
			sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
			sp->pr_sctp_on = 1;
		} else {
			return;
		}
		switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
		case CHUNK_FLAGS_PR_SCTP_BUF:
			/*
			 * Time to live is a priority stored in tv_sec when
			 * doing the buffer drop thing.
5322 */ 5323 sp->ts.tv_sec = sp->timetolive; 5324 sp->ts.tv_usec = 0; 5325 break; 5326 case CHUNK_FLAGS_PR_SCTP_TTL: 5327 { 5328 struct timeval tv; 5329 5330 SCTP_GETTIME_TIMEVAL(&sp->ts); 5331 tv.tv_sec = sp->timetolive / 1000; 5332 tv.tv_usec = (sp->timetolive * 1000) % 1000000; 5333 timevaladd(&sp->ts, &tv); 5334 } 5335 break; 5336 case CHUNK_FLAGS_PR_SCTP_RTX: 5337 /* 5338 * Time to live is a the number or retransmissions 5339 * stored in tv_sec. 5340 */ 5341 sp->ts.tv_sec = sp->timetolive; 5342 sp->ts.tv_usec = 0; 5343 break; 5344 default: 5345 #ifdef SCTP_DEBUG 5346 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) { 5347 printf("Unknown PR_SCTP policy %u.\n", PR_SCTP_POLICY(sp->sinfo_flags)); 5348 } 5349 #endif 5350 break; 5351 } 5352 } 5353 } 5354 5355 static int 5356 sctp_msg_append(struct sctp_tcb *stcb, 5357 struct sctp_nets *net, 5358 struct mbuf *m, 5359 struct sctp_sndrcvinfo *srcv, int hold_stcb_lock) 5360 { 5361 int error = 0, holds_lock; 5362 struct mbuf *at; 5363 struct sctp_stream_queue_pending *sp = NULL; 5364 struct sctp_stream_out *strm; 5365 5366 /* 5367 * Given an mbuf chain, put it into the association send queue and 5368 * place it on the wheel 5369 */ 5370 holds_lock = hold_stcb_lock; 5371 if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) { 5372 /* Invalid stream number */ 5373 error = EINVAL; 5374 goto out_now; 5375 } 5376 if ((stcb->asoc.stream_locked) && 5377 (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) { 5378 error = EAGAIN; 5379 goto out_now; 5380 } 5381 strm = &stcb->asoc.strmout[srcv->sinfo_stream]; 5382 /* Now can we send this? 
*/ 5383 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) || 5384 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 5385 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) || 5386 (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) { 5387 /* got data while shutting down */ 5388 error = ECONNRESET; 5389 goto out_now; 5390 } 5391 sp = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_strmoq, struct sctp_stream_queue_pending); 5392 if (sp == NULL) { 5393 error = ENOMEM; 5394 goto out_now; 5395 } 5396 SCTP_INCR_STRMOQ_COUNT(); 5397 sp->sinfo_flags = srcv->sinfo_flags; 5398 sp->timetolive = srcv->sinfo_timetolive; 5399 sp->ppid = srcv->sinfo_ppid; 5400 sp->context = srcv->sinfo_context; 5401 sp->strseq = 0; 5402 if (sp->sinfo_flags & SCTP_ADDR_OVER) { 5403 sp->net = net; 5404 sp->addr_over = 1; 5405 } else { 5406 sp->net = stcb->asoc.primary_destination; 5407 sp->addr_over = 0; 5408 } 5409 atomic_add_int(&sp->net->ref_count, 1); 5410 SCTP_GETTIME_TIMEVAL(&sp->ts); 5411 sp->stream = srcv->sinfo_stream; 5412 sp->msg_is_complete = 1; 5413 sp->some_taken = 0; 5414 sp->data = m; 5415 sp->tail_mbuf = NULL; 5416 sp->length = 0; 5417 at = m; 5418 sctp_set_prsctp_policy(stcb, sp); 5419 /* 5420 * We could in theory (for sendall) pass the length in, but we would 5421 * still have to hunt through the chain since we need to setup the 5422 * tail_mbuf 5423 */ 5424 while (at) { 5425 if (SCTP_BUF_NEXT(at) == NULL) 5426 sp->tail_mbuf = at; 5427 sp->length += SCTP_BUF_LEN(at); 5428 at = SCTP_BUF_NEXT(at); 5429 } 5430 SCTP_TCB_SEND_LOCK(stcb); 5431 sctp_snd_sb_alloc(stcb, sp->length); 5432 stcb->asoc.stream_queue_cnt++; 5433 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next); 5434 if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) { 5435 sp->strseq = strm->next_sequence_sent; 5436 strm->next_sequence_sent++; 5437 } 5438 if ((strm->next_spoke.tqe_next == NULL) && 5439 (strm->next_spoke.tqe_prev == NULL)) { 5440 /* Not on wheel, insert */ 5441 sctp_insert_on_wheel(stcb, 
&stcb->asoc, strm, 1); 5442 } 5443 m = NULL; 5444 SCTP_TCB_SEND_UNLOCK(stcb); 5445 out_now: 5446 if (m) { 5447 sctp_m_freem(m); 5448 } 5449 return (error); 5450 } 5451 5452 5453 static struct mbuf * 5454 sctp_copy_mbufchain(struct mbuf *clonechain, 5455 struct mbuf *outchain, 5456 struct mbuf **endofchain, 5457 int can_take_mbuf, 5458 int sizeofcpy, 5459 uint8_t copy_by_ref) 5460 { 5461 struct mbuf *m; 5462 struct mbuf *appendchain; 5463 caddr_t cp; 5464 int len; 5465 5466 if (endofchain == NULL) { 5467 /* error */ 5468 error_out: 5469 if (outchain) 5470 sctp_m_freem(outchain); 5471 return (NULL); 5472 } 5473 if (can_take_mbuf) { 5474 appendchain = clonechain; 5475 } else { 5476 if (!copy_by_ref && (sizeofcpy <= ((((sctp_mbuf_threshold_count - 1) * MLEN) + MHLEN)))) { 5477 /* Its not in a cluster */ 5478 if (*endofchain == NULL) { 5479 /* lets get a mbuf cluster */ 5480 if (outchain == NULL) { 5481 /* This is the general case */ 5482 new_mbuf: 5483 outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER); 5484 if (outchain == NULL) { 5485 goto error_out; 5486 } 5487 SCTP_BUF_LEN(outchain) = 0; 5488 *endofchain = outchain; 5489 /* get the prepend space */ 5490 SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4)); 5491 } else { 5492 /* 5493 * We really should not get a NULL 5494 * in endofchain 5495 */ 5496 /* find end */ 5497 m = outchain; 5498 while (m) { 5499 if (SCTP_BUF_NEXT(m) == NULL) { 5500 *endofchain = m; 5501 break; 5502 } 5503 m = SCTP_BUF_NEXT(m); 5504 } 5505 /* sanity */ 5506 if (*endofchain == NULL) { 5507 /* 5508 * huh, TSNH XXX maybe we 5509 * should panic 5510 */ 5511 sctp_m_freem(outchain); 5512 goto new_mbuf; 5513 } 5514 } 5515 /* get the new end of length */ 5516 len = M_TRAILINGSPACE(*endofchain); 5517 } else { 5518 /* how much is left at the end? 
*/ 5519 len = M_TRAILINGSPACE(*endofchain); 5520 } 5521 /* Find the end of the data, for appending */ 5522 cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain))); 5523 5524 /* Now lets copy it out */ 5525 if (len >= sizeofcpy) { 5526 /* It all fits, copy it in */ 5527 m_copydata(clonechain, 0, sizeofcpy, cp); 5528 SCTP_BUF_LEN((*endofchain)) += sizeofcpy; 5529 } else { 5530 /* fill up the end of the chain */ 5531 if (len > 0) { 5532 m_copydata(clonechain, 0, len, cp); 5533 SCTP_BUF_LEN((*endofchain)) += len; 5534 /* now we need another one */ 5535 sizeofcpy -= len; 5536 } 5537 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER); 5538 if (m == NULL) { 5539 /* We failed */ 5540 goto error_out; 5541 } 5542 SCTP_BUF_NEXT((*endofchain)) = m; 5543 *endofchain = m; 5544 cp = mtod((*endofchain), caddr_t); 5545 m_copydata(clonechain, len, sizeofcpy, cp); 5546 SCTP_BUF_LEN((*endofchain)) += sizeofcpy; 5547 } 5548 return (outchain); 5549 } else { 5550 /* copy the old fashion way */ 5551 appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_DONTWAIT); 5552 } 5553 } 5554 if (appendchain == NULL) { 5555 /* error */ 5556 if (outchain) 5557 sctp_m_freem(outchain); 5558 return (NULL); 5559 } 5560 if (outchain) { 5561 /* tack on to the end */ 5562 if (*endofchain != NULL) { 5563 SCTP_BUF_NEXT(((*endofchain))) = appendchain; 5564 } else { 5565 m = outchain; 5566 while (m) { 5567 if (SCTP_BUF_NEXT(m) == NULL) { 5568 SCTP_BUF_NEXT(m) = appendchain; 5569 break; 5570 } 5571 m = SCTP_BUF_NEXT(m); 5572 } 5573 } 5574 /* 5575 * save off the end and update the end-chain postion 5576 */ 5577 m = appendchain; 5578 while (m) { 5579 if (SCTP_BUF_NEXT(m) == NULL) { 5580 *endofchain = m; 5581 break; 5582 } 5583 m = SCTP_BUF_NEXT(m); 5584 } 5585 return (outchain); 5586 } else { 5587 /* save off the end and update the end-chain postion */ 5588 m = appendchain; 5589 while (m) { 5590 if (SCTP_BUF_NEXT(m) == NULL) { 5591 *endofchain = m; 5592 break; 5593 } 5594 m = 
SCTP_BUF_NEXT(m); 5595 } 5596 return (appendchain); 5597 } 5598 } 5599 5600 int 5601 sctp_med_chunk_output(struct sctp_inpcb *inp, 5602 struct sctp_tcb *stcb, 5603 struct sctp_association *asoc, 5604 int *num_out, 5605 int *reason_code, 5606 int control_only, int *cwnd_full, int from_where, 5607 struct timeval *now, int *now_filled, int frag_point); 5608 5609 static void 5610 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr, 5611 uint32_t val) 5612 { 5613 struct sctp_copy_all *ca; 5614 struct mbuf *m; 5615 int ret = 0; 5616 int added_control = 0; 5617 int un_sent, do_chunk_output = 1; 5618 struct sctp_association *asoc; 5619 5620 ca = (struct sctp_copy_all *)ptr; 5621 if (ca->m == NULL) { 5622 return; 5623 } 5624 if (ca->inp != inp) { 5625 /* TSNH */ 5626 return; 5627 } 5628 if ((ca->m) && ca->sndlen) { 5629 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_DONTWAIT); 5630 if (m == NULL) { 5631 /* can't copy so we are done */ 5632 ca->cnt_failed++; 5633 return; 5634 } 5635 } else { 5636 m = NULL; 5637 } 5638 SCTP_TCB_LOCK_ASSERT(stcb); 5639 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) { 5640 /* Abort this assoc with m as the user defined reason */ 5641 if (m) { 5642 struct sctp_paramhdr *ph; 5643 5644 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_DONTWAIT); 5645 if (m) { 5646 ph = mtod(m, struct sctp_paramhdr *); 5647 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 5648 ph->param_length = htons(ca->sndlen); 5649 } 5650 /* 5651 * We add one here to keep the assoc from 5652 * dis-appearing on us. 5653 */ 5654 atomic_add_int(&stcb->asoc.refcnt, 1); 5655 sctp_abort_an_association(inp, stcb, 5656 SCTP_RESPONSE_TO_USER_REQ, 5657 m); 5658 /* 5659 * sctp_abort_an_association calls sctp_free_asoc() 5660 * free association will NOT free it since we 5661 * incremented the refcnt .. 
we do this to prevent 5662 * it being freed and things getting tricky since we 5663 * could end up (from free_asoc) calling inpcb_free 5664 * which would get a recursive lock call to the 5665 * iterator lock.. But as a consequence of that the 5666 * stcb will return to us un-locked.. since 5667 * free_asoc returns with either no TCB or the TCB 5668 * unlocked, we must relock.. to unlock in the 5669 * iterator timer :-0 5670 */ 5671 SCTP_TCB_LOCK(stcb); 5672 atomic_add_int(&stcb->asoc.refcnt, -1); 5673 goto no_chunk_output; 5674 } 5675 } else { 5676 if (m) { 5677 ret = sctp_msg_append(stcb, stcb->asoc.primary_destination, m, 5678 &ca->sndrcv, 1); 5679 } 5680 asoc = &stcb->asoc; 5681 if (ca->sndrcv.sinfo_flags & SCTP_EOF) { 5682 /* shutdown this assoc */ 5683 if (TAILQ_EMPTY(&asoc->send_queue) && 5684 TAILQ_EMPTY(&asoc->sent_queue) && 5685 (asoc->stream_queue_cnt == 0)) { 5686 if (asoc->locked_on_sending) { 5687 goto abort_anyway; 5688 } 5689 /* 5690 * there is nothing queued to send, so I'm 5691 * done... 5692 */ 5693 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 5694 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 5695 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 5696 /* 5697 * only send SHUTDOWN the first time 5698 * through 5699 */ 5700 sctp_send_shutdown(stcb, stcb->asoc.primary_destination); 5701 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 5702 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 5703 } 5704 asoc->state = SCTP_STATE_SHUTDOWN_SENT; 5705 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, 5706 asoc->primary_destination); 5707 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 5708 asoc->primary_destination); 5709 added_control = 1; 5710 do_chunk_output = 0; 5711 } 5712 } else { 5713 /* 5714 * we still got (or just got) data to send, 5715 * so set SHUTDOWN_PENDING 5716 */ 5717 /* 5718 * XXX sockets draft says that SCTP_EOF 5719 * should be sent with no data. 
currently, 5720 * we will allow user data to be sent first 5721 * and move to SHUTDOWN-PENDING 5722 */ 5723 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 5724 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 5725 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 5726 if (asoc->locked_on_sending) { 5727 /* 5728 * Locked to send out the 5729 * data 5730 */ 5731 struct sctp_stream_queue_pending *sp; 5732 5733 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); 5734 if (sp) { 5735 if ((sp->length == 0) && (sp->msg_is_complete == 0)) 5736 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 5737 } 5738 } 5739 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; 5740 if (TAILQ_EMPTY(&asoc->send_queue) && 5741 TAILQ_EMPTY(&asoc->sent_queue) && 5742 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 5743 abort_anyway: 5744 atomic_add_int(&stcb->asoc.refcnt, 1); 5745 sctp_abort_an_association(stcb->sctp_ep, stcb, 5746 SCTP_RESPONSE_TO_USER_REQ, 5747 NULL); 5748 atomic_add_int(&stcb->asoc.refcnt, -1); 5749 goto no_chunk_output; 5750 } 5751 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 5752 asoc->primary_destination); 5753 } 5754 } 5755 5756 } 5757 } 5758 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 5759 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * sizeof(struct sctp_data_chunk))); 5760 5761 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && 5762 (stcb->asoc.total_flight > 0) && 5763 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) 5764 ) { 5765 do_chunk_output = 0; 5766 } 5767 if (do_chunk_output) 5768 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND); 5769 else if (added_control) { 5770 int num_out = 0, reason = 0, cwnd_full = 0, now_filled = 0; 5771 struct timeval now; 5772 int frag_point; 5773 5774 frag_point = sctp_get_frag_point(stcb, &stcb->asoc); 5775 sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out, 5776 &reason, 1, &cwnd_full, 1, 
&now, &now_filled, frag_point); 5777 } 5778 no_chunk_output: 5779 if (ret) { 5780 ca->cnt_failed++; 5781 } else { 5782 ca->cnt_sent++; 5783 } 5784 } 5785 5786 static void 5787 sctp_sendall_completes(void *ptr, uint32_t val) 5788 { 5789 struct sctp_copy_all *ca; 5790 5791 ca = (struct sctp_copy_all *)ptr; 5792 /* 5793 * Do a notify here? Kacheong suggests that the notify be done at 5794 * the send time.. so you would push up a notification if any send 5795 * failed. Don't know if this is feasable since the only failures we 5796 * have is "memory" related and if you cannot get an mbuf to send 5797 * the data you surely can't get an mbuf to send up to notify the 5798 * user you can't send the data :-> 5799 */ 5800 5801 /* now free everything */ 5802 sctp_m_freem(ca->m); 5803 SCTP_FREE(ca); 5804 } 5805 5806 5807 #define MC_ALIGN(m, len) do { \ 5808 SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1)); \ 5809 } while (0) 5810 5811 5812 5813 static struct mbuf * 5814 sctp_copy_out_all(struct uio *uio, int len) 5815 { 5816 struct mbuf *ret, *at; 5817 int left, willcpy, cancpy, error; 5818 5819 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAIT, 1, MT_DATA); 5820 if (ret == NULL) { 5821 /* TSNH */ 5822 return (NULL); 5823 } 5824 left = len; 5825 SCTP_BUF_LEN(ret) = 0; 5826 /* save space for the data chunk header */ 5827 cancpy = M_TRAILINGSPACE(ret); 5828 willcpy = min(cancpy, left); 5829 at = ret; 5830 while (left > 0) { 5831 /* Align data to the end */ 5832 error = uiomove(mtod(at, caddr_t), willcpy, uio); 5833 if (error) { 5834 err_out_now: 5835 sctp_m_freem(at); 5836 return (NULL); 5837 } 5838 SCTP_BUF_LEN(at) = willcpy; 5839 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0; 5840 left -= willcpy; 5841 if (left > 0) { 5842 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 1, MT_DATA); 5843 if (SCTP_BUF_NEXT(at) == NULL) { 5844 goto err_out_now; 5845 } 5846 at = SCTP_BUF_NEXT(at); 5847 SCTP_BUF_LEN(at) = 0; 5848 cancpy = M_TRAILINGSPACE(at); 5849 willcpy = 
min(cancpy, left); 5850 } 5851 } 5852 return (ret); 5853 } 5854 5855 static int 5856 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m, 5857 struct sctp_sndrcvinfo *srcv) 5858 { 5859 int ret; 5860 struct sctp_copy_all *ca; 5861 5862 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all), 5863 "CopyAll"); 5864 if (ca == NULL) { 5865 sctp_m_freem(m); 5866 return (ENOMEM); 5867 } 5868 memset(ca, 0, sizeof(struct sctp_copy_all)); 5869 5870 ca->inp = inp; 5871 ca->sndrcv = *srcv; 5872 /* 5873 * take off the sendall flag, it would be bad if we failed to do 5874 * this :-0 5875 */ 5876 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL; 5877 /* get length and mbuf chain */ 5878 if (uio) { 5879 ca->sndlen = uio->uio_resid; 5880 ca->m = sctp_copy_out_all(uio, ca->sndlen); 5881 if (ca->m == NULL) { 5882 SCTP_FREE(ca); 5883 return (ENOMEM); 5884 } 5885 } else { 5886 /* Gather the length of the send */ 5887 struct mbuf *mat; 5888 5889 mat = m; 5890 ca->sndlen = 0; 5891 while (m) { 5892 ca->sndlen += SCTP_BUF_LEN(m); 5893 m = SCTP_BUF_NEXT(m); 5894 } 5895 ca->m = m; 5896 } 5897 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL, 5898 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES, SCTP_ASOC_ANY_STATE, 5899 (void *)ca, 0, 5900 sctp_sendall_completes, inp, 1); 5901 if (ret) { 5902 #ifdef SCTP_DEBUG 5903 printf("Failed to initiate iterator for sendall\n"); 5904 #endif 5905 SCTP_FREE(ca); 5906 return (EFAULT); 5907 } 5908 return (0); 5909 } 5910 5911 5912 void 5913 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc) 5914 { 5915 struct sctp_tmit_chunk *chk, *nchk; 5916 5917 chk = TAILQ_FIRST(&asoc->control_send_queue); 5918 while (chk) { 5919 nchk = TAILQ_NEXT(chk, sctp_next); 5920 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 5921 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); 5922 if (chk->data) { 5923 sctp_m_freem(chk->data); 5924 chk->data = NULL; 5925 } 5926 asoc->ctrl_queue_cnt--; 5927 if (chk->whoTo) 5928 
sctp_free_remote_addr(chk->whoTo); 5929 sctp_free_a_chunk(stcb, chk); 5930 } 5931 chk = nchk; 5932 } 5933 } 5934 5935 void 5936 sctp_toss_old_asconf(struct sctp_tcb *stcb) 5937 { 5938 struct sctp_association *asoc; 5939 struct sctp_tmit_chunk *chk, *chk_tmp; 5940 5941 asoc = &stcb->asoc; 5942 for (chk = TAILQ_FIRST(&asoc->control_send_queue); chk != NULL; 5943 chk = chk_tmp) { 5944 /* get next chk */ 5945 chk_tmp = TAILQ_NEXT(chk, sctp_next); 5946 /* find SCTP_ASCONF chunk in queue (only one ever in queue) */ 5947 if (chk->rec.chunk_id.id == SCTP_ASCONF) { 5948 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); 5949 if (chk->data) { 5950 sctp_m_freem(chk->data); 5951 chk->data = NULL; 5952 } 5953 asoc->ctrl_queue_cnt--; 5954 if (chk->whoTo) 5955 sctp_free_remote_addr(chk->whoTo); 5956 sctp_free_a_chunk(stcb, chk); 5957 } 5958 } 5959 } 5960 5961 5962 static __inline void 5963 sctp_clean_up_datalist(struct sctp_tcb *stcb, 5964 5965 struct sctp_association *asoc, 5966 struct sctp_tmit_chunk **data_list, 5967 int bundle_at, 5968 struct sctp_nets *net) 5969 { 5970 int i; 5971 struct sctp_tmit_chunk *tp1; 5972 5973 for (i = 0; i < bundle_at; i++) { 5974 /* off of the send queue */ 5975 if (i) { 5976 /* 5977 * Any chunk NOT 0 you zap the time chunk 0 gets 5978 * zapped or set based on if a RTO measurment is 5979 * needed. 
5980 */ 5981 data_list[i]->do_rtt = 0; 5982 } 5983 /* record time */ 5984 data_list[i]->sent_rcv_time = net->last_sent_time; 5985 data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq; 5986 TAILQ_REMOVE(&asoc->send_queue, 5987 data_list[i], 5988 sctp_next); 5989 /* on to the sent queue */ 5990 tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead); 5991 if ((tp1) && (compare_with_wrap(tp1->rec.data.TSN_seq, 5992 data_list[i]->rec.data.TSN_seq, MAX_TSN))) { 5993 struct sctp_tmit_chunk *tpp; 5994 5995 /* need to move back */ 5996 back_up_more: 5997 tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next); 5998 if (tpp == NULL) { 5999 TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next); 6000 goto all_done; 6001 } 6002 tp1 = tpp; 6003 if (compare_with_wrap(tp1->rec.data.TSN_seq, 6004 data_list[i]->rec.data.TSN_seq, MAX_TSN)) { 6005 goto back_up_more; 6006 } 6007 TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next); 6008 } else { 6009 TAILQ_INSERT_TAIL(&asoc->sent_queue, 6010 data_list[i], 6011 sctp_next); 6012 } 6013 all_done: 6014 /* This does not lower until the cum-ack passes it */ 6015 asoc->sent_queue_cnt++; 6016 asoc->send_queue_cnt--; 6017 if ((asoc->peers_rwnd <= 0) && 6018 (asoc->total_flight == 0) && 6019 (bundle_at == 1)) { 6020 /* Mark the chunk as being a window probe */ 6021 SCTP_STAT_INCR(sctps_windowprobed); 6022 data_list[i]->rec.data.state_flags |= SCTP_WINDOW_PROBE; 6023 } else { 6024 data_list[i]->rec.data.state_flags &= ~SCTP_WINDOW_PROBE; 6025 } 6026 #ifdef SCTP_AUDITING_ENABLED 6027 sctp_audit_log(0xC2, 3); 6028 #endif 6029 data_list[i]->sent = SCTP_DATAGRAM_SENT; 6030 data_list[i]->snd_count = 1; 6031 data_list[i]->rec.data.chunk_was_revoked = 0; 6032 #ifdef SCTP_FLIGHT_LOGGING 6033 sctp_misc_ints(SCTP_FLIGHT_LOG_UP, 6034 data_list[i]->whoTo->flight_size, 6035 data_list[i]->book_size, 6036 (uintptr_t) stcb, 6037 data_list[i]->rec.data.TSN_seq); 6038 #endif 6039 net->flight_size += data_list[i]->book_size; 6040 
asoc->total_flight += data_list[i]->book_size; 6041 asoc->total_flight_count++; 6042 #ifdef SCTP_LOG_RWND 6043 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND, 6044 asoc->peers_rwnd, data_list[i]->send_size, sctp_peer_chunk_oh); 6045 #endif 6046 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd, 6047 (uint32_t) (data_list[i]->send_size + sctp_peer_chunk_oh)); 6048 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 6049 /* SWS sender side engages */ 6050 asoc->peers_rwnd = 0; 6051 } 6052 } 6053 } 6054 6055 static __inline void 6056 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc) 6057 { 6058 struct sctp_tmit_chunk *chk, *nchk; 6059 6060 for (chk = TAILQ_FIRST(&asoc->control_send_queue); 6061 chk; chk = nchk) { 6062 nchk = TAILQ_NEXT(chk, sctp_next); 6063 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) || 6064 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) || 6065 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) || 6066 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) || 6067 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) || 6068 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) || 6069 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) || 6070 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) || 6071 (chk->rec.chunk_id.id == SCTP_ECN_CWR) || 6072 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) { 6073 /* Stray chunks must be cleaned up */ 6074 clean_up_anyway: 6075 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); 6076 if (chk->data) { 6077 sctp_m_freem(chk->data); 6078 chk->data = NULL; 6079 } 6080 asoc->ctrl_queue_cnt--; 6081 sctp_free_remote_addr(chk->whoTo); 6082 sctp_free_a_chunk(stcb, chk); 6083 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) { 6084 /* special handling, we must look into the param */ 6085 if (chk != asoc->str_reset) { 6086 goto clean_up_anyway; 6087 } 6088 } 6089 } 6090 } 6091 6092 6093 static __inline int 6094 sctp_can_we_split_this(struct sctp_tcb *stcb, 6095 struct sctp_stream_queue_pending *sp, 6096 int goal_mtu, int frag_point, 
int eeor_on) 6097 { 6098 /* 6099 * Make a decision on if I should split a msg into multiple parts. 6100 */ 6101 if (goal_mtu < sctp_min_split_point) { 6102 /* you don't want enough */ 6103 return (0); 6104 } 6105 if (sp->msg_is_complete == 0) { 6106 if (eeor_on) { 6107 /* 6108 * If we are doing EEOR we need to always send it if 6109 * its the entire thing. 6110 */ 6111 if (goal_mtu >= sp->length) 6112 return (sp->length); 6113 } else { 6114 if (goal_mtu >= sp->length) { 6115 /* 6116 * If we cannot fill the amount needed there 6117 * is no sense of splitting the chunk. 6118 */ 6119 return (0); 6120 } 6121 } 6122 /* 6123 * If we reach here sp->length is larger than the goal_mtu. 6124 * Do we wish to split it for the sake of packet putting 6125 * together? 6126 */ 6127 if (goal_mtu >= min(sctp_min_split_point, stcb->asoc.smallest_mtu)) { 6128 /* Its ok to split it */ 6129 return (min(goal_mtu, frag_point)); 6130 } 6131 } else { 6132 /* We can always split a complete message to make it fit */ 6133 if (goal_mtu >= sp->length) 6134 /* Take it all */ 6135 return (sp->length); 6136 6137 return (min(goal_mtu, frag_point)); 6138 } 6139 /* Nope, can't split */ 6140 return (0); 6141 6142 } 6143 6144 static int 6145 sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net, 6146 struct sctp_stream_out *strq, 6147 int goal_mtu, 6148 int frag_point, 6149 int *locked, 6150 int *giveup, 6151 int eeor_mode) 6152 { 6153 /* Move from the stream to the send_queue keeping track of the total */ 6154 struct sctp_association *asoc; 6155 struct sctp_stream_queue_pending *sp; 6156 struct sctp_tmit_chunk *chk; 6157 struct sctp_data_chunk *dchkh; 6158 int to_move; 6159 uint8_t rcv_flags = 0; 6160 uint8_t some_taken; 6161 uint8_t took_all = 0; 6162 6163 SCTP_TCB_LOCK_ASSERT(stcb); 6164 asoc = &stcb->asoc; 6165 sp = TAILQ_FIRST(&strq->outqueue); 6166 if (sp == NULL) { 6167 *locked = 0; 6168 SCTP_TCB_SEND_LOCK(stcb); 6169 if (strq->last_msg_incomplete) { 6170 printf("Huh? 
Stream:%d lm_in_c=%d but queue is NULL\n", 6171 strq->stream_no, strq->last_msg_incomplete); 6172 strq->last_msg_incomplete = 0; 6173 } 6174 SCTP_TCB_SEND_UNLOCK(stcb); 6175 return (0); 6176 } 6177 SCTP_TCB_SEND_LOCK(stcb); 6178 if ((sp->length == 0) && (sp->msg_is_complete == 0)) { 6179 /* Must wait for more data, must be last msg */ 6180 *locked = 1; 6181 *giveup = 1; 6182 SCTP_TCB_SEND_UNLOCK(stcb); 6183 return (0); 6184 } else if (sp->length == 0) { 6185 /* This should not happen */ 6186 panic("sp length is 0?"); 6187 } 6188 some_taken = sp->some_taken; 6189 if ((goal_mtu >= sp->length) && (sp->msg_is_complete)) { 6190 /* It all fits and its a complete msg, no brainer */ 6191 to_move = min(sp->length, frag_point); 6192 if (to_move == sp->length) { 6193 /* Getting it all */ 6194 if (sp->some_taken) { 6195 rcv_flags |= SCTP_DATA_LAST_FRAG; 6196 } else { 6197 rcv_flags |= SCTP_DATA_NOT_FRAG; 6198 } 6199 } else { 6200 /* Not getting it all, frag point overrides */ 6201 if (sp->some_taken == 0) { 6202 rcv_flags |= SCTP_DATA_FIRST_FRAG; 6203 } 6204 sp->some_taken = 1; 6205 } 6206 } else { 6207 to_move = sctp_can_we_split_this(stcb, sp, goal_mtu, 6208 frag_point, eeor_mode); 6209 if (to_move) { 6210 if (to_move >= sp->length) { 6211 to_move = sp->length; 6212 } 6213 if (sp->some_taken == 0) { 6214 rcv_flags |= SCTP_DATA_FIRST_FRAG; 6215 } 6216 sp->some_taken = 1; 6217 } else { 6218 if (sp->some_taken) { 6219 *locked = 1; 6220 } 6221 *giveup = 1; 6222 SCTP_TCB_SEND_UNLOCK(stcb); 6223 return (0); 6224 } 6225 } 6226 SCTP_TCB_SEND_UNLOCK(stcb); 6227 /* If we reach here, we can copy out a chunk */ 6228 sctp_alloc_a_chunk(stcb, chk); 6229 if (chk == NULL) { 6230 /* No chunk memory */ 6231 out_gu: 6232 *giveup = 1; 6233 return (0); 6234 } 6235 /* 6236 * Setup for unordered if needed by looking at the user sent info 6237 * flags. 
6238 */ 6239 if (sp->sinfo_flags & SCTP_UNORDERED) { 6240 rcv_flags |= SCTP_DATA_UNORDERED; 6241 } 6242 /* clear out the chunk before setting up */ 6243 memset(chk, sizeof(*chk), 0); 6244 chk->rec.data.rcv_flags = rcv_flags; 6245 SCTP_TCB_SEND_LOCK(stcb); 6246 if (SCTP_BUF_IS_EXTENDED(sp->data)) { 6247 chk->copy_by_ref = 1; 6248 } else { 6249 chk->copy_by_ref = 0; 6250 } 6251 if (to_move >= sp->length) { 6252 /* we can steal the whole thing */ 6253 chk->data = sp->data; 6254 chk->last_mbuf = sp->tail_mbuf; 6255 /* register the stealing */ 6256 sp->data = sp->tail_mbuf = NULL; 6257 took_all = 1; 6258 } else { 6259 struct mbuf *m; 6260 6261 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_DONTWAIT); 6262 chk->last_mbuf = NULL; 6263 if (chk->data == NULL) { 6264 sp->some_taken = some_taken; 6265 sctp_free_a_chunk(stcb, chk); 6266 SCTP_TCB_SEND_UNLOCK(stcb); 6267 goto out_gu; 6268 } 6269 /* Pull off the data */ 6270 m_adj(sp->data, to_move); 6271 /* Now lets work our way down and compact it */ 6272 m = sp->data; 6273 while (m && (SCTP_BUF_LEN(m) == 0)) { 6274 sp->data = SCTP_BUF_NEXT(m); 6275 SCTP_BUF_NEXT(m) = NULL; 6276 if (sp->tail_mbuf == m) { 6277 /* freeing tail */ 6278 sp->tail_mbuf = sp->data; 6279 } 6280 sctp_m_free(m); 6281 m = sp->data; 6282 } 6283 } 6284 if (to_move > sp->length) { 6285 panic("Huh, how can to_move be larger?"); 6286 } else { 6287 sp->length -= to_move; 6288 } 6289 6290 if (M_LEADINGSPACE(chk->data) < sizeof(struct sctp_data_chunk)) { 6291 /* Not enough room for a chunk header, get some */ 6292 struct mbuf *m; 6293 6294 m = sctp_get_mbuf_for_msg(1, 0, M_DONTWAIT, 0, MT_DATA); 6295 if (m == NULL) { 6296 /* 6297 * we're in trouble here. _PREPEND below will free 6298 * all the data if there is no leading space, so we 6299 * must put the data back and restore. 
6300 */ 6301 if (took_all) { 6302 /* unsteal the data */ 6303 sp->data = chk->data; 6304 sp->tail_mbuf = chk->last_mbuf; 6305 } else { 6306 struct mbuf *m; 6307 6308 /* reassemble the data */ 6309 m = sp->data; 6310 sp->data = chk->data; 6311 SCTP_BUF_NEXT(sp->data) = m; 6312 } 6313 sp->some_taken = some_taken; 6314 sp->length += to_move; 6315 chk->data = NULL; 6316 sctp_free_a_chunk(stcb, chk); 6317 SCTP_TCB_SEND_UNLOCK(stcb); 6318 goto out_gu; 6319 } else { 6320 SCTP_BUF_LEN(m) = 0; 6321 SCTP_BUF_NEXT(m) = chk->data; 6322 chk->data = m; 6323 M_ALIGN(chk->data, 4); 6324 } 6325 } 6326 SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_DONTWAIT); 6327 if (chk->data == NULL) { 6328 /* HELP */ 6329 sctp_free_a_chunk(stcb, chk); 6330 SCTP_TCB_SEND_UNLOCK(stcb); 6331 goto out_gu; 6332 } 6333 sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk)); 6334 chk->book_size = chk->send_size = (to_move + 6335 sizeof(struct sctp_data_chunk)); 6336 chk->book_size_scale = 0; 6337 chk->sent = SCTP_DATAGRAM_UNSENT; 6338 6339 /* 6340 * get last_mbuf and counts of mb useage This is ugly but hopefully 6341 * its only one mbuf. 
6342 */ 6343 if (chk->last_mbuf == NULL) { 6344 chk->last_mbuf = chk->data; 6345 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) { 6346 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf); 6347 } 6348 } 6349 chk->flags = 0; 6350 chk->asoc = &stcb->asoc; 6351 chk->pad_inplace = 0; 6352 chk->no_fr_allowed = 0; 6353 chk->rec.data.stream_seq = sp->strseq; 6354 chk->rec.data.stream_number = sp->stream; 6355 chk->rec.data.payloadtype = sp->ppid; 6356 chk->rec.data.context = sp->context; 6357 chk->rec.data.doing_fast_retransmit = 0; 6358 chk->rec.data.ect_nonce = 0; /* ECN Nonce */ 6359 6360 chk->rec.data.timetodrop = sp->ts; 6361 chk->flags = sp->act_flags; 6362 chk->addr_over = sp->addr_over; 6363 6364 chk->whoTo = net; 6365 atomic_add_int(&chk->whoTo->ref_count, 1); 6366 6367 chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1); 6368 #ifdef SCTP_LOG_SENDING_STR 6369 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND, 6370 (uintptr_t) stcb, (uintptr_t) sp, 6371 (uint32_t) ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq), 6372 chk->rec.data.TSN_seq); 6373 #endif 6374 6375 dchkh = mtod(chk->data, struct sctp_data_chunk *); 6376 /* 6377 * Put the rest of the things in place now. Size was done earlier in 6378 * previous loop prior to padding. 
6379 */ 6380 6381 #ifdef SCTP_ASOCLOG_OF_TSNS 6382 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq; 6383 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number; 6384 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq; 6385 asoc->tsn_out_at++; 6386 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) { 6387 asoc->tsn_out_at = 0; 6388 } 6389 #endif 6390 6391 dchkh->ch.chunk_type = SCTP_DATA; 6392 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags; 6393 dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq); 6394 dchkh->dp.stream_id = htons(strq->stream_no); 6395 dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq); 6396 dchkh->dp.protocol_id = chk->rec.data.payloadtype; 6397 dchkh->ch.chunk_length = htons(chk->send_size); 6398 /* Now advance the chk->send_size by the actual pad needed. */ 6399 if (chk->send_size < SCTP_SIZE32(chk->book_size)) { 6400 /* need a pad */ 6401 struct mbuf *lm; 6402 int pads; 6403 6404 pads = SCTP_SIZE32(chk->book_size) - chk->send_size; 6405 if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) { 6406 chk->pad_inplace = 1; 6407 } 6408 if ((lm = SCTP_BUF_NEXT(chk->last_mbuf)) != NULL) { 6409 /* pad added an mbuf */ 6410 chk->last_mbuf = lm; 6411 } 6412 chk->send_size += pads; 6413 } 6414 /* We only re-set the policy if it is on */ 6415 if (sp->pr_sctp_on) { 6416 sctp_set_prsctp_policy(stcb, sp); 6417 } 6418 if (sp->msg_is_complete && (sp->length == 0)) { 6419 /* All done pull and kill the message */ 6420 asoc->stream_queue_cnt--; 6421 TAILQ_REMOVE(&strq->outqueue, sp, next); 6422 sctp_free_remote_addr(sp->net); 6423 if (sp->data) { 6424 sctp_m_freem(sp->data); 6425 sp->data = NULL; 6426 } 6427 sctp_free_a_strmoq(stcb, sp); 6428 6429 /* we can't be locked to it */ 6430 *locked = 0; 6431 stcb->asoc.locked_on_sending = NULL; 6432 } else { 6433 /* more to go, we are locked */ 6434 *locked = 1; 6435 } 6436 asoc->chunks_on_out_queue++; 6437 if (sp->pr_sctp_on) { 6438 asoc->pr_sctp_cnt++; 6439 chk->pr_sctp_on = 
1; 6440 } else { 6441 chk->pr_sctp_on = 0; 6442 } 6443 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next); 6444 asoc->send_queue_cnt++; 6445 SCTP_TCB_SEND_UNLOCK(stcb); 6446 return (to_move); 6447 } 6448 6449 6450 static struct sctp_stream_out * 6451 sctp_select_a_stream(struct sctp_tcb *stcb, struct sctp_association *asoc) 6452 { 6453 struct sctp_stream_out *strq; 6454 6455 /* Find the next stream to use */ 6456 if (asoc->last_out_stream == NULL) { 6457 strq = asoc->last_out_stream = TAILQ_FIRST(&asoc->out_wheel); 6458 if (asoc->last_out_stream == NULL) { 6459 /* huh nothing on the wheel, TSNH */ 6460 return (NULL); 6461 } 6462 goto done_it; 6463 } 6464 strq = TAILQ_NEXT(asoc->last_out_stream, next_spoke); 6465 done_it: 6466 if (strq == NULL) { 6467 strq = asoc->last_out_stream = TAILQ_FIRST(&asoc->out_wheel); 6468 } 6469 return (strq); 6470 6471 } 6472 6473 static void 6474 sctp_fill_outqueue(struct sctp_tcb *stcb, 6475 struct sctp_nets *net, int frag_point, int eeor_mode) 6476 { 6477 struct sctp_association *asoc; 6478 struct sctp_stream_out *strq, *strqn, *strqt; 6479 int goal_mtu, moved_how_much, total_moved = 0; 6480 int locked, giveup; 6481 struct sctp_stream_queue_pending *sp; 6482 6483 SCTP_TCB_LOCK_ASSERT(stcb); 6484 asoc = &stcb->asoc; 6485 #ifdef INET6 6486 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) { 6487 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD; 6488 } else { 6489 /* ?? not sure what else to do */ 6490 goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD; 6491 } 6492 #else 6493 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD; 6494 mtu_fromwheel = 0; 6495 #endif 6496 /* Need an allowance for the data chunk header too */ 6497 goal_mtu -= sizeof(struct sctp_data_chunk); 6498 6499 /* must make even word boundary */ 6500 goal_mtu &= 0xfffffffc; 6501 if (asoc->locked_on_sending) { 6502 /* We are stuck on one stream until the message completes. 
*/ 6503 strqn = strq = asoc->locked_on_sending; 6504 locked = 1; 6505 } else { 6506 strqn = strq = sctp_select_a_stream(stcb, asoc); 6507 locked = 0; 6508 } 6509 6510 while ((goal_mtu > 0) && strq) { 6511 sp = TAILQ_FIRST(&strq->outqueue); 6512 /* 6513 * If CMT is off, we must validate that the stream in 6514 * question has the first item pointed towards are network 6515 * destionation requested by the caller. Note that if we 6516 * turn out to be locked to a stream (assigning TSN's then 6517 * we must stop, since we cannot look for another stream 6518 * with data to send to that destination). In CMT's case, by 6519 * skipping this check, we will send one data packet towards 6520 * the requested net. 6521 */ 6522 if (sp == NULL) { 6523 break; 6524 } 6525 if ((sp->net != net) && (sctp_cmt_on_off == 0)) { 6526 /* none for this network */ 6527 if (locked) { 6528 break; 6529 } else { 6530 strq = sctp_select_a_stream(stcb, asoc); 6531 if (strq == NULL) 6532 /* none left */ 6533 break; 6534 if (strqn == strq) { 6535 /* I have circled */ 6536 break; 6537 } 6538 continue; 6539 } 6540 } 6541 giveup = 0; 6542 moved_how_much = sctp_move_to_outqueue(stcb, net, strq, goal_mtu, frag_point, &locked, 6543 &giveup, eeor_mode); 6544 asoc->last_out_stream = strq; 6545 if (locked) { 6546 asoc->locked_on_sending = strq; 6547 if ((moved_how_much == 0) || (giveup)) 6548 /* no more to move for now */ 6549 break; 6550 } else { 6551 asoc->locked_on_sending = NULL; 6552 strqt = sctp_select_a_stream(stcb, asoc); 6553 if (TAILQ_FIRST(&strq->outqueue) == NULL) { 6554 sctp_remove_from_wheel(stcb, asoc, strq); 6555 } 6556 if (giveup) { 6557 break; 6558 } 6559 strq = strqt; 6560 if (strq == NULL) { 6561 break; 6562 } 6563 } 6564 total_moved += moved_how_much; 6565 goal_mtu -= moved_how_much; 6566 goal_mtu &= 0xfffffffc; 6567 } 6568 if (total_moved == 0) { 6569 if ((sctp_cmt_on_off == 0) && 6570 (net == stcb->asoc.primary_destination)) { 6571 /* ran dry for primary network net */ 6572 
			SCTP_STAT_INCR(sctps_primary_randry);
		} else if (sctp_cmt_on_off) {
			/* ran dry with CMT on */
			SCTP_STAT_INCR(sctps_cmt_randry);
		}
	}
}

/*
 * Re-arm every ECN-ECHO chunk sitting on the control send queue by
 * marking it SCTP_DATAGRAM_UNSENT so it is picked up again on the next
 * output pass.
 */
__inline void
sctp_fix_ecn_echo(struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;

	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
			chk->sent = SCTP_DATAGRAM_UNSENT;
		}
	}
}

/*
 * Re-home all send-queue chunks currently addressed to 'net' onto a
 * reachable alternate destination, adjusting the per-net reference
 * counts. No-op if no distinct reachable alternate exists.
 */
static void
sctp_move_to_an_alt(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net)
{
	struct sctp_tmit_chunk *chk;
	struct sctp_nets *a_net;

	SCTP_TCB_LOCK_ASSERT(stcb);
	a_net = sctp_find_alternate_net(stcb, net, 0);
	if ((a_net != net) &&
	    ((a_net->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE)) {
		/*
		 * We only proceed if a valid alternate is found that is not
		 * this one and is reachable. Here we must move all chunks
		 * queued in the send queue off of the destination address
		 * to our alternate.
		 */
		TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
			if (chk->whoTo == net) {
				/* Move the chunk to our alternate */
				sctp_free_remote_addr(chk->whoTo);
				chk->whoTo = a_net;
				atomic_add_int(&a_net->ref_count, 1);
			}
		}
	}
}

/*
 * The central output engine: fills the send queue from the stream
 * queues, then walks every destination bundling control and data chunks
 * into packets and handing them to sctp_lowlevel_chunk_output().
 * *num_out receives the count of chunks emitted; *reason_code records
 * why the pass stopped (values 1..9 set at the various exit points
 * below). Returns 0 normally, ENOMEM on mbuf exhaustion.
 */
int
sctp_med_chunk_output(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int *num_out,
    int *reason_code,
    int control_only, int *cwnd_full, int from_where,
    struct timeval *now, int *now_filled, int frag_point)
{
	/*
	 * Ok this is the generic chunk service queue. we must do the
	 * following: - Service the stream queue that is next, moving any
	 * message (note I must get a complete message i.e. FIRST/MIDDLE and
	 * LAST to the out queue in one pass) and assigning TSN's - Check to
	 * see if the cwnd/rwnd allows any output, if so we go ahead and
	 * fomulate and send the low level chunks. Making sure to combine
	 * any control in the control chunk queue also.
	 */
	struct sctp_nets *net;
	struct mbuf *outchain, *endoutchain;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctphdr *shdr;

	/* temp arrays for unlinking */
	struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
	int no_fragmentflg, error;
	int one_chunk, hbflag, skip_data_for_this_net;
	int asconf, cookie, no_out_cnt;
	int bundle_at, ctl_cnt, no_data_chunks, cwnd_full_ind, eeor_mode;
	unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
	struct sctp_nets *start_at, *old_startat = NULL, *send_start_at;
	int tsns_sent = 0;
	uint32_t auth_offset = 0;
	struct sctp_auth_chunk *auth = NULL;

	*num_out = 0;
	cwnd_full_ind = 0;

	if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
	    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
		eeor_mode = 1;
	} else {
		eeor_mode = 0;
	}
	ctl_cnt = no_out_cnt = asconf = cookie = 0;
	/*
	 * First lets prime the pump. For each destination, if there is room
	 * in the flight size, attempt to pull an MTU's worth out of the
	 * stream queues into the general send_queue
	 */
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xC2, 2);
#endif
	SCTP_TCB_LOCK_ASSERT(stcb);
	hbflag = 0;
	if ((control_only) || (asoc->stream_reset_outstanding))
		no_data_chunks = 1;
	else
		no_data_chunks = 0;

	/* Nothing to possible to send? */
	if (TAILQ_EMPTY(&asoc->control_send_queue) &&
	    TAILQ_EMPTY(&asoc->send_queue) &&
	    TAILQ_EMPTY(&asoc->out_wheel)) {
		*reason_code = 9;
		return (0);
	}
	if (asoc->peers_rwnd == 0) {
		/* No room in peers rwnd */
		*cwnd_full = 1;
		*reason_code = 1;
		if (asoc->total_flight > 0) {
			/* we are allowed one chunk in flight */
			no_data_chunks = 1;
		}
	}
	if ((no_data_chunks == 0) && (!TAILQ_EMPTY(&asoc->out_wheel))) {
		if (sctp_cmt_on_off) {
			/*
			 * for CMT we start at the next one past the one we
			 * last added data to.
			 */
			if (TAILQ_FIRST(&asoc->send_queue) != NULL) {
				goto skip_the_fill_from_streams;
			}
			if (asoc->last_net_data_came_from) {
				net = TAILQ_NEXT(asoc->last_net_data_came_from, sctp_next);
				if (net == NULL) {
					net = TAILQ_FIRST(&asoc->nets);
				}
			} else {
				/* back to start */
				net = TAILQ_FIRST(&asoc->nets);
			}

		} else {
			net = asoc->primary_destination;
			if (net == NULL) {
				/* TSNH */
				net = TAILQ_FIRST(&asoc->nets);
			}
		}
		start_at = net;
one_more_time:
		for (; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
			if (old_startat && (old_startat == net)) {
				break;
			}
			if ((sctp_cmt_on_off == 0) && (net->ref_count < 2)) {
				/* nothing can be in queue for this guy */
				continue;
			}
			if (net->flight_size >= net->cwnd) {
				/* skip this network, no room */
				cwnd_full_ind++;
				continue;
			}
			/*
			 * @@@ JRI : this for loop we are in takes in each
			 * net, if its's got space in cwnd and has data sent
			 * to it (when CMT is off) then it calls
			 * sctp_fill_outqueue for the net. This gets data on
			 * the send queue for that network.
			 *
			 * In sctp_fill_outqueue TSN's are assigned and data is
			 * copied out of the stream buffers. Note mostly
			 * copy by reference (we hope).
			 */
#ifdef SCTP_CWND_LOGGING
			sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
#endif
			sctp_fill_outqueue(stcb, net, frag_point, eeor_mode);
		}
		if (start_at != TAILQ_FIRST(&asoc->nets)) {
			/* got to pick up the beginning stuff. */
			old_startat = start_at;
			start_at = net = TAILQ_FIRST(&asoc->nets);
			goto one_more_time;
		}
	}
skip_the_fill_from_streams:
	*cwnd_full = cwnd_full_ind;
	/* now service each destination and send out what we can for it */
	/* Nothing to send? */
	if ((TAILQ_FIRST(&asoc->control_send_queue) == NULL) &&
	    (TAILQ_FIRST(&asoc->send_queue) == NULL)) {
		*reason_code = 8;
		return (0);
	}
	chk = TAILQ_FIRST(&asoc->send_queue);
	if (chk) {
		send_start_at = chk->whoTo;
	} else {
		send_start_at = TAILQ_FIRST(&asoc->nets);
	}
	old_startat = NULL;
again_one_more_time:
	for (net = send_start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
		/* how much can we send? */
		/* printf("Examine for sending net:%x\n", (uint32_t)net); */
		if (old_startat && (old_startat == net)) {
			/* through list completely. */
			break;
		}
		tsns_sent = 0;
		if (net->ref_count < 2) {
			/*
			 * Ref-count of 1 so we cannot have data or control
			 * queued to this address. Skip it.
			 */
			continue;
		}
		ctl_cnt = bundle_at = 0;
		endoutchain = outchain = NULL;
		no_fragmentflg = 1;
		one_chunk = 0;
		if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
			/* unconfirmed address: only control may go out */
			skip_data_for_this_net = 1;
		} else {
			skip_data_for_this_net = 0;
		}
		if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) {
			/*
			 * if we have a route and an ifp check to see if we
			 * have room to send to this guy
			 */
			struct ifnet *ifp;

			ifp = net->ro.ro_rt->rt_ifp;
			if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) {
				SCTP_STAT_INCR(sctps_ifnomemqueued);
#ifdef SCTP_LOG_MAXBURST
				sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED);
#endif
				continue;
			}
		}
		if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
			mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
		} else {
			mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
		}
		mx_mtu = mtu;
		to_out = 0;
		if (mtu > asoc->peers_rwnd) {
			if (asoc->total_flight > 0) {
				/* We have a packet in flight somewhere */
				r_mtu = asoc->peers_rwnd;
			} else {
				/* We are always allowed to send one MTU out */
				one_chunk = 1;
				r_mtu = mtu;
			}
		} else {
			r_mtu = mtu;
		}
		/************************/
		/* Control transmission */
		/************************/
		/* Now first lets go through the control queue */
		for (chk = TAILQ_FIRST(&asoc->control_send_queue);
		    chk; chk = nchk) {
			nchk = TAILQ_NEXT(chk, sctp_next);
			if (chk->whoTo != net) {
				/*
				 * No, not sent to the network we are
				 * looking at
				 */
				continue;
			}
			if (chk->data == NULL) {
				continue;
			}
			if (chk->sent != SCTP_DATAGRAM_UNSENT) {
				/*
				 * It must be unsent. Cookies and ASCONF's
				 * hang around but there timers will force
				 * when marked for resend.
				 */
				continue;
			}
			/*
			 * if no AUTH is yet included and this chunk
			 * requires it, make sure to account for it. We
			 * don't apply the size until the AUTH chunk is
			 * actually added below in case there is no room for
			 * this chunk. NOTE: we overload the use of "omtu"
			 * here
			 */
			if ((auth == NULL) &&
			    sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
			    stcb->asoc.peer_auth_chunks)) {
				omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
			} else
				omtu = 0;
			/* Here we do NOT factor the r_mtu */
			if ((chk->send_size < (int)(mtu - omtu)) ||
			    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
				/*
				 * We probably should glom the mbuf chain
				 * from the chk->data for control but the
				 * problem is it becomes yet one more level
				 * of tracking to do if for some reason
				 * output fails. Then I have got to
				 * reconstruct the merged control chain.. el
				 * yucko.. for now we take the easy way and
				 * do the copy
				 */
				/*
				 * Add an AUTH chunk, if chunk requires it
				 * save the offset into the chain for AUTH
				 */
				if ((auth == NULL) &&
				    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
				    stcb->asoc.peer_auth_chunks))) {
					outchain = sctp_add_auth_chunk(outchain,
					    &endoutchain,
					    &auth,
					    &auth_offset,
					    stcb,
					    chk->rec.chunk_id.id);
					SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
				}
				outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
				    (int)chk->rec.chunk_id.can_take_data,
				    chk->send_size, chk->copy_by_ref);
				if (outchain == NULL) {
					*reason_code = 8;
					return (ENOMEM);
				}
				SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
				/* update our MTU size */
				if (mtu > (chk->send_size + omtu))
					mtu -= (chk->send_size + omtu);
				else
					mtu = 0;
				to_out += (chk->send_size + omtu);
				/* Do clear IP_DF ? */
				if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
					no_fragmentflg = 0;
				}
				if (chk->rec.chunk_id.can_take_data)
					chk->data = NULL;
				/* Mark things to be removed, if needed */
				if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
				    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
				    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
				    (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
				    (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
				    (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
				    (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
				    (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
				    (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
				    (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {

					if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST)
						hbflag = 1;
					/* remove these chunks at the end */
					if (chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) {
						/* turn off the timer */
						if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
							sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
							    inp, stcb, net, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
						}
					}
					ctl_cnt++;
				} else {
					/*
					 * Other chunks, since they have
					 * timers running (i.e. COOKIE or
					 * ASCONF) we just "trust" that it
					 * gets sent or retransmitted.
					 */
					ctl_cnt++;
					if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
						cookie = 1;
						no_out_cnt = 1;
					} else if (chk->rec.chunk_id.id == SCTP_ASCONF) {
						/*
						 * set hb flag since we can
						 * use these for RTO
						 */
						hbflag = 1;
						asconf = 1;
					}
					chk->sent = SCTP_DATAGRAM_SENT;
					chk->snd_count++;
				}
				if (mtu == 0) {
					/*
					 * Ok we are out of room but we can
					 * output without effecting the
					 * flight size since this little guy
					 * is a control only packet.
					 */
					if (asconf) {
						sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
						asconf = 0;
					}
					if (cookie) {
						sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
						cookie = 0;
					}
					SCTP_BUF_PREPEND(outchain, sizeof(struct sctphdr), M_DONTWAIT);
					if (outchain == NULL) {
						/* no memory */
						error = ENOBUFS;
						goto error_out_again;
					}
					shdr = mtod(outchain, struct sctphdr *);
					shdr->src_port = inp->sctp_lport;
					shdr->dest_port = stcb->rport;
					shdr->v_tag = htonl(stcb->asoc.peer_vtag);
					shdr->checksum = 0;
					auth_offset += sizeof(struct sctphdr);
					if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
					    (struct sockaddr *)&net->ro._l_addr,
					    outchain, auth_offset, auth,
					    no_fragmentflg, 0, NULL, asconf))) {
						if (error == ENOBUFS) {
							asoc->ifp_had_enobuf = 1;
						}
						SCTP_STAT_INCR(sctps_lowlevelerr);
						if (from_where == 0) {
							SCTP_STAT_INCR(sctps_lowlevelerrusr);
						}
				error_out_again:
						/* error, could not output */
						if (hbflag) {
							if (*now_filled == 0) {
								SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
								*now_filled = 1;
								*now = net->last_sent_time;
							} else {
								net->last_sent_time = *now;
							}
							hbflag = 0;
						}
						if (error == EHOSTUNREACH) {
							/*
							 * Destination went
							 * unreachable
							 * during this send
							 */
							sctp_move_to_an_alt(stcb, asoc, net);
						}
						*reason_code = 7;
						continue;
					} else
						asoc->ifp_had_enobuf = 0;
					/* Only HB or ASCONF advances time */
					if (hbflag) {
						if (*now_filled == 0) {
							SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
							*now_filled = 1;
							*now = net->last_sent_time;
						} else {
							net->last_sent_time = *now;
						}
						hbflag = 0;
					}
					/*
					 * increase the number we sent, if a
					 * cookie is sent we don't tell them
					 * any was sent out.
					 */
					outchain = endoutchain = NULL;
					auth = NULL;
					auth_offset = 0;
					if (!no_out_cnt)
						*num_out += ctl_cnt;
					/* recalc a clean slate and setup */
					if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
						mtu = (net->mtu - SCTP_MIN_OVERHEAD);
					} else {
						mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD);
					}
					to_out = 0;
					no_fragmentflg = 1;
				}
			}
		}
		/*********************/
		/* Data transmission */
		/*********************/
		/*
		 * if AUTH for DATA is required and no AUTH has been added
		 * yet, account for this in the mtu now... if no data can be
		 * bundled, this adjustment won't matter anyways since the
		 * packet will be going out...
		 */
		if ((auth == NULL) &&
		    sctp_auth_is_required_chunk(SCTP_DATA,
		    stcb->asoc.peer_auth_chunks)) {
			mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
		}
		/* now lets add any data within the MTU constraints */
		if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
			if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr)))
				omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
			else
				omtu = 0;
		} else {
			if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)))
				omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
			else
				omtu = 0;
		}
		if ((((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) && (skip_data_for_this_net == 0)) ||
		    (cookie)) {
			for (chk = TAILQ_FIRST(&asoc->send_queue); chk; chk = nchk) {
				if (no_data_chunks) {
					/* let only control go out */
					*reason_code = 1;
					break;
				}
				if (net->flight_size >= net->cwnd) {
					/* skip this net, no room for data */
					*reason_code = 2;
					break;
				}
				nchk = TAILQ_NEXT(chk, sctp_next);
				if (chk->whoTo != net) {
					/* No, not sent to this net */
					continue;
				}
				if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
					/*
					 * strange, we have a chunk that is
					 * to bit for its destination and
					 * yet no fragment ok flag.
					 * Something went wrong when the
					 * PMTU changed...we did not mark
					 * this chunk for some reason?? I
					 * will fix it here by letting IP
					 * fragment it for now and printing
					 * a warning. This really should not
					 * happen ...
					 */
#ifdef SCTP_DEBUG
					printf("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
					    chk->send_size, mtu);
#endif
					chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
				}
				if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
				    ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
					/* ok we will add this one */

					/*
					 * Add an AUTH chunk, if chunk
					 * requires it, save the offset into
					 * the chain for AUTH
					 */
					if ((auth == NULL) &&
					    (sctp_auth_is_required_chunk(SCTP_DATA,
					    stcb->asoc.peer_auth_chunks))) {

						outchain = sctp_add_auth_chunk(outchain,
						    &endoutchain,
						    &auth,
						    &auth_offset,
						    stcb,
						    SCTP_DATA);
						SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
					}
					outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
					    chk->send_size, chk->copy_by_ref);
					if (outchain == NULL) {
#ifdef SCTP_DEBUG
						if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
							printf("No memory?\n");
						}
#endif
						if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
							sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
						}
						*reason_code = 3;
						return (ENOMEM);
					}
					/* upate our MTU size */
					/* Do clear IP_DF ? */
					if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
						no_fragmentflg = 0;
					}
					/* unsigned subtraction of mtu */
					if (mtu > chk->send_size)
						mtu -= chk->send_size;
					else
						mtu = 0;
					/* unsigned subtraction of r_mtu */
					if (r_mtu > chk->send_size)
						r_mtu -= chk->send_size;
					else
						r_mtu = 0;

					to_out += chk->send_size;
					if (to_out > mx_mtu) {
#ifdef INVARIANTS
						panic("gag");
#else
						printf("Exceeding mtu of %d out size is %d\n",
						    mx_mtu, to_out);
#endif
					}
					data_list[bundle_at++] = chk;
					if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
						mtu = 0;
						break;
					}
					if (chk->sent == SCTP_DATAGRAM_UNSENT) {
						if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
							SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
						} else {
							SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
						}
						if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
						    ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
							/*
							 * Count number of
							 * user msg's that
							 * were fragmented
							 * we do this by
							 * counting when we
							 * see a LAST
							 * fragment only.
							 */
							SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
					}
					if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
						break;
					}
				} else {
					/*
					 * Must be sent in order of the
					 * TSN's (on a network)
					 */
					break;
				}
			}	/* for () */
		}		/* if asoc.state OPEN */
		/* Is there something to send for this destination? */
		if (outchain) {
			/* We may need to start a control timer or two */
			if (asconf) {
				sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
				asconf = 0;
			}
			if (cookie) {
				sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
				cookie = 0;
			}
			/* must start a send timer if data is being sent */
			if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
				/*
				 * no timer running on this destination
				 * restart it.
				 */
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
			}
			/* Now send it, if there is anything to send :> */
			SCTP_BUF_PREPEND(outchain, sizeof(struct sctphdr), M_DONTWAIT);
			if (outchain == NULL) {
				/* out of mbufs */
				error = ENOBUFS;
				goto errored_send;
			}
			shdr = mtod(outchain, struct sctphdr *);
			shdr->src_port = inp->sctp_lport;
			shdr->dest_port = stcb->rport;
			shdr->v_tag = htonl(stcb->asoc.peer_vtag);
			shdr->checksum = 0;
			auth_offset += sizeof(struct sctphdr);
			if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
			    (struct sockaddr *)&net->ro._l_addr,
			    outchain,
			    auth_offset,
			    auth,
			    no_fragmentflg,
			    bundle_at,
			    data_list[0],
			    asconf))) {
				/* error, we could not output */
				if (error == ENOBUFS) {
					asoc->ifp_had_enobuf = 1;
				}
				SCTP_STAT_INCR(sctps_lowlevelerr);
				if (from_where == 0) {
					SCTP_STAT_INCR(sctps_lowlevelerrusr);
				}
		errored_send:
#ifdef SCTP_DEBUG
				if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
					printf("Gak send error %d\n", error);
				}
#endif
				if (hbflag) {
					if (*now_filled == 0) {
						SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
						*now_filled = 1;
						*now = net->last_sent_time;
					} else {
						net->last_sent_time = *now;
					}
					hbflag = 0;
				}
				if (error == EHOSTUNREACH) {
					/*
					 * Destination went unreachable
					 * during this send
					 */
					sctp_move_to_an_alt(stcb, asoc, net);
				}
				*reason_code = 6;
				continue;
			} else {
				asoc->ifp_had_enobuf = 0;
			}
			outchain = endoutchain = NULL;
			auth = NULL;
			auth_offset = 0;
			if (bundle_at || hbflag) {
				/* For data/asconf and hb set time */
				if (*now_filled == 0) {
					SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
					*now_filled = 1;
					*now = net->last_sent_time;
				} else {
					net->last_sent_time = *now;
				}
			}
			if (!no_out_cnt) {
				*num_out += (ctl_cnt + bundle_at);
			}
			if (bundle_at) {
				/* setup for a RTO measurement */
				tsns_sent = data_list[0]->rec.data.TSN_seq;

				data_list[0]->do_rtt = 1;
				SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
				sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
				if (sctp_early_fr) {
					if (net->flight_size < net->cwnd) {
						/* start or restart it */
						if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
							sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net,
							    SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2);
						}
						SCTP_STAT_INCR(sctps_earlyfrstrout);
						sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net);
					} else {
						/* stop it if its running */
						if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
							SCTP_STAT_INCR(sctps_earlyfrstpout);
							sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net,
							    SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3);
						}
					}
				}
			}
			if (one_chunk) {
				break;
			}
		}
#ifdef SCTP_CWND_LOGGING
		sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
#endif
	}
	if (old_startat == NULL) {
		old_startat = send_start_at;
		send_start_at = TAILQ_FIRST(&asoc->nets);
		goto again_one_more_time;
	}
	/*
	 * At the end there should be no NON timed chunks hanging on this
	 * queue.
 */
#ifdef SCTP_CWND_LOGGING
	sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
#endif
	/* Tell the caller why nothing (4) or something (5) went out. */
	if ((*num_out == 0) && (*reason_code == 0)) {
		*reason_code = 4;
	} else {
		*reason_code = 5;
	}
	sctp_clean_up_ctl(stcb, asoc);
	return (0);
}

/*
 * Prepend an OPERATIONAL_ERROR chunk header to op_err and put the
 * result on the end of the association's control chunk queue.
 *
 * Ownership of op_err passes to this function: it is freed if no
 * chunk descriptor can be allocated (or if the prepend fails), and is
 * consumed by the queue otherwise.  The TCB lock must be held.
 */
void
sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
{
	struct sctp_chunkhdr *hdr;
	struct sctp_tmit_chunk *chk;
	struct mbuf *mat;

	SCTP_TCB_LOCK_ASSERT(stcb);
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(op_err);
		return;
	}
	chk->copy_by_ref = 0;
	/* make room for the chunk header in front of the error cause(s) */
	SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_DONTWAIT);
	if (op_err == NULL) {
		/* prepend failed (and freed the chain); release the chunk */
		sctp_free_a_chunk(stcb, chk);
		return;
	}
	/* total the length over the whole mbuf chain */
	chk->send_size = 0;
	mat = op_err;
	while (mat != NULL) {
		chk->send_size += SCTP_BUF_LEN(mat);
		mat = SCTP_BUF_NEXT(mat);
	}
	chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
	chk->rec.chunk_id.can_take_data = 1;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->data = op_err;
	chk->whoTo = chk->asoc->primary_destination;
	/* the chunk holds a reference on its destination net */
	atomic_add_int(&chk->whoTo->ref_count, 1);
	hdr = mtod(op_err, struct sctp_chunkhdr *);
	hdr->chunk_type = SCTP_OPERATION_ERROR;
	hdr->chunk_flags = 0;
	hdr->chunk_length = htons(chk->send_size);
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
	    chk,
	    sctp_next);
	chk->asoc->ctrl_queue_cnt++;
}

/*
 * Pull the STATE-COOKIE parameter out of the received INIT-ACK (mbuf
 * chain m, chunk starting at offset) and queue it as a COOKIE-ECHO
 * chunk at the FRONT of the control chunk queue.
 *
 * Returns 0 on success, -2 if the cookie could not be copied (no
 * memory), -3 if no cookie parameter was found, -5 if no chunk
 * descriptor could be allocated.  The TCB lock must be held.
 * (Note: the 'net' argument is not referenced here; the echo is
 * queued to the primary destination.)
 */
int
sctp_send_cookie_echo(struct mbuf *m,
    int offset,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int at;
	struct mbuf *cookie;
	struct sctp_paramhdr parm, *phdr;
	struct sctp_chunkhdr *hdr;
	struct sctp_tmit_chunk *chk;
	uint16_t ptype, plen;

	/* First find the cookie in the param area */
	cookie = NULL;
	at = offset + sizeof(struct sctp_init_chunk);

	SCTP_TCB_LOCK_ASSERT(stcb);
	do {
		phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
		if (phdr == NULL) {
			return (-3);
		}
		ptype = ntohs(phdr->param_type);
		plen = ntohs(phdr->param_length);
		if (ptype == SCTP_STATE_COOKIE) {
			int pad;

			/* found the cookie */
			if ((pad = (plen % 4))) {
				/* round the copy length up to a 4-byte boundary */
				plen += 4 - pad;
			}
			cookie = SCTP_M_COPYM(m, at, plen, M_DONTWAIT);
			if (cookie == NULL) {
				/* No memory */
				return (-2);
			}
			break;
		}
		at += SCTP_SIZE32(plen);
	} while (phdr);
	if (cookie == NULL) {
		/* Did not find the cookie */
		return (-3);
	}
	/* ok, we got the cookie lets change it into a cookie echo chunk */

	/* first the change from param to cookie */
	hdr = mtod(cookie, struct sctp_chunkhdr *);
	hdr->chunk_type = SCTP_COOKIE_ECHO;
	hdr->chunk_flags = 0;
	/* get the chunk stuff now and place it in the FRONT of the queue */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(cookie);
		return (-5);
	}
	chk->copy_by_ref = 0;
	chk->send_size = plen;
	chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
	chk->rec.chunk_id.can_take_data = 0;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->data = cookie;
	chk->whoTo = chk->asoc->primary_destination;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	/* COOKIE-ECHO must go out ahead of any other pending control chunk */
	TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return (0);
}

void
sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
    struct mbuf *m,
    int offset,
    int chk_length,
    struct sctp_nets *net)
{
	/*
	 * Take a received HEARTBEAT request (chk_length bytes at offset
	 * in mbuf chain m), rewrite it into a HEARTBEAT-ACK and queue it
	 * back to the net it came from.  Silently returns on NULL net or
	 * allocation failure.
	 */
	struct mbuf *outchain;
	struct sctp_chunkhdr *chdr;
	struct sctp_tmit_chunk *chk;


	if (net == NULL)
		/* must have a net pointer */
		return;

	outchain = SCTP_M_COPYM(m, offset, chk_length, M_DONTWAIT);
	if (outchain == NULL) {
		/* gak out of memory */
		return;
	}
	chdr = mtod(outchain, struct sctp_chunkhdr *);
	chdr->chunk_type = SCTP_HEARTBEAT_ACK;
	chdr->chunk_flags = 0;
	if (chk_length % 4) {
		/* need pad: append zero bytes up to a 4-byte boundary */
		uint32_t cpthis = 0;
		int padlen;

		padlen = 4 - (chk_length % 4);
		m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(outchain);
		return;
	}
	chk->copy_by_ref = 0;
	chk->send_size = chk_length;
	chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
	chk->rec.chunk_id.can_take_data = 1;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->data = outchain;
	chk->whoTo = net;
	/* the chunk holds a reference on its destination net */
	atomic_add_int(&chk->whoTo->ref_count, 1);
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
}

/*
 * Formulate and queue a COOKIE-ACK back to the sender of the
 * COOKIE-ECHO (last_control_chunk_from when known, otherwise the
 * primary destination).  Returns 0 on success, -1 when out of mbufs
 * or chunk descriptors.  The TCB lock must be held.
 */
int
sctp_send_cookie_ack(struct sctp_tcb *stcb)
{
	struct mbuf *cookie_ack;
	struct sctp_chunkhdr *hdr;
	struct sctp_tmit_chunk *chk;

	cookie_ack = NULL;
	SCTP_TCB_LOCK_ASSERT(stcb);

	cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
	if (cookie_ack == NULL) {
		/* no mbuf's */
		return (-1);
	}
	/* leave room in front for the lower-layer headers */
	SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(cookie_ack);
		return (-1);
	}
	chk->copy_by_ref = 0;
	chk->send_size = sizeof(struct sctp_chunkhdr);
	chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
	chk->rec.chunk_id.can_take_data = 1;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->data = cookie_ack;
	/* reply to where the COOKIE-ECHO came from, if known */
	if (chk->asoc->last_control_chunk_from != NULL) {
		chk->whoTo = chk->asoc->last_control_chunk_from;
	} else {
		chk->whoTo = chk->asoc->primary_destination;
	}
	atomic_add_int(&chk->whoTo->ref_count, 1);
	hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
	hdr->chunk_type = SCTP_COOKIE_ACK;
	hdr->chunk_flags = 0;
	hdr->chunk_length = htons(chk->send_size);
	SCTP_BUF_LEN(cookie_ack) = chk->send_size;
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return (0);
}


/*
 * Formulate and queue a SHUTDOWN-ACK to net.  Returns 0 on success,
 * -1 when out of mbufs or chunk descriptors.
 */
int
sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct mbuf *m_shutdown_ack;
	struct sctp_shutdown_ack_chunk *ack_cp;
	struct sctp_tmit_chunk *chk;

	m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
	if (m_shutdown_ack == NULL) {
		/* no mbuf's */
		return (-1);
	}
	SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(m_shutdown_ack);
		return (-1);
	}
	chk->copy_by_ref = 0;

	/* a SHUTDOWN-ACK carries no payload beyond the chunk header */
	chk->send_size = sizeof(struct sctp_chunkhdr);
	chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
	chk->rec.chunk_id.can_take_data = 1;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->data = m_shutdown_ack;
	chk->whoTo = net;
	atomic_add_int(&net->ref_count, 1);

	ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
	ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
	ack_cp->ch.chunk_flags = 0;
	ack_cp->ch.chunk_length = htons(chk->send_size);
	SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return (0);
}

/*
 * Formulate and queue a SHUTDOWN chunk (carrying our cumulative TSN)
 * to net.  Returns 0 on success, -1 when out of mbufs or chunk
 * descriptors.
 */
int
sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct mbuf *m_shutdown;
	struct sctp_shutdown_chunk *shutdown_cp;
	struct sctp_tmit_chunk *chk;

	m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
	if (m_shutdown == NULL) {
		/* no mbuf's */
		return (-1);
	}
	SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(m_shutdown);
		return (-1);
	}
	chk->copy_by_ref = 0;
	chk->send_size = sizeof(struct sctp_shutdown_chunk);
	chk->rec.chunk_id.id = SCTP_SHUTDOWN;
	chk->rec.chunk_id.can_take_data = 1;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->data = m_shutdown;
	chk->whoTo = net;
	atomic_add_int(&net->ref_count, 1);

	shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
	shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
	shutdown_cp->ch.chunk_flags = 0;
	shutdown_cp->ch.chunk_length = htons(chk->send_size);
	shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
	SCTP_BUF_LEN(m_shutdown) = chk->send_size;
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return (0);
}

int
sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*
	 * Formulate and queue an ASCONF chunk to the peer.  The ASCONF
	 * parameters to send should already be queued on the association;
	 * sctp_compose_asconf gathers them (bounded by the path MTU).
	 * Returns 0 on success, -1 if composition or chunk allocation
	 * fails.  The TCB lock must be held.  (Note: 'net' is not
	 * referenced; the chunk goes to the primary destination.)
	 */
	struct sctp_tmit_chunk *chk;
	struct mbuf *m_asconf;
	struct sctp_asconf_chunk *acp;
	int len;


	SCTP_TCB_LOCK_ASSERT(stcb);
	/* compose an ASCONF chunk, maximum length is PMTU */
	m_asconf = sctp_compose_asconf(stcb, &len);
	if (m_asconf == NULL) {
		return (-1);
	}
	acp = mtod(m_asconf, struct sctp_asconf_chunk *);
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(m_asconf);
		return (-1);
	}
	chk->copy_by_ref = 0;
	chk->data = m_asconf;
	chk->send_size = len;
	chk->rec.chunk_id.id = SCTP_ASCONF;
	chk->rec.chunk_id.can_take_data = 0;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->whoTo = chk->asoc->primary_destination;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return (0);
}

/*
 * Formulate and queue an ASCONF-ACK back to the sender.  The ACK body
 * is a copy of the one cached in asoc->last_asconf_ack_sent (it must
 * be stored in the tcb).  On a retransmission an alternate net is
 * tried (up to three times) before falling back to where the last
 * control chunk came from.  Returns 0 on success, -1 if nothing is
 * cached or allocation fails.  The TCB lock must be held.
 */
int
sctp_send_asconf_ack(struct sctp_tcb *stcb, uint32_t retrans)
{
	struct sctp_tmit_chunk *chk;
	struct mbuf *m_ack, *m;

	SCTP_TCB_LOCK_ASSERT(stcb);
	/* is there a asconf-ack mbuf chain to send? */
	if (stcb->asoc.last_asconf_ack_sent == NULL) {
		return (-1);
	}
	/* copy the asconf_ack */
	m_ack = SCTP_M_COPYM(stcb->asoc.last_asconf_ack_sent, 0, M_COPYALL, M_DONTWAIT);
	if (m_ack == NULL) {
		/* couldn't copy it */

		return (-1);
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		if (m_ack)
			sctp_m_freem(m_ack);
		return (-1);
	}
	chk->copy_by_ref = 0;
	/* figure out where it goes to */
	if (retrans) {
		/* we're doing a retransmission */
		if (stcb->asoc.used_alt_asconfack > 2) {
			/* tried alternate nets already, go back */
			chk->whoTo = NULL;
		} else {
			/* need to try and alternate net */
			chk->whoTo = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
			stcb->asoc.used_alt_asconfack++;
		}
		if (chk->whoTo == NULL) {
			/* no alternate */
			if (stcb->asoc.last_control_chunk_from == NULL)
				chk->whoTo = stcb->asoc.primary_destination;
			else
				chk->whoTo = stcb->asoc.last_control_chunk_from;
			stcb->asoc.used_alt_asconfack = 0;
		}
	} else {
		/* normal case */
		if (stcb->asoc.last_control_chunk_from == NULL)
			chk->whoTo = stcb->asoc.primary_destination;
		else
			chk->whoTo = stcb->asoc.last_control_chunk_from;
		stcb->asoc.used_alt_asconfack = 0;
	}
	chk->data = m_ack;
	chk->send_size = 0;
	/* Get size: total the length over the whole mbuf chain */
	m = m_ack;
	while (m) {
		chk->send_size += SCTP_BUF_LEN(m);
		m = SCTP_BUF_NEXT(m);
	}
	chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
	chk->rec.chunk_id.can_take_data = 1;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return (0);
}


static int
sctp_chunk_retransmission(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int *cnt_out, struct timeval *now, int *now_filled, int *fr_done)
{
	/*
	 * send out one MTU of retransmission. If fast_retransmit is
	 * happening we ignore the cwnd. Otherwise we obey the cwnd and
	 * rwnd. For a Cookie or Asconf in the control chunk queue we
	 * retransmit them by themselves.
	 *
	 * For data chunks we will pick out the lowest TSN's in the sent_queue
	 * marked for resend and bundle them all together (up to a MTU of
	 * destination). The address to send to should have been
	 * selected/changed where the retransmission was marked (i.e. in FR
	 * or t3-timeout routines).
	 *
	 * Returns 0 on success (cnt_out reports how many chunks went out),
	 * 1 when nothing can be sent (rwnd/cwnd limits, or association not
	 * yet open), -1 when the sent queue is empty, or an errno
	 * (ENOBUFS/ENOMEM/low-level output error) on failure.
	 */
	struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
	struct sctp_tmit_chunk *chk, *fwd;
	struct mbuf *m, *endofchain;
	struct sctphdr *shdr;
	int asconf;
	struct sctp_nets *net;
	uint32_t tsns_sent = 0;
	int no_fragmentflg, bundle_at, cnt_thru;
	unsigned int mtu;
	int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
	struct sctp_auth_chunk *auth = NULL;
	uint32_t auth_offset = 0;
	uint32_t dmtu = 0;

	SCTP_TCB_LOCK_ASSERT(stcb);
	tmr_started = ctl_cnt = bundle_at = error = 0;
	no_fragmentflg = 1;
	asconf = 0;
	fwd_tsn = 0;
	*cnt_out = 0;
	fwd = NULL;
	endofchain = m = NULL;
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xC3, 1);
#endif
	if (TAILQ_EMPTY(&asoc->sent_queue)) {
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
			printf("SCTP hits empty queue with cnt set to %d?\n",
			    asoc->sent_queue_retran_cnt);
		}
#endif
		asoc->sent_queue_cnt = 0;
		asoc->sent_queue_cnt_removeable = 0;
	}
	/* First: is there a control chunk that must go out by itself? */
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
		    (chk->rec.chunk_id.id == SCTP_ASCONF) ||
		    (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
		    (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
			if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
				if (chk != asoc->str_reset) {
					/*
					 * not eligible for retran if its
					 * not ours
					 */
					continue;
				}
			}
			ctl_cnt++;
			if (chk->rec.chunk_id.id == SCTP_ASCONF) {
				no_fragmentflg = 1;
				asconf = 1;
			}
			if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
				fwd_tsn = 1;
				fwd = chk;
			}
			/*
			 * Add an AUTH chunk, if chunk requires it save the
			 * offset into the chain for AUTH
			 */
			if ((auth == NULL) &&
			    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
			    stcb->asoc.peer_auth_chunks))) {
				m = sctp_add_auth_chunk(m, &endofchain,
				    &auth, &auth_offset,
				    stcb,
				    chk->rec.chunk_id.id);
			}
			m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
			break;
		}
	}
	one_chunk = 0;
	cnt_thru = 0;
	/* do we have control chunks to retransmit? */
	if (m != NULL) {
		/* Start a timer no matter if we succeed or fail */
		if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
			sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
		} else if (chk->rec.chunk_id.id == SCTP_ASCONF)
			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);

		SCTP_BUF_PREPEND(m, sizeof(struct sctphdr), M_DONTWAIT);
		if (m == NULL) {
			return (ENOBUFS);
		}
		shdr = mtod(m, struct sctphdr *);
		shdr->src_port = inp->sctp_lport;
		shdr->dest_port = stcb->rport;
		shdr->v_tag = htonl(stcb->asoc.peer_vtag);
		shdr->checksum = 0;
		auth_offset += sizeof(struct sctphdr);
		chk->snd_count++;	/* update our count */

		if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
		    (struct sockaddr *)&chk->whoTo->ro._l_addr, m, auth_offset,
		    auth, no_fragmentflg, 0, NULL, asconf))) {
			SCTP_STAT_INCR(sctps_lowlevelerr);
			return (error);
		}
		m = endofchain = NULL;
		auth = NULL;
		auth_offset = 0;
		/*
		 * We don't want to mark the net->sent time here since this
		 * we use this for HB and retrans cannot measure RTT
		 */
		/* SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
		*cnt_out += 1;
		chk->sent = SCTP_DATAGRAM_SENT;
		sctp_ucount_decr(asoc->sent_queue_retran_cnt);
		if (fwd_tsn == 0) {
			return (0);
		} else {
			/* Clean up the fwd-tsn list */
			sctp_clean_up_ctl(stcb, asoc);
			return (0);
		}
	}
	/*
	 * Ok, it is just data retransmission we need to do or that and a
	 * fwd-tsn with it all.
	 */
	if (TAILQ_EMPTY(&asoc->sent_queue)) {
		return (-1);
	}
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
		/* not yet open, resend the cookie and that is it */
		return (1);
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_auditing(20, inp, stcb, NULL);
#endif
	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->sent != SCTP_DATAGRAM_RESEND) {
			/* No, not sent to this net or not ready for rtx */
			continue;

		}
		/* pick up the net */
		net = chk->whoTo;
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
			mtu = (net->mtu - SCTP_MIN_OVERHEAD);
		} else {
			mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
		}

		if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
			/* No room in peers rwnd */
			uint32_t tsn;

			tsn = asoc->last_acked_seq + 1;
			if (tsn == chk->rec.data.TSN_seq) {
				/*
				 * we make a special exception for this
				 * case. The peer has no rwnd but is missing
				 * the lowest chunk.. which is probably what
				 * is holding up the rwnd.
				 */
				goto one_chunk_around;
			}
			return (1);
		}
one_chunk_around:
		if (asoc->peers_rwnd < mtu) {
			one_chunk = 1;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_audit_log(0xC3, 2);
#endif
		bundle_at = 0;
		m = NULL;
		net->fast_retran_ip = 0;
		if (chk->rec.data.doing_fast_retransmit == 0) {
			/*
			 * if no FR in progress skip destination that have
			 * flight_size > cwnd.
			 */
			if (net->flight_size >= net->cwnd) {
				continue;
			}
		} else {
			/*
			 * Mark the destination net to have FR recovery
			 * limits put on it.
			 */
			*fr_done = 1;
			net->fast_retran_ip = 1;
		}

		/*
		 * if no AUTH is yet included and this chunk requires it,
		 * make sure to account for it. We don't apply the size
		 * until the AUTH chunk is actually added below in case
		 * there is no room for this chunk.
		 */
		if ((auth == NULL) &&
		    sctp_auth_is_required_chunk(SCTP_DATA,
		    stcb->asoc.peer_auth_chunks)) {
			dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
		} else
			dmtu = 0;

		if ((chk->send_size <= (mtu - dmtu)) ||
		    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
			/* ok we will add this one */
			if ((auth == NULL) &&
			    (sctp_auth_is_required_chunk(SCTP_DATA,
			    stcb->asoc.peer_auth_chunks))) {
				m = sctp_add_auth_chunk(m, &endofchain,
				    &auth, &auth_offset,
				    stcb, SCTP_DATA);
			}
			m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
			if (m == NULL) {
				return (ENOMEM);
			}
			/* Do clear IP_DF ? */
			if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
				no_fragmentflg = 0;
			}
			/* update our MTU size */
			if (mtu > (chk->send_size + dmtu))
				mtu -= (chk->send_size + dmtu);
			else
				mtu = 0;
			data_list[bundle_at++] = chk;
			if (one_chunk && (asoc->total_flight <= 0)) {
				SCTP_STAT_INCR(sctps_windowprobed);
				chk->rec.data.state_flags |= SCTP_WINDOW_PROBE;
			}
		}
		if (one_chunk == 0) {
			/*
			 * now are there anymore forward from chk to pick
			 * up?
			 */
			fwd = TAILQ_NEXT(chk, sctp_next);
			while (fwd) {
				if (fwd->sent != SCTP_DATAGRAM_RESEND) {
					/* Nope, not for retran */
					fwd = TAILQ_NEXT(fwd, sctp_next);
					continue;
				}
				if (fwd->whoTo != net) {
					/* Nope, not the net in question */
					fwd = TAILQ_NEXT(fwd, sctp_next);
					continue;
				}
				if ((auth == NULL) &&
				    sctp_auth_is_required_chunk(SCTP_DATA,
				    stcb->asoc.peer_auth_chunks)) {
					dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
				} else
					dmtu = 0;
				if (fwd->send_size <= (mtu - dmtu)) {
					if ((auth == NULL) &&
					    (sctp_auth_is_required_chunk(SCTP_DATA,
					    stcb->asoc.peer_auth_chunks))) {
						m = sctp_add_auth_chunk(m,
						    &endofchain,
						    &auth, &auth_offset,
						    stcb,
						    SCTP_DATA);
					}
					m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
					if (m == NULL) {
						return (ENOMEM);
					}
					/* Do clear IP_DF ? */
					if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
						no_fragmentflg = 0;
					}
					/* update our MTU size */
					if (mtu > (fwd->send_size + dmtu))
						mtu -= (fwd->send_size + dmtu);
					else
						mtu = 0;
					data_list[bundle_at++] = fwd;
					if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
						break;
					}
					fwd = TAILQ_NEXT(fwd, sctp_next);
				} else {
					/* can't fit so we are done */
					break;
				}
			}
		}
		/* Is there something to send for this destination? */
		if (m) {
			/*
			 * No matter if we fail/or succeed we should start a
			 * timer. A failure is like a lost IP packet :-)
			 */
			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
				/*
				 * no timer running on this destination
				 * restart it.
				 */
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
				tmr_started = 1;
			}
			SCTP_BUF_PREPEND(m, sizeof(struct sctphdr), M_DONTWAIT);
			if (m == NULL) {
				return (ENOBUFS);
			}
			shdr = mtod(m, struct sctphdr *);
			shdr->src_port = inp->sctp_lport;
			shdr->dest_port = stcb->rport;
			shdr->v_tag = htonl(stcb->asoc.peer_vtag);
			shdr->checksum = 0;
			auth_offset += sizeof(struct sctphdr);
			/* Now lets send it, if there is anything to send :> */
			if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
			    (struct sockaddr *)&net->ro._l_addr, m, auth_offset,
			    auth, no_fragmentflg, 0, NULL, asconf))) {
				/* error, we could not output */
				SCTP_STAT_INCR(sctps_lowlevelerr);
				return (error);
			}
			m = endofchain = NULL;
			auth = NULL;
			auth_offset = 0;
			/* For HB's */
			/*
			 * We don't want to mark the net->sent time here
			 * since this we use this for HB and retrans cannot
			 * measure RTT
			 */
			/* SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */

			/* For auto-close */
			cnt_thru++;
			if (*now_filled == 0) {
				SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
				*now = asoc->time_last_sent;
				*now_filled = 1;
			} else {
				asoc->time_last_sent = *now;
			}
			*cnt_out += bundle_at;
#ifdef SCTP_AUDITING_ENABLED
			sctp_audit_log(0xC4, bundle_at);
#endif
			if (bundle_at) {
				tsns_sent = data_list[0]->rec.data.TSN_seq;
			}
			/* Per-chunk bookkeeping for everything just sent. */
			for (i = 0; i < bundle_at; i++) {
				SCTP_STAT_INCR(sctps_sendretransdata);
				data_list[i]->sent = SCTP_DATAGRAM_SENT;
				/*
				 * When we have a revoked data, and we
				 * retransmit it, then we clear the revoked
				 * flag since this flag dictates if we
				 * subtracted from the fs
				 */
				if (data_list[i]->rec.data.chunk_was_revoked) {
					/* Deflate the cwnd */
					data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
					data_list[i]->rec.data.chunk_was_revoked = 0;
				}
				data_list[i]->snd_count++;
				sctp_ucount_decr(asoc->sent_queue_retran_cnt);
				/* record the time */
				data_list[i]->sent_rcv_time = asoc->time_last_sent;
				if (asoc->sent_queue_retran_cnt < 0) {
					asoc->sent_queue_retran_cnt = 0;
				}
				if (data_list[i]->book_size_scale) {
					/*
					 * need to double the book size on
					 * this one
					 */
					data_list[i]->book_size_scale = 0;
					/*
					 * Since we double the booksize, we
					 * must also double the output queue
					 * size, since this get shrunk when
					 * we free by this amount.
					 */
					atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size);
					data_list[i]->book_size *= 2;


				} else {
					sctp_ucount_incr(asoc->total_flight_count);
#ifdef SCTP_LOG_RWND
					sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
					    asoc->peers_rwnd, data_list[i]->send_size, sctp_peer_chunk_oh);
#endif
					asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
					    (uint32_t) (data_list[i]->send_size +
					    sctp_peer_chunk_oh));
				}
#ifdef SCTP_FLIGHT_LOGGING
				sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
				    data_list[i]->whoTo->flight_size,
				    data_list[i]->book_size,
				    (uintptr_t) stcb,
				    data_list[i]->rec.data.TSN_seq);
#endif
				net->flight_size += data_list[i]->book_size;
				asoc->total_flight += data_list[i]->book_size;
				if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
					/* SWS sender side engages */
					asoc->peers_rwnd = 0;
				}
				if ((i == 0) &&
				    (data_list[i]->rec.data.doing_fast_retransmit)) {
					SCTP_STAT_INCR(sctps_sendfastretrans);
					if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
					    (tmr_started == 0)) {
						/*
						 * ok we just fast-retrans'd
						 * the lowest TSN, i.e the
						 * first on the list. In
						 * this case we want to give
						 * some more time to get a
						 * SACK back without a
						 * t3-expiring.
						 */
						sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
						    SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
						sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
					}
				}
			}
#ifdef SCTP_CWND_LOGGING
			sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
#endif
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(21, inp, stcb, NULL);
#endif
		} else {
			/* None will fit */
			return (1);
		}
		if (asoc->sent_queue_retran_cnt <= 0) {
			/* all done we have no more to retran */
			asoc->sent_queue_retran_cnt = 0;
			break;
		}
		if (one_chunk) {
			/* No more room in rwnd */
			return (1);
		}
		/* stop the for loop here. we sent out a packet */
		break;
	}
	return (0);
}


/*
 * Make sure a retransmission timer is running on at least one
 * destination; if none is, start one on the primary destination to
 * avoid the association deadlocking with retransmissions pending.
 * Always passes 'ret' straight back to the caller.
 */
static int
sctp_timer_validation(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int ret)
{
	struct sctp_nets *net;

	/* Validate that a timer is running somewhere */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
			/* Here is a timer */
			return (ret);
		}
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	/* Gak, we did not have a timer somewhere */
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
		printf("Deadlock avoided starting timer on a dest at retran\n");
	}
#endif
	sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
	return (ret);
}

int
sctp_chunk_output(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int from_where)
{
	/*
	 * Ok this is the generic chunk service queue. we must do the
	 * following: - See if there are retransmits pending, if so we must
	 * do these first and return. - Service the stream queue that is
	 * next, moving any message (note I must get a complete message i.e.
	 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
	 * TSN's - Check to see if the cwnd/rwnd allows any output, if so we
	 * go ahead and formulate and send the low level chunks. Making sure
	 * to combine any control in the control chunk queue also.
	 */
	struct sctp_association *asoc;
	struct sctp_nets *net;
	int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0,
	    burst_cnt = 0, burst_limit = 0;
	struct timeval now;
	int now_filled = 0;
	int cwnd_full = 0;
	int nagle_on = 0;
	int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
	int un_sent = 0;
	int fr_done, tot_frs = 0;

	asoc = &stcb->asoc;
	/* Nagle applies only to user sends, and only without NODELAY. */
	if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
			nagle_on = 0;
		} else {
			nagle_on = 1;
		}
	}
	SCTP_TCB_LOCK_ASSERT(stcb);

	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);

	if ((un_sent <= 0) &&
	    (TAILQ_EMPTY(&asoc->control_send_queue)) &&
	    (asoc->sent_queue_retran_cnt == 0)) {
		/* Nothing to do unless there is something to be sent left */
		return (error);
	}
	/*
	 * Do we have something to send, data or control AND a sack timer
	 * running, if so piggy-back the sack.
	 */
	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
		sctp_send_sack(stcb);
		SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
	}
	while (asoc->sent_queue_retran_cnt) {
		/*
		 * Ok, it is retransmission time only, we send out only ONE
		 * packet with a single call off to the retran code.
		 */
		if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
			/*
			 * Special hook for handling cookies discarded by
			 * peer that carried data. Send cookie-ack only and
			 * then the next call with get the retran's.
			 */
			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
			    &cwnd_full, from_where,
			    &now, &now_filled, frag_point);
			return (0);
		} else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
			/* if its not from a HB then do it */
			fr_done = 0;
			ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done);
			if (fr_done) {
				tot_frs++;
			}
		} else {
			/*
			 * its from any other place, we don't allow retran
			 * output (only control)
			 */
			ret = 1;
		}
		if (ret > 0) {
			/* Can't send anymore */
			/*
			 * now lets push out control by calling med-level
			 * output once. this assures that we WILL send HB's
			 * if queued too.
			 */
			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
			    &cwnd_full, from_where,
			    &now, &now_filled, frag_point);
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(8, inp, stcb, NULL);
#endif
			return (sctp_timer_validation(inp, stcb, asoc, ret));
		}
		if (ret < 0) {
			/*
			 * The count was off.. retran is not happening so do
			 * the normal retransmission.
			 */
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(9, inp, stcb, NULL);
#endif
			break;
		}
		if (from_where == SCTP_OUTPUT_FROM_T3) {
			/* Only one transmission allowed out of a timeout */
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(10, inp, stcb, NULL);
#endif
			/* Push out any control */
			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, &cwnd_full, from_where,
			    &now, &now_filled, frag_point);
			return (ret);
		}
		if (tot_frs > asoc->max_burst) {
			/* Hit FR burst limit */
			return (0);
		}
		if ((num_out == 0) && (ret == 0)) {

			/* No more retrans to send */
			break;
		}
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_auditing(12, inp, stcb, NULL);
#endif
	/* Check for bad destinations, if they exist move chunks around. */
	burst_limit = asoc->max_burst;
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
		    SCTP_ADDR_NOT_REACHABLE) {
			/*
			 * if possible move things off of this address we
			 * still may send below due to the dormant state but
			 * we try to find an alternate address to send to
			 * and if we have one we move all queued data on the
			 * out wheel to this alternate address.
			 */
			if (net->ref_count > 1)
				sctp_move_to_an_alt(stcb, asoc, net);
		} else {
			/*
			 * if ((asoc->sat_network) || (net->addr_is_local))
			 * { burst_limit = asoc->max_burst *
			 * SCTP_SAT_NETWORK_BURST_INCR; }
			 */
			if (sctp_use_cwnd_based_maxburst) {
				if ((net->flight_size + (burst_limit * net->mtu)) < net->cwnd) {
					int old_cwnd;

					if (net->ssthresh < net->cwnd)
						net->ssthresh = net->cwnd;
					old_cwnd = net->cwnd;
					/* clamp cwnd so a burst cannot exceed the limit */
					net->cwnd = (net->flight_size + (burst_limit * net->mtu));

#ifdef SCTP_CWND_MONITOR
					sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST);
#endif

#ifdef SCTP_LOG_MAXBURST
					sctp_log_maxburst(stcb, net, 0, burst_limit, SCTP_MAX_BURST_APPLIED);
#endif
					SCTP_STAT_INCR(sctps_maxburstqueued);
				}
				net->fast_retran_ip = 0;
			} else {
				if (net->flight_size == 0) {
					/* Should be decaying the cwnd here */
					;
				}
			}
		}

	}
	burst_cnt = 0;
	cwnd_full = 0;
	do {
		/* Ok we have data anything to send? */
		error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
		    &reason_code, 0, &cwnd_full, from_where,
		    &now, &now_filled, frag_point);
		if (error) {
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
				printf("Error %d was returned from med-c-op\n", error);
			}
#endif
#ifdef SCTP_LOG_MAXBURST
			sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
#endif
#ifdef SCTP_CWND_LOGGING
			sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
			sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
#endif

			break;
		}
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
			printf("m-c-o put out %d\n", num_out);
		}
#endif
		tot_out += num_out;
		burst_cnt++;
#ifdef SCTP_CWND_LOGGING
		sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
		if (num_out == 0) {
			sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
		}
#endif
		if (nagle_on) {
			/*
			 * When nagle is on, we look at how much is un_sent,
			 * then if its smaller than an MTU and we have data
			 * in flight we stop.
			 */
			un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
			    ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count)
			    * sizeof(struct sctp_data_chunk)));
			if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
			    (stcb->asoc.total_flight > 0)) {
				break;
			}
		}
		if (TAILQ_EMPTY(&asoc->control_send_queue) &&
		    TAILQ_EMPTY(&asoc->send_queue) &&
		    TAILQ_EMPTY(&asoc->out_wheel)) {
			/* Nothing left to send */
			break;
		}
		if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
			/* Nothing left to send */
			break;
		}
	} while (num_out && (sctp_use_cwnd_based_maxburst ||
	    (burst_cnt < burst_limit)));

	if (sctp_use_cwnd_based_maxburst == 0) {
		if (burst_cnt >= burst_limit) {
			SCTP_STAT_INCR(sctps_maxburstqueued);
			asoc->burst_limit_applied = 1;
#ifdef SCTP_LOG_MAXBURST
			sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
#endif
		} else {
			asoc->burst_limit_applied = 0;
		}
	}
#ifdef SCTP_CWND_LOGGING
	sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
#endif
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
		printf("Ok, we have put out %d chunks\n", tot_out);
	}
#endif
	/*
	 * Now we need to clean up the control chunk chain if a ECNE is on
	 * it. It must be marked as UNSENT again so next call will continue
	 * to send it until such time that we get a CWR, to remove it.
	 */
	if (stcb->asoc.ecn_echo_cnt_onq)
		sctp_fix_ecn_echo(asoc);
	return (error);
}


/*
 * Socket-layer entry point: hand an mbuf chain from the socket off to
 * sctp_sosend().  Returns EINVAL on a NULL inpcb or socket, otherwise
 * whatever sctp_sosend() returns.  (Old-style K&R definition kept
 * as-is to match the file.)
 */
int
sctp_output(inp, m, addr, control, p, flags)
	struct sctp_inpcb *inp;
	struct mbuf *m;
	struct sockaddr *addr;
	struct mbuf *control;

	struct thread *p;
	int flags;
{
	if (inp == NULL) {
		return (EINVAL);
	}
	if (inp->sctp_socket == NULL) {
		return (EINVAL);
	}
	return (sctp_sosend(inp->sctp_socket,
	    addr,
	    (struct uio *)NULL,
	    m,
	    control,
	    flags,
	    p));
}

void
send_forward_tsn(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	struct sctp_forward_tsn_chunk *fwdtsn;

	SCTP_TCB_LOCK_ASSERT(stcb);
	/* If a FWD-TSN is already queued, just refresh it for resend. */
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
			/* mark it to unsent */
			chk->sent = SCTP_DATAGRAM_UNSENT;
			chk->snd_count = 0;
			/* Do we correct its output location?
*/ 8619 if (chk->whoTo != asoc->primary_destination) { 8620 sctp_free_remote_addr(chk->whoTo); 8621 chk->whoTo = asoc->primary_destination; 8622 atomic_add_int(&chk->whoTo->ref_count, 1); 8623 } 8624 goto sctp_fill_in_rest; 8625 } 8626 } 8627 /* Ok if we reach here we must build one */ 8628 sctp_alloc_a_chunk(stcb, chk); 8629 if (chk == NULL) { 8630 return; 8631 } 8632 chk->copy_by_ref = 0; 8633 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN; 8634 chk->rec.chunk_id.can_take_data = 0; 8635 chk->asoc = asoc; 8636 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 8637 if (chk->data == NULL) { 8638 atomic_subtract_int(&chk->whoTo->ref_count, 1); 8639 sctp_free_a_chunk(stcb, chk); 8640 return; 8641 } 8642 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 8643 chk->sent = SCTP_DATAGRAM_UNSENT; 8644 chk->snd_count = 0; 8645 chk->whoTo = asoc->primary_destination; 8646 atomic_add_int(&chk->whoTo->ref_count, 1); 8647 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next); 8648 asoc->ctrl_queue_cnt++; 8649 sctp_fill_in_rest: 8650 /* 8651 * Here we go through and fill out the part that deals with 8652 * stream/seq of the ones we skip. 
8653 */ 8654 SCTP_BUF_LEN(chk->data) = 0; 8655 { 8656 struct sctp_tmit_chunk *at, *tp1, *last; 8657 struct sctp_strseq *strseq; 8658 unsigned int cnt_of_space, i, ovh; 8659 unsigned int space_needed; 8660 unsigned int cnt_of_skipped = 0; 8661 8662 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) { 8663 if (at->sent != SCTP_FORWARD_TSN_SKIP) { 8664 /* no more to look at */ 8665 break; 8666 } 8667 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) { 8668 /* We don't report these */ 8669 continue; 8670 } 8671 cnt_of_skipped++; 8672 } 8673 space_needed = (sizeof(struct sctp_forward_tsn_chunk) + 8674 (cnt_of_skipped * sizeof(struct sctp_strseq))); 8675 8676 cnt_of_space = M_TRAILINGSPACE(chk->data); 8677 8678 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 8679 ovh = SCTP_MIN_OVERHEAD; 8680 } else { 8681 ovh = SCTP_MIN_V4_OVERHEAD; 8682 } 8683 if (cnt_of_space > (asoc->smallest_mtu - ovh)) { 8684 /* trim to a mtu size */ 8685 cnt_of_space = asoc->smallest_mtu - ovh; 8686 } 8687 if (cnt_of_space < space_needed) { 8688 /* 8689 * ok we must trim down the chunk by lowering the 8690 * advance peer ack point. 8691 */ 8692 cnt_of_skipped = (cnt_of_space - 8693 ((sizeof(struct sctp_forward_tsn_chunk)) / 8694 sizeof(struct sctp_strseq))); 8695 /* 8696 * Go through and find the TSN that will be the one 8697 * we report. 
8698 */ 8699 at = TAILQ_FIRST(&asoc->sent_queue); 8700 for (i = 0; i < cnt_of_skipped; i++) { 8701 tp1 = TAILQ_NEXT(at, sctp_next); 8702 at = tp1; 8703 } 8704 last = at; 8705 /* 8706 * last now points to last one I can report, update 8707 * peer ack point 8708 */ 8709 asoc->advanced_peer_ack_point = last->rec.data.TSN_seq; 8710 space_needed -= (cnt_of_skipped * sizeof(struct sctp_strseq)); 8711 } 8712 chk->send_size = space_needed; 8713 /* Setup the chunk */ 8714 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *); 8715 fwdtsn->ch.chunk_length = htons(chk->send_size); 8716 fwdtsn->ch.chunk_flags = 0; 8717 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN; 8718 fwdtsn->new_cumulative_tsn = htonl(asoc->advanced_peer_ack_point); 8719 chk->send_size = (sizeof(struct sctp_forward_tsn_chunk) + 8720 (cnt_of_skipped * sizeof(struct sctp_strseq))); 8721 SCTP_BUF_LEN(chk->data) = chk->send_size; 8722 fwdtsn++; 8723 /* 8724 * Move pointer to after the fwdtsn and transfer to the 8725 * strseq pointer. 8726 */ 8727 strseq = (struct sctp_strseq *)fwdtsn; 8728 /* 8729 * Now populate the strseq list. This is done blindly 8730 * without pulling out duplicate stream info. This is 8731 * inefficent but won't harm the process since the peer will 8732 * look at these in sequence and will thus release anything. 8733 * It could mean we exceed the PMTU and chop off some that 8734 * we could have included.. but this is unlikely (aka 1432/4 8735 * would mean 300+ stream seq's would have to be reported in 8736 * one FWD-TSN. With a bit of work we can later FIX this to 8737 * optimize and pull out duplcates.. but it does add more 8738 * overhead. So for now... not! 
8739 */ 8740 at = TAILQ_FIRST(&asoc->sent_queue); 8741 for (i = 0; i < cnt_of_skipped; i++) { 8742 tp1 = TAILQ_NEXT(at, sctp_next); 8743 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) { 8744 /* We don't report these */ 8745 i--; 8746 at = tp1; 8747 continue; 8748 } 8749 strseq->stream = ntohs(at->rec.data.stream_number); 8750 strseq->sequence = ntohs(at->rec.data.stream_seq); 8751 strseq++; 8752 at = tp1; 8753 } 8754 } 8755 return; 8756 8757 } 8758 8759 void 8760 sctp_send_sack(struct sctp_tcb *stcb) 8761 { 8762 /* 8763 * Queue up a SACK in the control queue. We must first check to see 8764 * if a SACK is somehow on the control queue. If so, we will take 8765 * and and remove the old one. 8766 */ 8767 struct sctp_association *asoc; 8768 struct sctp_tmit_chunk *chk, *a_chk; 8769 struct sctp_sack_chunk *sack; 8770 struct sctp_gap_ack_block *gap_descriptor; 8771 struct sack_track *selector; 8772 int mergeable = 0; 8773 int offset; 8774 caddr_t limit; 8775 uint32_t *dup; 8776 int limit_reached = 0; 8777 unsigned int i, jstart, siz, j; 8778 unsigned int num_gap_blocks = 0, space; 8779 int num_dups = 0; 8780 int space_req; 8781 8782 8783 a_chk = NULL; 8784 asoc = &stcb->asoc; 8785 SCTP_TCB_LOCK_ASSERT(stcb); 8786 if (asoc->last_data_chunk_from == NULL) { 8787 /* Hmm we never received anything */ 8788 return; 8789 } 8790 sctp_set_rwnd(stcb, asoc); 8791 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 8792 if (chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) { 8793 /* Hmm, found a sack already on queue, remove it */ 8794 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); 8795 asoc->ctrl_queue_cnt++; 8796 a_chk = chk; 8797 if (a_chk->data) { 8798 sctp_m_freem(a_chk->data); 8799 a_chk->data = NULL; 8800 } 8801 sctp_free_remote_addr(a_chk->whoTo); 8802 a_chk->whoTo = NULL; 8803 break; 8804 } 8805 } 8806 if (a_chk == NULL) { 8807 sctp_alloc_a_chunk(stcb, a_chk); 8808 if (a_chk == NULL) { 8809 /* No memory so we drop the idea, and set a timer */ 8810 if 
(stcb->asoc.delayed_ack) { 8811 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 8812 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5); 8813 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 8814 stcb->sctp_ep, stcb, NULL); 8815 } else { 8816 stcb->asoc.send_sack = 1; 8817 } 8818 return; 8819 } 8820 a_chk->copy_by_ref = 0; 8821 /* a_chk->rec.chunk_id.id = SCTP_SELECTIVE_ACK; */ 8822 a_chk->rec.chunk_id.id = SCTP_SELECTIVE_ACK; 8823 a_chk->rec.chunk_id.can_take_data = 1; 8824 } 8825 /* Clear our pkt counts */ 8826 asoc->data_pkts_seen = 0; 8827 8828 a_chk->asoc = asoc; 8829 a_chk->snd_count = 0; 8830 a_chk->send_size = 0; /* fill in later */ 8831 a_chk->sent = SCTP_DATAGRAM_UNSENT; 8832 8833 if ((asoc->numduptsns) || 8834 (asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE) 8835 ) { 8836 /* 8837 * Ok, we have some duplicates or the destination for the 8838 * sack is unreachable, lets see if we can select an 8839 * alternate than asoc->last_data_chunk_from 8840 */ 8841 if ((!(asoc->last_data_chunk_from->dest_state & 8842 SCTP_ADDR_NOT_REACHABLE)) && 8843 (asoc->used_alt_onsack > asoc->numnets)) { 8844 /* We used an alt last time, don't this time */ 8845 a_chk->whoTo = NULL; 8846 } else { 8847 asoc->used_alt_onsack++; 8848 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0); 8849 } 8850 if (a_chk->whoTo == NULL) { 8851 /* Nope, no alternate */ 8852 a_chk->whoTo = asoc->last_data_chunk_from; 8853 asoc->used_alt_onsack = 0; 8854 } 8855 } else { 8856 /* 8857 * No duplicates so we use the last place we received data 8858 * from. 
8859 */ 8860 asoc->used_alt_onsack = 0; 8861 a_chk->whoTo = asoc->last_data_chunk_from; 8862 } 8863 if (a_chk->whoTo) { 8864 atomic_add_int(&a_chk->whoTo->ref_count, 1); 8865 } 8866 if (asoc->highest_tsn_inside_map == asoc->cumulative_tsn) { 8867 /* no gaps */ 8868 space_req = sizeof(struct sctp_sack_chunk); 8869 } else { 8870 /* gaps get a cluster */ 8871 space_req = MCLBYTES; 8872 } 8873 /* Ok now lets formulate a MBUF with our sack */ 8874 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_DONTWAIT, 1, MT_DATA); 8875 if ((a_chk->data == NULL) || 8876 (a_chk->whoTo == NULL)) { 8877 /* rats, no mbuf memory */ 8878 if (a_chk->data) { 8879 /* was a problem with the destination */ 8880 sctp_m_freem(a_chk->data); 8881 a_chk->data = NULL; 8882 } 8883 if (a_chk->whoTo) 8884 atomic_subtract_int(&a_chk->whoTo->ref_count, 1); 8885 sctp_free_a_chunk(stcb, a_chk); 8886 if (stcb->asoc.delayed_ack) { 8887 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 8888 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6); 8889 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 8890 stcb->sctp_ep, stcb, NULL); 8891 } else { 8892 stcb->asoc.send_sack = 1; 8893 } 8894 return; 8895 } 8896 /* ok, lets go through and fill it in */ 8897 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD); 8898 space = M_TRAILINGSPACE(a_chk->data); 8899 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) { 8900 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD); 8901 } 8902 limit = mtod(a_chk->data, caddr_t); 8903 limit += space; 8904 8905 sack = mtod(a_chk->data, struct sctp_sack_chunk *); 8906 sack->ch.chunk_type = SCTP_SELECTIVE_ACK; 8907 /* 0x01 is used by nonce for ecn */ 8908 if ((sctp_ecn_enable) && 8909 (sctp_ecn_nonce) && 8910 (asoc->peer_supports_ecn_nonce)) 8911 sack->ch.chunk_flags = (asoc->receiver_nonce_sum & SCTP_SACK_NONCE_SUM); 8912 else 8913 sack->ch.chunk_flags = 0; 8914 8915 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 8916 /* 8917 * CMT DAC algorithm: If 2 (i.e., 0x10) packets have been 8918 * received, then 
set high bit to 1, else 0. Reset 8919 * pkts_rcvd. 8920 */ 8921 sack->ch.chunk_flags |= (asoc->cmt_dac_pkts_rcvd << 6); 8922 asoc->cmt_dac_pkts_rcvd = 0; 8923 } 8924 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn); 8925 sack->sack.a_rwnd = htonl(asoc->my_rwnd); 8926 asoc->my_last_reported_rwnd = asoc->my_rwnd; 8927 8928 /* reset the readers interpretation */ 8929 stcb->freed_by_sorcv_sincelast = 0; 8930 8931 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk)); 8932 8933 8934 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8; 8935 if (asoc->cumulative_tsn < asoc->mapping_array_base_tsn) { 8936 offset = 1; 8937 /* 8938 * cum-ack behind the mapping array, so we start and use all 8939 * entries. 8940 */ 8941 jstart = 0; 8942 } else { 8943 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn; 8944 /* 8945 * we skip the first one when the cum-ack is at or above the 8946 * mapping array base. 8947 */ 8948 jstart = 1; 8949 } 8950 if (compare_with_wrap(asoc->highest_tsn_inside_map, asoc->cumulative_tsn, MAX_TSN)) { 8951 /* we have a gap .. maybe */ 8952 for (i = 0; i < siz; i++) { 8953 selector = &sack_array[asoc->mapping_array[i]]; 8954 if (mergeable && selector->right_edge) { 8955 /* 8956 * Backup, left and right edges were ok to 8957 * merge. 
8958 */ 8959 num_gap_blocks--; 8960 gap_descriptor--; 8961 } 8962 if (selector->num_entries == 0) 8963 mergeable = 0; 8964 else { 8965 for (j = jstart; j < selector->num_entries; j++) { 8966 if (mergeable && selector->right_edge) { 8967 /* 8968 * do a merge by NOT setting 8969 * the left side 8970 */ 8971 mergeable = 0; 8972 } else { 8973 /* 8974 * no merge, set the left 8975 * side 8976 */ 8977 mergeable = 0; 8978 gap_descriptor->start = htons((selector->gaps[j].start + offset)); 8979 } 8980 gap_descriptor->end = htons((selector->gaps[j].end + offset)); 8981 num_gap_blocks++; 8982 gap_descriptor++; 8983 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) { 8984 /* no more room */ 8985 limit_reached = 1; 8986 break; 8987 } 8988 } 8989 if (selector->left_edge) { 8990 mergeable = 1; 8991 } 8992 } 8993 if (limit_reached) { 8994 /* Reached the limit stop */ 8995 break; 8996 } 8997 jstart = 0; 8998 offset += 8; 8999 } 9000 if (num_gap_blocks == 0) { 9001 /* reneged all chunks */ 9002 asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 9003 } 9004 } 9005 /* now we must add any dups we are going to report. */ 9006 if ((limit_reached == 0) && (asoc->numduptsns)) { 9007 dup = (uint32_t *) gap_descriptor; 9008 for (i = 0; i < asoc->numduptsns; i++) { 9009 *dup = htonl(asoc->dup_tsns[i]); 9010 dup++; 9011 num_dups++; 9012 if (((caddr_t)dup + sizeof(uint32_t)) > limit) { 9013 /* no more room */ 9014 break; 9015 } 9016 } 9017 asoc->numduptsns = 0; 9018 } 9019 /* 9020 * now that the chunk is prepared queue it to the control chunk 9021 * queue. 
9022 */ 9023 a_chk->send_size = (sizeof(struct sctp_sack_chunk) + 9024 (num_gap_blocks * sizeof(struct sctp_gap_ack_block)) + 9025 (num_dups * sizeof(int32_t))); 9026 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size; 9027 sack->sack.num_gap_ack_blks = htons(num_gap_blocks); 9028 sack->sack.num_dup_tsns = htons(num_dups); 9029 sack->ch.chunk_length = htons(a_chk->send_size); 9030 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next); 9031 asoc->ctrl_queue_cnt++; 9032 asoc->send_sack = 0; 9033 SCTP_STAT_INCR(sctps_sendsacks); 9034 return; 9035 } 9036 9037 9038 void 9039 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr) 9040 { 9041 struct mbuf *m_abort; 9042 struct mbuf *m_out = NULL, *m_end = NULL; 9043 struct sctp_abort_chunk *abort = NULL; 9044 int sz; 9045 uint32_t auth_offset = 0; 9046 struct sctp_auth_chunk *auth = NULL; 9047 struct sctphdr *shdr; 9048 9049 /* 9050 * Add an AUTH chunk, if chunk requires it and save the offset into 9051 * the chain for AUTH 9052 */ 9053 if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION, 9054 stcb->asoc.peer_auth_chunks)) { 9055 m_out = sctp_add_auth_chunk(m_out, &m_end, &auth, &auth_offset, 9056 stcb, SCTP_ABORT_ASSOCIATION); 9057 } 9058 SCTP_TCB_LOCK_ASSERT(stcb); 9059 m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_DONTWAIT, 1, MT_HEADER); 9060 if (m_abort == NULL) { 9061 /* no mbuf's */ 9062 if (m_out) 9063 sctp_m_freem(m_out); 9064 return; 9065 } 9066 /* link in any error */ 9067 SCTP_BUF_NEXT(m_abort) = operr; 9068 sz = 0; 9069 if (operr) { 9070 struct mbuf *n; 9071 9072 n = operr; 9073 while (n) { 9074 sz += SCTP_BUF_LEN(n); 9075 n = SCTP_BUF_NEXT(n); 9076 } 9077 } 9078 SCTP_BUF_LEN(m_abort) = sizeof(*abort); 9079 if (m_out == NULL) { 9080 /* NO Auth chunk prepended, so reserve space in front */ 9081 SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD); 9082 m_out = m_abort; 9083 } else { 9084 /* Put AUTH chunk at the front of the chain */ 9085 SCTP_BUF_NEXT(m_end) = m_abort; 9086 } 9087 
9088 /* fill in the ABORT chunk */ 9089 abort = mtod(m_abort, struct sctp_abort_chunk *); 9090 abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION; 9091 abort->ch.chunk_flags = 0; 9092 abort->ch.chunk_length = htons(sizeof(*abort) + sz); 9093 9094 /* prepend and fill in the SCTP header */ 9095 SCTP_BUF_PREPEND(m_out, sizeof(struct sctphdr), M_DONTWAIT); 9096 if (m_out == NULL) { 9097 /* TSNH: no memory */ 9098 return; 9099 } 9100 shdr = mtod(m_out, struct sctphdr *); 9101 shdr->src_port = stcb->sctp_ep->sctp_lport; 9102 shdr->dest_port = stcb->rport; 9103 shdr->v_tag = htonl(stcb->asoc.peer_vtag); 9104 shdr->checksum = 0; 9105 auth_offset += sizeof(struct sctphdr); 9106 9107 sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, 9108 stcb->asoc.primary_destination, 9109 (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr, 9110 m_out, auth_offset, auth, 1, 0, NULL, 0); 9111 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 9112 } 9113 9114 int 9115 sctp_send_shutdown_complete(struct sctp_tcb *stcb, 9116 struct sctp_nets *net) 9117 { 9118 /* formulate and SEND a SHUTDOWN-COMPLETE */ 9119 struct mbuf *m_shutdown_comp; 9120 struct sctp_shutdown_complete_msg *comp_cp; 9121 9122 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_complete_msg), 0, M_DONTWAIT, 1, MT_HEADER); 9123 if (m_shutdown_comp == NULL) { 9124 /* no mbuf's */ 9125 return (-1); 9126 } 9127 comp_cp = mtod(m_shutdown_comp, struct sctp_shutdown_complete_msg *); 9128 comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE; 9129 comp_cp->shut_cmp.ch.chunk_flags = 0; 9130 comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk)); 9131 comp_cp->sh.src_port = stcb->sctp_ep->sctp_lport; 9132 comp_cp->sh.dest_port = stcb->rport; 9133 comp_cp->sh.v_tag = htonl(stcb->asoc.peer_vtag); 9134 comp_cp->sh.checksum = 0; 9135 9136 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_msg); 9137 sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net, 9138 (struct 
sockaddr *)&net->ro._l_addr, 9139 m_shutdown_comp, 0, NULL, 1, 0, NULL, 0); 9140 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 9141 return (0); 9142 } 9143 9144 int 9145 sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh) 9146 { 9147 /* formulate and SEND a SHUTDOWN-COMPLETE */ 9148 struct mbuf *o_pak; 9149 struct mbuf *mout; 9150 struct ip *iph, *iph_out; 9151 struct ip6_hdr *ip6, *ip6_out; 9152 int offset_out, len; 9153 struct sctp_shutdown_complete_msg *comp_cp; 9154 9155 /* Get room for the largest message */ 9156 len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_shutdown_complete_msg)); 9157 9158 o_pak = SCTP_GET_HEADER_FOR_OUTPUT(len); 9159 if (o_pak == NULL) { 9160 /* no mbuf's */ 9161 return (-1); 9162 } 9163 mout = SCTP_HEADER_TO_CHAIN(o_pak); 9164 iph = mtod(m, struct ip *); 9165 iph_out = NULL; 9166 ip6_out = NULL; 9167 offset_out = 0; 9168 if (iph->ip_v == IPVERSION) { 9169 SCTP_BUF_LEN(mout) = sizeof(struct ip) + 9170 sizeof(struct sctp_shutdown_complete_msg); 9171 SCTP_BUF_NEXT(mout) = NULL; 9172 iph_out = mtod(mout, struct ip *); 9173 9174 /* Fill in the IP header for the ABORT */ 9175 iph_out->ip_v = IPVERSION; 9176 iph_out->ip_hl = (sizeof(struct ip) / 4); 9177 iph_out->ip_tos = (u_char)0; 9178 iph_out->ip_id = 0; 9179 iph_out->ip_off = 0; 9180 iph_out->ip_ttl = MAXTTL; 9181 iph_out->ip_p = IPPROTO_SCTP; 9182 iph_out->ip_src.s_addr = iph->ip_dst.s_addr; 9183 iph_out->ip_dst.s_addr = iph->ip_src.s_addr; 9184 9185 /* let IP layer calculate this */ 9186 iph_out->ip_sum = 0; 9187 offset_out += sizeof(*iph_out); 9188 comp_cp = (struct sctp_shutdown_complete_msg *)( 9189 (caddr_t)iph_out + offset_out); 9190 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 9191 ip6 = (struct ip6_hdr *)iph; 9192 SCTP_BUF_LEN(mout) = sizeof(struct ip6_hdr) + 9193 sizeof(struct sctp_shutdown_complete_msg); 9194 SCTP_BUF_NEXT(mout) = NULL; 9195 ip6_out = mtod(mout, struct ip6_hdr *); 9196 9197 /* Fill in the IPv6 header for the ABORT */ 9198 
ip6_out->ip6_flow = ip6->ip6_flow; 9199 ip6_out->ip6_hlim = ip6_defhlim; 9200 ip6_out->ip6_nxt = IPPROTO_SCTP; 9201 ip6_out->ip6_src = ip6->ip6_dst; 9202 ip6_out->ip6_dst = ip6->ip6_src; 9203 /* 9204 * ?? The old code had both the iph len + payload, I think 9205 * this is wrong and would never have worked 9206 */ 9207 ip6_out->ip6_plen = sizeof(struct sctp_shutdown_complete_msg); 9208 offset_out += sizeof(*ip6_out); 9209 comp_cp = (struct sctp_shutdown_complete_msg *)( 9210 (caddr_t)ip6_out + offset_out); 9211 } else { 9212 /* Currently not supported. */ 9213 return (-1); 9214 } 9215 9216 SCTP_HEADER_LEN(o_pak) = SCTP_BUF_LEN(mout); 9217 /* Now copy in and fill in the ABORT tags etc. */ 9218 comp_cp->sh.src_port = sh->dest_port; 9219 comp_cp->sh.dest_port = sh->src_port; 9220 comp_cp->sh.checksum = 0; 9221 comp_cp->sh.v_tag = sh->v_tag; 9222 comp_cp->shut_cmp.ch.chunk_flags = SCTP_HAD_NO_TCB; 9223 comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE; 9224 comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk)); 9225 9226 /* add checksum */ 9227 if ((sctp_no_csum_on_loopback) && SCTP_IS_IT_LOOPBACK(o_pak)) { 9228 comp_cp->sh.checksum = 0; 9229 } else { 9230 comp_cp->sh.checksum = sctp_calculate_sum(mout, NULL, offset_out); 9231 } 9232 if (iph_out != NULL) { 9233 struct route ro; 9234 9235 bzero(&ro, sizeof ro); 9236 /* set IPv4 length */ 9237 iph_out->ip_len = SCTP_HEADER_LEN(o_pak); 9238 /* out it goes */ 9239 ip_output(o_pak, 0, &ro, IP_RAWOUTPUT, NULL 9240 ,NULL 9241 ); 9242 /* Free the route if we got one back */ 9243 if (ro.ro_rt) 9244 RTFREE(ro.ro_rt); 9245 } else if (ip6_out != NULL) { 9246 struct route_in6 ro; 9247 9248 bzero(&ro, sizeof(ro)); 9249 ip6_output(o_pak, NULL, &ro, 0, NULL, NULL 9250 ,NULL 9251 ); 9252 /* Free the route if we got one back */ 9253 if (ro.ro_rt) 9254 RTFREE(ro.ro_rt); 9255 } 9256 SCTP_STAT_INCR(sctps_sendpackets); 9257 SCTP_STAT_INCR_COUNTER64(sctps_outpackets); 9258 
SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 9259 return (0); 9260 } 9261 9262 static struct sctp_nets * 9263 sctp_select_hb_destination(struct sctp_tcb *stcb, struct timeval *now) 9264 { 9265 struct sctp_nets *net, *hnet; 9266 int ms_goneby, highest_ms, state_overide = 0; 9267 9268 SCTP_GETTIME_TIMEVAL(now); 9269 highest_ms = 0; 9270 hnet = NULL; 9271 SCTP_TCB_LOCK_ASSERT(stcb); 9272 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 9273 if ( 9274 ((net->dest_state & SCTP_ADDR_NOHB) && ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) || 9275 (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE) 9276 ) { 9277 /* 9278 * Skip this guy from consideration if HB is off AND 9279 * its confirmed 9280 */ 9281 continue; 9282 } 9283 if (sctp_destination_is_reachable(stcb, (struct sockaddr *)&net->ro._l_addr) == 0) { 9284 /* skip this dest net from consideration */ 9285 continue; 9286 } 9287 if (net->last_sent_time.tv_sec) { 9288 /* Sent to so we subtract */ 9289 ms_goneby = (now->tv_sec - net->last_sent_time.tv_sec) * 1000; 9290 } else 9291 /* Never been sent to */ 9292 ms_goneby = 0x7fffffff; 9293 /* 9294 * When the address state is unconfirmed but still 9295 * considered reachable, we HB at a higher rate. Once it 9296 * goes confirmed OR reaches the "unreachable" state, thenw 9297 * we cut it back to HB at a more normal pace. 
9298 */ 9299 if ((net->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED) { 9300 state_overide = 1; 9301 } else { 9302 state_overide = 0; 9303 } 9304 9305 if ((((unsigned int)ms_goneby >= net->RTO) || (state_overide)) && 9306 (ms_goneby > highest_ms)) { 9307 highest_ms = ms_goneby; 9308 hnet = net; 9309 } 9310 } 9311 if (hnet && 9312 ((hnet->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED)) { 9313 state_overide = 1; 9314 } else { 9315 state_overide = 0; 9316 } 9317 9318 if (highest_ms && (((unsigned int)highest_ms >= hnet->RTO) || state_overide)) { 9319 /* 9320 * Found the one with longest delay bounds OR it is 9321 * unconfirmed and still not marked unreachable. 9322 */ 9323 #ifdef SCTP_DEBUG 9324 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) { 9325 printf("net:%p is the hb winner -", 9326 hnet); 9327 if (hnet) 9328 sctp_print_address((struct sockaddr *)&hnet->ro._l_addr); 9329 else 9330 printf(" none\n"); 9331 } 9332 #endif 9333 /* update the timer now */ 9334 hnet->last_sent_time = *now; 9335 return (hnet); 9336 } 9337 /* Nothing to HB */ 9338 return (NULL); 9339 } 9340 9341 int 9342 sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net) 9343 { 9344 struct sctp_tmit_chunk *chk; 9345 struct sctp_nets *net; 9346 struct sctp_heartbeat_chunk *hb; 9347 struct timeval now; 9348 struct sockaddr_in *sin; 9349 struct sockaddr_in6 *sin6; 9350 9351 SCTP_TCB_LOCK_ASSERT(stcb); 9352 if (user_req == 0) { 9353 net = sctp_select_hb_destination(stcb, &now); 9354 if (net == NULL) { 9355 /* 9356 * All our busy none to send to, just start the 9357 * timer again. 
9358 */ 9359 if (stcb->asoc.state == 0) { 9360 return (0); 9361 } 9362 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, 9363 stcb->sctp_ep, 9364 stcb, 9365 net); 9366 return (0); 9367 } 9368 } else { 9369 net = u_net; 9370 if (net == NULL) { 9371 return (0); 9372 } 9373 SCTP_GETTIME_TIMEVAL(&now); 9374 } 9375 sin = (struct sockaddr_in *)&net->ro._l_addr; 9376 if (sin->sin_family != AF_INET) { 9377 if (sin->sin_family != AF_INET6) { 9378 /* huh */ 9379 return (0); 9380 } 9381 } 9382 sctp_alloc_a_chunk(stcb, chk); 9383 if (chk == NULL) { 9384 #ifdef SCTP_DEBUG 9385 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) { 9386 printf("Gak, can't get a chunk for hb\n"); 9387 } 9388 #endif 9389 return (0); 9390 } 9391 chk->copy_by_ref = 0; 9392 chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST; 9393 chk->rec.chunk_id.can_take_data = 1; 9394 chk->asoc = &stcb->asoc; 9395 chk->send_size = sizeof(struct sctp_heartbeat_chunk); 9396 9397 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER); 9398 if (chk->data == NULL) { 9399 sctp_free_a_chunk(stcb, chk); 9400 return (0); 9401 } 9402 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 9403 SCTP_BUF_LEN(chk->data) = chk->send_size; 9404 chk->sent = SCTP_DATAGRAM_UNSENT; 9405 chk->snd_count = 0; 9406 chk->whoTo = net; 9407 atomic_add_int(&chk->whoTo->ref_count, 1); 9408 /* Now we have a mbuf that we can fill in with the details */ 9409 hb = mtod(chk->data, struct sctp_heartbeat_chunk *); 9410 9411 /* fill out chunk header */ 9412 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST; 9413 hb->ch.chunk_flags = 0; 9414 hb->ch.chunk_length = htons(chk->send_size); 9415 /* Fill out hb parameter */ 9416 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO); 9417 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param)); 9418 hb->heartbeat.hb_info.time_value_1 = now.tv_sec; 9419 hb->heartbeat.hb_info.time_value_2 = now.tv_usec; 9420 /* Did our user request this one, put it in */ 9421 
hb->heartbeat.hb_info.user_req = user_req; 9422 hb->heartbeat.hb_info.addr_family = sin->sin_family; 9423 hb->heartbeat.hb_info.addr_len = sin->sin_len; 9424 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) { 9425 /* 9426 * we only take from the entropy pool if the address is not 9427 * confirmed. 9428 */ 9429 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 9430 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 9431 } else { 9432 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0; 9433 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0; 9434 } 9435 if (sin->sin_family == AF_INET) { 9436 memcpy(hb->heartbeat.hb_info.address, &sin->sin_addr, sizeof(sin->sin_addr)); 9437 } else if (sin->sin_family == AF_INET6) { 9438 /* We leave the scope the way it is in our lookup table. */ 9439 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 9440 memcpy(hb->heartbeat.hb_info.address, &sin6->sin6_addr, sizeof(sin6->sin6_addr)); 9441 } else { 9442 /* huh compiler bug */ 9443 return (0); 9444 } 9445 /* ok we have a destination that needs a beat */ 9446 /* lets do the theshold management Qiaobing style */ 9447 9448 if (sctp_threshold_management(stcb->sctp_ep, stcb, net, 9449 stcb->asoc.max_send_times)) { 9450 /* 9451 * we have lost the association, in a way this is quite bad 9452 * since we really are one less time since we really did not 9453 * send yet. This is the down side to the Q's style as 9454 * defined in the RFC and not my alternate style defined in 9455 * the RFC. 
9456 */ 9457 atomic_subtract_int(&chk->whoTo->ref_count, 1); 9458 if (chk->data != NULL) { 9459 sctp_m_freem(chk->data); 9460 chk->data = NULL; 9461 } 9462 sctp_free_a_chunk(stcb, chk); 9463 return (-1); 9464 } 9465 net->hb_responded = 0; 9466 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next); 9467 stcb->asoc.ctrl_queue_cnt++; 9468 SCTP_STAT_INCR(sctps_sendheartbeat); 9469 /* 9470 * Call directly med level routine to put out the chunk. It will 9471 * always tumble out control chunks aka HB but it may even tumble 9472 * out data too. 9473 */ 9474 return (1); 9475 } 9476 9477 void 9478 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net, 9479 uint32_t high_tsn) 9480 { 9481 struct sctp_association *asoc; 9482 struct sctp_ecne_chunk *ecne; 9483 struct sctp_tmit_chunk *chk; 9484 9485 asoc = &stcb->asoc; 9486 SCTP_TCB_LOCK_ASSERT(stcb); 9487 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 9488 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) { 9489 /* found a previous ECN_ECHO update it if needed */ 9490 ecne = mtod(chk->data, struct sctp_ecne_chunk *); 9491 ecne->tsn = htonl(high_tsn); 9492 return; 9493 } 9494 } 9495 /* nope could not find one to update so we must build one */ 9496 sctp_alloc_a_chunk(stcb, chk); 9497 if (chk == NULL) { 9498 return; 9499 } 9500 chk->copy_by_ref = 0; 9501 SCTP_STAT_INCR(sctps_sendecne); 9502 chk->rec.chunk_id.id = SCTP_ECN_ECHO; 9503 chk->rec.chunk_id.can_take_data = 0; 9504 chk->asoc = &stcb->asoc; 9505 chk->send_size = sizeof(struct sctp_ecne_chunk); 9506 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER); 9507 if (chk->data == NULL) { 9508 sctp_free_a_chunk(stcb, chk); 9509 return; 9510 } 9511 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 9512 SCTP_BUF_LEN(chk->data) = chk->send_size; 9513 chk->sent = SCTP_DATAGRAM_UNSENT; 9514 chk->snd_count = 0; 9515 chk->whoTo = net; 9516 atomic_add_int(&chk->whoTo->ref_count, 1); 9517 stcb->asoc.ecn_echo_cnt_onq++; 9518 ecne = 
mtod(chk->data, struct sctp_ecne_chunk *); 9519 ecne->ch.chunk_type = SCTP_ECN_ECHO; 9520 ecne->ch.chunk_flags = 0; 9521 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk)); 9522 ecne->tsn = htonl(high_tsn); 9523 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next); 9524 asoc->ctrl_queue_cnt++; 9525 } 9526 9527 void 9528 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net, 9529 struct mbuf *m, int iphlen, int bad_crc) 9530 { 9531 struct sctp_association *asoc; 9532 struct sctp_pktdrop_chunk *drp; 9533 struct sctp_tmit_chunk *chk; 9534 uint8_t *datap; 9535 int len; 9536 unsigned int small_one; 9537 struct ip *iph; 9538 9539 long spc; 9540 9541 asoc = &stcb->asoc; 9542 SCTP_TCB_LOCK_ASSERT(stcb); 9543 if (asoc->peer_supports_pktdrop == 0) { 9544 /* 9545 * peer must declare support before I send one. 9546 */ 9547 return; 9548 } 9549 if (stcb->sctp_socket == NULL) { 9550 return; 9551 } 9552 sctp_alloc_a_chunk(stcb, chk); 9553 if (chk == NULL) { 9554 return; 9555 } 9556 chk->copy_by_ref = 0; 9557 iph = mtod(m, struct ip *); 9558 if (iph == NULL) { 9559 return; 9560 } 9561 if (iph->ip_v == IPVERSION) { 9562 /* IPv4 */ 9563 len = chk->send_size = iph->ip_len; 9564 } else { 9565 struct ip6_hdr *ip6h; 9566 9567 /* IPv6 */ 9568 ip6h = mtod(m, struct ip6_hdr *); 9569 len = chk->send_size = htons(ip6h->ip6_plen); 9570 } 9571 chk->asoc = &stcb->asoc; 9572 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 9573 if (chk->data == NULL) { 9574 jump_out: 9575 sctp_free_a_chunk(stcb, chk); 9576 return; 9577 } 9578 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 9579 drp = mtod(chk->data, struct sctp_pktdrop_chunk *); 9580 if (drp == NULL) { 9581 sctp_m_freem(chk->data); 9582 chk->data = NULL; 9583 goto jump_out; 9584 } 9585 small_one = asoc->smallest_mtu; 9586 if (small_one > MCLBYTES) { 9587 /* Only one cluster worth of data MAX */ 9588 small_one = MCLBYTES; 9589 } 9590 chk->book_size = SCTP_SIZE32((chk->send_size + 
sizeof(struct sctp_pktdrop_chunk) + 9591 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD)); 9592 chk->book_size_scale = 0; 9593 if (chk->book_size > small_one) { 9594 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED; 9595 drp->trunc_len = htons(chk->send_size); 9596 chk->send_size = small_one - (SCTP_MED_OVERHEAD + 9597 sizeof(struct sctp_pktdrop_chunk) + 9598 sizeof(struct sctphdr)); 9599 len = chk->send_size; 9600 } else { 9601 /* no truncation needed */ 9602 drp->ch.chunk_flags = 0; 9603 drp->trunc_len = htons(0); 9604 } 9605 if (bad_crc) { 9606 drp->ch.chunk_flags |= SCTP_BADCRC; 9607 } 9608 chk->send_size += sizeof(struct sctp_pktdrop_chunk); 9609 SCTP_BUF_LEN(chk->data) = chk->send_size; 9610 chk->sent = SCTP_DATAGRAM_UNSENT; 9611 chk->snd_count = 0; 9612 if (net) { 9613 /* we should hit here */ 9614 chk->whoTo = net; 9615 } else { 9616 chk->whoTo = asoc->primary_destination; 9617 } 9618 atomic_add_int(&chk->whoTo->ref_count, 1); 9619 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED; 9620 chk->rec.chunk_id.can_take_data = 1; 9621 drp->ch.chunk_type = SCTP_PACKET_DROPPED; 9622 drp->ch.chunk_length = htons(chk->send_size); 9623 spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket); 9624 if (spc < 0) { 9625 spc = 0; 9626 } 9627 drp->bottle_bw = htonl(spc); 9628 if (asoc->my_rwnd) { 9629 drp->current_onq = htonl(asoc->size_on_reasm_queue + 9630 asoc->size_on_all_streams + 9631 asoc->my_rwnd_control_len + 9632 stcb->sctp_socket->so_rcv.sb_cc); 9633 } else { 9634 /* 9635 * If my rwnd is 0, possibly from mbuf depletion as well as 9636 * space used, tell the peer there is NO space aka onq == bw 9637 */ 9638 drp->current_onq = htonl(spc); 9639 } 9640 drp->reserved = 0; 9641 datap = drp->data; 9642 m_copydata(m, iphlen, len, (caddr_t)datap); 9643 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next); 9644 asoc->ctrl_queue_cnt++; 9645 } 9646 9647 void 9648 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn) 9649 { 9650 struct sctp_association *asoc; 9651 
struct sctp_cwr_chunk *cwr; 9652 struct sctp_tmit_chunk *chk; 9653 9654 asoc = &stcb->asoc; 9655 SCTP_TCB_LOCK_ASSERT(stcb); 9656 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 9657 if (chk->rec.chunk_id.id == SCTP_ECN_CWR) { 9658 /* found a previous ECN_CWR update it if needed */ 9659 cwr = mtod(chk->data, struct sctp_cwr_chunk *); 9660 if (compare_with_wrap(high_tsn, ntohl(cwr->tsn), 9661 MAX_TSN)) { 9662 cwr->tsn = htonl(high_tsn); 9663 } 9664 return; 9665 } 9666 } 9667 /* nope could not find one to update so we must build one */ 9668 sctp_alloc_a_chunk(stcb, chk); 9669 if (chk == NULL) { 9670 return; 9671 } 9672 chk->copy_by_ref = 0; 9673 chk->rec.chunk_id.id = SCTP_ECN_CWR; 9674 chk->rec.chunk_id.can_take_data = 1; 9675 chk->asoc = &stcb->asoc; 9676 chk->send_size = sizeof(struct sctp_cwr_chunk); 9677 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER); 9678 if (chk->data == NULL) { 9679 sctp_free_a_chunk(stcb, chk); 9680 return; 9681 } 9682 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 9683 SCTP_BUF_LEN(chk->data) = chk->send_size; 9684 chk->sent = SCTP_DATAGRAM_UNSENT; 9685 chk->snd_count = 0; 9686 chk->whoTo = net; 9687 atomic_add_int(&chk->whoTo->ref_count, 1); 9688 cwr = mtod(chk->data, struct sctp_cwr_chunk *); 9689 cwr->ch.chunk_type = SCTP_ECN_CWR; 9690 cwr->ch.chunk_flags = 0; 9691 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk)); 9692 cwr->tsn = htonl(high_tsn); 9693 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next); 9694 asoc->ctrl_queue_cnt++; 9695 } 9696 9697 void 9698 sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk, 9699 int number_entries, uint16_t * list, 9700 uint32_t seq, uint32_t resp_seq, uint32_t last_sent) 9701 { 9702 int len, old_len, i; 9703 struct sctp_stream_reset_out_request *req_out; 9704 struct sctp_chunkhdr *ch; 9705 9706 ch = mtod(chk->data, struct sctp_chunkhdr *); 9707 9708 9709 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 9710 9711 /* get to new 
offset for the param. */ 9712 req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len); 9713 /* now how long will this param be? */ 9714 len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries)); 9715 req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST); 9716 req_out->ph.param_length = htons(len); 9717 req_out->request_seq = htonl(seq); 9718 req_out->response_seq = htonl(resp_seq); 9719 req_out->send_reset_at_tsn = htonl(last_sent); 9720 if (number_entries) { 9721 for (i = 0; i < number_entries; i++) { 9722 req_out->list_of_streams[i] = htons(list[i]); 9723 } 9724 } 9725 if (SCTP_SIZE32(len) > len) { 9726 /* 9727 * Need to worry about the pad we may end up adding to the 9728 * end. This is easy since the struct is either aligned to 4 9729 * bytes or 2 bytes off. 9730 */ 9731 req_out->list_of_streams[number_entries] = 0; 9732 } 9733 /* now fix the chunk length */ 9734 ch->chunk_length = htons(len + old_len); 9735 chk->book_size = len + old_len; 9736 chk->book_size_scale = 0; 9737 chk->send_size = SCTP_SIZE32(chk->book_size); 9738 SCTP_BUF_LEN(chk->data) = chk->send_size; 9739 return; 9740 } 9741 9742 9743 void 9744 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk, 9745 int number_entries, uint16_t * list, 9746 uint32_t seq) 9747 { 9748 int len, old_len, i; 9749 struct sctp_stream_reset_in_request *req_in; 9750 struct sctp_chunkhdr *ch; 9751 9752 ch = mtod(chk->data, struct sctp_chunkhdr *); 9753 9754 9755 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 9756 9757 /* get to new offset for the param. */ 9758 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len); 9759 /* now how long will this param be? 
*/ 9760 len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries)); 9761 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST); 9762 req_in->ph.param_length = htons(len); 9763 req_in->request_seq = htonl(seq); 9764 if (number_entries) { 9765 for (i = 0; i < number_entries; i++) { 9766 req_in->list_of_streams[i] = htons(list[i]); 9767 } 9768 } 9769 if (SCTP_SIZE32(len) > len) { 9770 /* 9771 * Need to worry about the pad we may end up adding to the 9772 * end. This is easy since the struct is either aligned to 4 9773 * bytes or 2 bytes off. 9774 */ 9775 req_in->list_of_streams[number_entries] = 0; 9776 } 9777 /* now fix the chunk length */ 9778 ch->chunk_length = htons(len + old_len); 9779 chk->book_size = len + old_len; 9780 chk->book_size_scale = 0; 9781 chk->send_size = SCTP_SIZE32(chk->book_size); 9782 SCTP_BUF_LEN(chk->data) = chk->send_size; 9783 return; 9784 } 9785 9786 9787 void 9788 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk, 9789 uint32_t seq) 9790 { 9791 int len, old_len; 9792 struct sctp_stream_reset_tsn_request *req_tsn; 9793 struct sctp_chunkhdr *ch; 9794 9795 ch = mtod(chk->data, struct sctp_chunkhdr *); 9796 9797 9798 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 9799 9800 /* get to new offset for the param. */ 9801 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len); 9802 /* now how long will this param be? 
*/ 9803 len = sizeof(struct sctp_stream_reset_tsn_request); 9804 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST); 9805 req_tsn->ph.param_length = htons(len); 9806 req_tsn->request_seq = htonl(seq); 9807 9808 /* now fix the chunk length */ 9809 ch->chunk_length = htons(len + old_len); 9810 chk->send_size = len + old_len; 9811 chk->book_size = SCTP_SIZE32(chk->send_size); 9812 chk->book_size_scale = 0; 9813 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); 9814 return; 9815 } 9816 9817 void 9818 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk, 9819 uint32_t resp_seq, uint32_t result) 9820 { 9821 int len, old_len; 9822 struct sctp_stream_reset_response *resp; 9823 struct sctp_chunkhdr *ch; 9824 9825 ch = mtod(chk->data, struct sctp_chunkhdr *); 9826 9827 9828 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 9829 9830 /* get to new offset for the param. */ 9831 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len); 9832 /* now how long will this param be? */ 9833 len = sizeof(struct sctp_stream_reset_response); 9834 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE); 9835 resp->ph.param_length = htons(len); 9836 resp->response_seq = htonl(resp_seq); 9837 resp->result = ntohl(result); 9838 9839 /* now fix the chunk length */ 9840 ch->chunk_length = htons(len + old_len); 9841 chk->book_size = len + old_len; 9842 chk->book_size_scale = 0; 9843 chk->send_size = SCTP_SIZE32(chk->book_size); 9844 SCTP_BUF_LEN(chk->data) = chk->send_size; 9845 return; 9846 9847 } 9848 9849 9850 void 9851 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk, 9852 uint32_t resp_seq, uint32_t result, 9853 uint32_t send_una, uint32_t recv_next) 9854 { 9855 int len, old_len; 9856 struct sctp_stream_reset_response_tsn *resp; 9857 struct sctp_chunkhdr *ch; 9858 9859 ch = mtod(chk->data, struct sctp_chunkhdr *); 9860 9861 9862 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 9863 9864 /* get to new offset for the param. 
*/ 9865 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len); 9866 /* now how long will this param be? */ 9867 len = sizeof(struct sctp_stream_reset_response_tsn); 9868 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE); 9869 resp->ph.param_length = htons(len); 9870 resp->response_seq = htonl(resp_seq); 9871 resp->result = htonl(result); 9872 resp->senders_next_tsn = htonl(send_una); 9873 resp->receivers_next_tsn = htonl(recv_next); 9874 9875 /* now fix the chunk length */ 9876 ch->chunk_length = htons(len + old_len); 9877 chk->book_size = len + old_len; 9878 chk->send_size = SCTP_SIZE32(chk->book_size); 9879 chk->book_size_scale = 0; 9880 SCTP_BUF_LEN(chk->data) = chk->send_size; 9881 return; 9882 } 9883 9884 9885 int 9886 sctp_send_str_reset_req(struct sctp_tcb *stcb, 9887 int number_entries, uint16_t * list, 9888 uint8_t send_out_req, uint32_t resp_seq, 9889 uint8_t send_in_req, 9890 uint8_t send_tsn_req) 9891 { 9892 9893 struct sctp_association *asoc; 9894 struct sctp_tmit_chunk *chk; 9895 struct sctp_chunkhdr *ch; 9896 uint32_t seq; 9897 9898 asoc = &stcb->asoc; 9899 if (asoc->stream_reset_outstanding) { 9900 /* 9901 * Already one pending, must get ACK back to clear the flag. 
9902 */ 9903 return (EBUSY); 9904 } 9905 if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0)) { 9906 /* nothing to do */ 9907 return (EINVAL); 9908 } 9909 if (send_tsn_req && (send_out_req || send_in_req)) { 9910 /* error, can't do that */ 9911 return (EINVAL); 9912 } 9913 sctp_alloc_a_chunk(stcb, chk); 9914 if (chk == NULL) { 9915 return (ENOMEM); 9916 } 9917 chk->copy_by_ref = 0; 9918 chk->rec.chunk_id.id = SCTP_STREAM_RESET; 9919 chk->rec.chunk_id.can_take_data = 0; 9920 chk->asoc = &stcb->asoc; 9921 chk->book_size = sizeof(struct sctp_chunkhdr); 9922 chk->send_size = SCTP_SIZE32(chk->book_size); 9923 chk->book_size_scale = 0; 9924 9925 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 9926 if (chk->data == NULL) { 9927 sctp_free_a_chunk(stcb, chk); 9928 return (ENOMEM); 9929 } 9930 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 9931 9932 /* setup chunk parameters */ 9933 chk->sent = SCTP_DATAGRAM_UNSENT; 9934 chk->snd_count = 0; 9935 chk->whoTo = asoc->primary_destination; 9936 atomic_add_int(&chk->whoTo->ref_count, 1); 9937 9938 ch = mtod(chk->data, struct sctp_chunkhdr *); 9939 ch->chunk_type = SCTP_STREAM_RESET; 9940 ch->chunk_flags = 0; 9941 ch->chunk_length = htons(chk->book_size); 9942 SCTP_BUF_LEN(chk->data) = chk->send_size; 9943 9944 seq = stcb->asoc.str_reset_seq_out; 9945 if (send_out_req) { 9946 sctp_add_stream_reset_out(chk, number_entries, list, 9947 seq, resp_seq, (stcb->asoc.sending_seq - 1)); 9948 asoc->stream_reset_out_is_outstanding = 1; 9949 seq++; 9950 asoc->stream_reset_outstanding++; 9951 } 9952 if (send_in_req) { 9953 sctp_add_stream_reset_in(chk, number_entries, list, seq); 9954 asoc->stream_reset_outstanding++; 9955 } 9956 if (send_tsn_req) { 9957 sctp_add_stream_reset_tsn(chk, seq); 9958 asoc->stream_reset_outstanding++; 9959 } 9960 asoc->str_reset = chk; 9961 9962 /* insert the chunk for sending */ 9963 TAILQ_INSERT_TAIL(&asoc->control_send_queue, 9964 chk, 9965 sctp_next); 9966 
asoc->ctrl_queue_cnt++; 9967 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo); 9968 return (0); 9969 } 9970 9971 void 9972 sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag, 9973 struct mbuf *err_cause) 9974 { 9975 /* 9976 * Formulate the abort message, and send it back down. 9977 */ 9978 struct mbuf *o_pak; 9979 struct mbuf *mout; 9980 struct sctp_abort_msg *abm; 9981 struct ip *iph, *iph_out; 9982 struct ip6_hdr *ip6, *ip6_out; 9983 int iphlen_out; 9984 9985 /* don't respond to ABORT with ABORT */ 9986 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) { 9987 if (err_cause) 9988 sctp_m_freem(err_cause); 9989 return; 9990 } 9991 o_pak = SCTP_GET_HEADER_FOR_OUTPUT((sizeof(struct ip6_hdr) + sizeof(struct sctp_abort_msg))); 9992 if (o_pak == NULL) { 9993 if (err_cause) 9994 sctp_m_freem(err_cause); 9995 return; 9996 } 9997 mout = SCTP_HEADER_TO_CHAIN(o_pak); 9998 iph = mtod(m, struct ip *); 9999 iph_out = NULL; 10000 ip6_out = NULL; 10001 if (iph->ip_v == IPVERSION) { 10002 iph_out = mtod(mout, struct ip *); 10003 SCTP_BUF_LEN(mout) = sizeof(*iph_out) + sizeof(*abm); 10004 SCTP_BUF_NEXT(mout) = err_cause; 10005 10006 /* Fill in the IP header for the ABORT */ 10007 iph_out->ip_v = IPVERSION; 10008 iph_out->ip_hl = (sizeof(struct ip) / 4); 10009 iph_out->ip_tos = (u_char)0; 10010 iph_out->ip_id = 0; 10011 iph_out->ip_off = 0; 10012 iph_out->ip_ttl = MAXTTL; 10013 iph_out->ip_p = IPPROTO_SCTP; 10014 iph_out->ip_src.s_addr = iph->ip_dst.s_addr; 10015 iph_out->ip_dst.s_addr = iph->ip_src.s_addr; 10016 /* let IP layer calculate this */ 10017 iph_out->ip_sum = 0; 10018 10019 iphlen_out = sizeof(*iph_out); 10020 abm = (struct sctp_abort_msg *)((caddr_t)iph_out + iphlen_out); 10021 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 10022 ip6 = (struct ip6_hdr *)iph; 10023 ip6_out = mtod(mout, struct ip6_hdr *); 10024 SCTP_BUF_LEN(mout) = sizeof(*ip6_out) + sizeof(*abm); 10025 SCTP_BUF_NEXT(mout) = err_cause; 10026 10027 /* 
Fill in the IP6 header for the ABORT */ 10028 ip6_out->ip6_flow = ip6->ip6_flow; 10029 ip6_out->ip6_hlim = ip6_defhlim; 10030 ip6_out->ip6_nxt = IPPROTO_SCTP; 10031 ip6_out->ip6_src = ip6->ip6_dst; 10032 ip6_out->ip6_dst = ip6->ip6_src; 10033 10034 iphlen_out = sizeof(*ip6_out); 10035 abm = (struct sctp_abort_msg *)((caddr_t)ip6_out + iphlen_out); 10036 } else { 10037 /* Currently not supported */ 10038 return; 10039 } 10040 10041 abm->sh.src_port = sh->dest_port; 10042 abm->sh.dest_port = sh->src_port; 10043 abm->sh.checksum = 0; 10044 if (vtag == 0) { 10045 abm->sh.v_tag = sh->v_tag; 10046 abm->msg.ch.chunk_flags = SCTP_HAD_NO_TCB; 10047 } else { 10048 abm->sh.v_tag = htonl(vtag); 10049 abm->msg.ch.chunk_flags = 0; 10050 } 10051 abm->msg.ch.chunk_type = SCTP_ABORT_ASSOCIATION; 10052 10053 if (err_cause) { 10054 struct mbuf *m_tmp = err_cause; 10055 int err_len = 0; 10056 10057 /* get length of the err_cause chain */ 10058 while (m_tmp != NULL) { 10059 err_len += SCTP_BUF_LEN(m_tmp); 10060 m_tmp = SCTP_BUF_NEXT(m_tmp); 10061 } 10062 SCTP_HEADER_LEN(o_pak) = SCTP_BUF_LEN(mout) + err_len; 10063 if (err_len % 4) { 10064 /* need pad at end of chunk */ 10065 uint32_t cpthis = 0; 10066 int padlen; 10067 10068 padlen = 4 - (SCTP_HEADER_LEN(o_pak) % 4); 10069 m_copyback(mout, SCTP_HEADER_LEN(o_pak), padlen, (caddr_t)&cpthis); 10070 SCTP_HEADER_LEN(o_pak) += padlen; 10071 } 10072 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch) + err_len); 10073 } else { 10074 SCTP_HEADER_LEN(mout) = SCTP_BUF_LEN(mout); 10075 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch)); 10076 } 10077 10078 /* add checksum */ 10079 if ((sctp_no_csum_on_loopback) && SCTP_IS_IT_LOOPBACK(m)) { 10080 abm->sh.checksum = 0; 10081 } else { 10082 abm->sh.checksum = sctp_calculate_sum(mout, NULL, iphlen_out); 10083 } 10084 if (iph_out != NULL) { 10085 struct route ro; 10086 10087 /* zap the stack pointer to the route */ 10088 bzero(&ro, sizeof ro); 10089 #ifdef SCTP_DEBUG 10090 if (sctp_debug_on & 
SCTP_DEBUG_OUTPUT2) { 10091 printf("sctp_send_abort calling ip_output:\n"); 10092 sctp_print_address_pkt(iph_out, &abm->sh); 10093 } 10094 #endif 10095 /* set IPv4 length */ 10096 iph_out->ip_len = SCTP_HEADER_LEN(o_pak); 10097 /* out it goes */ 10098 (void)ip_output(o_pak, 0, &ro, IP_RAWOUTPUT, NULL 10099 ,NULL 10100 ); 10101 /* Free the route if we got one back */ 10102 if (ro.ro_rt) 10103 RTFREE(ro.ro_rt); 10104 } else if (ip6_out != NULL) { 10105 struct route_in6 ro; 10106 10107 /* zap the stack pointer to the route */ 10108 bzero(&ro, sizeof(ro)); 10109 #ifdef SCTP_DEBUG 10110 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) { 10111 printf("sctp_send_abort calling ip6_output:\n"); 10112 sctp_print_address_pkt((struct ip *)ip6_out, &abm->sh); 10113 } 10114 #endif 10115 ip6_out->ip6_plen = SCTP_HEADER_LEN(o_pak) - sizeof(*ip6_out); 10116 ip6_output(o_pak, NULL, &ro, 0, NULL, NULL 10117 ,NULL 10118 ); 10119 /* Free the route if we got one back */ 10120 if (ro.ro_rt) 10121 RTFREE(ro.ro_rt); 10122 } 10123 SCTP_STAT_INCR(sctps_sendpackets); 10124 SCTP_STAT_INCR_COUNTER64(sctps_outpackets); 10125 } 10126 10127 void 10128 sctp_send_operr_to(struct mbuf *m, int iphlen, 10129 struct mbuf *scm, 10130 uint32_t vtag) 10131 { 10132 struct mbuf *o_pak; 10133 struct sctphdr *ihdr; 10134 int retcode; 10135 struct sctphdr *ohdr; 10136 struct sctp_chunkhdr *ophdr; 10137 10138 struct ip *iph; 10139 10140 #ifdef SCTP_DEBUG 10141 struct sockaddr_in6 lsa6, fsa6; 10142 10143 #endif 10144 uint32_t val; 10145 struct mbuf *at; 10146 int len; 10147 10148 iph = mtod(m, struct ip *); 10149 ihdr = (struct sctphdr *)((caddr_t)iph + iphlen); 10150 10151 SCTP_BUF_PREPEND(scm, (sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr)), M_DONTWAIT); 10152 if (scm == NULL) { 10153 /* can't send because we can't add a mbuf */ 10154 return; 10155 } 10156 ohdr = mtod(scm, struct sctphdr *); 10157 ohdr->src_port = ihdr->dest_port; 10158 ohdr->dest_port = ihdr->src_port; 10159 ohdr->v_tag = vtag; 10160 
ohdr->checksum = 0; 10161 ophdr = (struct sctp_chunkhdr *)(ohdr + 1); 10162 ophdr->chunk_type = SCTP_OPERATION_ERROR; 10163 ophdr->chunk_flags = 0; 10164 len = 0; 10165 at = scm; 10166 while (at) { 10167 len += SCTP_BUF_LEN(at); 10168 at = SCTP_BUF_NEXT(at); 10169 } 10170 10171 ophdr->chunk_length = htons(len - sizeof(struct sctphdr)); 10172 if (len % 4) { 10173 /* need padding */ 10174 uint32_t cpthis = 0; 10175 int padlen; 10176 10177 padlen = 4 - (len % 4); 10178 m_copyback(scm, len, padlen, (caddr_t)&cpthis); 10179 len += padlen; 10180 } 10181 if ((sctp_no_csum_on_loopback) && SCTP_IS_IT_LOOPBACK(m)) { 10182 val = 0; 10183 } else { 10184 val = sctp_calculate_sum(scm, NULL, 0); 10185 } 10186 ohdr->checksum = val; 10187 if (iph->ip_v == IPVERSION) { 10188 /* V4 */ 10189 struct ip *out; 10190 struct route ro; 10191 10192 o_pak = SCTP_GET_HEADER_FOR_OUTPUT(sizeof(struct ip)); 10193 if (o_pak == NULL) { 10194 sctp_m_freem(scm); 10195 return; 10196 } 10197 SCTP_BUF_LEN(SCTP_HEADER_TO_CHAIN(o_pak)) = sizeof(struct ip); 10198 len += sizeof(struct ip); 10199 SCTP_ATTACH_CHAIN(o_pak, scm, len); 10200 bzero(&ro, sizeof ro); 10201 out = mtod(SCTP_HEADER_TO_CHAIN(o_pak), struct ip *); 10202 out->ip_v = iph->ip_v; 10203 out->ip_hl = (sizeof(struct ip) / 4); 10204 out->ip_tos = iph->ip_tos; 10205 out->ip_id = iph->ip_id; 10206 out->ip_off = 0; 10207 out->ip_ttl = MAXTTL; 10208 out->ip_p = IPPROTO_SCTP; 10209 out->ip_sum = 0; 10210 out->ip_src = iph->ip_dst; 10211 out->ip_dst = iph->ip_src; 10212 out->ip_len = SCTP_HEADER_LEN(o_pak); 10213 retcode = ip_output(o_pak, 0, &ro, IP_RAWOUTPUT, NULL 10214 ,NULL 10215 ); 10216 SCTP_STAT_INCR(sctps_sendpackets); 10217 SCTP_STAT_INCR_COUNTER64(sctps_outpackets); 10218 /* Free the route if we got one back */ 10219 if (ro.ro_rt) 10220 RTFREE(ro.ro_rt); 10221 } else { 10222 /* V6 */ 10223 struct route_in6 ro; 10224 struct ip6_hdr *out6, *in6; 10225 10226 o_pak = SCTP_GET_HEADER_FOR_OUTPUT(sizeof(struct ip6_hdr)); 10227 if (o_pak == NULL) { 
10228 sctp_m_freem(scm); 10229 return; 10230 } 10231 SCTP_BUF_LEN(SCTP_HEADER_TO_CHAIN(o_pak)) = sizeof(struct ip6_hdr); 10232 len += sizeof(struct ip6_hdr); 10233 SCTP_ATTACH_CHAIN(o_pak, scm, len); 10234 10235 bzero(&ro, sizeof ro); 10236 in6 = mtod(m, struct ip6_hdr *); 10237 out6 = mtod(SCTP_HEADER_TO_CHAIN(o_pak), struct ip6_hdr *); 10238 out6->ip6_flow = in6->ip6_flow; 10239 out6->ip6_hlim = ip6_defhlim; 10240 out6->ip6_nxt = IPPROTO_SCTP; 10241 out6->ip6_src = in6->ip6_dst; 10242 out6->ip6_dst = in6->ip6_src; 10243 out6->ip6_plen = len - sizeof(struct ip6_hdr); 10244 #ifdef SCTP_DEBUG 10245 bzero(&lsa6, sizeof(lsa6)); 10246 lsa6.sin6_len = sizeof(lsa6); 10247 lsa6.sin6_family = AF_INET6; 10248 lsa6.sin6_addr = out6->ip6_src; 10249 bzero(&fsa6, sizeof(fsa6)); 10250 fsa6.sin6_len = sizeof(fsa6); 10251 fsa6.sin6_family = AF_INET6; 10252 fsa6.sin6_addr = out6->ip6_dst; 10253 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) { 10254 printf("sctp_operr_to calling ipv6 output:\n"); 10255 printf("src: "); 10256 sctp_print_address((struct sockaddr *)&lsa6); 10257 printf("dst "); 10258 sctp_print_address((struct sockaddr *)&fsa6); 10259 } 10260 #endif /* SCTP_DEBUG */ 10261 ip6_output(o_pak, NULL, &ro, 0, NULL, NULL 10262 ,NULL 10263 ); 10264 SCTP_STAT_INCR(sctps_sendpackets); 10265 SCTP_STAT_INCR_COUNTER64(sctps_outpackets); 10266 /* Free the route if we got one back */ 10267 if (ro.ro_rt) 10268 RTFREE(ro.ro_rt); 10269 } 10270 } 10271 10272 10273 10274 static struct mbuf * 10275 sctp_copy_resume(struct sctp_stream_queue_pending *sp, 10276 struct uio *uio, 10277 struct sctp_sndrcvinfo *srcv, 10278 int max_send_len, 10279 int user_marks_eor, 10280 int *error, 10281 uint32_t * sndout, 10282 struct mbuf **new_tail) 10283 { 10284 int left, cancpy, willcpy; 10285 struct mbuf *m, *prev, *head; 10286 10287 left = min(uio->uio_resid, max_send_len); 10288 /* Always get a header just in case */ 10289 head = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 0, MT_DATA); 10290 cancpy = 
M_TRAILINGSPACE(head); 10291 willcpy = min(cancpy, left); 10292 *error = uiomove(mtod(head, caddr_t), willcpy, uio); 10293 if (*error) { 10294 sctp_m_freem(head); 10295 return (NULL); 10296 } 10297 *sndout += willcpy; 10298 left -= willcpy; 10299 SCTP_BUF_LEN(head) = willcpy; 10300 m = head; 10301 *new_tail = head; 10302 while (left > 0) { 10303 /* move in user data */ 10304 SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 0, MT_DATA); 10305 if (SCTP_BUF_NEXT(m) == NULL) { 10306 sctp_m_freem(head); 10307 *new_tail = NULL; 10308 *error = ENOMEM; 10309 return (NULL); 10310 } 10311 prev = m; 10312 m = SCTP_BUF_NEXT(m); 10313 cancpy = M_TRAILINGSPACE(m); 10314 willcpy = min(cancpy, left); 10315 *error = uiomove(mtod(m, caddr_t), willcpy, uio); 10316 if (*error) { 10317 sctp_m_freem(head); 10318 *new_tail = NULL; 10319 *error = EFAULT; 10320 return (NULL); 10321 } 10322 SCTP_BUF_LEN(m) = willcpy; 10323 left -= willcpy; 10324 *sndout += willcpy; 10325 *new_tail = m; 10326 if (left == 0) { 10327 SCTP_BUF_NEXT(m) = NULL; 10328 } 10329 } 10330 return (head); 10331 } 10332 10333 static int 10334 sctp_copy_one(struct sctp_stream_queue_pending *sp, 10335 struct uio *uio, 10336 int resv_upfront) 10337 { 10338 int left, cancpy, willcpy, error; 10339 struct mbuf *m, *head; 10340 int cpsz = 0; 10341 10342 /* First one gets a header */ 10343 left = sp->length; 10344 head = m = sctp_get_mbuf_for_msg((left + resv_upfront), 0, M_WAIT, 0, MT_DATA); 10345 if (m == NULL) { 10346 return (ENOMEM); 10347 } 10348 /* 10349 * Add this one for m in now, that way if the alloc fails we won't 10350 * have a bad cnt. 
10351 */ 10352 SCTP_BUF_RESV_UF(m, resv_upfront); 10353 cancpy = M_TRAILINGSPACE(m); 10354 willcpy = min(cancpy, left); 10355 while (left > 0) { 10356 /* move in user data */ 10357 error = uiomove(mtod(m, caddr_t), willcpy, uio); 10358 if (error) { 10359 sctp_m_freem(head); 10360 return (error); 10361 } 10362 SCTP_BUF_LEN(m) = willcpy; 10363 left -= willcpy; 10364 cpsz += willcpy; 10365 if (left > 0) { 10366 SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 0, MT_DATA); 10367 if (SCTP_BUF_NEXT(m) == NULL) { 10368 /* 10369 * the head goes back to caller, he can free 10370 * the rest 10371 */ 10372 sctp_m_freem(head); 10373 return (ENOMEM); 10374 } 10375 m = SCTP_BUF_NEXT(m); 10376 cancpy = M_TRAILINGSPACE(m); 10377 willcpy = min(cancpy, left); 10378 } else { 10379 sp->tail_mbuf = m; 10380 SCTP_BUF_NEXT(m) = NULL; 10381 } 10382 } 10383 sp->data = head; 10384 sp->length = cpsz; 10385 return (0); 10386 } 10387 10388 10389 10390 static struct sctp_stream_queue_pending * 10391 sctp_copy_it_in(struct sctp_tcb *stcb, 10392 struct sctp_association *asoc, 10393 struct sctp_sndrcvinfo *srcv, 10394 struct uio *uio, 10395 struct sctp_nets *net, 10396 int max_send_len, 10397 int user_marks_eor, 10398 int *error, 10399 int non_blocking) 10400 { 10401 /* 10402 * This routine must be very careful in its work. Protocol 10403 * processing is up and running so care must be taken to spl...() 10404 * when you need to do something that may effect the stcb/asoc. The 10405 * sb is locked however. When data is copied the protocol processing 10406 * should be enabled since this is a slower operation... 10407 */ 10408 struct sctp_stream_queue_pending *sp = NULL; 10409 int resv_in_first; 10410 10411 *error = 0; 10412 /* Unless E_EOR mode is on, we must make a send FIT in one call. 
*/ 10413 if (((user_marks_eor == 0) && non_blocking) && 10414 (uio->uio_resid > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) { 10415 /* It will NEVER fit */ 10416 *error = EMSGSIZE; 10417 goto out_now; 10418 } 10419 /* Now can we send this? */ 10420 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) || 10421 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 10422 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) || 10423 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) { 10424 /* got data while shutting down */ 10425 *error = ECONNRESET; 10426 goto out_now; 10427 } 10428 sp = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_strmoq, struct sctp_stream_queue_pending); 10429 if (sp == NULL) { 10430 *error = ENOMEM; 10431 goto out_now; 10432 } 10433 SCTP_INCR_STRMOQ_COUNT(); 10434 sp->act_flags = 0; 10435 sp->sinfo_flags = srcv->sinfo_flags; 10436 sp->timetolive = srcv->sinfo_timetolive; 10437 sp->ppid = srcv->sinfo_ppid; 10438 sp->context = srcv->sinfo_context; 10439 sp->strseq = 0; 10440 SCTP_GETTIME_TIMEVAL(&sp->ts); 10441 10442 sp->stream = srcv->sinfo_stream; 10443 sp->length = min(uio->uio_resid, max_send_len); 10444 if ((sp->length == uio->uio_resid) && 10445 ((user_marks_eor == 0) || 10446 (srcv->sinfo_flags & SCTP_EOF) || 10447 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR))) 10448 ) { 10449 sp->msg_is_complete = 1; 10450 } else { 10451 sp->msg_is_complete = 0; 10452 } 10453 sp->some_taken = 0; 10454 resv_in_first = sizeof(struct sctp_data_chunk); 10455 sp->data = sp->tail_mbuf = NULL; 10456 *error = sctp_copy_one(sp, uio, resv_in_first); 10457 if (*error) { 10458 sctp_free_a_strmoq(stcb, sp); 10459 sp->data = NULL; 10460 sp->net = NULL; 10461 sp = NULL; 10462 } else { 10463 if (sp->sinfo_flags & SCTP_ADDR_OVER) { 10464 sp->net = net; 10465 sp->addr_over = 1; 10466 } else { 10467 sp->net = asoc->primary_destination; 10468 sp->addr_over = 0; 10469 } 10470 atomic_add_int(&sp->net->ref_count, 1); 10471 sctp_set_prsctp_policy(stcb, sp); 10472 } 10473 out_now: 10474 
return (sp); 10475 } 10476 10477 10478 int 10479 sctp_sosend(struct socket *so, 10480 struct sockaddr *addr, 10481 struct uio *uio, 10482 struct mbuf *top, 10483 struct mbuf *control, 10484 int flags 10485 , 10486 struct thread *p 10487 ) 10488 { 10489 struct sctp_inpcb *inp; 10490 int error, use_rcvinfo = 0; 10491 struct sctp_sndrcvinfo srcv; 10492 10493 inp = (struct sctp_inpcb *)so->so_pcb; 10494 if (control) { 10495 /* process cmsg snd/rcv info (maybe a assoc-id) */ 10496 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&srcv, control, 10497 sizeof(srcv))) { 10498 /* got one */ 10499 use_rcvinfo = 1; 10500 } 10501 } 10502 error = sctp_lower_sosend(so, addr, uio, top, control, flags, 10503 use_rcvinfo, &srcv, p); 10504 return (error); 10505 } 10506 10507 10508 int 10509 sctp_lower_sosend(struct socket *so, 10510 struct sockaddr *addr, 10511 struct uio *uio, 10512 struct mbuf *i_pak, 10513 struct mbuf *control, 10514 int flags, 10515 int use_rcvinfo, 10516 struct sctp_sndrcvinfo *srcv, 10517 struct thread *p 10518 ) 10519 { 10520 unsigned int sndlen, max_len; 10521 int error, len; 10522 struct mbuf *top = NULL; 10523 10524 #if defined(__NetBSD__) || defined(__OpenBSD_) 10525 int s; 10526 10527 #endif 10528 int queue_only = 0, queue_only_for_init = 0; 10529 int free_cnt_applied = 0; 10530 int un_sent = 0; 10531 int now_filled = 0; 10532 struct sctp_block_entry be; 10533 struct sctp_inpcb *inp; 10534 struct sctp_tcb *stcb = NULL; 10535 struct timeval now; 10536 struct sctp_nets *net; 10537 struct sctp_association *asoc; 10538 struct sctp_inpcb *t_inp; 10539 int create_lock_applied = 0; 10540 int nagle_applies = 0; 10541 int some_on_control = 0; 10542 int got_all_of_the_send = 0; 10543 int hold_tcblock = 0; 10544 int non_blocking = 0; 10545 10546 error = 0; 10547 net = NULL; 10548 stcb = NULL; 10549 asoc = NULL; 10550 t_inp = inp = (struct sctp_inpcb *)so->so_pcb; 10551 if (inp == NULL) { 10552 error = EFAULT; 10553 goto out_unlocked; 10554 } 10555 
	/* Record the send attempt against the endpoint before validation. */
	atomic_add_int(&inp->total_sends, 1);
	/*
	 * Payload arrives either via a uio (to be copied in below) or as a
	 * caller-built mbuf chain (i_pak), never both.
	 */
	if (uio)
		sndlen = uio->uio_resid;
	else {
		sndlen = SCTP_HEADER_LEN(i_pak);
		top = SCTP_HEADER_TO_CHAIN(i_pak);
	}

	hold_tcblock = 0;

	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (inp->sctp_socket->so_qlimit)) {
		/* The listener can NOT send */
		error = EFAULT;
		goto out_unlocked;
	}
	if ((use_rcvinfo) && srcv) {
		/* Reject unknown sinfo flags / PR-SCTP policies up front. */
		if (INVALID_SINFO_FLAG(srcv->sinfo_flags) || PR_SCTP_INVALID_POLICY(srcv->sinfo_flags)) {
			error = EINVAL;
			goto out_unlocked;
		}
		if (srcv->sinfo_flags)
			SCTP_STAT_INCR(sctps_sends_with_flags);

		if (srcv->sinfo_flags & SCTP_SENDALL) {
			/* its a sendall */
			error = sctp_sendall(inp, uio, top, srcv);
			top = NULL;
			goto out_unlocked;
		}
	}
	/* now we must find the assoc */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
		SCTP_INP_RLOCK(inp);
		stcb = LIST_FIRST(&inp->sctp_asoc_list);
		if (stcb == NULL) {
			SCTP_INP_RUNLOCK(inp);
			error = ENOTCONN;
			goto out_unlocked;
		}
		/*
		 * NOTE(review): stcb is used below without the TCB lock
		 * held on this path (hold_tcblock stays 0) -- confirm the
		 * refcnt taken later is sufficient protection here.
		 */
		hold_tcblock = 0;
		SCTP_INP_RUNLOCK(inp);
		if (addr)
			/* Must locate the net structure if addr given */
			net = sctp_findnet(stcb, addr);
		else
			net = stcb->asoc.primary_destination;

	} else if (use_rcvinfo && srcv && srcv->sinfo_assoc_id) {
		stcb = sctp_findassociation_ep_asocid(inp, srcv->sinfo_assoc_id, 0);
		if (stcb) {
			if (addr)
				/*
				 * Must locate the net structure if addr
				 * given
				 */
				net = sctp_findnet(stcb, addr);
			else
				net = stcb->asoc.primary_destination;
		}
		hold_tcblock = 0;
	} else if (addr) {
		/*
		 * Since we did not use findep we must increment it, and if
		 * we don't find a tcb decrement it.
	 */
		SCTP_INP_WLOCK(inp);
		SCTP_INP_INCR_REF(inp);
		SCTP_INP_WUNLOCK(inp);
		stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
		if (stcb == NULL) {
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		} else {
			/* Found: sctp_findassociation_ep_addr returns it locked. */
			hold_tcblock = 1;
		}
	}
	if ((stcb == NULL) && (addr)) {
		/* Possible implicit send? */
		SCTP_ASOC_CREATE_LOCK(inp);
		create_lock_applied = 1;
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
		    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
			/* Should I really unlock ? */
			error = EFAULT;
			goto out_unlocked;

		}
		if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
		    (addr->sa_family == AF_INET6)) {
			/* v4-only endpoint cannot implicitly connect to a v6 peer. */
			error = EINVAL;
			goto out_unlocked;
		}
		SCTP_INP_WLOCK(inp);
		SCTP_INP_INCR_REF(inp);
		SCTP_INP_WUNLOCK(inp);
		/* With the lock applied look again */
		stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
		if (stcb == NULL) {
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		} else {
			hold_tcblock = 1;
		}
	}
	if (stcb == NULL) {
		if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
			error = ENOTCONN;
			goto out_unlocked;
		} else if (addr == NULL) {
			error = ENOENT;
			goto out_unlocked;
		} else {
			/*
			 * UDP style, we must go ahead and start the INIT
			 * process
			 */
			uint32_t vrf;

			if ((use_rcvinfo) && (srcv) &&
			    ((srcv->sinfo_flags & SCTP_ABORT) ||
			    ((srcv->sinfo_flags & SCTP_EOF) &&
			    (uio->uio_resid == 0)))) {
				/*
				 * User asks to abort a non-existant assoc,
				 * or EOF a non-existant assoc with no data
				 */
				error = ENOENT;
				goto out_unlocked;
			}
			/* get an asoc/stcb struct */
			vrf = SCTP_DEFAULT_VRFID;
			stcb = sctp_aloc_assoc(inp, addr, 1,
	    &error, 0, vrf);
			if (stcb == NULL) {
				/* Error is setup for us in the call */
				goto out_unlocked;
			}
			if (create_lock_applied) {
				SCTP_ASOC_CREATE_UNLOCK(inp);
				create_lock_applied = 0;
			} else {
				printf("Huh-3? create lock should have been on??\n");
			}
			/*
			 * Turn on queue only flag to prevent data from
			 * being sent
			 */
			queue_only = 1;
			asoc = &stcb->asoc;
			asoc->state = SCTP_STATE_COOKIE_WAIT;
			SCTP_GETTIME_TIMEVAL(&asoc->time_entered);

			/* initialize authentication params for the assoc */
			sctp_initialize_auth_params(inp, stcb);

			if (control) {
				/*
				 * see if a init structure exists in cmsg
				 * headers
				 */
				struct sctp_initmsg initm;
				int i;

				if (sctp_find_cmsg(SCTP_INIT, (void *)&initm, control,
				    sizeof(initm))) {
					/*
					 * we have an INIT override of the
					 * default
					 */
					if (initm.sinit_max_attempts)
						asoc->max_init_times = initm.sinit_max_attempts;
					if (initm.sinit_num_ostreams)
						asoc->pre_open_streams = initm.sinit_num_ostreams;
					if (initm.sinit_max_instreams)
						asoc->max_inbound_streams = initm.sinit_max_instreams;
					if (initm.sinit_max_init_timeo)
						asoc->initial_init_rto_max = initm.sinit_max_init_timeo;
					if (asoc->streamoutcnt < asoc->pre_open_streams) {
						/* Default is NOT correct */
#ifdef SCTP_DEBUG
						if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
							printf("Ok, defout:%d pre_open:%d\n",
							    asoc->streamoutcnt, asoc->pre_open_streams);
						}
#endif
						SCTP_FREE(asoc->strmout);
						asoc->strmout = NULL;
						asoc->streamoutcnt = asoc->pre_open_streams;
						/*
						 * What happens if this
						 * fails? .. we panic ...
						 */
						{
							struct sctp_stream_out *tmp_str;
							int had_lock = 0;

							/* Drop the TCB lock around a potentially sleeping allocation. */
							if (hold_tcblock) {
								had_lock = 1;
								SCTP_TCB_UNLOCK(stcb);
							}
							SCTP_MALLOC(tmp_str,
							    struct sctp_stream_out *,
							    asoc->streamoutcnt *
							    sizeof(struct sctp_stream_out),
							    "StreamsOut");
							if (had_lock) {
								SCTP_TCB_LOCK(stcb);
							}
							if (asoc->strmout == NULL) {
								asoc->strmout = tmp_str;
							} else {
								SCTP_FREE(asoc->strmout);
								asoc->strmout = tmp_str;
							}
						}
						for (i = 0; i < asoc->streamoutcnt; i++) {
							/*
							 * inbound side must
							 * be set to 0xffff,
							 * also NOTE when we
							 * get the INIT-ACK
							 * back (for INIT
							 * sender) we MUST
							 * reduce the count
							 * (streamoutcnt)
							 * but first check
							 * if we sent to any
							 * of the upper
							 * streams that were
							 * dropped (if some
							 * were). Those that
							 * were dropped must
							 * be notified to
							 * the upper layer
							 * as failed to
							 * send.
							 */
							asoc->strmout[i].next_sequence_sent = 0x0;
							TAILQ_INIT(&asoc->strmout[i].outqueue);
							asoc->strmout[i].stream_no = i;
							asoc->strmout[i].last_msg_incomplete = 0;
							asoc->strmout[i].next_spoke.tqe_next = 0;
							asoc->strmout[i].next_spoke.tqe_prev = 0;
						}
					}
				}
			}
			hold_tcblock = 1;
			/* out with the INIT */
			queue_only_for_init = 1;
			/*
			 * we may want to dig in after this call and adjust
			 * the MTU value. It defaulted to 1500 (constant)
			 * but the ro structure may now have an update and
			 * thus we may need to change it BEFORE we append
			 * the message.
			 */
			net = stcb->asoc.primary_destination;
			asoc = &stcb->asoc;
		}
	}
	if ((SCTP_SO_IS_NBIO(so)
	    || (flags & MSG_NBIO)
	    )) {
		non_blocking = 1;
	}
	asoc = &stcb->asoc;
	/* would we block?
	 */
	if (non_blocking) {
		/*
		 * Non-blocking socket: fail with EWOULDBLOCK instead of
		 * sleeping when there is no room for the whole message or
		 * the outbound chunk count is over the sysctl limit.
		 */
		if ((SCTP_SB_LIMIT_SND(so) <
		    (sndlen + stcb->asoc.total_output_queue_size)) ||
		    (stcb->asoc.chunks_on_out_queue >
		    sctp_max_chunks_on_queue)) {
			error = EWOULDBLOCK;
			atomic_add_int(&stcb->sctp_ep->total_nospaces, 1);
			goto out_unlocked;
		}
	}
	/* Keep the stcb from being freed under our feet */
	atomic_add_int(&stcb->asoc.refcnt, 1);
	free_cnt_applied = 1;

	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		error = ECONNRESET;
		goto out_unlocked;
	}
	if (create_lock_applied) {
		SCTP_ASOC_CREATE_UNLOCK(inp);
		create_lock_applied = 0;
	}
	if (asoc->stream_reset_outstanding) {
		/*
		 * Can't queue any data while stream reset is underway.
		 */
		error = EAGAIN;
		goto out_unlocked;
	}
	/* Before the handshake completes, data may only be queued, not sent. */
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
		queue_only = 1;
	}
	if ((use_rcvinfo == 0) || (srcv == NULL)) {
		/* Grab the default stuff from the asoc */
		srcv = &stcb->asoc.def_send;
	}
	/* we are now done with all control */
	if (control) {
		sctp_m_freem(control);
		control = NULL;
	}
	/* A shutting-down association only accepts SCTP_ABORT sends. */
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
		if ((use_rcvinfo) &&
		    (srcv->sinfo_flags & SCTP_ABORT)) {
			;
		} else {
			error = ECONNRESET;
			goto out_unlocked;
		}
	}
	/* Ok, we will attempt a msgsnd :> */
	if (p) {
		p->td_proc->p_stats->p_ru.ru_msgsnd++;
	}
	if (stcb) {
		if (net && ((srcv->sinfo_flags & SCTP_ADDR_OVER))) {
			/* we take the override or the unconfirmed */
			;
		} else {
			net = stcb->asoc.primary_destination;

		}
	}
	if ((net->flight_size > net->cwnd) && (sctp_cmt_on_off == 0)) {
		/*
		 * CMT: Added check for CMT above. net above is the primary
		 * dest. If CMT is ON, sender should always attempt to send
		 * with the output routine sctp_fill_outqueue() that loops
		 * through all destination addresses. Therefore, if CMT is
		 * ON, queue_only is NOT set to 1 here, so that
		 * sctp_chunk_output() can be called below.
		 */
		queue_only = 1;

	} else if (asoc->ifp_had_enobuf) {
		SCTP_STAT_INCR(sctps_ifnomemqueued);
		if (net->flight_size > (net->mtu * 2))
			queue_only = 1;
		asoc->ifp_had_enobuf = 0;
	} else {
		un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
		    ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * sizeof(struct sctp_data_chunk)));
	}
	/* Are we aborting? */
	if (srcv->sinfo_flags & SCTP_ABORT) {
		/*
		 * User-initiated ABORT: build an optional user-cause
		 * payload (from top or uio) and tear the association down.
		 */
		struct mbuf *mm;
		int tot_demand, tot_out, max;

		SCTP_STAT_INCR(sctps_sends_with_abort);
		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
		    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
			/* It has to be up before we abort */
			/* how big is the user initiated abort?
			 */
			error = EINVAL;
			goto out;
		}
		if (hold_tcblock) {
			SCTP_TCB_UNLOCK(stcb);
			hold_tcblock = 0;
		}
		if (top) {
			struct mbuf *cntm;

			mm = sctp_get_mbuf_for_msg(1, 0, M_WAIT, 1, MT_DATA);

			tot_out = 0;
			cntm = top;
			while (cntm) {
				tot_out += SCTP_BUF_LEN(cntm);
				cntm = SCTP_BUF_NEXT(cntm);
			}
			tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
		} else {
			/* Must fit in a MTU */
			tot_out = uio->uio_resid;
			tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
			mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAIT, 1, MT_DATA);
		}
		if (mm == NULL) {
			error = ENOMEM;
			goto out;
		}
		/* Clamp the cause payload so header + cause fit in the smallest MTU. */
		max = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
		max -= sizeof(struct sctp_abort_msg);
		if (tot_out > max) {
			tot_out = max;
		}
		if (mm) {
			struct sctp_paramhdr *ph;

			/* now move forward the data pointer */
			ph = mtod(mm, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
			ph->param_length = htons((sizeof(struct sctp_paramhdr) + tot_out));
			ph++;
			SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
			if (top == NULL) {
				error = uiomove((caddr_t)ph, (int)tot_out, uio);
				if (error) {
					/*
					 * Here if we can't get his data we
					 * still abort we just don't get to
					 * send the users note :-0
					 */
					sctp_m_freem(mm);
					mm = NULL;
				}
			} else {
				SCTP_BUF_NEXT(mm) = top;
			}
		}
		if (hold_tcblock == 0) {
			SCTP_TCB_LOCK(stcb);
			hold_tcblock = 1;
		}
		atomic_add_int(&stcb->asoc.refcnt, -1);
		free_cnt_applied = 0;
		/* release this lock, otherwise we hang on ourselves */
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_RESPONSE_TO_USER_REQ,
		    mm);
		/* now relock the stcb so everything is sane */
		hold_tcblock = 0;
		stcb = NULL;
		goto out_unlocked;
	}
	/* Calculate the maximum we can send */
	if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size) {
		max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size;
	} else {
		max_len = 0;
	}
	if (hold_tcblock) {
		SCTP_TCB_UNLOCK(stcb);
		hold_tcblock = 0;
	}
	/* Is the stream no. valid? */
	if (srcv->sinfo_stream >= asoc->streamoutcnt) {
		/* Invalid stream number */
		error = EINVAL;
		goto out_unlocked;
	}
	if (asoc->strmout == NULL) {
		/* huh? software error */
		error = EFAULT;
		goto out_unlocked;
	}
	len = 0;
	if (max_len < sctp_add_more_threshold) {
		/* No room right no ! */
		SOCKBUF_LOCK(&so->so_snd);
		/* Sleep until the send buffer drains below the add-more threshold. */
		while (SCTP_SB_LIMIT_SND(so) < (stcb->asoc.total_output_queue_size + sctp_add_more_threshold)) {
#ifdef SCTP_BLK_LOGGING
			sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA,
			    so, asoc, uio->uio_resid);
#endif
			be.error = 0;
			stcb->block_entry = &be;
			error = sbwait(&so->so_snd);
			stcb->block_entry = NULL;
			if (error || so->so_error || be.error) {
				if (error == 0) {
					if (so->so_error)
						error = so->so_error;
					if (be.error) {
						error = be.error;
					}
				}
				SOCKBUF_UNLOCK(&so->so_snd);
				goto out_unlocked;
			}
#ifdef SCTP_BLK_LOGGING
			sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
			    so, asoc, stcb->asoc.total_output_queue_size);
#endif
			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
				goto out_unlocked;
			}
		}
		if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size) {
			max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size;
		} else {
			max_len = 0;
		}
		SOCKBUF_UNLOCK(&so->so_snd);
	}
	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		goto out_unlocked;
	}

	atomic_add_int(&stcb->total_sends, 1);
	/*
	 * uio path: copy user bytes into stream-queue entries, either
	 * starting a new pending message or appending to the last
	 * incomplete one (explicit-EOR mode).
	 */
	if (top == NULL) {
		struct sctp_stream_queue_pending *sp;
		struct sctp_stream_out *strm;
		uint32_t sndout, initial_out;
		int user_marks_eor;

		if (uio->uio_resid == 0) {
			if (srcv->sinfo_flags & SCTP_EOF) {
				got_all_of_the_send = 1;
				goto dataless_eof;
			} else {
				error = EINVAL;
				goto out;
			}
		}
		initial_out = uio->uio_resid;

		/* Another stream holds the partial-message lock: cannot interleave. */
		if ((asoc->stream_locked) &&
		    (asoc->stream_locked_on != srcv->sinfo_stream)) {
			error = EAGAIN;
			goto out;
		}
		strm = &stcb->asoc.strmout[srcv->sinfo_stream];
		user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
		if (strm->last_msg_incomplete == 0) {
	do_a_copy_in:
			sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error, non_blocking);
			if ((sp == NULL) || (error)) {
				goto out;
			}
			SCTP_TCB_SEND_LOCK(stcb);
			if (sp->msg_is_complete) {
				strm->last_msg_incomplete = 0;
				asoc->stream_locked = 0;
			} else {
				/*
				 * Just got locked to this guy in case of an
				 * interupt.
				 */
				strm->last_msg_incomplete = 1;
				asoc->stream_locked = 1;
				asoc->stream_locked_on = srcv->sinfo_stream;
			}
			sctp_snd_sb_alloc(stcb, sp->length);

			asoc->stream_queue_cnt++;
			TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
			if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
				/* Ordered message: assign and advance the stream sequence. */
				sp->strseq = strm->next_sequence_sent;
#ifdef SCTP_LOG_SENDING_STR
				sctp_misc_ints(SCTP_STRMOUT_LOG_ASSIGN,
				    (uintptr_t) stcb, (uintptr_t) sp,
				    (uint32_t) ((srcv->sinfo_stream << 16) | sp->strseq), 0);
#endif
				strm->next_sequence_sent++;
			} else {
				SCTP_STAT_INCR(sctps_sends_with_unord);
			}

			if ((strm->next_spoke.tqe_next == NULL) &&
			    (strm->next_spoke.tqe_prev == NULL)) {
				/* Not on wheel, insert */
				sctp_insert_on_wheel(stcb, asoc, strm, 1);
			}
			SCTP_TCB_SEND_UNLOCK(stcb);
		} else {
			sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
			if (sp == NULL) {
				/* ???? Huh ??? last msg is gone */
#ifdef INVARIANTS
				panic("Warning: Last msg marked incomplete, yet nothing left?");
#else
				printf("Warning: Last msg marked incomplete, yet nothing left?\n");
				strm->last_msg_incomplete = 0;
#endif
				goto do_a_copy_in;

			}
		}
		/* Append remaining user bytes, blocking for buffer space as needed. */
		while (uio->uio_resid > 0) {
			/* How much room do we have?
			 */
			struct mbuf *new_tail, *mm;

			if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
				max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size;
			else
				max_len = 0;

			if ((max_len > sctp_add_more_threshold) ||
			    (uio->uio_resid && (uio->uio_resid < max_len))) {
				sndout = 0;
				new_tail = NULL;
				if (hold_tcblock) {
					SCTP_TCB_UNLOCK(stcb);
					hold_tcblock = 0;
				}
				mm = sctp_copy_resume(sp, uio, srcv, max_len, user_marks_eor, &error, &sndout, &new_tail);
				if ((mm == NULL) || error) {
					if (mm) {
						sctp_m_freem(mm);
					}
					goto out;
				}
				/* Update the mbuf and count */
				SCTP_TCB_SEND_LOCK(stcb);
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					/*
					 * we need to get out. Peer probably
					 * aborted.
					 */
					sctp_m_freem(mm);
					if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED)
						error = ECONNRESET;
					goto out;
				}
				if (sp->tail_mbuf) {
					/* tack it to the end */
					SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
					sp->tail_mbuf = new_tail;
				} else {
					/* A stolen mbuf */
					sp->data = mm;
					sp->tail_mbuf = new_tail;
				}
				sctp_snd_sb_alloc(stcb, sndout);
				sp->length += sndout;
				len += sndout;
				/* Did we reach EOR? */
				if ((uio->uio_resid == 0) &&
				    ((user_marks_eor == 0) ||
				    (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))
				    ) {
					sp->msg_is_complete = 1;
				} else {
					sp->msg_is_complete = 0;
				}
				SCTP_TCB_SEND_UNLOCK(stcb);
			}
			if (uio->uio_resid == 0) {
				/* got it all? */
				continue;
			}
			/* PR-SCTP?
			 */
			if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) {
				/*
				 * This is ugly but we must assure locking
				 * order
				 */
				if (hold_tcblock == 0) {
					SCTP_TCB_LOCK(stcb);
					hold_tcblock = 1;
				}
				sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
				if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
					max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size;
				else
					max_len = 0;
				if (max_len > 0) {
					/* Pruning freed some room; retry the copy. */
					continue;
				}
				SCTP_TCB_UNLOCK(stcb);
				hold_tcblock = 0;
			}
			/* wait for space now */
			if (non_blocking) {
				/* Non-blocking io in place out */
				goto skip_out_eof;
			}
			if ((net->flight_size > net->cwnd) &&
			    (sctp_cmt_on_off == 0)) {
				queue_only = 1;

			} else if (asoc->ifp_had_enobuf) {
				SCTP_STAT_INCR(sctps_ifnomemqueued);
				if (net->flight_size > (net->mtu * 2)) {
					queue_only = 1;
				} else {
					queue_only = 0;
				}
				asoc->ifp_had_enobuf = 0;
				un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
				    ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) *
				    sizeof(struct sctp_data_chunk)));
			} else {
				un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
				    ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) *
				    sizeof(struct sctp_data_chunk)));
				queue_only = 0;
			}
			if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
			    (stcb->asoc.total_flight > 0) &&
			    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
			    ) {

				/*
				 * Ok, Nagle is set on and we have data
				 * outstanding. Don't send anything and let
				 * SACKs drive out the data unless wen have
				 * a "full" segment to send.
11257 */ 11258 #ifdef SCTP_NAGLE_LOGGING 11259 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED); 11260 #endif 11261 SCTP_STAT_INCR(sctps_naglequeued); 11262 nagle_applies = 1; 11263 } else { 11264 #ifdef SCTP_NAGLE_LOGGING 11265 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) 11266 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED); 11267 #endif 11268 SCTP_STAT_INCR(sctps_naglesent); 11269 nagle_applies = 0; 11270 } 11271 /* What about the INIT, send it maybe */ 11272 #ifdef SCTP_BLK_LOGGING 11273 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only, nagle_applies, un_sent); 11274 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size, stcb->asoc.total_flight, 11275 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count); 11276 #endif 11277 if (queue_only_for_init) { 11278 if (hold_tcblock == 0) { 11279 SCTP_TCB_LOCK(stcb); 11280 hold_tcblock = 1; 11281 } 11282 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) { 11283 /* a collision took us forward? */ 11284 queue_only_for_init = 0; 11285 queue_only = 0; 11286 } else { 11287 sctp_send_initiate(inp, stcb); 11288 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT; 11289 queue_only_for_init = 0; 11290 queue_only = 1; 11291 } 11292 } 11293 if ((queue_only == 0) && (nagle_applies == 0) 11294 ) { 11295 /* 11296 * need to start chunk output before 11297 * blocking.. note that if a lock is already 11298 * applied, then the input via the net is 11299 * happening and I don't need to start 11300 * output :-D 11301 */ 11302 if (hold_tcblock == 0) { 11303 if (SCTP_TCB_TRYLOCK(stcb)) { 11304 hold_tcblock = 1; 11305 sctp_chunk_output(inp, 11306 stcb, 11307 SCTP_OUTPUT_FROM_USR_SEND); 11308 11309 } 11310 } else { 11311 sctp_chunk_output(inp, 11312 stcb, 11313 SCTP_OUTPUT_FROM_USR_SEND); 11314 } 11315 if (hold_tcblock == 1) { 11316 SCTP_TCB_UNLOCK(stcb); 11317 hold_tcblock = 0; 11318 } 11319 } 11320 SOCKBUF_LOCK(&so->so_snd); 11321 /* 11322 * This is a bit strange, but I think it will work. 
11323 * The total_output_queue_size is locked and 11324 * protected by the TCB_LOCK, which we just 11325 * released. There is a race that can occur between 11326 * releasing it above, and me getting the socket 11327 * lock, where sacks come in but we have not put the 11328 * SB_WAIT on the so_snd buffer to get the wakeup. 11329 * After the LOCK is applied the sack_processing 11330 * will also need to LOCK the so->so_snd to do the 11331 * actual sowwakeup(). So once we have the socket 11332 * buffer lock if we recheck the size we KNOW we 11333 * will get to sleep safely with the wakeup flag in 11334 * place. 11335 */ 11336 if (SCTP_SB_LIMIT_SND(so) < (stcb->asoc.total_output_queue_size + sctp_add_more_threshold)) { 11337 #ifdef SCTP_BLK_LOGGING 11338 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK, 11339 so, asoc, uio->uio_resid); 11340 #endif 11341 be.error = 0; 11342 stcb->block_entry = &be; 11343 error = sbwait(&so->so_snd); 11344 stcb->block_entry = NULL; 11345 11346 if (error || so->so_error || be.error) { 11347 if (error == 0) { 11348 if (so->so_error) 11349 error = so->so_error; 11350 if (be.error) { 11351 error = be.error; 11352 } 11353 } 11354 SOCKBUF_UNLOCK(&so->so_snd); 11355 goto out_unlocked; 11356 } 11357 #ifdef SCTP_BLK_LOGGING 11358 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK, 11359 so, asoc, stcb->asoc.total_output_queue_size); 11360 #endif 11361 } 11362 SOCKBUF_UNLOCK(&so->so_snd); 11363 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 11364 goto out_unlocked; 11365 } 11366 } 11367 SCTP_TCB_SEND_LOCK(stcb); 11368 if (sp->msg_is_complete == 0) { 11369 strm->last_msg_incomplete = 1; 11370 asoc->stream_locked = 1; 11371 asoc->stream_locked_on = srcv->sinfo_stream; 11372 } else { 11373 strm->last_msg_incomplete = 0; 11374 asoc->stream_locked = 0; 11375 } 11376 SCTP_TCB_SEND_UNLOCK(stcb); 11377 if (uio->uio_resid == 0) { 11378 got_all_of_the_send = 1; 11379 } 11380 } else if (top) { 11381 /* We send in a 0, since we do NOT have any locks */ 11382 error = 
sctp_msg_append(stcb, net, top, srcv, 0); 11383 top = NULL; 11384 } 11385 if (error) { 11386 goto out; 11387 } 11388 dataless_eof: 11389 /* EOF thing ? */ 11390 if ((srcv->sinfo_flags & SCTP_EOF) && 11391 (got_all_of_the_send == 1) && 11392 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) 11393 ) { 11394 SCTP_STAT_INCR(sctps_sends_with_eof); 11395 error = 0; 11396 if (hold_tcblock == 0) { 11397 SCTP_TCB_LOCK(stcb); 11398 hold_tcblock = 1; 11399 } 11400 if (TAILQ_EMPTY(&asoc->send_queue) && 11401 TAILQ_EMPTY(&asoc->sent_queue) && 11402 (asoc->stream_queue_cnt == 0)) { 11403 if (asoc->locked_on_sending) { 11404 goto abort_anyway; 11405 } 11406 /* there is nothing queued to send, so I'm done... */ 11407 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 11408 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 11409 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 11410 /* only send SHUTDOWN the first time through */ 11411 sctp_send_shutdown(stcb, stcb->asoc.primary_destination); 11412 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 11413 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 11414 } 11415 asoc->state = SCTP_STATE_SHUTDOWN_SENT; 11416 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, 11417 asoc->primary_destination); 11418 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 11419 asoc->primary_destination); 11420 } 11421 } else { 11422 /* 11423 * we still got (or just got) data to send, so set 11424 * SHUTDOWN_PENDING 11425 */ 11426 /* 11427 * XXX sockets draft says that SCTP_EOF should be 11428 * sent with no data. 
currently, we will allow user 11429 * data to be sent first and move to 11430 * SHUTDOWN-PENDING 11431 */ 11432 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 11433 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 11434 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 11435 if (hold_tcblock == 0) { 11436 SCTP_TCB_LOCK(stcb); 11437 hold_tcblock = 1; 11438 } 11439 if (asoc->locked_on_sending) { 11440 /* Locked to send out the data */ 11441 struct sctp_stream_queue_pending *sp; 11442 11443 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); 11444 if (sp) { 11445 if ((sp->length == 0) && (sp->msg_is_complete == 0)) 11446 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 11447 } 11448 } 11449 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; 11450 if (TAILQ_EMPTY(&asoc->send_queue) && 11451 TAILQ_EMPTY(&asoc->sent_queue) && 11452 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 11453 abort_anyway: 11454 if (free_cnt_applied) { 11455 atomic_add_int(&stcb->asoc.refcnt, -1); 11456 free_cnt_applied = 0; 11457 } 11458 sctp_abort_an_association(stcb->sctp_ep, stcb, 11459 SCTP_RESPONSE_TO_USER_REQ, 11460 NULL); 11461 /* 11462 * now relock the stcb so everything 11463 * is sane 11464 */ 11465 hold_tcblock = 0; 11466 stcb = NULL; 11467 goto out; 11468 } 11469 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 11470 asoc->primary_destination); 11471 } 11472 } 11473 } 11474 skip_out_eof: 11475 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) { 11476 some_on_control = 1; 11477 } 11478 if ((net->flight_size > net->cwnd) && 11479 (sctp_cmt_on_off == 0)) { 11480 queue_only = 1; 11481 } else if (asoc->ifp_had_enobuf) { 11482 SCTP_STAT_INCR(sctps_ifnomemqueued); 11483 if (net->flight_size > (net->mtu * 2)) { 11484 queue_only = 1; 11485 } else { 11486 queue_only = 0; 11487 } 11488 asoc->ifp_had_enobuf = 0; 11489 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 11490 ((stcb->asoc.chunks_on_out_queue - 
 stcb->asoc.total_flight_count) *
		    sizeof(struct sctp_data_chunk)));
	} else {
		un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
		    ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) *
		    sizeof(struct sctp_data_chunk)));
		queue_only = 0;
	}
	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
	    (stcb->asoc.total_flight > 0) &&
	    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
	    ) {

		/*
		 * Ok, Nagle is set on and we have data outstanding. Don't
		 * send anything and let SACKs drive out the data unless wen
		 * have a "full" segment to send.
		 */
#ifdef SCTP_NAGLE_LOGGING
		sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
#endif
		SCTP_STAT_INCR(sctps_naglequeued);
		nagle_applies = 1;
	} else {
#ifdef SCTP_NAGLE_LOGGING
		if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
			sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
#endif
		SCTP_STAT_INCR(sctps_naglesent);
		nagle_applies = 0;
	}
	if (queue_only_for_init) {
		if (hold_tcblock == 0) {
			SCTP_TCB_LOCK(stcb);
			hold_tcblock = 1;
		}
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
			/* a collision took us forward? */
			queue_only_for_init = 0;
			queue_only = 0;
		} else {
			sctp_send_initiate(inp, stcb);
			/* Preserve a pending-shutdown substate across the INIT send. */
			if (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)
				stcb->asoc.state = SCTP_STATE_COOKIE_WAIT |
				    SCTP_STATE_SHUTDOWN_PENDING;
			else
				stcb->asoc.state = SCTP_STATE_COOKIE_WAIT;
			queue_only_for_init = 0;
			queue_only = 1;
		}
	}
	if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
		/* we can attempt to send too.
		 */
		if (hold_tcblock == 0) {
			/*
			 * If there is activity recv'ing sacks no need to
			 * send
			 */
			if (SCTP_TCB_TRYLOCK(stcb)) {
				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND);
				hold_tcblock = 1;
			}
		} else {
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND);
		}
	} else if ((queue_only == 0) &&
	    (stcb->asoc.peers_rwnd == 0) &&
	    (stcb->asoc.total_flight == 0)) {
		/* We get to have a probe outstanding */
		if (hold_tcblock == 0) {
			hold_tcblock = 1;
			SCTP_TCB_LOCK(stcb);
		}
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND);
	} else if (some_on_control) {
		int num_out, reason, cwnd_full, frag_point;

		/* Here we do control only */
		if (hold_tcblock == 0) {
			hold_tcblock = 1;
			SCTP_TCB_LOCK(stcb);
		}
		frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
		sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
		    &reason, 1, &cwnd_full, 1, &now, &now_filled, frag_point);
	}
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
		printf("USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d \n",
		    queue_only, stcb->asoc.peers_rwnd, un_sent,
		    stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
		    stcb->asoc.total_output_queue_size);
	}
#endif
	/* Common exit: release any lock/ref still held, free leftover mbufs. */
out:
out_unlocked:

	if (create_lock_applied) {
		SCTP_ASOC_CREATE_UNLOCK(inp);
		create_lock_applied = 0;
	}
	if ((stcb) && hold_tcblock) {
		SCTP_TCB_UNLOCK(stcb);
	}
	if (stcb && free_cnt_applied) {
		atomic_add_int(&stcb->asoc.refcnt, -1);
	}
#ifdef INVARIANTS
	if (stcb) {
		if (mtx_owned(&stcb->tcb_mtx)) {
			panic("Leaving with tcb mtx owned?");
		}
		if (mtx_owned(&stcb->tcb_send_mtx)) {
			panic("Leaving with tcb send mtx owned?");
		}
	}
#endif
	if (top) {

		sctp_m_freem(top);
	}
	if (control) {
		sctp_m_freem(control);
	}
	return (error);
}


/*
 * generate an AUTHentication chunk, if required
 *
 * Prepends/appends an AUTH chunk shell to mbuf chain 'm' when the peer
 * supports AUTH and 'chunk' is in the peer's required-chunk list.
 * On success: *auth_ret points at the chunk header, *offset is set to
 * the byte offset of the AUTH chunk within the resulting chain, and the
 * (possibly new) chain head is returned.  Returns 'm' unchanged when no
 * AUTH chunk is needed or an mbuf cannot be allocated.
 */
struct mbuf *
sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
    struct sctp_auth_chunk **auth_ret, uint32_t * offset,
    struct sctp_tcb *stcb, uint8_t chunk)
{
	struct mbuf *m_auth;
	struct sctp_auth_chunk *auth;
	int chunk_len;

	if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
	    (stcb == NULL))
		return (m);

	/* sysctl disabled auth? */
	if (sctp_auth_disable)
		return (m);

	/* peer doesn't do auth... */
	if (!stcb->asoc.peer_supports_auth) {
		return (m);
	}
	/* does the requested chunk require auth? */
	if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
		return (m);
	}
	m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_DONTWAIT, 1, MT_HEADER);
	if (m_auth == NULL) {
		/* no mbuf's */
		return (m);
	}
	/* reserve some space if this will be the first mbuf */
	if (m == NULL)
		SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
	/* fill in the AUTH chunk details */
	auth = mtod(m_auth, struct sctp_auth_chunk *);
	bzero(auth, sizeof(*auth));
	auth->ch.chunk_type = SCTP_AUTHENTICATION;
	auth->ch.chunk_flags = 0;
	/* chunk length covers the header plus the HMAC digest that follows it */
	chunk_len = sizeof(*auth) +
	    sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
	auth->ch.chunk_length = htons(chunk_len);
	auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
	/* key id and hmac digest will be computed and filled in upon send */

	/* save the offset where the auth was inserted into the chain */
	if (m != NULL) {
		struct mbuf *cn;

		*offset = 0;
		cn = m;
		while (cn) {
			*offset += SCTP_BUF_LEN(cn);
			cn = SCTP_BUF_NEXT(cn);
		}
	} else
		*offset = 0;

	/* update length and return pointer to the auth chunk */
	SCTP_BUF_LEN(m_auth) = chunk_len;
	m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
	/* redundant: auth_ret was already checked non-NULL at entry */
	if (auth_ret != NULL)
		*auth_ret = auth;

	return (m);
}