1 /*- 2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * a) Redistributions of source code must retain the above copyright notice, 8 * this list of conditions and the following disclaimer. 9 * 10 * b) Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the distribution. 13 * 14 * c) Neither the name of Cisco Systems, Inc. nor the names of its 15 * contributors may be used to endorse or promote products derived 16 * from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 
 */

/* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctputil.h>	/* XXX duplicate of the include above; harmless (include guards) */
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>



#define SCTP_MAX_GAPS_INARRAY 4
/*
 * Pre-computed SACK gap-ack description for one byte of the TSN mapping
 * array (see sack_array below for the indexing scheme).
 */
struct sack_track {
	uint8_t right_edge;	/* mergable on the right edge */
	uint8_t left_edge;	/* mergable on the left edge */
	uint8_t num_entries;	/* number of valid entries in gaps[] */
	uint8_t spare;		/* padding; not read by the visible code */
	struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
};

/*
 * Lookup table indexed by a single mapping-array byte.  Bit k (k = 0..7)
 * of the index being set means TSN offset k within that byte was
 * received.  Entry i lists every run of set bits in i as a {start, end}
 * offset pair in gaps[]; an 8-bit value can hold at most four runs,
 * hence SCTP_MAX_GAPS_INARRAY.  As the table data shows, right_edge is
 * set exactly when bit 0 is set and left_edge exactly when bit 7 is set,
 * i.e. when the first/last run may merge with a run in the adjacent
 * mapping-array byte.
 */
struct sack_track sack_array[256] = {
	{0, 0, 0, 0,		/* 0x00 */
		{{0, 0},
		{0, 0},
		{0, 0},
		{0, 0}
		}
	},
	{1, 0, 1, 0,		/* 0x01 */
		{{0, 0},
		{0, 0},
		{0, 0},
		{0, 0}
		}
	},
	{0, 0, 1, 0,		/* 0x02 */
		{{1, 1},
		{0, 0},
		{0, 0},
		{0, 0}
		}
	},
	{1, 0, 1, 0,		/* 0x03 */
		{{0, 1},
		{0, 0},
		{0, 0},
		{0, 0}
		}
	},
	{0, 0, 1, 0,		/* 0x04 */
		{{2, 2},
		{0, 0},
		{0, 0},
		{0, 0}
		}
	},
	{1, 0, 2, 0,		/* 0x05 */
		{{0, 0},
		{2, 2},
		{0, 0},
		{0, 0}
		}
	},
	{0, 0, 1, 0,		/* 0x06 */
		{{1, 2},
		{0, 0},
		{0, 0},
		{0, 0}
		}
	},
	{1, 0, 1, 0,		/* 0x07 */
		{{0, 2},
		{0, 0},
		{0, 0},
		{0, 0}
		}
	},
	{0, 0, 1, 0,		/* 0x08 */
		{{3, 3},
		{0, 0},
		{0, 0},
		{0, 0}
		}
	},
	{1, 0, 2, 0,		/* 0x09 */
		{{0, 0},
		{3, 3},
		{0, 0},
		{0, 0}
		}
	},
	{0, 0, 2, 0,		/* 0x0a */
		{{1, 1},
		{3, 3},
		{0, 0},
		{0, 0}
		}
	},
	{1, 0, 2, 0,		/* 0x0b */
		{{0, 1},
		{3, 3},
		{0, 0},
		{0, 0}
		}
147 }, 148 {0, 0, 1, 0, /* 0x0c */ 149 {{2, 3}, 150 {0, 0}, 151 {0, 0}, 152 {0, 0} 153 } 154 }, 155 {1, 0, 2, 0, /* 0x0d */ 156 {{0, 0}, 157 {2, 3}, 158 {0, 0}, 159 {0, 0} 160 } 161 }, 162 {0, 0, 1, 0, /* 0x0e */ 163 {{1, 3}, 164 {0, 0}, 165 {0, 0}, 166 {0, 0} 167 } 168 }, 169 {1, 0, 1, 0, /* 0x0f */ 170 {{0, 3}, 171 {0, 0}, 172 {0, 0}, 173 {0, 0} 174 } 175 }, 176 {0, 0, 1, 0, /* 0x10 */ 177 {{4, 4}, 178 {0, 0}, 179 {0, 0}, 180 {0, 0} 181 } 182 }, 183 {1, 0, 2, 0, /* 0x11 */ 184 {{0, 0}, 185 {4, 4}, 186 {0, 0}, 187 {0, 0} 188 } 189 }, 190 {0, 0, 2, 0, /* 0x12 */ 191 {{1, 1}, 192 {4, 4}, 193 {0, 0}, 194 {0, 0} 195 } 196 }, 197 {1, 0, 2, 0, /* 0x13 */ 198 {{0, 1}, 199 {4, 4}, 200 {0, 0}, 201 {0, 0} 202 } 203 }, 204 {0, 0, 2, 0, /* 0x14 */ 205 {{2, 2}, 206 {4, 4}, 207 {0, 0}, 208 {0, 0} 209 } 210 }, 211 {1, 0, 3, 0, /* 0x15 */ 212 {{0, 0}, 213 {2, 2}, 214 {4, 4}, 215 {0, 0} 216 } 217 }, 218 {0, 0, 2, 0, /* 0x16 */ 219 {{1, 2}, 220 {4, 4}, 221 {0, 0}, 222 {0, 0} 223 } 224 }, 225 {1, 0, 2, 0, /* 0x17 */ 226 {{0, 2}, 227 {4, 4}, 228 {0, 0}, 229 {0, 0} 230 } 231 }, 232 {0, 0, 1, 0, /* 0x18 */ 233 {{3, 4}, 234 {0, 0}, 235 {0, 0}, 236 {0, 0} 237 } 238 }, 239 {1, 0, 2, 0, /* 0x19 */ 240 {{0, 0}, 241 {3, 4}, 242 {0, 0}, 243 {0, 0} 244 } 245 }, 246 {0, 0, 2, 0, /* 0x1a */ 247 {{1, 1}, 248 {3, 4}, 249 {0, 0}, 250 {0, 0} 251 } 252 }, 253 {1, 0, 2, 0, /* 0x1b */ 254 {{0, 1}, 255 {3, 4}, 256 {0, 0}, 257 {0, 0} 258 } 259 }, 260 {0, 0, 1, 0, /* 0x1c */ 261 {{2, 4}, 262 {0, 0}, 263 {0, 0}, 264 {0, 0} 265 } 266 }, 267 {1, 0, 2, 0, /* 0x1d */ 268 {{0, 0}, 269 {2, 4}, 270 {0, 0}, 271 {0, 0} 272 } 273 }, 274 {0, 0, 1, 0, /* 0x1e */ 275 {{1, 4}, 276 {0, 0}, 277 {0, 0}, 278 {0, 0} 279 } 280 }, 281 {1, 0, 1, 0, /* 0x1f */ 282 {{0, 4}, 283 {0, 0}, 284 {0, 0}, 285 {0, 0} 286 } 287 }, 288 {0, 0, 1, 0, /* 0x20 */ 289 {{5, 5}, 290 {0, 0}, 291 {0, 0}, 292 {0, 0} 293 } 294 }, 295 {1, 0, 2, 0, /* 0x21 */ 296 {{0, 0}, 297 {5, 5}, 298 {0, 0}, 299 {0, 0} 300 } 301 }, 302 {0, 0, 2, 0, /* 0x22 */ 303 
{{1, 1}, 304 {5, 5}, 305 {0, 0}, 306 {0, 0} 307 } 308 }, 309 {1, 0, 2, 0, /* 0x23 */ 310 {{0, 1}, 311 {5, 5}, 312 {0, 0}, 313 {0, 0} 314 } 315 }, 316 {0, 0, 2, 0, /* 0x24 */ 317 {{2, 2}, 318 {5, 5}, 319 {0, 0}, 320 {0, 0} 321 } 322 }, 323 {1, 0, 3, 0, /* 0x25 */ 324 {{0, 0}, 325 {2, 2}, 326 {5, 5}, 327 {0, 0} 328 } 329 }, 330 {0, 0, 2, 0, /* 0x26 */ 331 {{1, 2}, 332 {5, 5}, 333 {0, 0}, 334 {0, 0} 335 } 336 }, 337 {1, 0, 2, 0, /* 0x27 */ 338 {{0, 2}, 339 {5, 5}, 340 {0, 0}, 341 {0, 0} 342 } 343 }, 344 {0, 0, 2, 0, /* 0x28 */ 345 {{3, 3}, 346 {5, 5}, 347 {0, 0}, 348 {0, 0} 349 } 350 }, 351 {1, 0, 3, 0, /* 0x29 */ 352 {{0, 0}, 353 {3, 3}, 354 {5, 5}, 355 {0, 0} 356 } 357 }, 358 {0, 0, 3, 0, /* 0x2a */ 359 {{1, 1}, 360 {3, 3}, 361 {5, 5}, 362 {0, 0} 363 } 364 }, 365 {1, 0, 3, 0, /* 0x2b */ 366 {{0, 1}, 367 {3, 3}, 368 {5, 5}, 369 {0, 0} 370 } 371 }, 372 {0, 0, 2, 0, /* 0x2c */ 373 {{2, 3}, 374 {5, 5}, 375 {0, 0}, 376 {0, 0} 377 } 378 }, 379 {1, 0, 3, 0, /* 0x2d */ 380 {{0, 0}, 381 {2, 3}, 382 {5, 5}, 383 {0, 0} 384 } 385 }, 386 {0, 0, 2, 0, /* 0x2e */ 387 {{1, 3}, 388 {5, 5}, 389 {0, 0}, 390 {0, 0} 391 } 392 }, 393 {1, 0, 2, 0, /* 0x2f */ 394 {{0, 3}, 395 {5, 5}, 396 {0, 0}, 397 {0, 0} 398 } 399 }, 400 {0, 0, 1, 0, /* 0x30 */ 401 {{4, 5}, 402 {0, 0}, 403 {0, 0}, 404 {0, 0} 405 } 406 }, 407 {1, 0, 2, 0, /* 0x31 */ 408 {{0, 0}, 409 {4, 5}, 410 {0, 0}, 411 {0, 0} 412 } 413 }, 414 {0, 0, 2, 0, /* 0x32 */ 415 {{1, 1}, 416 {4, 5}, 417 {0, 0}, 418 {0, 0} 419 } 420 }, 421 {1, 0, 2, 0, /* 0x33 */ 422 {{0, 1}, 423 {4, 5}, 424 {0, 0}, 425 {0, 0} 426 } 427 }, 428 {0, 0, 2, 0, /* 0x34 */ 429 {{2, 2}, 430 {4, 5}, 431 {0, 0}, 432 {0, 0} 433 } 434 }, 435 {1, 0, 3, 0, /* 0x35 */ 436 {{0, 0}, 437 {2, 2}, 438 {4, 5}, 439 {0, 0} 440 } 441 }, 442 {0, 0, 2, 0, /* 0x36 */ 443 {{1, 2}, 444 {4, 5}, 445 {0, 0}, 446 {0, 0} 447 } 448 }, 449 {1, 0, 2, 0, /* 0x37 */ 450 {{0, 2}, 451 {4, 5}, 452 {0, 0}, 453 {0, 0} 454 } 455 }, 456 {0, 0, 1, 0, /* 0x38 */ 457 {{3, 5}, 458 {0, 0}, 459 {0, 0}, 460 {0, 
0} 461 } 462 }, 463 {1, 0, 2, 0, /* 0x39 */ 464 {{0, 0}, 465 {3, 5}, 466 {0, 0}, 467 {0, 0} 468 } 469 }, 470 {0, 0, 2, 0, /* 0x3a */ 471 {{1, 1}, 472 {3, 5}, 473 {0, 0}, 474 {0, 0} 475 } 476 }, 477 {1, 0, 2, 0, /* 0x3b */ 478 {{0, 1}, 479 {3, 5}, 480 {0, 0}, 481 {0, 0} 482 } 483 }, 484 {0, 0, 1, 0, /* 0x3c */ 485 {{2, 5}, 486 {0, 0}, 487 {0, 0}, 488 {0, 0} 489 } 490 }, 491 {1, 0, 2, 0, /* 0x3d */ 492 {{0, 0}, 493 {2, 5}, 494 {0, 0}, 495 {0, 0} 496 } 497 }, 498 {0, 0, 1, 0, /* 0x3e */ 499 {{1, 5}, 500 {0, 0}, 501 {0, 0}, 502 {0, 0} 503 } 504 }, 505 {1, 0, 1, 0, /* 0x3f */ 506 {{0, 5}, 507 {0, 0}, 508 {0, 0}, 509 {0, 0} 510 } 511 }, 512 {0, 0, 1, 0, /* 0x40 */ 513 {{6, 6}, 514 {0, 0}, 515 {0, 0}, 516 {0, 0} 517 } 518 }, 519 {1, 0, 2, 0, /* 0x41 */ 520 {{0, 0}, 521 {6, 6}, 522 {0, 0}, 523 {0, 0} 524 } 525 }, 526 {0, 0, 2, 0, /* 0x42 */ 527 {{1, 1}, 528 {6, 6}, 529 {0, 0}, 530 {0, 0} 531 } 532 }, 533 {1, 0, 2, 0, /* 0x43 */ 534 {{0, 1}, 535 {6, 6}, 536 {0, 0}, 537 {0, 0} 538 } 539 }, 540 {0, 0, 2, 0, /* 0x44 */ 541 {{2, 2}, 542 {6, 6}, 543 {0, 0}, 544 {0, 0} 545 } 546 }, 547 {1, 0, 3, 0, /* 0x45 */ 548 {{0, 0}, 549 {2, 2}, 550 {6, 6}, 551 {0, 0} 552 } 553 }, 554 {0, 0, 2, 0, /* 0x46 */ 555 {{1, 2}, 556 {6, 6}, 557 {0, 0}, 558 {0, 0} 559 } 560 }, 561 {1, 0, 2, 0, /* 0x47 */ 562 {{0, 2}, 563 {6, 6}, 564 {0, 0}, 565 {0, 0} 566 } 567 }, 568 {0, 0, 2, 0, /* 0x48 */ 569 {{3, 3}, 570 {6, 6}, 571 {0, 0}, 572 {0, 0} 573 } 574 }, 575 {1, 0, 3, 0, /* 0x49 */ 576 {{0, 0}, 577 {3, 3}, 578 {6, 6}, 579 {0, 0} 580 } 581 }, 582 {0, 0, 3, 0, /* 0x4a */ 583 {{1, 1}, 584 {3, 3}, 585 {6, 6}, 586 {0, 0} 587 } 588 }, 589 {1, 0, 3, 0, /* 0x4b */ 590 {{0, 1}, 591 {3, 3}, 592 {6, 6}, 593 {0, 0} 594 } 595 }, 596 {0, 0, 2, 0, /* 0x4c */ 597 {{2, 3}, 598 {6, 6}, 599 {0, 0}, 600 {0, 0} 601 } 602 }, 603 {1, 0, 3, 0, /* 0x4d */ 604 {{0, 0}, 605 {2, 3}, 606 {6, 6}, 607 {0, 0} 608 } 609 }, 610 {0, 0, 2, 0, /* 0x4e */ 611 {{1, 3}, 612 {6, 6}, 613 {0, 0}, 614 {0, 0} 615 } 616 }, 617 {1, 0, 2, 0, /* 0x4f 
*/ 618 {{0, 3}, 619 {6, 6}, 620 {0, 0}, 621 {0, 0} 622 } 623 }, 624 {0, 0, 2, 0, /* 0x50 */ 625 {{4, 4}, 626 {6, 6}, 627 {0, 0}, 628 {0, 0} 629 } 630 }, 631 {1, 0, 3, 0, /* 0x51 */ 632 {{0, 0}, 633 {4, 4}, 634 {6, 6}, 635 {0, 0} 636 } 637 }, 638 {0, 0, 3, 0, /* 0x52 */ 639 {{1, 1}, 640 {4, 4}, 641 {6, 6}, 642 {0, 0} 643 } 644 }, 645 {1, 0, 3, 0, /* 0x53 */ 646 {{0, 1}, 647 {4, 4}, 648 {6, 6}, 649 {0, 0} 650 } 651 }, 652 {0, 0, 3, 0, /* 0x54 */ 653 {{2, 2}, 654 {4, 4}, 655 {6, 6}, 656 {0, 0} 657 } 658 }, 659 {1, 0, 4, 0, /* 0x55 */ 660 {{0, 0}, 661 {2, 2}, 662 {4, 4}, 663 {6, 6} 664 } 665 }, 666 {0, 0, 3, 0, /* 0x56 */ 667 {{1, 2}, 668 {4, 4}, 669 {6, 6}, 670 {0, 0} 671 } 672 }, 673 {1, 0, 3, 0, /* 0x57 */ 674 {{0, 2}, 675 {4, 4}, 676 {6, 6}, 677 {0, 0} 678 } 679 }, 680 {0, 0, 2, 0, /* 0x58 */ 681 {{3, 4}, 682 {6, 6}, 683 {0, 0}, 684 {0, 0} 685 } 686 }, 687 {1, 0, 3, 0, /* 0x59 */ 688 {{0, 0}, 689 {3, 4}, 690 {6, 6}, 691 {0, 0} 692 } 693 }, 694 {0, 0, 3, 0, /* 0x5a */ 695 {{1, 1}, 696 {3, 4}, 697 {6, 6}, 698 {0, 0} 699 } 700 }, 701 {1, 0, 3, 0, /* 0x5b */ 702 {{0, 1}, 703 {3, 4}, 704 {6, 6}, 705 {0, 0} 706 } 707 }, 708 {0, 0, 2, 0, /* 0x5c */ 709 {{2, 4}, 710 {6, 6}, 711 {0, 0}, 712 {0, 0} 713 } 714 }, 715 {1, 0, 3, 0, /* 0x5d */ 716 {{0, 0}, 717 {2, 4}, 718 {6, 6}, 719 {0, 0} 720 } 721 }, 722 {0, 0, 2, 0, /* 0x5e */ 723 {{1, 4}, 724 {6, 6}, 725 {0, 0}, 726 {0, 0} 727 } 728 }, 729 {1, 0, 2, 0, /* 0x5f */ 730 {{0, 4}, 731 {6, 6}, 732 {0, 0}, 733 {0, 0} 734 } 735 }, 736 {0, 0, 1, 0, /* 0x60 */ 737 {{5, 6}, 738 {0, 0}, 739 {0, 0}, 740 {0, 0} 741 } 742 }, 743 {1, 0, 2, 0, /* 0x61 */ 744 {{0, 0}, 745 {5, 6}, 746 {0, 0}, 747 {0, 0} 748 } 749 }, 750 {0, 0, 2, 0, /* 0x62 */ 751 {{1, 1}, 752 {5, 6}, 753 {0, 0}, 754 {0, 0} 755 } 756 }, 757 {1, 0, 2, 0, /* 0x63 */ 758 {{0, 1}, 759 {5, 6}, 760 {0, 0}, 761 {0, 0} 762 } 763 }, 764 {0, 0, 2, 0, /* 0x64 */ 765 {{2, 2}, 766 {5, 6}, 767 {0, 0}, 768 {0, 0} 769 } 770 }, 771 {1, 0, 3, 0, /* 0x65 */ 772 {{0, 0}, 773 {2, 2}, 774 {5, 6}, 
775 {0, 0} 776 } 777 }, 778 {0, 0, 2, 0, /* 0x66 */ 779 {{1, 2}, 780 {5, 6}, 781 {0, 0}, 782 {0, 0} 783 } 784 }, 785 {1, 0, 2, 0, /* 0x67 */ 786 {{0, 2}, 787 {5, 6}, 788 {0, 0}, 789 {0, 0} 790 } 791 }, 792 {0, 0, 2, 0, /* 0x68 */ 793 {{3, 3}, 794 {5, 6}, 795 {0, 0}, 796 {0, 0} 797 } 798 }, 799 {1, 0, 3, 0, /* 0x69 */ 800 {{0, 0}, 801 {3, 3}, 802 {5, 6}, 803 {0, 0} 804 } 805 }, 806 {0, 0, 3, 0, /* 0x6a */ 807 {{1, 1}, 808 {3, 3}, 809 {5, 6}, 810 {0, 0} 811 } 812 }, 813 {1, 0, 3, 0, /* 0x6b */ 814 {{0, 1}, 815 {3, 3}, 816 {5, 6}, 817 {0, 0} 818 } 819 }, 820 {0, 0, 2, 0, /* 0x6c */ 821 {{2, 3}, 822 {5, 6}, 823 {0, 0}, 824 {0, 0} 825 } 826 }, 827 {1, 0, 3, 0, /* 0x6d */ 828 {{0, 0}, 829 {2, 3}, 830 {5, 6}, 831 {0, 0} 832 } 833 }, 834 {0, 0, 2, 0, /* 0x6e */ 835 {{1, 3}, 836 {5, 6}, 837 {0, 0}, 838 {0, 0} 839 } 840 }, 841 {1, 0, 2, 0, /* 0x6f */ 842 {{0, 3}, 843 {5, 6}, 844 {0, 0}, 845 {0, 0} 846 } 847 }, 848 {0, 0, 1, 0, /* 0x70 */ 849 {{4, 6}, 850 {0, 0}, 851 {0, 0}, 852 {0, 0} 853 } 854 }, 855 {1, 0, 2, 0, /* 0x71 */ 856 {{0, 0}, 857 {4, 6}, 858 {0, 0}, 859 {0, 0} 860 } 861 }, 862 {0, 0, 2, 0, /* 0x72 */ 863 {{1, 1}, 864 {4, 6}, 865 {0, 0}, 866 {0, 0} 867 } 868 }, 869 {1, 0, 2, 0, /* 0x73 */ 870 {{0, 1}, 871 {4, 6}, 872 {0, 0}, 873 {0, 0} 874 } 875 }, 876 {0, 0, 2, 0, /* 0x74 */ 877 {{2, 2}, 878 {4, 6}, 879 {0, 0}, 880 {0, 0} 881 } 882 }, 883 {1, 0, 3, 0, /* 0x75 */ 884 {{0, 0}, 885 {2, 2}, 886 {4, 6}, 887 {0, 0} 888 } 889 }, 890 {0, 0, 2, 0, /* 0x76 */ 891 {{1, 2}, 892 {4, 6}, 893 {0, 0}, 894 {0, 0} 895 } 896 }, 897 {1, 0, 2, 0, /* 0x77 */ 898 {{0, 2}, 899 {4, 6}, 900 {0, 0}, 901 {0, 0} 902 } 903 }, 904 {0, 0, 1, 0, /* 0x78 */ 905 {{3, 6}, 906 {0, 0}, 907 {0, 0}, 908 {0, 0} 909 } 910 }, 911 {1, 0, 2, 0, /* 0x79 */ 912 {{0, 0}, 913 {3, 6}, 914 {0, 0}, 915 {0, 0} 916 } 917 }, 918 {0, 0, 2, 0, /* 0x7a */ 919 {{1, 1}, 920 {3, 6}, 921 {0, 0}, 922 {0, 0} 923 } 924 }, 925 {1, 0, 2, 0, /* 0x7b */ 926 {{0, 1}, 927 {3, 6}, 928 {0, 0}, 929 {0, 0} 930 } 931 }, 932 {0, 0, 1, 0, 
/* 0x7c */ 933 {{2, 6}, 934 {0, 0}, 935 {0, 0}, 936 {0, 0} 937 } 938 }, 939 {1, 0, 2, 0, /* 0x7d */ 940 {{0, 0}, 941 {2, 6}, 942 {0, 0}, 943 {0, 0} 944 } 945 }, 946 {0, 0, 1, 0, /* 0x7e */ 947 {{1, 6}, 948 {0, 0}, 949 {0, 0}, 950 {0, 0} 951 } 952 }, 953 {1, 0, 1, 0, /* 0x7f */ 954 {{0, 6}, 955 {0, 0}, 956 {0, 0}, 957 {0, 0} 958 } 959 }, 960 {0, 1, 1, 0, /* 0x80 */ 961 {{7, 7}, 962 {0, 0}, 963 {0, 0}, 964 {0, 0} 965 } 966 }, 967 {1, 1, 2, 0, /* 0x81 */ 968 {{0, 0}, 969 {7, 7}, 970 {0, 0}, 971 {0, 0} 972 } 973 }, 974 {0, 1, 2, 0, /* 0x82 */ 975 {{1, 1}, 976 {7, 7}, 977 {0, 0}, 978 {0, 0} 979 } 980 }, 981 {1, 1, 2, 0, /* 0x83 */ 982 {{0, 1}, 983 {7, 7}, 984 {0, 0}, 985 {0, 0} 986 } 987 }, 988 {0, 1, 2, 0, /* 0x84 */ 989 {{2, 2}, 990 {7, 7}, 991 {0, 0}, 992 {0, 0} 993 } 994 }, 995 {1, 1, 3, 0, /* 0x85 */ 996 {{0, 0}, 997 {2, 2}, 998 {7, 7}, 999 {0, 0} 1000 } 1001 }, 1002 {0, 1, 2, 0, /* 0x86 */ 1003 {{1, 2}, 1004 {7, 7}, 1005 {0, 0}, 1006 {0, 0} 1007 } 1008 }, 1009 {1, 1, 2, 0, /* 0x87 */ 1010 {{0, 2}, 1011 {7, 7}, 1012 {0, 0}, 1013 {0, 0} 1014 } 1015 }, 1016 {0, 1, 2, 0, /* 0x88 */ 1017 {{3, 3}, 1018 {7, 7}, 1019 {0, 0}, 1020 {0, 0} 1021 } 1022 }, 1023 {1, 1, 3, 0, /* 0x89 */ 1024 {{0, 0}, 1025 {3, 3}, 1026 {7, 7}, 1027 {0, 0} 1028 } 1029 }, 1030 {0, 1, 3, 0, /* 0x8a */ 1031 {{1, 1}, 1032 {3, 3}, 1033 {7, 7}, 1034 {0, 0} 1035 } 1036 }, 1037 {1, 1, 3, 0, /* 0x8b */ 1038 {{0, 1}, 1039 {3, 3}, 1040 {7, 7}, 1041 {0, 0} 1042 } 1043 }, 1044 {0, 1, 2, 0, /* 0x8c */ 1045 {{2, 3}, 1046 {7, 7}, 1047 {0, 0}, 1048 {0, 0} 1049 } 1050 }, 1051 {1, 1, 3, 0, /* 0x8d */ 1052 {{0, 0}, 1053 {2, 3}, 1054 {7, 7}, 1055 {0, 0} 1056 } 1057 }, 1058 {0, 1, 2, 0, /* 0x8e */ 1059 {{1, 3}, 1060 {7, 7}, 1061 {0, 0}, 1062 {0, 0} 1063 } 1064 }, 1065 {1, 1, 2, 0, /* 0x8f */ 1066 {{0, 3}, 1067 {7, 7}, 1068 {0, 0}, 1069 {0, 0} 1070 } 1071 }, 1072 {0, 1, 2, 0, /* 0x90 */ 1073 {{4, 4}, 1074 {7, 7}, 1075 {0, 0}, 1076 {0, 0} 1077 } 1078 }, 1079 {1, 1, 3, 0, /* 0x91 */ 1080 {{0, 0}, 1081 {4, 4}, 1082 {7, 7}, 
1083 {0, 0} 1084 } 1085 }, 1086 {0, 1, 3, 0, /* 0x92 */ 1087 {{1, 1}, 1088 {4, 4}, 1089 {7, 7}, 1090 {0, 0} 1091 } 1092 }, 1093 {1, 1, 3, 0, /* 0x93 */ 1094 {{0, 1}, 1095 {4, 4}, 1096 {7, 7}, 1097 {0, 0} 1098 } 1099 }, 1100 {0, 1, 3, 0, /* 0x94 */ 1101 {{2, 2}, 1102 {4, 4}, 1103 {7, 7}, 1104 {0, 0} 1105 } 1106 }, 1107 {1, 1, 4, 0, /* 0x95 */ 1108 {{0, 0}, 1109 {2, 2}, 1110 {4, 4}, 1111 {7, 7} 1112 } 1113 }, 1114 {0, 1, 3, 0, /* 0x96 */ 1115 {{1, 2}, 1116 {4, 4}, 1117 {7, 7}, 1118 {0, 0} 1119 } 1120 }, 1121 {1, 1, 3, 0, /* 0x97 */ 1122 {{0, 2}, 1123 {4, 4}, 1124 {7, 7}, 1125 {0, 0} 1126 } 1127 }, 1128 {0, 1, 2, 0, /* 0x98 */ 1129 {{3, 4}, 1130 {7, 7}, 1131 {0, 0}, 1132 {0, 0} 1133 } 1134 }, 1135 {1, 1, 3, 0, /* 0x99 */ 1136 {{0, 0}, 1137 {3, 4}, 1138 {7, 7}, 1139 {0, 0} 1140 } 1141 }, 1142 {0, 1, 3, 0, /* 0x9a */ 1143 {{1, 1}, 1144 {3, 4}, 1145 {7, 7}, 1146 {0, 0} 1147 } 1148 }, 1149 {1, 1, 3, 0, /* 0x9b */ 1150 {{0, 1}, 1151 {3, 4}, 1152 {7, 7}, 1153 {0, 0} 1154 } 1155 }, 1156 {0, 1, 2, 0, /* 0x9c */ 1157 {{2, 4}, 1158 {7, 7}, 1159 {0, 0}, 1160 {0, 0} 1161 } 1162 }, 1163 {1, 1, 3, 0, /* 0x9d */ 1164 {{0, 0}, 1165 {2, 4}, 1166 {7, 7}, 1167 {0, 0} 1168 } 1169 }, 1170 {0, 1, 2, 0, /* 0x9e */ 1171 {{1, 4}, 1172 {7, 7}, 1173 {0, 0}, 1174 {0, 0} 1175 } 1176 }, 1177 {1, 1, 2, 0, /* 0x9f */ 1178 {{0, 4}, 1179 {7, 7}, 1180 {0, 0}, 1181 {0, 0} 1182 } 1183 }, 1184 {0, 1, 2, 0, /* 0xa0 */ 1185 {{5, 5}, 1186 {7, 7}, 1187 {0, 0}, 1188 {0, 0} 1189 } 1190 }, 1191 {1, 1, 3, 0, /* 0xa1 */ 1192 {{0, 0}, 1193 {5, 5}, 1194 {7, 7}, 1195 {0, 0} 1196 } 1197 }, 1198 {0, 1, 3, 0, /* 0xa2 */ 1199 {{1, 1}, 1200 {5, 5}, 1201 {7, 7}, 1202 {0, 0} 1203 } 1204 }, 1205 {1, 1, 3, 0, /* 0xa3 */ 1206 {{0, 1}, 1207 {5, 5}, 1208 {7, 7}, 1209 {0, 0} 1210 } 1211 }, 1212 {0, 1, 3, 0, /* 0xa4 */ 1213 {{2, 2}, 1214 {5, 5}, 1215 {7, 7}, 1216 {0, 0} 1217 } 1218 }, 1219 {1, 1, 4, 0, /* 0xa5 */ 1220 {{0, 0}, 1221 {2, 2}, 1222 {5, 5}, 1223 {7, 7} 1224 } 1225 }, 1226 {0, 1, 3, 0, /* 0xa6 */ 1227 {{1, 2}, 1228 {5, 
5}, 1229 {7, 7}, 1230 {0, 0} 1231 } 1232 }, 1233 {1, 1, 3, 0, /* 0xa7 */ 1234 {{0, 2}, 1235 {5, 5}, 1236 {7, 7}, 1237 {0, 0} 1238 } 1239 }, 1240 {0, 1, 3, 0, /* 0xa8 */ 1241 {{3, 3}, 1242 {5, 5}, 1243 {7, 7}, 1244 {0, 0} 1245 } 1246 }, 1247 {1, 1, 4, 0, /* 0xa9 */ 1248 {{0, 0}, 1249 {3, 3}, 1250 {5, 5}, 1251 {7, 7} 1252 } 1253 }, 1254 {0, 1, 4, 0, /* 0xaa */ 1255 {{1, 1}, 1256 {3, 3}, 1257 {5, 5}, 1258 {7, 7} 1259 } 1260 }, 1261 {1, 1, 4, 0, /* 0xab */ 1262 {{0, 1}, 1263 {3, 3}, 1264 {5, 5}, 1265 {7, 7} 1266 } 1267 }, 1268 {0, 1, 3, 0, /* 0xac */ 1269 {{2, 3}, 1270 {5, 5}, 1271 {7, 7}, 1272 {0, 0} 1273 } 1274 }, 1275 {1, 1, 4, 0, /* 0xad */ 1276 {{0, 0}, 1277 {2, 3}, 1278 {5, 5}, 1279 {7, 7} 1280 } 1281 }, 1282 {0, 1, 3, 0, /* 0xae */ 1283 {{1, 3}, 1284 {5, 5}, 1285 {7, 7}, 1286 {0, 0} 1287 } 1288 }, 1289 {1, 1, 3, 0, /* 0xaf */ 1290 {{0, 3}, 1291 {5, 5}, 1292 {7, 7}, 1293 {0, 0} 1294 } 1295 }, 1296 {0, 1, 2, 0, /* 0xb0 */ 1297 {{4, 5}, 1298 {7, 7}, 1299 {0, 0}, 1300 {0, 0} 1301 } 1302 }, 1303 {1, 1, 3, 0, /* 0xb1 */ 1304 {{0, 0}, 1305 {4, 5}, 1306 {7, 7}, 1307 {0, 0} 1308 } 1309 }, 1310 {0, 1, 3, 0, /* 0xb2 */ 1311 {{1, 1}, 1312 {4, 5}, 1313 {7, 7}, 1314 {0, 0} 1315 } 1316 }, 1317 {1, 1, 3, 0, /* 0xb3 */ 1318 {{0, 1}, 1319 {4, 5}, 1320 {7, 7}, 1321 {0, 0} 1322 } 1323 }, 1324 {0, 1, 3, 0, /* 0xb4 */ 1325 {{2, 2}, 1326 {4, 5}, 1327 {7, 7}, 1328 {0, 0} 1329 } 1330 }, 1331 {1, 1, 4, 0, /* 0xb5 */ 1332 {{0, 0}, 1333 {2, 2}, 1334 {4, 5}, 1335 {7, 7} 1336 } 1337 }, 1338 {0, 1, 3, 0, /* 0xb6 */ 1339 {{1, 2}, 1340 {4, 5}, 1341 {7, 7}, 1342 {0, 0} 1343 } 1344 }, 1345 {1, 1, 3, 0, /* 0xb7 */ 1346 {{0, 2}, 1347 {4, 5}, 1348 {7, 7}, 1349 {0, 0} 1350 } 1351 }, 1352 {0, 1, 2, 0, /* 0xb8 */ 1353 {{3, 5}, 1354 {7, 7}, 1355 {0, 0}, 1356 {0, 0} 1357 } 1358 }, 1359 {1, 1, 3, 0, /* 0xb9 */ 1360 {{0, 0}, 1361 {3, 5}, 1362 {7, 7}, 1363 {0, 0} 1364 } 1365 }, 1366 {0, 1, 3, 0, /* 0xba */ 1367 {{1, 1}, 1368 {3, 5}, 1369 {7, 7}, 1370 {0, 0} 1371 } 1372 }, 1373 {1, 1, 3, 0, /* 0xbb */ 1374 
{{0, 1}, 1375 {3, 5}, 1376 {7, 7}, 1377 {0, 0} 1378 } 1379 }, 1380 {0, 1, 2, 0, /* 0xbc */ 1381 {{2, 5}, 1382 {7, 7}, 1383 {0, 0}, 1384 {0, 0} 1385 } 1386 }, 1387 {1, 1, 3, 0, /* 0xbd */ 1388 {{0, 0}, 1389 {2, 5}, 1390 {7, 7}, 1391 {0, 0} 1392 } 1393 }, 1394 {0, 1, 2, 0, /* 0xbe */ 1395 {{1, 5}, 1396 {7, 7}, 1397 {0, 0}, 1398 {0, 0} 1399 } 1400 }, 1401 {1, 1, 2, 0, /* 0xbf */ 1402 {{0, 5}, 1403 {7, 7}, 1404 {0, 0}, 1405 {0, 0} 1406 } 1407 }, 1408 {0, 1, 1, 0, /* 0xc0 */ 1409 {{6, 7}, 1410 {0, 0}, 1411 {0, 0}, 1412 {0, 0} 1413 } 1414 }, 1415 {1, 1, 2, 0, /* 0xc1 */ 1416 {{0, 0}, 1417 {6, 7}, 1418 {0, 0}, 1419 {0, 0} 1420 } 1421 }, 1422 {0, 1, 2, 0, /* 0xc2 */ 1423 {{1, 1}, 1424 {6, 7}, 1425 {0, 0}, 1426 {0, 0} 1427 } 1428 }, 1429 {1, 1, 2, 0, /* 0xc3 */ 1430 {{0, 1}, 1431 {6, 7}, 1432 {0, 0}, 1433 {0, 0} 1434 } 1435 }, 1436 {0, 1, 2, 0, /* 0xc4 */ 1437 {{2, 2}, 1438 {6, 7}, 1439 {0, 0}, 1440 {0, 0} 1441 } 1442 }, 1443 {1, 1, 3, 0, /* 0xc5 */ 1444 {{0, 0}, 1445 {2, 2}, 1446 {6, 7}, 1447 {0, 0} 1448 } 1449 }, 1450 {0, 1, 2, 0, /* 0xc6 */ 1451 {{1, 2}, 1452 {6, 7}, 1453 {0, 0}, 1454 {0, 0} 1455 } 1456 }, 1457 {1, 1, 2, 0, /* 0xc7 */ 1458 {{0, 2}, 1459 {6, 7}, 1460 {0, 0}, 1461 {0, 0} 1462 } 1463 }, 1464 {0, 1, 2, 0, /* 0xc8 */ 1465 {{3, 3}, 1466 {6, 7}, 1467 {0, 0}, 1468 {0, 0} 1469 } 1470 }, 1471 {1, 1, 3, 0, /* 0xc9 */ 1472 {{0, 0}, 1473 {3, 3}, 1474 {6, 7}, 1475 {0, 0} 1476 } 1477 }, 1478 {0, 1, 3, 0, /* 0xca */ 1479 {{1, 1}, 1480 {3, 3}, 1481 {6, 7}, 1482 {0, 0} 1483 } 1484 }, 1485 {1, 1, 3, 0, /* 0xcb */ 1486 {{0, 1}, 1487 {3, 3}, 1488 {6, 7}, 1489 {0, 0} 1490 } 1491 }, 1492 {0, 1, 2, 0, /* 0xcc */ 1493 {{2, 3}, 1494 {6, 7}, 1495 {0, 0}, 1496 {0, 0} 1497 } 1498 }, 1499 {1, 1, 3, 0, /* 0xcd */ 1500 {{0, 0}, 1501 {2, 3}, 1502 {6, 7}, 1503 {0, 0} 1504 } 1505 }, 1506 {0, 1, 2, 0, /* 0xce */ 1507 {{1, 3}, 1508 {6, 7}, 1509 {0, 0}, 1510 {0, 0} 1511 } 1512 }, 1513 {1, 1, 2, 0, /* 0xcf */ 1514 {{0, 3}, 1515 {6, 7}, 1516 {0, 0}, 1517 {0, 0} 1518 } 1519 }, 1520 {0, 1, 2, 0, 
/* 0xd0 */ 1521 {{4, 4}, 1522 {6, 7}, 1523 {0, 0}, 1524 {0, 0} 1525 } 1526 }, 1527 {1, 1, 3, 0, /* 0xd1 */ 1528 {{0, 0}, 1529 {4, 4}, 1530 {6, 7}, 1531 {0, 0} 1532 } 1533 }, 1534 {0, 1, 3, 0, /* 0xd2 */ 1535 {{1, 1}, 1536 {4, 4}, 1537 {6, 7}, 1538 {0, 0} 1539 } 1540 }, 1541 {1, 1, 3, 0, /* 0xd3 */ 1542 {{0, 1}, 1543 {4, 4}, 1544 {6, 7}, 1545 {0, 0} 1546 } 1547 }, 1548 {0, 1, 3, 0, /* 0xd4 */ 1549 {{2, 2}, 1550 {4, 4}, 1551 {6, 7}, 1552 {0, 0} 1553 } 1554 }, 1555 {1, 1, 4, 0, /* 0xd5 */ 1556 {{0, 0}, 1557 {2, 2}, 1558 {4, 4}, 1559 {6, 7} 1560 } 1561 }, 1562 {0, 1, 3, 0, /* 0xd6 */ 1563 {{1, 2}, 1564 {4, 4}, 1565 {6, 7}, 1566 {0, 0} 1567 } 1568 }, 1569 {1, 1, 3, 0, /* 0xd7 */ 1570 {{0, 2}, 1571 {4, 4}, 1572 {6, 7}, 1573 {0, 0} 1574 } 1575 }, 1576 {0, 1, 2, 0, /* 0xd8 */ 1577 {{3, 4}, 1578 {6, 7}, 1579 {0, 0}, 1580 {0, 0} 1581 } 1582 }, 1583 {1, 1, 3, 0, /* 0xd9 */ 1584 {{0, 0}, 1585 {3, 4}, 1586 {6, 7}, 1587 {0, 0} 1588 } 1589 }, 1590 {0, 1, 3, 0, /* 0xda */ 1591 {{1, 1}, 1592 {3, 4}, 1593 {6, 7}, 1594 {0, 0} 1595 } 1596 }, 1597 {1, 1, 3, 0, /* 0xdb */ 1598 {{0, 1}, 1599 {3, 4}, 1600 {6, 7}, 1601 {0, 0} 1602 } 1603 }, 1604 {0, 1, 2, 0, /* 0xdc */ 1605 {{2, 4}, 1606 {6, 7}, 1607 {0, 0}, 1608 {0, 0} 1609 } 1610 }, 1611 {1, 1, 3, 0, /* 0xdd */ 1612 {{0, 0}, 1613 {2, 4}, 1614 {6, 7}, 1615 {0, 0} 1616 } 1617 }, 1618 {0, 1, 2, 0, /* 0xde */ 1619 {{1, 4}, 1620 {6, 7}, 1621 {0, 0}, 1622 {0, 0} 1623 } 1624 }, 1625 {1, 1, 2, 0, /* 0xdf */ 1626 {{0, 4}, 1627 {6, 7}, 1628 {0, 0}, 1629 {0, 0} 1630 } 1631 }, 1632 {0, 1, 1, 0, /* 0xe0 */ 1633 {{5, 7}, 1634 {0, 0}, 1635 {0, 0}, 1636 {0, 0} 1637 } 1638 }, 1639 {1, 1, 2, 0, /* 0xe1 */ 1640 {{0, 0}, 1641 {5, 7}, 1642 {0, 0}, 1643 {0, 0} 1644 } 1645 }, 1646 {0, 1, 2, 0, /* 0xe2 */ 1647 {{1, 1}, 1648 {5, 7}, 1649 {0, 0}, 1650 {0, 0} 1651 } 1652 }, 1653 {1, 1, 2, 0, /* 0xe3 */ 1654 {{0, 1}, 1655 {5, 7}, 1656 {0, 0}, 1657 {0, 0} 1658 } 1659 }, 1660 {0, 1, 2, 0, /* 0xe4 */ 1661 {{2, 2}, 1662 {5, 7}, 1663 {0, 0}, 1664 {0, 0} 1665 } 1666 }, 
1667 {1, 1, 3, 0, /* 0xe5 */ 1668 {{0, 0}, 1669 {2, 2}, 1670 {5, 7}, 1671 {0, 0} 1672 } 1673 }, 1674 {0, 1, 2, 0, /* 0xe6 */ 1675 {{1, 2}, 1676 {5, 7}, 1677 {0, 0}, 1678 {0, 0} 1679 } 1680 }, 1681 {1, 1, 2, 0, /* 0xe7 */ 1682 {{0, 2}, 1683 {5, 7}, 1684 {0, 0}, 1685 {0, 0} 1686 } 1687 }, 1688 {0, 1, 2, 0, /* 0xe8 */ 1689 {{3, 3}, 1690 {5, 7}, 1691 {0, 0}, 1692 {0, 0} 1693 } 1694 }, 1695 {1, 1, 3, 0, /* 0xe9 */ 1696 {{0, 0}, 1697 {3, 3}, 1698 {5, 7}, 1699 {0, 0} 1700 } 1701 }, 1702 {0, 1, 3, 0, /* 0xea */ 1703 {{1, 1}, 1704 {3, 3}, 1705 {5, 7}, 1706 {0, 0} 1707 } 1708 }, 1709 {1, 1, 3, 0, /* 0xeb */ 1710 {{0, 1}, 1711 {3, 3}, 1712 {5, 7}, 1713 {0, 0} 1714 } 1715 }, 1716 {0, 1, 2, 0, /* 0xec */ 1717 {{2, 3}, 1718 {5, 7}, 1719 {0, 0}, 1720 {0, 0} 1721 } 1722 }, 1723 {1, 1, 3, 0, /* 0xed */ 1724 {{0, 0}, 1725 {2, 3}, 1726 {5, 7}, 1727 {0, 0} 1728 } 1729 }, 1730 {0, 1, 2, 0, /* 0xee */ 1731 {{1, 3}, 1732 {5, 7}, 1733 {0, 0}, 1734 {0, 0} 1735 } 1736 }, 1737 {1, 1, 2, 0, /* 0xef */ 1738 {{0, 3}, 1739 {5, 7}, 1740 {0, 0}, 1741 {0, 0} 1742 } 1743 }, 1744 {0, 1, 1, 0, /* 0xf0 */ 1745 {{4, 7}, 1746 {0, 0}, 1747 {0, 0}, 1748 {0, 0} 1749 } 1750 }, 1751 {1, 1, 2, 0, /* 0xf1 */ 1752 {{0, 0}, 1753 {4, 7}, 1754 {0, 0}, 1755 {0, 0} 1756 } 1757 }, 1758 {0, 1, 2, 0, /* 0xf2 */ 1759 {{1, 1}, 1760 {4, 7}, 1761 {0, 0}, 1762 {0, 0} 1763 } 1764 }, 1765 {1, 1, 2, 0, /* 0xf3 */ 1766 {{0, 1}, 1767 {4, 7}, 1768 {0, 0}, 1769 {0, 0} 1770 } 1771 }, 1772 {0, 1, 2, 0, /* 0xf4 */ 1773 {{2, 2}, 1774 {4, 7}, 1775 {0, 0}, 1776 {0, 0} 1777 } 1778 }, 1779 {1, 1, 3, 0, /* 0xf5 */ 1780 {{0, 0}, 1781 {2, 2}, 1782 {4, 7}, 1783 {0, 0} 1784 } 1785 }, 1786 {0, 1, 2, 0, /* 0xf6 */ 1787 {{1, 2}, 1788 {4, 7}, 1789 {0, 0}, 1790 {0, 0} 1791 } 1792 }, 1793 {1, 1, 2, 0, /* 0xf7 */ 1794 {{0, 2}, 1795 {4, 7}, 1796 {0, 0}, 1797 {0, 0} 1798 } 1799 }, 1800 {0, 1, 1, 0, /* 0xf8 */ 1801 {{3, 7}, 1802 {0, 0}, 1803 {0, 0}, 1804 {0, 0} 1805 } 1806 }, 1807 {1, 1, 2, 0, /* 0xf9 */ 1808 {{0, 0}, 1809 {3, 7}, 1810 {0, 0}, 1811 {0, 
0}
		}
	},
	{0, 1, 2, 0,		/* 0xfa */
		{{1, 1},
		{3, 7},
		{0, 0},
		{0, 0}
		}
	},
	{1, 1, 2, 0,		/* 0xfb */
		{{0, 1},
		{3, 7},
		{0, 0},
		{0, 0}
		}
	},
	{0, 1, 1, 0,		/* 0xfc */
		{{2, 7},
		{0, 0},
		{0, 0},
		{0, 0}
		}
	},
	{1, 1, 2, 0,		/* 0xfd */
		{{0, 0},
		{2, 7},
		{0, 0},
		{0, 0}
		}
	},
	{0, 1, 1, 0,		/* 0xfe */
		{{1, 7},
		{0, 0},
		{0, 0},
		{0, 0}
		}
	},
	{1, 1, 1, 0,		/* 0xff */
		{{0, 7},
		{0, 0},
		{0, 0},
		{0, 0}
		}
	}
};


/*
 * Return 1 when the interface address *ifa is acceptable under the
 * supplied scoping rules, 0 when it must be skipped.  The *_legal and
 * *_scope arguments are booleans.  When do_update is non-zero the IFA
 * flags are refreshed (sctp_gather_internal_ifa_flags) before an IPv6
 * address is judged -- callers that already hold the relevant IFA locks
 * pass 0.
 */
int
sctp_is_address_in_scope(struct sctp_ifa *ifa,
    int ipv4_addr_legal,
    int ipv6_addr_legal,
    int loopback_scope,
    int ipv4_local_scope,
    int local_scope,
    int site_scope,
    int do_update)
{
	if ((loopback_scope == 0) &&
	    (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
		/*
		 * Skip addresses on loopback interfaces when
		 * loopback_scope is not set.
		 */
		return (0);
	}
	if ((ifa->address.sa.sa_family == AF_INET) && ipv4_addr_legal) {
		struct sockaddr_in *sin;

		sin = (struct sockaddr_in *)&ifa->address.sin;
		if (sin->sin_addr.s_addr == 0) {
			/* not in scope, unspecified (INADDR_ANY) */
			return (0);
		}
		if ((ipv4_local_scope == 0) &&
		    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
			/* private address not in scope */
			return (0);
		}
	} else if ((ifa->address.sa.sa_family == AF_INET6) && ipv6_addr_legal) {
		struct sockaddr_in6 *sin6;

		/*
		 * Must update the flags, bummer, which means any IFA locks
		 * must now be applied HERE <->
		 */
		if (do_update) {
			sctp_gather_internal_ifa_flags(ifa);
		}
		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
			return (0);
		}
		/* ok to use deprecated addresses? */
		sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
			/* skip unspecified addresses */
			return (0);
		}
		if (		/* (local_scope == 0) && */
		    (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
			/*
			 * Link-local addresses are always excluded here;
			 * the local_scope test is deliberately commented
			 * out above.
			 */
			return (0);
		}
		if ((site_scope == 0) &&
		    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
			return (0);
		}
	} else {
		/* unsupported address family */
		return (0);
	}
	return (1);
}

/*
 * Append an address parameter TLV (SCTP_IPV4_ADDRESS or
 * SCTP_IPV6_ADDRESS) for *ifa at the end of chain m.  If the last mbuf
 * lacks trailing space, a fresh mbuf is allocated and linked onto the
 * tail.  Returns the mbuf the parameter was written into, or m
 * unchanged when the address family is unknown or the allocation
 * failed.
 */
static struct mbuf *
sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa)
{
	struct sctp_paramhdr *parmh;
	struct mbuf *mret;
	int len;

	if (ifa->address.sa.sa_family == AF_INET) {
		len = sizeof(struct sctp_ipv4addr_param);
	} else if (ifa->address.sa.sa_family == AF_INET6) {
		len = sizeof(struct sctp_ipv6addr_param);
	} else {
		/* unknown type */
		return (m);
	}
	if (M_TRAILINGSPACE(m) >= len) {
		/* easy side we just drop it on the end */
		parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
		mret = m;
	} else {
		/* Need more space; walk to the chain tail and extend it */
		mret = m;
		while (SCTP_BUF_NEXT(mret) != NULL) {
			mret = SCTP_BUF_NEXT(mret);
		}
		SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
		if (SCTP_BUF_NEXT(mret) == NULL) {
			/* We are hosed, can't add more addresses */
			return (m);
		}
		mret = SCTP_BUF_NEXT(mret);
		parmh = mtod(mret, struct sctp_paramhdr *);
	}
	/* now add the parameter */
	if (ifa->address.sa.sa_family == AF_INET) {
		struct sctp_ipv4addr_param *ipv4p;
		struct sockaddr_in *sin;

		sin = (struct sockaddr_in *)&ifa->address.sin;
		ipv4p = (struct sctp_ipv4addr_param *)parmh;
		parmh->param_type = htons(SCTP_IPV4_ADDRESS);
		parmh->param_length = htons(len);
		ipv4p->addr = sin->sin_addr.s_addr;
		SCTP_BUF_LEN(mret) += len;
	} else if (ifa->address.sa.sa_family == AF_INET6) {
		struct
sctp_ipv6addr_param *ipv6p;
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
		ipv6p = (struct sctp_ipv6addr_param *)parmh;
		parmh->param_type = htons(SCTP_IPV6_ADDRESS);
		parmh->param_length = htons(len);
		memcpy(ipv6p->addr, &sin6->sin6_addr,
		    sizeof(ipv6p->addr));
		/* clear embedded scope in the address */
		in6_clearscope((struct in6_addr *)ipv6p->addr);
		SCTP_BUF_LEN(mret) += len;
	} else {
		return (m);
	}
	return (mret);
}


/*
 * Append this endpoint's usable local addresses, as address parameter
 * TLVs, to the INIT/INIT-ACK chain ending at m_at (cnt_inits_to
 * addresses are assumed already present).  For a bound-all endpoint the
 * VRF interface lists are walked; otherwise the endpoint's explicit
 * bound-address list is used.  In both cases a first pass merely counts
 * in-scope addresses, and addresses are only emitted when more than one
 * would be listed (NAT consideration -- see the comment in the second
 * half of this function).  Runs with SCTP_IPI_ADDR_LOCK held.  Returns
 * the (possibly extended) chain tail.
 */
struct mbuf *
sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_scoping *scope,
    struct mbuf *m_at, int cnt_inits_to)
{
	struct sctp_vrf *vrf = NULL;
	int cnt, limit_out = 0, total_count;
	uint32_t vrf_id;

	vrf_id = inp->def_vrf_id;
	SCTP_IPI_ADDR_LOCK();
	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
		SCTP_IPI_ADDR_UNLOCK();
		return (m_at);
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		struct sctp_ifa *sctp_ifap;
		struct sctp_ifn *sctp_ifnp;

		cnt = cnt_inits_to;
		if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
			/*
			 * Too many addresses to count exactly; cap the
			 * count and limit output in the emit pass below.
			 */
			limit_out = 1;
			cnt = SCTP_ADDRESS_LIMIT;
			goto skip_count;
		}
		/* Counting pass: how many in-scope addresses are there? */
		LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
			if ((scope->loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
				/*
				 * Skip loopback devices if loopback_scope
				 * not set
				 */
				continue;
			}
			LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
				if (sctp_is_address_in_scope(sctp_ifap,
				    scope->ipv4_addr_legal,
				    scope->ipv6_addr_legal,
				    scope->loopback_scope,
				    scope->ipv4_local_scope,
				    scope->local_scope,
				    scope->site_scope, 1) == 0) {
					continue;
				}
				cnt++;
				if (cnt > SCTP_ADDRESS_LIMIT) {
					break;
				}
			}
			if (cnt > SCTP_ADDRESS_LIMIT) {
				break;
			}
		}
skip_count:
		if (cnt > 1) {
			/* Emit pass: add the parameters to the chain. */
			total_count = 0;
			LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
				cnt = 0;	/* per-interface counter */
				if ((scope->loopback_scope == 0) &&
				    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
					/*
					 * Skip loopback devices if
					 * loopback_scope not set
					 */
					continue;
				}
				LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
					if (sctp_is_address_in_scope(sctp_ifap,
					    scope->ipv4_addr_legal,
					    scope->ipv6_addr_legal,
					    scope->loopback_scope,
					    scope->ipv4_local_scope,
					    scope->local_scope,
					    scope->site_scope, 0) == 0) {
						continue;
					}
					m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap);
					if (limit_out) {
						cnt++;
						total_count++;
						if (cnt >= 2) {
							/*
							 * two from each
							 * address
							 */
							break;
						}
						if (total_count > SCTP_ADDRESS_LIMIT) {
							/* No more addresses */
							break;
						}
					}
				}
			}
		}
	} else {
		struct sctp_laddr *laddr;

		cnt = cnt_inits_to;
		/* First, how many ? */
		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
			if (laddr->ifa == NULL) {
				continue;
			}
			if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
				/*
				 * Address being deleted by the system, dont
				 * list.
				 */
				continue;
			if (laddr->action == SCTP_DEL_IP_ADDRESS) {
				/*
				 * Address being deleted on this ep don't
				 * list.
				 */
				continue;
			}
			if (sctp_is_address_in_scope(laddr->ifa,
			    scope->ipv4_addr_legal,
			    scope->ipv6_addr_legal,
			    scope->loopback_scope,
			    scope->ipv4_local_scope,
			    scope->local_scope,
			    scope->site_scope, 1) == 0) {
				continue;
			}
			cnt++;
		}
		if (cnt > SCTP_ADDRESS_LIMIT) {
			limit_out = 1;
		}
		/*
		 * To get through a NAT we only list addresses if we have
		 * more than one. That way if you just bind a single address
		 * we let the source of the init dictate our address.
2121 */ 2122 if (cnt > 1) { 2123 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 2124 cnt = 0; 2125 if (laddr->ifa == NULL) { 2126 continue; 2127 } 2128 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) 2129 continue; 2130 2131 if (sctp_is_address_in_scope(laddr->ifa, 2132 scope->ipv4_addr_legal, 2133 scope->ipv6_addr_legal, 2134 scope->loopback_scope, 2135 scope->ipv4_local_scope, 2136 scope->local_scope, 2137 scope->site_scope, 0) == 0) { 2138 continue; 2139 } 2140 m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa); 2141 cnt++; 2142 if (cnt >= SCTP_ADDRESS_LIMIT) { 2143 break; 2144 } 2145 } 2146 } 2147 } 2148 SCTP_IPI_ADDR_UNLOCK(); 2149 return (m_at); 2150 } 2151 2152 static struct sctp_ifa * 2153 sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa, 2154 uint8_t dest_is_loop, 2155 uint8_t dest_is_priv, 2156 sa_family_t fam) 2157 { 2158 uint8_t dest_is_global = 0; 2159 2160 /* dest_is_priv is true if destination is a private address */ 2161 /* dest_is_loop is true if destination is a loopback addresses */ 2162 2163 /* 2164 * Here we determine if its a preferred address. A preferred address 2165 * means it is the same scope or higher scope then the destination. 
	 * L = loopback, P = private, G = global
	 * -----------------------------------------
	 *  src  |  dest  |  result
	 * -----------------------------------------
	 *   L   |    L   |  yes
	 * -----------------------------------------
	 *   P   |    L   |  yes-v4 no-v6
	 * -----------------------------------------
	 *   G   |    L   |  yes-v4 no-v6
	 * -----------------------------------------
	 *   L   |    P   |  no
	 * -----------------------------------------
	 *   P   |    P   |  yes
	 * -----------------------------------------
	 *   G   |    P   |  no
	 * -----------------------------------------
	 *   L   |    G   |  no
	 * -----------------------------------------
	 *   P   |    G   |  no
	 * -----------------------------------------
	 *   G   |    G   |  yes
	 * -----------------------------------------
	 */

	if (ifa->address.sa.sa_family != fam) {
		/* forget mis-matched family */
		return (NULL);
	}
	/* Neither private nor loopback means a global destination. */
	if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
		dest_is_global = 1;
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
	/* Ok the address may be ok */
	if (fam == AF_INET6) {
		/* ok to use deprecated addresses? */
		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
			SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
			return (NULL);
		}
		if (ifa->src_is_priv) {
			/* v6 link-local source never pairs with loopback dest */
			if (dest_is_loop) {
				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
				return (NULL);
			}
		}
		if (ifa->src_is_glob) {
			if (dest_is_loop) {
				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
				return (NULL);
			}
		}
	}
	/*
	 * Now that we know what is what, implement or table this could in
	 * theory be done slicker (it used to be), but this is
	 * straightforward and easier to validate :-)
	 */
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
	    ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
	    dest_is_loop, dest_is_priv, dest_is_global);

	if ((ifa->src_is_loop) && (dest_is_priv)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
		return (NULL);
	}
	if ((ifa->src_is_glob) && (dest_is_priv)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
		return (NULL);
	}
	if ((ifa->src_is_loop) && (dest_is_global)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
		return (NULL);
	}
	if ((ifa->src_is_priv) && (dest_is_global)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
		return (NULL);
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
	/* its a preferred address */
	return (ifa);
}

/*
 * Decide whether 'ifa' is an *acceptable* (weaker than preferred) source
 * for a destination with the given scope flags.  Returns 'ifa' when
 * acceptable, NULL otherwise.
 */
static struct sctp_ifa *
sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
	uint8_t dest_is_global = 0;


	/*
	 * Here we determine if its a acceptable address. A acceptable
	 * address means it is the same scope or higher scope but we can
	 * allow for NAT which means its ok to have a global dest and a
	 * private src.
2254 * 2255 * L = loopback, P = private, G = global 2256 * ----------------------------------------- src | dest | result 2257 * ----------------------------------------- L | L | yes 2258 * ----------------------------------------- P | L | 2259 * yes-v4 no-v6 ----------------------------------------- G | 2260 * L | yes ----------------------------------------- L | 2261 * P | no ----------------------------------------- P | P 2262 * | yes ----------------------------------------- G | P 2263 * | yes - May not work ----------------------------------------- 2264 * L | G | no ----------------------------------------- P 2265 * | G | yes - May not work 2266 * ----------------------------------------- G | G | yes 2267 * ----------------------------------------- 2268 */ 2269 2270 if (ifa->address.sa.sa_family != fam) { 2271 /* forget non matching family */ 2272 return (NULL); 2273 } 2274 /* Ok the address may be ok */ 2275 if ((dest_is_loop == 0) && (dest_is_priv == 0)) { 2276 dest_is_global = 1; 2277 } 2278 if (fam == AF_INET6) { 2279 /* ok to use deprecated addresses? */ 2280 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) { 2281 return (NULL); 2282 } 2283 if (ifa->src_is_priv) { 2284 /* Special case, linklocal to loop */ 2285 if (dest_is_loop) 2286 return (NULL); 2287 } 2288 } 2289 /* 2290 * Now that we know what is what, implement our table. 
This could in 2291 * theory be done slicker (it used to be), but this is 2292 * straightforward and easier to validate :-) 2293 */ 2294 if ((ifa->src_is_loop == 1) && (dest_is_priv)) { 2295 return (NULL); 2296 } 2297 if ((ifa->src_is_loop == 1) && (dest_is_global)) { 2298 return (NULL); 2299 } 2300 /* its an acceptable address */ 2301 return (ifa); 2302 } 2303 2304 int 2305 sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa) 2306 { 2307 struct sctp_laddr *laddr; 2308 2309 if (stcb == NULL) { 2310 /* There are no restrictions, no TCB :-) */ 2311 return (0); 2312 } 2313 LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) { 2314 if (laddr->ifa == NULL) { 2315 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n", 2316 __FUNCTION__); 2317 continue; 2318 } 2319 if (laddr->ifa == ifa) { 2320 /* Yes it is on the list */ 2321 return (1); 2322 } 2323 } 2324 return (0); 2325 } 2326 2327 2328 int 2329 sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa) 2330 { 2331 struct sctp_laddr *laddr; 2332 2333 if (ifa == NULL) 2334 return (0); 2335 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 2336 if (laddr->ifa == NULL) { 2337 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n", 2338 __FUNCTION__); 2339 continue; 2340 } 2341 if ((laddr->ifa == ifa) && laddr->action == 0) 2342 /* same pointer */ 2343 return (1); 2344 } 2345 return (0); 2346 } 2347 2348 2349 2350 static struct sctp_ifa * 2351 sctp_choose_boundspecific_inp(struct sctp_inpcb *inp, 2352 sctp_route_t * ro, 2353 uint32_t vrf_id, 2354 int non_asoc_addr_ok, 2355 uint8_t dest_is_priv, 2356 uint8_t dest_is_loop, 2357 sa_family_t fam) 2358 { 2359 struct sctp_laddr *laddr, *starting_point; 2360 void *ifn; 2361 int resettotop = 0; 2362 struct sctp_ifn *sctp_ifn; 2363 struct sctp_ifa *sctp_ifa, *sifa; 2364 struct sctp_vrf *vrf; 2365 uint32_t ifn_index; 2366 2367 vrf = sctp_find_vrf(vrf_id); 2368 if (vrf == NULL) 2369 return (NULL); 2370 2371 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro); 2372 
	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
	sctp_ifn = sctp_find_ifn(vrf, ifn, ifn_index);
	/*
	 * first question, is the ifn we will emit on in our list, if so, we
	 * want such an address. Note that we first looked for a preferred
	 * address.
	 */
	if (sctp_ifn) {
		/* is a preferred one on the interface we route out? */
		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
				continue;
			sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
			if (sifa == NULL)
				continue;
			if (sctp_is_addr_in_ep(inp, sifa)) {
				/* caller owns the reference we add here */
				atomic_add_int(&sifa->refcount, 1);
				return (sifa);
			}
		}
	}
	/*
	 * ok, now we now need to find one on the list of the addresses. We
	 * can't get one on the emitting interface so let's find first a
	 * preferred one. If not that an acceptable one otherwise... we
	 * return NULL.
	 */
	starting_point = inp->next_addr_touse;
once_again:
	/* wrap the rotation cursor to the head of the bound-address list */
	if (inp->next_addr_touse == NULL) {
		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
		resettotop = 1;
	}
	for (laddr = inp->next_addr_touse; laddr; laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	if (resettotop == 0) {
		/* second half of the rotation: scan from list head */
		inp->next_addr_touse = NULL;
		goto once_again;
	}
	inp->next_addr_touse = starting_point;
	resettotop = 0;
once_again_too:
	if (inp->next_addr_touse == NULL) {
		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
		resettotop = 1;
	}
	/* ok, what about an acceptable address in the inp */
	for (laddr = inp->next_addr_touse; laddr; laddr =
	    LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop, dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	if (resettotop == 0) {
		inp->next_addr_touse = NULL;
		goto once_again_too;
	}
	/*
	 * no address bound can be a source for the destination we are in
	 * trouble
	 */
	return (NULL);
}



/*
 * Source-address selection for a bound-specific endpoint WITH a TCB.
 * Try preferred then acceptable addresses on the emit interface that are
 * bound to the ep and not restricted; otherwise rotate through the bound
 * list via stcb->asoc.last_used_address.  The returned ifa has its
 * refcount bumped.
 */
static struct sctp_ifa *
sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    sctp_route_t * ro,
    uint32_t vrf_id,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    int non_asoc_addr_ok,
    sa_family_t fam)
{
	struct sctp_laddr *laddr, *starting_point;
	void *ifn;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa, *sifa;
	uint8_t start_at_beginning = 0;
	struct sctp_vrf *vrf;
	uint32_t ifn_index;

	/*
	 * first question, is the ifn we will emit on in our list, if so, we
	 * want that one.
	 */
	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL)
		return (NULL);

	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
	sctp_ifn = sctp_find_ifn(vrf, ifn, ifn_index);

	/*
	 * first question, is the ifn we will emit on in our list?  If so,
	 * we want that one.  First we look for a preferred.  Second, we go
	 * for an acceptable.
2487 */ 2488 if (sctp_ifn) { 2489 /* first try for a preferred address on the ep */ 2490 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 2491 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0)) 2492 continue; 2493 if (sctp_is_addr_in_ep(inp, sctp_ifa)) { 2494 sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam); 2495 if (sifa == NULL) 2496 continue; 2497 if ((non_asoc_addr_ok == 0) && 2498 (sctp_is_addr_restricted(stcb, sifa))) { 2499 /* on the no-no list */ 2500 continue; 2501 } 2502 atomic_add_int(&sifa->refcount, 1); 2503 return (sifa); 2504 } 2505 } 2506 /* next try for an acceptable address on the ep */ 2507 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 2508 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0)) 2509 continue; 2510 if (sctp_is_addr_in_ep(inp, sctp_ifa)) { 2511 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam); 2512 if (sifa == NULL) 2513 continue; 2514 if ((non_asoc_addr_ok == 0) && 2515 (sctp_is_addr_restricted(stcb, sifa))) { 2516 /* on the no-no list */ 2517 continue; 2518 } 2519 atomic_add_int(&sifa->refcount, 1); 2520 return (sifa); 2521 } 2522 } 2523 2524 } 2525 /* 2526 * if we can't find one like that then we must look at all addresses 2527 * bound to pick one at first preferable then secondly acceptable. 
2528 */ 2529 starting_point = stcb->asoc.last_used_address; 2530 sctp_from_the_top: 2531 if (stcb->asoc.last_used_address == NULL) { 2532 start_at_beginning = 1; 2533 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list); 2534 } 2535 /* search beginning with the last used address */ 2536 for (laddr = stcb->asoc.last_used_address; laddr; 2537 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) { 2538 if (laddr->ifa == NULL) { 2539 /* address has been removed */ 2540 continue; 2541 } 2542 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam); 2543 if (sifa == NULL) 2544 continue; 2545 if ((non_asoc_addr_ok == 0) && 2546 (sctp_is_addr_restricted(stcb, sifa))) { 2547 /* on the no-no list */ 2548 continue; 2549 } 2550 stcb->asoc.last_used_address = laddr; 2551 atomic_add_int(&sifa->refcount, 1); 2552 return (sifa); 2553 2554 } 2555 if (start_at_beginning == 0) { 2556 stcb->asoc.last_used_address = NULL; 2557 goto sctp_from_the_top; 2558 } 2559 /* now try for any higher scope than the destination */ 2560 stcb->asoc.last_used_address = starting_point; 2561 start_at_beginning = 0; 2562 sctp_from_the_top2: 2563 if (stcb->asoc.last_used_address == NULL) { 2564 start_at_beginning = 1; 2565 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list); 2566 } 2567 /* search beginning with the last used address */ 2568 for (laddr = stcb->asoc.last_used_address; laddr; 2569 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) { 2570 if (laddr->ifa == NULL) { 2571 /* address has been removed */ 2572 continue; 2573 } 2574 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop, 2575 dest_is_priv, fam); 2576 if (sifa == NULL) 2577 continue; 2578 if ((non_asoc_addr_ok == 0) && 2579 (sctp_is_addr_restricted(stcb, sifa))) { 2580 /* on the no-no list */ 2581 continue; 2582 } 2583 stcb->asoc.last_used_address = laddr; 2584 atomic_add_int(&sifa->refcount, 1); 2585 return (sifa); 2586 } 2587 if (start_at_beginning == 0) { 2588 stcb->asoc.last_used_address = NULL; 2589 
		goto sctp_from_the_top2;
	}
	/* nothing preferred nor acceptable was found */
	return (NULL);
}

/*
 * Return the addr_wanted'th (0-based) preferred, non-restricted address
 * on interface 'ifn' for the given destination scope, or NULL.  NOTE: the
 * returned ifa's refcount is NOT bumped here; callers do that.
 */
static struct sctp_ifa *
sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
    struct sctp_tcb *stcb,
    int non_asoc_addr_ok,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    int addr_wanted,
    sa_family_t fam)
{
	struct sctp_ifa *ifa, *sifa;
	int num_eligible_addr = 0;

	LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
		if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
		    (non_asoc_addr_ok == 0))
			continue;
		sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
		    dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		if (stcb) {
			if ((non_asoc_addr_ok == 0) &&
			    sctp_is_addr_restricted(stcb, sifa)) {
				/*
				 * It is restricted for some reason..
				 * probably not yet added.
				 */
				continue;
			}
		}
		if (num_eligible_addr >= addr_wanted) {
			return (sifa);
		}
		num_eligible_addr++;
	}
	return (NULL);
}


/*
 * Count the preferred, non-restricted addresses on interface 'ifn' for
 * the given destination scope.  Mirrors the filtering done by
 * sctp_select_nth_preferred_addr_from_ifn_boundall().
 */
static int
sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
    struct sctp_tcb *stcb,
    int non_asoc_addr_ok,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
	struct sctp_ifa *ifa, *sifa;
	int num_eligible_addr = 0;

	LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
		if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
		    (non_asoc_addr_ok == 0)) {
			continue;
		}
		sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
		    dest_is_priv, fam);
		if (sifa == NULL) {
			continue;
		}
		if (stcb) {
			if ((non_asoc_addr_ok == 0) &&
			    sctp_is_addr_restricted(stcb, sifa)) {
				/*
				 * It is restricted for some reason..
				 * probably not yet added.
				 */
				continue;
			}
		}
		num_eligible_addr++;
	}
	return (num_eligible_addr);
}

/*
 * Source-address selection for a bound-all endpoint.  Plan A: rotate
 * through the preferred addresses on the interface the route emits on.
 * Plan B: a preferred address on any other interface.  Plan C: an
 * acceptable address on the emit interface.  Plan D: an acceptable
 * address anywhere.  The returned ifa has its refcount bumped.
 */
static struct sctp_ifa *
sctp_choose_boundall(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    sctp_route_t * ro,
    uint32_t vrf_id,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    int non_asoc_addr_ok,
    sa_family_t fam)
{
	int cur_addr_num = 0, num_preferred = 0;
	void *ifn;
	struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
	struct sctp_ifa *sctp_ifa, *sifa;
	uint32_t ifn_index;
	struct sctp_vrf *vrf;

	/*
	 * For boundall we can use any address in the association. If
	 * non_asoc_addr_ok is set we can use any address (at least in
	 * theory). So we look for preferred addresses first. If we find
	 * one, we use it. Otherwise we next try to get an address on the
	 * interface, which we should be able to do (unless non_asoc_addr_ok
	 * is false and we are routed out that way). In these cases where we
	 * can't use the address of the interface we go through all the
	 * ifn's looking for an address we can use and fill that in. Punting
	 * means we send back address 0, which will probably cause problems
	 * actually since then IP will fill in the address of the route ifn,
	 * which means we probably already rejected it.. i.e. here comes an
	 * abort :-<.
	 */
	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL)
		return (NULL);

	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);

	emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(vrf, ifn, ifn_index);
	if (sctp_ifn == NULL) {
		/* ?? We don't have this guy ??
		 */
		goto bound_all_plan_b;
	}
	if (net) {
		/* continue the per-destination round-robin where we left off */
		cur_addr_num = net->indx_of_eligible_next_to_use;
	}
	num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
	    stcb,
	    non_asoc_addr_ok,
	    dest_is_loop,
	    dest_is_priv, fam);
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses\n",
	    num_preferred);
	if (num_preferred == 0) {
		/*
		 * no eligible addresses, we must use some other interface
		 * address if we can find one.
		 */
		goto bound_all_plan_b;
	}
	/*
	 * Ok we have num_eligible_addr set with how many we can use, this
	 * may vary from call to call due to addresses being deprecated
	 * etc..
	 */
	if (cur_addr_num >= num_preferred) {
		cur_addr_num = 0;
	}
	/*
	 * select the nth address from the list (where cur_addr_num is the
	 * nth) and 0 is the first one, 1 is the second one etc...
	 */
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);

	sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
	    dest_is_priv, cur_addr_num, fam);

	/* if sctp_ifa is NULL something changed??, fall to plan b. */
	if (sctp_ifa) {
		atomic_add_int(&sctp_ifa->refcount, 1);
		if (net) {
			/* save off where the next one we will want */
			net->indx_of_eligible_next_to_use = cur_addr_num + 1;
		}
		return (sctp_ifa);
	}
	/*
	 * plan_b: Look at all interfaces and find a preferred address. If
	 * no preferred fall through to plan_c.
	 */
bound_all_plan_b:
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
	LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
		if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
			/* wrong base scope */
			continue;
		}
		if ((sctp_ifn == looked_at) && looked_at)
			/* already looked at this guy */
			continue;
		num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, stcb, non_asoc_addr_ok,
		    dest_is_loop, dest_is_priv, fam);
		SCTPDBG(SCTP_DEBUG_OUTPUT2,
		    "Found ifn:%p %d preferred source addresses\n",
		    ifn, num_preferred);
		if (num_preferred == 0) {
			/* None on this interface. */
			continue;
		}
		SCTPDBG(SCTP_DEBUG_OUTPUT2,
		    "num preferred:%d on interface:%p cur_addr_num:%d\n",
		    num_preferred, sctp_ifn, cur_addr_num);

		/*
		 * Ok we have num_eligible_addr set with how many we can
		 * use, this may vary from call to call due to addresses
		 * being deprecated etc..
		 */
		if (cur_addr_num >= num_preferred) {
			cur_addr_num = 0;
		}
		sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
		    dest_is_priv, cur_addr_num, fam);
		if (sifa == NULL)
			continue;
		if (net) {
			net->indx_of_eligible_next_to_use = cur_addr_num + 1;
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
			    cur_addr_num);
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
			SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
			SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
		}
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);

	}

	/* plan_c: do we have an acceptable address on the emit interface */
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan C: find acceptable on interface\n");
	if (emit_ifn == NULL) {
		goto plan_d;
	}
	LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
		if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
		    (non_asoc_addr_ok == 0))
			continue;
		sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
		    dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		if (stcb) {
			if ((non_asoc_addr_ok == 0) &&
			    sctp_is_addr_restricted(stcb, sifa)) {
				/*
				 * It is restricted for some reason..
				 * probably not yet added.
				 */
				continue;
			}
		}
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
plan_d:
	/*
	 * plan_d: We are in trouble. No preferred address on the emit
	 * interface. And not even a perfered address on all interfaces. Go
	 * out and see if we can find an acceptable address somewhere
	 * amongst all interfaces.
	 */
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D\n");
	LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
		if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
			/* wrong base scope */
			continue;
		}
		if ((sctp_ifn == looked_at) && looked_at)
			/* already looked at this guy */
			continue;

		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
			    (non_asoc_addr_ok == 0))
				continue;
			sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
			    dest_is_loop,
			    dest_is_priv, fam);
			if (sifa == NULL)
				continue;
			if (stcb) {
				if ((non_asoc_addr_ok == 0) &&
				    sctp_is_addr_restricted(stcb, sifa)) {
					/*
					 * It is restricted for some
					 * reason.. probably not yet added.
					 */
					continue;
				}
			}
			atomic_add_int(&sifa->refcount, 1);
			return (sifa);
		}
	}
	/*
	 * Ok we can find NO address to source from that is not on our
	 * negative list and non_asoc_address is NOT ok, or its on our
	 * negative list.
 We cant source to it :-(
	 */
	return (NULL);
}



/* tcb may be NULL */
/*
 * Top-level source-address selection: ensure the route is cached, derive
 * the destination's scope (loopback / private / global), then dispatch to
 * the bound-all or bound-specific chooser.  Returns an ifa with a bumped
 * refcount, or NULL when no route or no usable source exists.
 */
struct sctp_ifa *
sctp_source_address_selection(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    sctp_route_t * ro,
    struct sctp_nets *net,
    int non_asoc_addr_ok, uint32_t vrf_id)
{

	/* both views alias ro_dst; which is valid depends on sin_family */
	struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
	struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
	struct sctp_ifa *answer;
	uint8_t dest_is_priv, dest_is_loop;
	sa_family_t fam;

	/*
	 * Rules:
	 * - Find the route if needed, cache if I can.
	 * - Look at interface address in route, Is it in the bound list.
	 *   If so we have the best source.
	 * - If not we must rotate amongst the addresses.
	 *
	 * Cavets and issues
	 *
	 * Do we need to pay attention to scope. We can have a private address
	 * or a global address we are sourcing or sending to. So if we draw
	 * it out:
	 * For V4
	 * ------------------------------------------
	 *      source      *  dest     *  result
	 * ------------------------------------------
	 * <a>  Private     *  Global   *  NAT
	 * ------------------------------------------
	 * <b>  Private     *  Private  *  No problem
	 * ------------------------------------------
	 * <c>  Global      *  Private  *  Huh, How will this work?
	 * ------------------------------------------
	 * <d>  Global      *  Global   *  No Problem
	 * ------------------------------------------
	 * For V6
	 * ------------------------------------------
	 *      source      *  dest      *  result
	 * ------------------------------------------
	 * <a>  Linklocal   *  Global    *
	 * ------------------------------------------
	 * <b>  Linklocal   *  Linklocal *  No problem
	 * ------------------------------------------
	 * <c>  Global      *  Linklocal *  Huh, How will this work?
	 * ------------------------------------------
	 * <d>  Global      *  Global    *  No Problem
	 * ------------------------------------------
	 *
	 * And then we add to that what happens if there are multiple addresses
	 * assigned to an interface. Remember the ifa on a ifn is a linked
	 * list of addresses. So one interface can have more than one IP
	 * address. What happens if we have both a private and a global
	 * address? Do we then use context of destination to sort out which
	 * one is best? And what about NAT's sending P->G may get you a NAT
	 * translation, or should you select the G thats on the interface in
	 * preference.
	 *
	 * Decisions:
	 *
	 * - count the number of addresses on the interface.
	 * - if its one, no problem except case <c>. For <a> we will assume a
	 *   NAT out there.
	 * - if there are more than one, then we need to worry about scope P
	 *   or G. We should prefer G -> G and P -> P if possible. Then as a
	 *   secondary fall back to mixed types G->P being a last ditch one.
	 * - The above all works for bound all, but bound specific we need to
	 *   use the same concept but instead only consider the bound
	 *   addresses. If the bound set is NOT assigned to the interface then
	 *   we must use rotation amongst the bound addresses..
	 */
	if (ro->ro_rt == NULL) {
		uint32_t table_id = 0;

		/*
		 * Need a route to cache.
		 */
		if (stcb) {
			table_id = stcb->asoc.table_id;
		} else {
			table_id = SCTP_VRF_DEFAULT_TABLEID(vrf_id);
		}
		SCTP_RTALLOC(ro, vrf_id, table_id);
	}
	if (ro->ro_rt == NULL) {
		/* no route to the destination at all */
		return (NULL);
	}
	fam = to->sin_family;
	dest_is_priv = dest_is_loop = 0;
	/* Setup our scopes for the destination */
	if (fam == AF_INET) {
		/* Scope based on outbound address */
		if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
			dest_is_priv = 1;
		} else if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
			dest_is_loop = 1;
			if (net != NULL) {
				/* mark it as local */
				net->addr_is_local = 1;
			}
		}
	} else if (fam == AF_INET6) {
		/* Scope based on outbound address */
		if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
			/*
			 * If the route goes to the loopback address OR the
			 * address is a loopback address, we are loopback
			 * scope. But we don't use dest_is_priv (link local
			 * addresses).
			 */
			dest_is_loop = 1;
			if (net != NULL) {
				/* mark it as local */
				net->addr_is_local = 1;
			}
		} else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
			dest_is_priv = 1;
		}
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)to);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * When bound to all if the address list is set it is a
		 * negative list. Addresses being added by asconf.
		 */
		answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
		    dest_is_priv, dest_is_loop,
		    non_asoc_addr_ok, fam);
		return (answer);
	}
	/*
	 * Three possiblities here:
	 *
	 * a) stcb is NULL, which means we operate only from the list of
	 * addresses (ifa's) bound to the endpoint and we care not about the
	 * list. b) stcb is NOT-NULL, which means we have an assoc structure
	 * and auto-asconf is on.
 This means that the list of addresses is a
	 * NOT list. We use the list from the inp, but any listed address in
	 * our list is NOT yet added. However if the non_asoc_addr_ok is set
	 * we CAN use an address NOT available (i.e. being added). Its a
	 * negative list. c) stcb is NOT-NULL, which means we have an assoc
	 * structure and auto-asconf is off. This means that the list of
	 * addresses is the ONLY addresses I can use.. its positive.
	 *
	 * Note we collapse b & c into the same function just like in the v6
	 * address selection.
	 */
	if (stcb) {
		answer = sctp_choose_boundspecific_stcb(inp, stcb, net, ro,
		    vrf_id, dest_is_priv,
		    dest_is_loop,
		    non_asoc_addr_ok, fam);
	} else {
		answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
		    non_asoc_addr_ok,
		    dest_is_priv,
		    dest_is_loop, fam);
	}
	return (answer);
}

/*
 * Scan the 'control' mbuf chain for an IPPROTO_SCTP cmsg of type 'c_type'
 * and copy 'cpsize' bytes of its payload into 'data'.  Returns 1 when
 * found and copied, 0 otherwise.
 */
static int
sctp_find_cmsg(int c_type, void *data, struct mbuf *control, int cpsize)
{
	struct cmsghdr cmh;
	int tlen, at;

	tlen = SCTP_BUF_LEN(control);
	at = 0;
	/*
	 * Independent of how many mbufs, find the c_type inside the control
	 * structure and copy out the data.
	 */
	while (at < tlen) {
		if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
			/* not enough room for one more we are done. */
			return (0);
		}
		m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
		/*
		 * NOTE(review): cmsg_len comes from user-supplied control
		 * data; there is no check here that cmsg_len >=
		 * sizeof(struct cmsghdr), only that it fits in tlen.  A
		 * short cmsg_len relies on the cmsg_len == 0 break below to
		 * avoid spinning — TODO confirm against later upstream
		 * hardening of this routine.
		 */
		if ((cmh.cmsg_len + at) > tlen) {
			/*
			 * this is real messed up since there is not enough
			 * data here to cover the cmsg header. We are done.
			 */
			return (0);
		}
		if ((cmh.cmsg_level == IPPROTO_SCTP) &&
		    (c_type == cmh.cmsg_type)) {
			/* found the one we want, copy it out */
			at += CMSG_ALIGN(sizeof(struct cmsghdr));
			if ((int)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < cpsize) {
				/*
				 * space of cmsg_len after header not big
				 * enough
				 */
				return (0);
			}
			m_copydata(control, at, cpsize, data);
			return (1);
		} else {
			at += CMSG_ALIGN(cmh.cmsg_len);
			if (cmh.cmsg_len == 0) {
				break;
			}
		}
	}
	/* not found */
	return (0);
}

/*
 * Build the state-cookie parameter for an INIT-ACK: a chain holding the
 * cookie data, a copy of the peer's INIT, a copy of our INIT-ACK, and an
 * HMAC signature.  Returns the chain, or NULL when any allocation fails
 * (in which case all intermediate copies are freed).
 */
static struct mbuf *
sctp_add_cookie(struct sctp_inpcb *inp, struct mbuf *init, int init_offset,
    struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in)
{
	struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
	struct sctp_state_cookie *stc;
	struct sctp_paramhdr *ph;
	uint8_t *signature;
	int sig_offset;
	uint16_t cookie_sz;

	mret = NULL;
	mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
	    sizeof(struct sctp_paramhdr)), 0,
	    M_DONTWAIT, 1, MT_DATA);
	if (mret == NULL) {
		return (NULL);
	}
	copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_DONTWAIT);
	if (copy_init == NULL) {
		sctp_m_freem(mret);
		return (NULL);
	}
	copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
	    M_DONTWAIT);
	if (copy_initack == NULL) {
		sctp_m_freem(mret);
		sctp_m_freem(copy_init);
		return (NULL);
	}
	/* easy side we just drop it on the end */
	ph = mtod(mret, struct sctp_paramhdr *);
	SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
	    sizeof(struct sctp_paramhdr);
	stc = (struct sctp_state_cookie *)((caddr_t)ph +
	    sizeof(struct sctp_paramhdr));
	ph->param_type = htons(SCTP_STATE_COOKIE);
	ph->param_length = 0;	/* fill in at the end */
	/* Fill in the stc cookie data */
*stc = *stc_in; 3131 3132 /* tack the INIT and then the INIT-ACK onto the chain */ 3133 cookie_sz = 0; 3134 m_at = mret; 3135 for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3136 cookie_sz += SCTP_BUF_LEN(m_at); 3137 if (SCTP_BUF_NEXT(m_at) == NULL) { 3138 SCTP_BUF_NEXT(m_at) = copy_init; 3139 break; 3140 } 3141 } 3142 3143 for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3144 cookie_sz += SCTP_BUF_LEN(m_at); 3145 if (SCTP_BUF_NEXT(m_at) == NULL) { 3146 SCTP_BUF_NEXT(m_at) = copy_initack; 3147 break; 3148 } 3149 } 3150 3151 for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3152 cookie_sz += SCTP_BUF_LEN(m_at); 3153 if (SCTP_BUF_NEXT(m_at) == NULL) { 3154 break; 3155 } 3156 } 3157 sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_DONTWAIT, 1, MT_DATA); 3158 if (sig == NULL) { 3159 /* no space, so free the entire chain */ 3160 sctp_m_freem(mret); 3161 return (NULL); 3162 } 3163 SCTP_BUF_LEN(sig) = 0; 3164 SCTP_BUF_NEXT(m_at) = sig; 3165 sig_offset = 0; 3166 signature = (uint8_t *) (mtod(sig, caddr_t)+sig_offset); 3167 /* Time to sign the cookie */ 3168 (void)sctp_hmac_m(SCTP_HMAC, 3169 (uint8_t *) inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)], 3170 SCTP_SECRET_SIZE, mret, sizeof(struct sctp_paramhdr), 3171 (uint8_t *) signature); 3172 SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE; 3173 cookie_sz += SCTP_SIGNATURE_SIZE; 3174 3175 ph->param_length = htons(cookie_sz); 3176 return (mret); 3177 } 3178 3179 3180 static __inline uint8_t 3181 sctp_get_ect(struct sctp_tcb *stcb, 3182 struct sctp_tmit_chunk *chk) 3183 { 3184 uint8_t this_random; 3185 3186 /* Huh? 
*/ 3187 if (sctp_ecn_enable == 0) 3188 return (0); 3189 3190 if (sctp_ecn_nonce == 0) 3191 /* no nonce, always return ECT0 */ 3192 return (SCTP_ECT0_BIT); 3193 3194 if (stcb->asoc.peer_supports_ecn_nonce == 0) { 3195 /* Peer does NOT support it, so we send a ECT0 only */ 3196 return (SCTP_ECT0_BIT); 3197 } 3198 if (chk == NULL) 3199 return (SCTP_ECT0_BIT); 3200 3201 if ((stcb->asoc.hb_random_idx > 3) || 3202 ((stcb->asoc.hb_random_idx == 3) && 3203 (stcb->asoc.hb_ect_randombit > 7))) { 3204 uint32_t rndval; 3205 3206 warp_drive_sa: 3207 rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 3208 memcpy(stcb->asoc.hb_random_values, &rndval, 3209 sizeof(stcb->asoc.hb_random_values)); 3210 this_random = stcb->asoc.hb_random_values[0]; 3211 stcb->asoc.hb_random_idx = 0; 3212 stcb->asoc.hb_ect_randombit = 0; 3213 } else { 3214 if (stcb->asoc.hb_ect_randombit > 7) { 3215 stcb->asoc.hb_ect_randombit = 0; 3216 stcb->asoc.hb_random_idx++; 3217 if (stcb->asoc.hb_random_idx > 3) { 3218 goto warp_drive_sa; 3219 } 3220 } 3221 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx]; 3222 } 3223 if ((this_random >> stcb->asoc.hb_ect_randombit) & 0x01) { 3224 if (chk != NULL) 3225 /* ECN Nonce stuff */ 3226 chk->rec.data.ect_nonce = SCTP_ECT1_BIT; 3227 stcb->asoc.hb_ect_randombit++; 3228 return (SCTP_ECT1_BIT); 3229 } else { 3230 stcb->asoc.hb_ect_randombit++; 3231 return (SCTP_ECT0_BIT); 3232 } 3233 } 3234 3235 static int 3236 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, 3237 struct sctp_tcb *stcb, /* may be NULL */ 3238 struct sctp_nets *net, 3239 struct sockaddr *to, 3240 struct mbuf *m, 3241 uint32_t auth_offset, 3242 struct sctp_auth_chunk *auth, 3243 int nofragment_flag, 3244 int ecn_ok, 3245 struct sctp_tmit_chunk *chk, 3246 int out_of_asoc_ok) 3247 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */ 3248 { 3249 /* 3250 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet 3251 * header WITH an SCTPHDR but no IP header, endpoint 
inp and sa 3252 * structure: - fill in the HMAC digest of any AUTH chunk in the 3253 * packet. - calculate and fill in the SCTP checksum. - prepend an 3254 * IP address header. - if boundall use INADDR_ANY. - if 3255 * boundspecific do source address selection. - set fragmentation 3256 * option for ipV4. - On return from IP output, check/adjust mtu 3257 * size of output interface and smallest_mtu size as well. 3258 */ 3259 /* Will need ifdefs around this */ 3260 struct mbuf *o_pak; 3261 struct mbuf *newm; 3262 struct sctphdr *sctphdr; 3263 int packet_length; 3264 uint32_t csum; 3265 int ret; 3266 uint32_t vrf_id; 3267 sctp_route_t *ro = NULL; 3268 3269 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) { 3270 sctp_m_freem(m); 3271 return (EFAULT); 3272 } 3273 if (stcb) { 3274 vrf_id = stcb->asoc.vrf_id; 3275 } else { 3276 vrf_id = inp->def_vrf_id; 3277 } 3278 3279 /* fill in the HMAC digest for any AUTH chunk in the packet */ 3280 if ((auth != NULL) && (stcb != NULL)) { 3281 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb); 3282 } 3283 /* Calculate the csum and fill in the length of the packet */ 3284 sctphdr = mtod(m, struct sctphdr *); 3285 if (sctp_no_csum_on_loopback && 3286 (stcb) && 3287 (stcb->asoc.loopback_scope)) { 3288 sctphdr->checksum = 0; 3289 /* 3290 * This can probably now be taken out since my audit shows 3291 * no more bad pktlen's coming in. But we will wait a while 3292 * yet. 
3293 */ 3294 packet_length = sctp_calculate_len(m); 3295 } else { 3296 sctphdr->checksum = 0; 3297 csum = sctp_calculate_sum(m, &packet_length, 0); 3298 sctphdr->checksum = csum; 3299 } 3300 3301 if (to->sa_family == AF_INET) { 3302 struct ip *ip = NULL; 3303 sctp_route_t iproute; 3304 uint8_t tos_value; 3305 3306 newm = sctp_get_mbuf_for_msg(sizeof(struct ip), 1, M_DONTWAIT, 1, MT_DATA); 3307 if (newm == NULL) { 3308 sctp_m_freem(m); 3309 return (ENOMEM); 3310 } 3311 SCTP_ALIGN_TO_END(newm, sizeof(struct ip)); 3312 SCTP_BUF_LEN(newm) = sizeof(struct ip); 3313 packet_length += sizeof(struct ip); 3314 SCTP_BUF_NEXT(newm) = m; 3315 m = newm; 3316 ip = mtod(m, struct ip *); 3317 ip->ip_v = IPVERSION; 3318 ip->ip_hl = (sizeof(struct ip) >> 2); 3319 if (net) { 3320 tos_value = net->tos_flowlabel & 0x000000ff; 3321 } else { 3322 tos_value = inp->ip_inp.inp.inp_ip_tos; 3323 } 3324 if (nofragment_flag) { 3325 #if defined(WITH_CONVERT_IP_OFF) || defined(__FreeBSD__) || defined(__APPLE__) 3326 ip->ip_off = IP_DF; 3327 #else 3328 ip->ip_off = htons(IP_DF); 3329 #endif 3330 } else 3331 ip->ip_off = 0; 3332 3333 /* FreeBSD has a function for ip_id's */ 3334 ip->ip_id = ip_newid(); 3335 3336 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl; 3337 ip->ip_len = packet_length; 3338 if (stcb) { 3339 if ((stcb->asoc.ecn_allowed) && ecn_ok) { 3340 /* Enable ECN */ 3341 ip->ip_tos = ((u_char)(tos_value & 0xfc) | sctp_get_ect(stcb, chk)); 3342 } else { 3343 /* No ECN */ 3344 ip->ip_tos = (u_char)(tos_value & 0xfc); 3345 } 3346 } else { 3347 /* no association at all */ 3348 ip->ip_tos = (tos_value & 0xfc); 3349 } 3350 ip->ip_p = IPPROTO_SCTP; 3351 ip->ip_sum = 0; 3352 if (net == NULL) { 3353 ro = &iproute; 3354 memset(&iproute, 0, sizeof(iproute)); 3355 memcpy(&ro->ro_dst, to, to->sa_len); 3356 } else { 3357 ro = (sctp_route_t *) & net->ro; 3358 } 3359 /* Now the address selection part */ 3360 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr; 3361 3362 /* call the routine to select 
the src address */ 3363 if (net) { 3364 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) { 3365 sctp_free_ifa(net->ro._s_addr); 3366 net->ro._s_addr = NULL; 3367 net->src_addr_selected = 0; 3368 } 3369 if (net->src_addr_selected == 0) { 3370 /* Cache the source address */ 3371 net->ro._s_addr = sctp_source_address_selection(inp, stcb, 3372 ro, net, out_of_asoc_ok, 3373 vrf_id); 3374 net->src_addr_selected = 1; 3375 } 3376 if (net->ro._s_addr == NULL) { 3377 /* No route to host */ 3378 net->src_addr_selected = 0; 3379 goto no_route; 3380 } 3381 ip->ip_src = net->ro._s_addr->address.sin.sin_addr; 3382 } else { 3383 struct sctp_ifa *_lsrc; 3384 3385 _lsrc = sctp_source_address_selection(inp, 3386 stcb, ro, net, out_of_asoc_ok, vrf_id); 3387 if (_lsrc == NULL) { 3388 goto no_route; 3389 } 3390 ip->ip_src = _lsrc->address.sin.sin_addr; 3391 sctp_free_ifa(_lsrc); 3392 } 3393 3394 /* 3395 * If source address selection fails and we find no route 3396 * then the ip_output should fail as well with a 3397 * NO_ROUTE_TO_HOST type error. We probably should catch 3398 * that somewhere and abort the association right away 3399 * (assuming this is an INIT being sent). 3400 */ 3401 if ((ro->ro_rt == NULL)) { 3402 /* 3403 * src addr selection failed to find a route (or 3404 * valid source addr), so we can't get there from 3405 * here (yet)! 
3406 */ 3407 no_route: 3408 SCTPDBG(SCTP_DEBUG_OUTPUT1, 3409 "%s: dropped packet - no valid source addr\n", 3410 __FUNCTION__); 3411 if (net) { 3412 SCTPDBG(SCTP_DEBUG_OUTPUT1, 3413 "Destination was "); 3414 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, 3415 &net->ro._l_addr.sa); 3416 if (net->dest_state & SCTP_ADDR_CONFIRMED) { 3417 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) { 3418 SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", net); 3419 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, 3420 stcb, 3421 SCTP_FAILED_THRESHOLD, 3422 (void *)net); 3423 net->dest_state &= ~SCTP_ADDR_REACHABLE; 3424 net->dest_state |= SCTP_ADDR_NOT_REACHABLE; 3425 } 3426 } 3427 if (stcb) { 3428 if (net == stcb->asoc.primary_destination) { 3429 /* need a new primary */ 3430 struct sctp_nets *alt; 3431 3432 alt = sctp_find_alternate_net(stcb, net, 0); 3433 if (alt != net) { 3434 if (sctp_set_primary_addr(stcb, 3435 (struct sockaddr *)NULL, 3436 alt) == 0) { 3437 net->dest_state |= SCTP_ADDR_WAS_PRIMARY; 3438 if (net->ro._s_addr) { 3439 sctp_free_ifa(net->ro._s_addr); 3440 net->ro._s_addr = NULL; 3441 } 3442 net->src_addr_selected = 0; 3443 } 3444 } 3445 } 3446 } 3447 } 3448 sctp_m_freem(m); 3449 return (EHOSTUNREACH); 3450 } 3451 if (ro != &iproute) { 3452 memcpy(&iproute, ro, sizeof(*ro)); 3453 } 3454 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n", 3455 (uint32_t) (ntohl(ip->ip_src.s_addr))); 3456 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n", 3457 (uint32_t) (ntohl(ip->ip_dst.s_addr))); 3458 SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n", 3459 ro->ro_rt); 3460 3461 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) { 3462 /* failed to prepend data, give up */ 3463 sctp_m_freem(m); 3464 return (ENOMEM); 3465 } 3466 SCTP_ATTACH_CHAIN(o_pak, m, packet_length); 3467 3468 /* send it out. 
table id is taken from stcb */ 3469 SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id, 0); 3470 3471 SCTP_STAT_INCR(sctps_sendpackets); 3472 SCTP_STAT_INCR_COUNTER64(sctps_outpackets); 3473 if (ret) 3474 SCTP_STAT_INCR(sctps_senderrors); 3475 3476 SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret); 3477 if (net == NULL) { 3478 /* free tempy routes */ 3479 if (ro->ro_rt) { 3480 RTFREE(ro->ro_rt); 3481 ro->ro_rt = NULL; 3482 } 3483 } else { 3484 /* PMTU check versus smallest asoc MTU goes here */ 3485 if (ro->ro_rt != NULL) { 3486 uint32_t mtu; 3487 3488 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt); 3489 if (mtu && 3490 (stcb->asoc.smallest_mtu > mtu)) { 3491 #ifdef SCTP_PRINT_FOR_B_AND_M 3492 SCTP_PRINTF("sctp_mtu_size_reset called after ip_output mtu-change:%d\n", 3493 mtu); 3494 #endif 3495 sctp_mtu_size_reset(inp, &stcb->asoc, mtu); 3496 net->mtu = mtu; 3497 } 3498 } else { 3499 /* route was freed */ 3500 if (net->ro._s_addr && 3501 net->src_addr_selected) { 3502 sctp_free_ifa(net->ro._s_addr); 3503 net->ro._s_addr = NULL; 3504 } 3505 net->src_addr_selected = 0; 3506 } 3507 } 3508 return (ret); 3509 } 3510 #ifdef INET6 3511 else if (to->sa_family == AF_INET6) { 3512 uint32_t flowlabel; 3513 struct ip6_hdr *ip6h; 3514 struct route_in6 ip6route; 3515 struct ifnet *ifp; 3516 u_char flowTop; 3517 uint16_t flowBottom; 3518 u_char tosBottom, tosTop; 3519 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp; 3520 int prev_scope = 0; 3521 struct sockaddr_in6 lsa6_storage; 3522 int error; 3523 u_short prev_port = 0; 3524 3525 if (net != NULL) { 3526 flowlabel = net->tos_flowlabel; 3527 } else { 3528 flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo; 3529 } 3530 3531 newm = sctp_get_mbuf_for_msg(sizeof(struct ip6_hdr), 1, M_DONTWAIT, 1, MT_DATA); 3532 if (newm == NULL) { 3533 sctp_m_freem(m); 3534 return (ENOMEM); 3535 } 3536 SCTP_ALIGN_TO_END(newm, sizeof(struct ip6_hdr)); 3537 SCTP_BUF_LEN(newm) = sizeof(struct ip6_hdr); 3538 
packet_length += sizeof(struct ip6_hdr); 3539 SCTP_BUF_NEXT(newm) = m; 3540 m = newm; 3541 3542 ip6h = mtod(m, struct ip6_hdr *); 3543 /* 3544 * We assume here that inp_flow is in host byte order within 3545 * the TCB! 3546 */ 3547 flowBottom = flowlabel & 0x0000ffff; 3548 flowTop = ((flowlabel & 0x000f0000) >> 16); 3549 tosTop = (((flowlabel & 0xf0) >> 4) | IPV6_VERSION); 3550 /* protect *sin6 from overwrite */ 3551 sin6 = (struct sockaddr_in6 *)to; 3552 tmp = *sin6; 3553 sin6 = &tmp; 3554 3555 /* KAME hack: embed scopeid */ 3556 if (sa6_embedscope(sin6, ip6_use_defzone) != 0) 3557 return (EINVAL); 3558 if (net == NULL) { 3559 memset(&ip6route, 0, sizeof(ip6route)); 3560 ro = (sctp_route_t *) & ip6route; 3561 memcpy(&ro->ro_dst, sin6, sin6->sin6_len); 3562 } else { 3563 ro = (sctp_route_t *) & net->ro; 3564 } 3565 if (stcb != NULL) { 3566 if ((stcb->asoc.ecn_allowed) && ecn_ok) { 3567 /* Enable ECN */ 3568 tosBottom = (((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) | sctp_get_ect(stcb, chk)) << 4); 3569 } else { 3570 /* No ECN */ 3571 tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4); 3572 } 3573 } else { 3574 /* we could get no asoc if it is a O-O-T-B packet */ 3575 tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4); 3576 } 3577 ip6h->ip6_flow = htonl(((tosTop << 24) | ((tosBottom | flowTop) << 16) | flowBottom)); 3578 ip6h->ip6_nxt = IPPROTO_SCTP; 3579 ip6h->ip6_plen = (packet_length - sizeof(struct ip6_hdr)); 3580 ip6h->ip6_dst = sin6->sin6_addr; 3581 3582 /* 3583 * Add SRC address selection here: we can only reuse to a 3584 * limited degree the kame src-addr-sel, since we can try 3585 * their selection but it may not be bound. 
3586 */ 3587 bzero(&lsa6_tmp, sizeof(lsa6_tmp)); 3588 lsa6_tmp.sin6_family = AF_INET6; 3589 lsa6_tmp.sin6_len = sizeof(lsa6_tmp); 3590 lsa6 = &lsa6_tmp; 3591 if (net) { 3592 if (net->ro._s_addr && net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED) { 3593 sctp_free_ifa(net->ro._s_addr); 3594 net->ro._s_addr = NULL; 3595 net->src_addr_selected = 0; 3596 } 3597 if (net->src_addr_selected == 0) { 3598 /* Cache the source address */ 3599 net->ro._s_addr = sctp_source_address_selection(inp, 3600 stcb, 3601 ro, 3602 net, 3603 out_of_asoc_ok, 3604 vrf_id); 3605 net->src_addr_selected = 1; 3606 } 3607 if (net->ro._s_addr == NULL) { 3608 SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n"); 3609 net->src_addr_selected = 0; 3610 goto no_route; 3611 } 3612 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr; 3613 } else { 3614 struct sctp_ifa *_lsrc; 3615 3616 _lsrc = sctp_source_address_selection(inp, stcb, ro, net, out_of_asoc_ok, vrf_id); 3617 if (_lsrc == NULL) { 3618 goto no_route; 3619 } 3620 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr; 3621 sctp_free_ifa(_lsrc); 3622 } 3623 lsa6->sin6_port = inp->sctp_lport; 3624 3625 if ((ro->ro_rt == NULL)) { 3626 /* 3627 * src addr selection failed to find a route (or 3628 * valid source addr), so we can't get there from 3629 * here! 3630 */ 3631 goto no_route; 3632 } 3633 /* 3634 * XXX: sa6 may not have a valid sin6_scope_id in the 3635 * non-SCOPEDROUTING case. 
3636 */ 3637 bzero(&lsa6_storage, sizeof(lsa6_storage)); 3638 lsa6_storage.sin6_family = AF_INET6; 3639 lsa6_storage.sin6_len = sizeof(lsa6_storage); 3640 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) { 3641 sctp_m_freem(m); 3642 return (error); 3643 } 3644 /* XXX */ 3645 lsa6_storage.sin6_addr = lsa6->sin6_addr; 3646 lsa6_storage.sin6_port = inp->sctp_lport; 3647 lsa6 = &lsa6_storage; 3648 ip6h->ip6_src = lsa6->sin6_addr; 3649 3650 /* 3651 * We set the hop limit now since there is a good chance 3652 * that our ro pointer is now filled 3653 */ 3654 ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro); 3655 ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro); 3656 3657 #ifdef SCTP_DEBUG 3658 /* Copy to be sure something bad is not happening */ 3659 sin6->sin6_addr = ip6h->ip6_dst; 3660 lsa6->sin6_addr = ip6h->ip6_src; 3661 #endif 3662 3663 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n"); 3664 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: "); 3665 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6); 3666 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: "); 3667 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6); 3668 if (net) { 3669 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 3670 /* preserve the port and scope for link local send */ 3671 prev_scope = sin6->sin6_scope_id; 3672 prev_port = sin6->sin6_port; 3673 } 3674 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) { 3675 /* failed to prepend data, give up */ 3676 sctp_m_freem(m); 3677 return (ENOMEM); 3678 } 3679 SCTP_ATTACH_CHAIN(o_pak, m, packet_length); 3680 3681 /* send it out. 
table id is taken from stcb */ 3682 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, 3683 stcb, vrf_id, 0); 3684 3685 if (net) { 3686 /* for link local this must be done */ 3687 sin6->sin6_scope_id = prev_scope; 3688 sin6->sin6_port = prev_port; 3689 } 3690 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret); 3691 SCTP_STAT_INCR(sctps_sendpackets); 3692 SCTP_STAT_INCR_COUNTER64(sctps_outpackets); 3693 if (ret) { 3694 SCTP_STAT_INCR(sctps_senderrors); 3695 } 3696 if (net == NULL) { 3697 /* Now if we had a temp route free it */ 3698 if (ro->ro_rt) { 3699 RTFREE(ro->ro_rt); 3700 } 3701 } else { 3702 /* PMTU check versus smallest asoc MTU goes here */ 3703 if (ro->ro_rt == NULL) { 3704 /* Route was freed */ 3705 if (net->ro._s_addr && 3706 net->src_addr_selected) { 3707 sctp_free_ifa(net->ro._s_addr); 3708 net->ro._s_addr = NULL; 3709 } 3710 net->src_addr_selected = 0; 3711 } 3712 if (ro->ro_rt != NULL) { 3713 uint32_t mtu; 3714 3715 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt); 3716 if (mtu && 3717 (stcb->asoc.smallest_mtu > mtu)) { 3718 #ifdef SCTP_PRINT_FOR_B_AND_M 3719 SCTP_PRINTF("sctp_mtu_size_reset called after ip6_output mtu-change:%d\n", 3720 mtu); 3721 #endif 3722 sctp_mtu_size_reset(inp, &stcb->asoc, mtu); 3723 net->mtu = mtu; 3724 } 3725 } else if (ifp) { 3726 if (ND_IFINFO(ifp)->linkmtu && 3727 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) { 3728 #ifdef SCTP_PRINT_FOR_B_AND_M 3729 SCTP_PRINTF("sctp_mtu_size_reset called via ifp ND_IFINFO() linkmtu:%d\n", 3730 ND_IFINFO(ifp)->linkmtu); 3731 #endif 3732 sctp_mtu_size_reset(inp, 3733 &stcb->asoc, 3734 ND_IFINFO(ifp)->linkmtu); 3735 } 3736 } 3737 } 3738 return (ret); 3739 } 3740 #endif 3741 else { 3742 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n", 3743 ((struct sockaddr *)to)->sa_family); 3744 sctp_m_freem(m); 3745 return (EFAULT); 3746 } 3747 } 3748 3749 3750 void 3751 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb 
*stcb) 3752 { 3753 struct mbuf *m, *m_at, *mp_last; 3754 struct sctp_nets *net; 3755 struct sctp_init_msg *initm; 3756 struct sctp_supported_addr_param *sup_addr; 3757 struct sctp_ecn_supported_param *ecn; 3758 struct sctp_prsctp_supported_param *prsctp; 3759 struct sctp_ecn_nonce_supported_param *ecn_nonce; 3760 struct sctp_supported_chunk_types_param *pr_supported; 3761 int cnt_inits_to = 0; 3762 int padval, ret; 3763 int num_ext; 3764 int p_len; 3765 3766 /* INIT's always go to the primary (and usually ONLY address) */ 3767 mp_last = NULL; 3768 net = stcb->asoc.primary_destination; 3769 if (net == NULL) { 3770 net = TAILQ_FIRST(&stcb->asoc.nets); 3771 if (net == NULL) { 3772 /* TSNH */ 3773 return; 3774 } 3775 /* we confirm any address we send an INIT to */ 3776 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 3777 (void)sctp_set_primary_addr(stcb, NULL, net); 3778 } else { 3779 /* we confirm any address we send an INIT to */ 3780 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 3781 } 3782 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n"); 3783 if (((struct sockaddr *)&(net->ro._l_addr))->sa_family == AF_INET6) { 3784 /* 3785 * special hook, if we are sending to link local it will not 3786 * show up in our private address count. 3787 */ 3788 struct sockaddr_in6 *sin6l; 3789 3790 sin6l = &net->ro._l_addr.sin6; 3791 if (IN6_IS_ADDR_LINKLOCAL(&sin6l->sin6_addr)) 3792 cnt_inits_to = 1; 3793 } 3794 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 3795 /* This case should not happen */ 3796 return; 3797 } 3798 /* start the INIT timer */ 3799 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net); 3800 3801 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA); 3802 if (m == NULL) { 3803 /* No memory, INIT timer will re-attempt. 
*/ 3804 return; 3805 } 3806 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_msg); 3807 /* Now lets put the SCTP header in place */ 3808 initm = mtod(m, struct sctp_init_msg *); 3809 initm->sh.src_port = inp->sctp_lport; 3810 initm->sh.dest_port = stcb->rport; 3811 initm->sh.v_tag = 0; 3812 initm->sh.checksum = 0; /* calculate later */ 3813 /* now the chunk header */ 3814 initm->msg.ch.chunk_type = SCTP_INITIATION; 3815 initm->msg.ch.chunk_flags = 0; 3816 /* fill in later from mbuf we build */ 3817 initm->msg.ch.chunk_length = 0; 3818 /* place in my tag */ 3819 initm->msg.init.initiate_tag = htonl(stcb->asoc.my_vtag); 3820 /* set up some of the credits. */ 3821 initm->msg.init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), 3822 SCTP_MINIMAL_RWND)); 3823 3824 initm->msg.init.num_outbound_streams = htons(stcb->asoc.pre_open_streams); 3825 initm->msg.init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams); 3826 initm->msg.init.initial_tsn = htonl(stcb->asoc.init_seq_number); 3827 /* now the address restriction */ 3828 sup_addr = (struct sctp_supported_addr_param *)((caddr_t)initm + 3829 sizeof(*initm)); 3830 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE); 3831 /* we support 2 types IPv6/IPv4 */ 3832 sup_addr->ph.param_length = htons(sizeof(*sup_addr) + 3833 sizeof(uint16_t)); 3834 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS); 3835 sup_addr->addr_type[1] = htons(SCTP_IPV6_ADDRESS); 3836 SCTP_BUF_LEN(m) += sizeof(*sup_addr) + sizeof(uint16_t); 3837 3838 if (inp->sctp_ep.adaptation_layer_indicator) { 3839 struct sctp_adaptation_layer_indication *ali; 3840 3841 ali = (struct sctp_adaptation_layer_indication *)( 3842 (caddr_t)sup_addr + sizeof(*sup_addr) + sizeof(uint16_t)); 3843 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION); 3844 ali->ph.param_length = htons(sizeof(*ali)); 3845 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator); 3846 SCTP_BUF_LEN(m) += sizeof(*ali); 3847 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + 
3848 sizeof(*ali)); 3849 } else { 3850 ecn = (struct sctp_ecn_supported_param *)((caddr_t)sup_addr + 3851 sizeof(*sup_addr) + sizeof(uint16_t)); 3852 } 3853 3854 /* now any cookie time extensions */ 3855 if (stcb->asoc.cookie_preserve_req) { 3856 struct sctp_cookie_perserve_param *cookie_preserve; 3857 3858 cookie_preserve = (struct sctp_cookie_perserve_param *)(ecn); 3859 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE); 3860 cookie_preserve->ph.param_length = htons( 3861 sizeof(*cookie_preserve)); 3862 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req); 3863 SCTP_BUF_LEN(m) += sizeof(*cookie_preserve); 3864 ecn = (struct sctp_ecn_supported_param *)( 3865 (caddr_t)cookie_preserve + sizeof(*cookie_preserve)); 3866 stcb->asoc.cookie_preserve_req = 0; 3867 } 3868 /* ECN parameter */ 3869 if (sctp_ecn_enable == 1) { 3870 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE); 3871 ecn->ph.param_length = htons(sizeof(*ecn)); 3872 SCTP_BUF_LEN(m) += sizeof(*ecn); 3873 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn + 3874 sizeof(*ecn)); 3875 } else { 3876 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn); 3877 } 3878 /* And now tell the peer we do pr-sctp */ 3879 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED); 3880 prsctp->ph.param_length = htons(sizeof(*prsctp)); 3881 SCTP_BUF_LEN(m) += sizeof(*prsctp); 3882 3883 /* And now tell the peer we do all the extensions */ 3884 pr_supported = (struct sctp_supported_chunk_types_param *) 3885 ((caddr_t)prsctp + sizeof(*prsctp)); 3886 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT); 3887 num_ext = 0; 3888 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF; 3889 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK; 3890 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN; 3891 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED; 3892 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET; 3893 if (!sctp_auth_disable) 3894 
pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION; 3895 p_len = sizeof(*pr_supported) + num_ext; 3896 pr_supported->ph.param_length = htons(p_len); 3897 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len); 3898 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 3899 3900 /* ECN nonce: And now tell the peer we support ECN nonce */ 3901 if (sctp_ecn_nonce) { 3902 ecn_nonce = (struct sctp_ecn_nonce_supported_param *) 3903 ((caddr_t)pr_supported + SCTP_SIZE32(p_len)); 3904 ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED); 3905 ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce)); 3906 SCTP_BUF_LEN(m) += sizeof(*ecn_nonce); 3907 } 3908 /* add authentication parameters */ 3909 if (!sctp_auth_disable) { 3910 struct sctp_auth_random *random; 3911 struct sctp_auth_hmac_algo *hmacs; 3912 struct sctp_auth_chunk_list *chunks; 3913 3914 /* attach RANDOM parameter, if available */ 3915 if (stcb->asoc.authinfo.random != NULL) { 3916 random = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 3917 p_len = sizeof(*random) + stcb->asoc.authinfo.random_len; 3918 #ifdef SCTP_AUTH_DRAFT_04 3919 random->ph.param_type = htons(SCTP_RANDOM); 3920 random->ph.param_length = htons(p_len); 3921 bcopy(stcb->asoc.authinfo.random->key, 3922 random->random_data, 3923 stcb->asoc.authinfo.random_len); 3924 #else 3925 /* random key already contains the header */ 3926 bcopy(stcb->asoc.authinfo.random->key, random, p_len); 3927 #endif 3928 /* zero out any padding required */ 3929 bzero((caddr_t)random + p_len, SCTP_SIZE32(p_len) - p_len); 3930 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 3931 } 3932 /* add HMAC_ALGO parameter */ 3933 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 3934 p_len = sctp_serialize_hmaclist(stcb->asoc.local_hmacs, 3935 (uint8_t *) hmacs->hmac_ids); 3936 if (p_len > 0) { 3937 p_len += sizeof(*hmacs); 3938 hmacs->ph.param_type = htons(SCTP_HMAC_LIST); 3939 hmacs->ph.param_length = htons(p_len); 3940 /* zero out any padding 
required */ 3941 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len); 3942 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 3943 } 3944 /* add CHUNKS parameter */ 3945 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 3946 p_len = sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, 3947 chunks->chunk_types); 3948 if (p_len > 0) { 3949 p_len += sizeof(*chunks); 3950 chunks->ph.param_type = htons(SCTP_CHUNK_LIST); 3951 chunks->ph.param_length = htons(p_len); 3952 /* zero out any padding required */ 3953 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len); 3954 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 3955 } 3956 } 3957 m_at = m; 3958 /* now the addresses */ 3959 { 3960 struct sctp_scoping scp; 3961 3962 /* 3963 * To optimize this we could put the scoping stuff into a 3964 * structure and remove the individual uint8's from the 3965 * assoc structure. Then we could just sifa in the address 3966 * within the stcb.. but for now this is a quick hack to get 3967 * the address stuff teased apart. 3968 */ 3969 scp.ipv4_addr_legal = stcb->asoc.ipv4_addr_legal; 3970 scp.ipv6_addr_legal = stcb->asoc.ipv6_addr_legal; 3971 scp.loopback_scope = stcb->asoc.loopback_scope; 3972 scp.ipv4_local_scope = stcb->asoc.ipv4_local_scope; 3973 scp.local_scope = stcb->asoc.local_scope; 3974 scp.site_scope = stcb->asoc.site_scope; 3975 3976 m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to); 3977 } 3978 3979 /* calulate the size and update pkt header and chunk header */ 3980 p_len = 0; 3981 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 3982 if (SCTP_BUF_NEXT(m_at) == NULL) 3983 mp_last = m_at; 3984 p_len += SCTP_BUF_LEN(m_at); 3985 } 3986 initm->msg.ch.chunk_length = htons((p_len - sizeof(struct sctphdr))); 3987 /* 3988 * We sifa 0 here to NOT set IP_DF if its IPv4, we ignore the return 3989 * here since the timer will drive a retranmission. 
3990 */ 3991 3992 /* I don't expect this to execute but we will be safe here */ 3993 padval = p_len % 4; 3994 if ((padval) && (mp_last)) { 3995 /* 3996 * The compiler worries that mp_last may not be set even 3997 * though I think it is impossible :-> however we add 3998 * mp_last here just in case. 3999 */ 4000 int ret; 4001 4002 ret = sctp_add_pad_tombuf(mp_last, (4 - padval)); 4003 if (ret) { 4004 /* Houston we have a problem, no space */ 4005 sctp_m_freem(m); 4006 return; 4007 } 4008 p_len += padval; 4009 } 4010 ret = sctp_lowlevel_chunk_output(inp, stcb, net, 4011 (struct sockaddr *)&net->ro._l_addr, 4012 m, 0, NULL, 0, 0, NULL, 0); 4013 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 4014 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net); 4015 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 4016 } 4017 4018 struct mbuf * 4019 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt, 4020 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp) 4021 { 4022 /* 4023 * Given a mbuf containing an INIT or INIT-ACK with the param_offset 4024 * being equal to the beginning of the params i.e. (iphlen + 4025 * sizeof(struct sctp_init_msg) parse through the parameters to the 4026 * end of the mbuf verifying that all parameters are known. 4027 * 4028 * For unknown parameters build and return a mbuf with 4029 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop 4030 * processing this chunk stop, and set *abort_processing to 1. 4031 * 4032 * By having param_offset be pre-set to where parameters begin it is 4033 * hoped that this routine may be reused in the future by new 4034 * features. 
4035 */ 4036 struct sctp_paramhdr *phdr, params; 4037 4038 struct mbuf *mat, *op_err; 4039 char tempbuf[SCTP_PARAM_BUFFER_SIZE]; 4040 int at, limit, pad_needed; 4041 uint16_t ptype, plen, padded_size; 4042 int err_at; 4043 4044 *abort_processing = 0; 4045 mat = in_initpkt; 4046 err_at = 0; 4047 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk); 4048 at = param_offset; 4049 op_err = NULL; 4050 4051 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params)); 4052 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) { 4053 ptype = ntohs(phdr->param_type); 4054 plen = ntohs(phdr->param_length); 4055 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) { 4056 /* wacked parameter */ 4057 goto invalid_size; 4058 } 4059 limit -= SCTP_SIZE32(plen); 4060 /*- 4061 * All parameters for all chunks that we know/understand are 4062 * listed here. We process them other places and make 4063 * appropriate stop actions per the upper bits. However this 4064 * is the generic routine processor's can call to get back 4065 * an operr.. to either incorporate (init-ack) or send. 
4066 */ 4067 padded_size = SCTP_SIZE32(plen); 4068 switch (ptype) { 4069 /* Param's with variable size */ 4070 case SCTP_HEARTBEAT_INFO: 4071 case SCTP_STATE_COOKIE: 4072 case SCTP_UNRECOG_PARAM: 4073 case SCTP_ERROR_CAUSE_IND: 4074 /* ok skip fwd */ 4075 at += padded_size; 4076 break; 4077 /* Param's with variable size within a range */ 4078 case SCTP_CHUNK_LIST: 4079 case SCTP_SUPPORTED_CHUNK_EXT: 4080 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) { 4081 goto invalid_size; 4082 } 4083 at += padded_size; 4084 break; 4085 case SCTP_SUPPORTED_ADDRTYPE: 4086 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) { 4087 goto invalid_size; 4088 } 4089 at += padded_size; 4090 break; 4091 case SCTP_RANDOM: 4092 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) { 4093 goto invalid_size; 4094 } 4095 at += padded_size; 4096 break; 4097 case SCTP_SET_PRIM_ADDR: 4098 case SCTP_DEL_IP_ADDRESS: 4099 case SCTP_ADD_IP_ADDRESS: 4100 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) && 4101 (padded_size != sizeof(struct sctp_asconf_addr_param))) { 4102 goto invalid_size; 4103 } 4104 at += padded_size; 4105 break; 4106 /* Param's with a fixed size */ 4107 case SCTP_IPV4_ADDRESS: 4108 if (padded_size != sizeof(struct sctp_ipv4addr_param)) { 4109 goto invalid_size; 4110 } 4111 at += padded_size; 4112 break; 4113 case SCTP_IPV6_ADDRESS: 4114 if (padded_size != sizeof(struct sctp_ipv6addr_param)) { 4115 goto invalid_size; 4116 } 4117 at += padded_size; 4118 break; 4119 case SCTP_COOKIE_PRESERVE: 4120 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) { 4121 goto invalid_size; 4122 } 4123 at += padded_size; 4124 break; 4125 case SCTP_ECN_NONCE_SUPPORTED: 4126 case SCTP_PRSCTP_SUPPORTED: 4127 if (padded_size != sizeof(struct sctp_paramhdr)) { 4128 goto invalid_size; 4129 } 4130 at += padded_size; 4131 break; 4132 case SCTP_ECN_CAPABLE: 4133 if (padded_size != sizeof(struct 
sctp_ecn_supported_param)) { 4134 goto invalid_size; 4135 } 4136 at += padded_size; 4137 break; 4138 case SCTP_ULP_ADAPTATION: 4139 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) { 4140 goto invalid_size; 4141 } 4142 at += padded_size; 4143 break; 4144 case SCTP_SUCCESS_REPORT: 4145 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) { 4146 goto invalid_size; 4147 } 4148 at += padded_size; 4149 break; 4150 case SCTP_HOSTNAME_ADDRESS: 4151 { 4152 /* We can NOT handle HOST NAME addresses!! */ 4153 int l_len; 4154 4155 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Can't handle hostname addresses.. abort processing\n"); 4156 *abort_processing = 1; 4157 if (op_err == NULL) { 4158 /* Ok need to try to get a mbuf */ 4159 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 4160 l_len += plen; 4161 l_len += sizeof(struct sctp_paramhdr); 4162 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA); 4163 if (op_err) { 4164 SCTP_BUF_LEN(op_err) = 0; 4165 /* 4166 * pre-reserve space for ip 4167 * and sctp header and 4168 * chunk hdr 4169 */ 4170 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 4171 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 4172 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 4173 } 4174 } 4175 if (op_err) { 4176 /* If we have space */ 4177 struct sctp_paramhdr s; 4178 4179 if (err_at % 4) { 4180 uint32_t cpthis = 0; 4181 4182 pad_needed = 4 - (err_at % 4); 4183 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis); 4184 err_at += pad_needed; 4185 } 4186 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR); 4187 s.param_length = htons(sizeof(s) + plen); 4188 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s); 4189 err_at += sizeof(s); 4190 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen)); 4191 if (phdr == NULL) { 4192 sctp_m_freem(op_err); 4193 /* 4194 * we are out of memory but 4195 * we still need to have a 4196 * look at what to do (the 4197 
* system is in trouble 4198 * though). 4199 */ 4200 return (NULL); 4201 } 4202 m_copyback(op_err, err_at, plen, (caddr_t)phdr); 4203 err_at += plen; 4204 } 4205 return (op_err); 4206 break; 4207 } 4208 default: 4209 /* 4210 * we do not recognize the parameter figure out what 4211 * we do. 4212 */ 4213 if ((ptype & 0x4000) == 0x4000) { 4214 /* Report bit is set?? */ 4215 if (op_err == NULL) { 4216 int l_len; 4217 4218 /* Ok need to try to get an mbuf */ 4219 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 4220 l_len += plen; 4221 l_len += sizeof(struct sctp_paramhdr); 4222 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA); 4223 if (op_err) { 4224 SCTP_BUF_LEN(op_err) = 0; 4225 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 4226 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 4227 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 4228 } 4229 } 4230 if (op_err) { 4231 /* If we have space */ 4232 struct sctp_paramhdr s; 4233 4234 if (err_at % 4) { 4235 uint32_t cpthis = 0; 4236 4237 pad_needed = 4 - (err_at % 4); 4238 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis); 4239 err_at += pad_needed; 4240 } 4241 s.param_type = htons(SCTP_UNRECOG_PARAM); 4242 s.param_length = htons(sizeof(s) + plen); 4243 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s); 4244 err_at += sizeof(s); 4245 if (plen > sizeof(tempbuf)) { 4246 plen = sizeof(tempbuf); 4247 } 4248 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen)); 4249 if (phdr == NULL) { 4250 sctp_m_freem(op_err); 4251 /* 4252 * we are out of memory but 4253 * we still need to have a 4254 * look at what to do (the 4255 * system is in trouble 4256 * though). 
4257 */ 4258 op_err = NULL; 4259 goto more_processing; 4260 } 4261 m_copyback(op_err, err_at, plen, (caddr_t)phdr); 4262 err_at += plen; 4263 } 4264 } 4265 more_processing: 4266 if ((ptype & 0x8000) == 0x0000) { 4267 return (op_err); 4268 } else { 4269 /* skip this chunk and continue processing */ 4270 at += SCTP_SIZE32(plen); 4271 } 4272 break; 4273 4274 } 4275 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params)); 4276 } 4277 return (op_err); 4278 invalid_size: 4279 *abort_processing = 1; 4280 if ((op_err == NULL) && phdr) { 4281 int l_len; 4282 4283 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 4284 l_len += (2 * sizeof(struct sctp_paramhdr)); 4285 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA); 4286 if (op_err) { 4287 SCTP_BUF_LEN(op_err) = 0; 4288 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 4289 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 4290 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 4291 } 4292 } 4293 if ((op_err) && phdr) { 4294 struct sctp_paramhdr s; 4295 4296 if (err_at % 4) { 4297 uint32_t cpthis = 0; 4298 4299 pad_needed = 4 - (err_at % 4); 4300 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis); 4301 err_at += pad_needed; 4302 } 4303 s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 4304 s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr)); 4305 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s); 4306 err_at += sizeof(s); 4307 /* Only copy back the p-hdr that caused the issue */ 4308 m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr); 4309 } 4310 return (op_err); 4311 } 4312 4313 static int 4314 sctp_are_there_new_addresses(struct sctp_association *asoc, 4315 struct mbuf *in_initpkt, int iphlen, int offset) 4316 { 4317 /* 4318 * Given a INIT packet, look through the packet to verify that there 4319 * are NO new addresses. 
As we go through the parameters add reports 4320 * of any un-understood parameters that require an error. Also we 4321 * must return (1) to drop the packet if we see a un-understood 4322 * parameter that tells us to drop the chunk. 4323 */ 4324 struct sockaddr_in sin4, *sa4; 4325 struct sockaddr_in6 sin6, *sa6; 4326 struct sockaddr *sa_touse; 4327 struct sockaddr *sa; 4328 struct sctp_paramhdr *phdr, params; 4329 struct ip *iph; 4330 struct mbuf *mat; 4331 uint16_t ptype, plen; 4332 int err_at; 4333 uint8_t fnd; 4334 struct sctp_nets *net; 4335 4336 memset(&sin4, 0, sizeof(sin4)); 4337 memset(&sin6, 0, sizeof(sin6)); 4338 sin4.sin_family = AF_INET; 4339 sin4.sin_len = sizeof(sin4); 4340 sin6.sin6_family = AF_INET6; 4341 sin6.sin6_len = sizeof(sin6); 4342 4343 sa_touse = NULL; 4344 /* First what about the src address of the pkt ? */ 4345 iph = mtod(in_initpkt, struct ip *); 4346 if (iph->ip_v == IPVERSION) { 4347 /* source addr is IPv4 */ 4348 sin4.sin_addr = iph->ip_src; 4349 sa_touse = (struct sockaddr *)&sin4; 4350 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 4351 /* source addr is IPv6 */ 4352 struct ip6_hdr *ip6h; 4353 4354 ip6h = mtod(in_initpkt, struct ip6_hdr *); 4355 sin6.sin6_addr = ip6h->ip6_src; 4356 sa_touse = (struct sockaddr *)&sin6; 4357 } else { 4358 return (1); 4359 } 4360 4361 fnd = 0; 4362 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4363 sa = (struct sockaddr *)&net->ro._l_addr; 4364 if (sa->sa_family == sa_touse->sa_family) { 4365 if (sa->sa_family == AF_INET) { 4366 sa4 = (struct sockaddr_in *)sa; 4367 if (sa4->sin_addr.s_addr == 4368 sin4.sin_addr.s_addr) { 4369 fnd = 1; 4370 break; 4371 } 4372 } else if (sa->sa_family == AF_INET6) { 4373 sa6 = (struct sockaddr_in6 *)sa; 4374 if (SCTP6_ARE_ADDR_EQUAL(&sa6->sin6_addr, 4375 &sin6.sin6_addr)) { 4376 fnd = 1; 4377 break; 4378 } 4379 } 4380 } 4381 } 4382 if (fnd == 0) { 4383 /* New address added! no need to look futher. 
*/ 4384 return (1); 4385 } 4386 /* Ok so far lets munge through the rest of the packet */ 4387 mat = in_initpkt; 4388 err_at = 0; 4389 sa_touse = NULL; 4390 offset += sizeof(struct sctp_init_chunk); 4391 phdr = sctp_get_next_param(mat, offset, ¶ms, sizeof(params)); 4392 while (phdr) { 4393 ptype = ntohs(phdr->param_type); 4394 plen = ntohs(phdr->param_length); 4395 if (ptype == SCTP_IPV4_ADDRESS) { 4396 struct sctp_ipv4addr_param *p4, p4_buf; 4397 4398 phdr = sctp_get_next_param(mat, offset, 4399 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf)); 4400 if (plen != sizeof(struct sctp_ipv4addr_param) || 4401 phdr == NULL) { 4402 return (1); 4403 } 4404 p4 = (struct sctp_ipv4addr_param *)phdr; 4405 sin4.sin_addr.s_addr = p4->addr; 4406 sa_touse = (struct sockaddr *)&sin4; 4407 } else if (ptype == SCTP_IPV6_ADDRESS) { 4408 struct sctp_ipv6addr_param *p6, p6_buf; 4409 4410 phdr = sctp_get_next_param(mat, offset, 4411 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf)); 4412 if (plen != sizeof(struct sctp_ipv6addr_param) || 4413 phdr == NULL) { 4414 return (1); 4415 } 4416 p6 = (struct sctp_ipv6addr_param *)phdr; 4417 memcpy((caddr_t)&sin6.sin6_addr, p6->addr, 4418 sizeof(p6->addr)); 4419 sa_touse = (struct sockaddr *)&sin4; 4420 } 4421 if (sa_touse) { 4422 /* ok, sa_touse points to one to check */ 4423 fnd = 0; 4424 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4425 sa = (struct sockaddr *)&net->ro._l_addr; 4426 if (sa->sa_family != sa_touse->sa_family) { 4427 continue; 4428 } 4429 if (sa->sa_family == AF_INET) { 4430 sa4 = (struct sockaddr_in *)sa; 4431 if (sa4->sin_addr.s_addr == 4432 sin4.sin_addr.s_addr) { 4433 fnd = 1; 4434 break; 4435 } 4436 } else if (sa->sa_family == AF_INET6) { 4437 sa6 = (struct sockaddr_in6 *)sa; 4438 if (SCTP6_ARE_ADDR_EQUAL( 4439 &sa6->sin6_addr, &sin6.sin6_addr)) { 4440 fnd = 1; 4441 break; 4442 } 4443 } 4444 } 4445 if (!fnd) { 4446 /* New addr added! 
no need to look further */ 4447 return (1); 4448 } 4449 } 4450 offset += SCTP_SIZE32(plen); 4451 phdr = sctp_get_next_param(mat, offset, ¶ms, sizeof(params)); 4452 } 4453 return (0); 4454 } 4455 4456 /* 4457 * Given a MBUF chain that was sent into us containing an INIT. Build a 4458 * INIT-ACK with COOKIE and send back. We assume that the in_initpkt has done 4459 * a pullup to include IPv6/4header, SCTP header and initial part of INIT 4460 * message (i.e. the struct sctp_init_msg). 4461 */ 4462 void 4463 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4464 struct mbuf *init_pkt, int iphlen, int offset, struct sctphdr *sh, 4465 struct sctp_init_chunk *init_chk, uint32_t vrf_id, uint32_t table_id) 4466 { 4467 struct sctp_association *asoc; 4468 struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *mp_last; 4469 struct sctp_init_msg *initackm_out; 4470 struct sctp_ecn_supported_param *ecn; 4471 struct sctp_prsctp_supported_param *prsctp; 4472 struct sctp_ecn_nonce_supported_param *ecn_nonce; 4473 struct sctp_supported_chunk_types_param *pr_supported; 4474 struct sockaddr_storage store; 4475 struct sockaddr_in *sin; 4476 struct sockaddr_in6 *sin6; 4477 sctp_route_t *ro; 4478 struct ip *iph; 4479 struct ip6_hdr *ip6; 4480 struct sockaddr *to; 4481 struct sctp_state_cookie stc; 4482 struct sctp_nets *net = NULL; 4483 int cnt_inits_to = 0; 4484 uint16_t his_limit, i_want; 4485 int abort_flag, padval, sz_of; 4486 int num_ext; 4487 int p_len; 4488 4489 if (stcb) 4490 asoc = &stcb->asoc; 4491 else 4492 asoc = NULL; 4493 mp_last = NULL; 4494 if ((asoc != NULL) && 4495 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) && 4496 (sctp_are_there_new_addresses(asoc, init_pkt, iphlen, offset))) { 4497 /* new addresses, out of here in non-cookie-wait states */ 4498 /* 4499 * Send a ABORT, we don't add the new address error clause 4500 * though we even set the T bit and copy in the 0 tag.. this 4501 * looks no different than if no listener was present. 
4502 */ 4503 sctp_send_abort(init_pkt, iphlen, sh, 0, NULL, vrf_id, 4504 table_id); 4505 return; 4506 } 4507 abort_flag = 0; 4508 op_err = sctp_arethere_unrecognized_parameters(init_pkt, 4509 (offset + sizeof(struct sctp_init_chunk)), 4510 &abort_flag, (struct sctp_chunkhdr *)init_chk); 4511 if (abort_flag) { 4512 sctp_send_abort(init_pkt, iphlen, sh, 4513 init_chk->init.initiate_tag, op_err, vrf_id, 4514 table_id); 4515 return; 4516 } 4517 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 4518 if (m == NULL) { 4519 /* No memory, INIT timer will re-attempt. */ 4520 if (op_err) 4521 sctp_m_freem(op_err); 4522 return; 4523 } 4524 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_msg); 4525 4526 /* the time I built cookie */ 4527 (void)SCTP_GETTIME_TIMEVAL(&stc.time_entered); 4528 4529 /* populate any tie tags */ 4530 if (asoc != NULL) { 4531 /* unlock before tag selections */ 4532 stc.tie_tag_my_vtag = asoc->my_vtag_nonce; 4533 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce; 4534 stc.cookie_life = asoc->cookie_life; 4535 net = asoc->primary_destination; 4536 } else { 4537 stc.tie_tag_my_vtag = 0; 4538 stc.tie_tag_peer_vtag = 0; 4539 /* life I will award this cookie */ 4540 stc.cookie_life = inp->sctp_ep.def_cookie_life; 4541 } 4542 4543 /* copy in the ports for later check */ 4544 stc.myport = sh->dest_port; 4545 stc.peerport = sh->src_port; 4546 4547 /* 4548 * If we wanted to honor cookie life extentions, we would add to 4549 * stc.cookie_life. 
For now we should NOT honor any extension 4550 */ 4551 stc.site_scope = stc.local_scope = stc.loopback_scope = 0; 4552 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 4553 struct inpcb *in_inp; 4554 4555 /* Its a V6 socket */ 4556 in_inp = (struct inpcb *)inp; 4557 stc.ipv6_addr_legal = 1; 4558 /* Now look at the binding flag to see if V4 will be legal */ 4559 if (SCTP_IPV6_V6ONLY(in_inp) == 0) { 4560 stc.ipv4_addr_legal = 1; 4561 } else { 4562 /* V4 addresses are NOT legal on the association */ 4563 stc.ipv4_addr_legal = 0; 4564 } 4565 } else { 4566 /* Its a V4 socket, no - V6 */ 4567 stc.ipv4_addr_legal = 1; 4568 stc.ipv6_addr_legal = 0; 4569 } 4570 4571 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE 4572 stc.ipv4_scope = 1; 4573 #else 4574 stc.ipv4_scope = 0; 4575 #endif 4576 /* now for scope setup */ 4577 memset((caddr_t)&store, 0, sizeof(store)); 4578 sin = (struct sockaddr_in *)&store; 4579 sin6 = (struct sockaddr_in6 *)&store; 4580 if (net == NULL) { 4581 to = (struct sockaddr *)&store; 4582 iph = mtod(init_pkt, struct ip *); 4583 if (iph->ip_v == IPVERSION) { 4584 struct sctp_ifa *addr; 4585 sctp_route_t iproute; 4586 4587 sin->sin_family = AF_INET; 4588 sin->sin_len = sizeof(struct sockaddr_in); 4589 sin->sin_port = sh->src_port; 4590 sin->sin_addr = iph->ip_src; 4591 /* lookup address */ 4592 stc.address[0] = sin->sin_addr.s_addr; 4593 stc.address[1] = 0; 4594 stc.address[2] = 0; 4595 stc.address[3] = 0; 4596 stc.addr_type = SCTP_IPV4_ADDRESS; 4597 /* local from address */ 4598 memset(&iproute, 0, sizeof(iproute)); 4599 ro = &iproute; 4600 memcpy(&ro->ro_dst, sin, sizeof(*sin)); 4601 addr = sctp_source_address_selection(inp, NULL, 4602 ro, NULL, 0, 4603 vrf_id); 4604 if (addr == NULL) 4605 return; 4606 4607 if (ro->ro_rt) { 4608 RTFREE(ro->ro_rt); 4609 ro->ro_rt = NULL; 4610 } 4611 stc.laddress[0] = addr->address.sin.sin_addr.s_addr; 4612 stc.laddress[1] = 0; 4613 stc.laddress[2] = 0; 4614 stc.laddress[3] = 0; 4615 stc.laddr_type = SCTP_IPV4_ADDRESS; 4616 /* 
scope_id is only for v6 */ 4617 stc.scope_id = 0; 4618 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE 4619 if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) { 4620 stc.ipv4_scope = 1; 4621 } 4622 #else 4623 stc.ipv4_scope = 1; 4624 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */ 4625 /* Must use the address in this case */ 4626 if (sctp_is_address_on_local_host((struct sockaddr *)sin, vrf_id)) { 4627 stc.loopback_scope = 1; 4628 stc.ipv4_scope = 1; 4629 stc.site_scope = 1; 4630 stc.local_scope = 0; 4631 } 4632 sctp_free_ifa(addr); 4633 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 4634 struct sctp_ifa *addr; 4635 struct route_in6 iproute6; 4636 4637 ip6 = mtod(init_pkt, struct ip6_hdr *); 4638 sin6->sin6_family = AF_INET6; 4639 sin6->sin6_len = sizeof(struct sockaddr_in6); 4640 sin6->sin6_port = sh->src_port; 4641 sin6->sin6_addr = ip6->ip6_src; 4642 /* lookup address */ 4643 memcpy(&stc.address, &sin6->sin6_addr, 4644 sizeof(struct in6_addr)); 4645 sin6->sin6_scope_id = 0; 4646 stc.addr_type = SCTP_IPV6_ADDRESS; 4647 stc.scope_id = 0; 4648 if (sctp_is_address_on_local_host((struct sockaddr *)sin6, vrf_id)) { 4649 stc.loopback_scope = 1; 4650 stc.local_scope = 0; 4651 stc.site_scope = 1; 4652 stc.ipv4_scope = 1; 4653 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 4654 /* 4655 * If the new destination is a LINK_LOCAL we 4656 * must have common both site and local 4657 * scope. Don't set local scope though since 4658 * we must depend on the source to be added 4659 * implicitly. We cannot assure just because 4660 * we share one link that all links are 4661 * common. 4662 */ 4663 stc.local_scope = 0; 4664 stc.site_scope = 1; 4665 stc.ipv4_scope = 1; 4666 /* 4667 * we start counting for the private address 4668 * stuff at 1. since the link local we 4669 * source from won't show up in our scoped 4670 * count. 4671 */ 4672 cnt_inits_to = 1; 4673 /* pull out the scope_id from incoming pkt */ 4674 /* FIX ME: does this have scope from rcvif? 
*/ 4675 (void)sa6_recoverscope(sin6); 4676 4677 sa6_embedscope(sin6, ip6_use_defzone); 4678 stc.scope_id = sin6->sin6_scope_id; 4679 } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) { 4680 /* 4681 * If the new destination is SITE_LOCAL then 4682 * we must have site scope in common. 4683 */ 4684 stc.site_scope = 1; 4685 } 4686 /* local from address */ 4687 memset(&iproute6, 0, sizeof(iproute6)); 4688 ro = (sctp_route_t *) & iproute6; 4689 memcpy(&ro->ro_dst, sin6, sizeof(*sin6)); 4690 addr = sctp_source_address_selection(inp, NULL, 4691 ro, NULL, 0, vrf_id); 4692 if (addr == NULL) 4693 return; 4694 4695 if (ro->ro_rt) { 4696 RTFREE(ro->ro_rt); 4697 ro->ro_rt = NULL; 4698 } 4699 memcpy(&stc.laddress, &addr->address.sin6.sin6_addr, sizeof(struct in6_addr)); 4700 stc.laddr_type = SCTP_IPV6_ADDRESS; 4701 sctp_free_ifa(addr); 4702 } 4703 } else { 4704 /* set the scope per the existing tcb */ 4705 struct sctp_nets *lnet; 4706 4707 stc.loopback_scope = asoc->loopback_scope; 4708 stc.ipv4_scope = asoc->ipv4_local_scope; 4709 stc.site_scope = asoc->site_scope; 4710 stc.local_scope = asoc->local_scope; 4711 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) { 4712 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) { 4713 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) { 4714 /* 4715 * if we have a LL address, start 4716 * counting at 1. 4717 */ 4718 cnt_inits_to = 1; 4719 } 4720 } 4721 } 4722 4723 /* use the net pointer */ 4724 to = (struct sockaddr *)&net->ro._l_addr; 4725 if (to->sa_family == AF_INET) { 4726 sin = (struct sockaddr_in *)to; 4727 stc.address[0] = sin->sin_addr.s_addr; 4728 stc.address[1] = 0; 4729 stc.address[2] = 0; 4730 stc.address[3] = 0; 4731 stc.addr_type = SCTP_IPV4_ADDRESS; 4732 if (net->src_addr_selected == 0) { 4733 /* 4734 * strange case here, the INIT should have 4735 * did the selection. 
4736 */ 4737 net->ro._s_addr = sctp_source_address_selection(inp, 4738 stcb, (sctp_route_t *) & net->ro, 4739 net, 0, vrf_id); 4740 if (net->ro._s_addr == NULL) 4741 return; 4742 4743 net->src_addr_selected = 1; 4744 4745 } 4746 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr; 4747 stc.laddress[1] = 0; 4748 stc.laddress[2] = 0; 4749 stc.laddress[3] = 0; 4750 stc.laddr_type = SCTP_IPV4_ADDRESS; 4751 } else if (to->sa_family == AF_INET6) { 4752 sin6 = (struct sockaddr_in6 *)to; 4753 memcpy(&stc.address, &sin6->sin6_addr, 4754 sizeof(struct in6_addr)); 4755 stc.addr_type = SCTP_IPV6_ADDRESS; 4756 if (net->src_addr_selected == 0) { 4757 /* 4758 * strange case here, the INIT should have 4759 * did the selection. 4760 */ 4761 net->ro._s_addr = sctp_source_address_selection(inp, 4762 stcb, (sctp_route_t *) & net->ro, 4763 net, 0, vrf_id); 4764 if (net->ro._s_addr == NULL) 4765 return; 4766 4767 net->src_addr_selected = 1; 4768 } 4769 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr, 4770 sizeof(struct in6_addr)); 4771 stc.laddr_type = SCTP_IPV6_ADDRESS; 4772 } 4773 } 4774 /* Now lets put the SCTP header in place */ 4775 initackm_out = mtod(m, struct sctp_init_msg *); 4776 initackm_out->sh.src_port = inp->sctp_lport; 4777 initackm_out->sh.dest_port = sh->src_port; 4778 initackm_out->sh.v_tag = init_chk->init.initiate_tag; 4779 /* Save it off for quick ref */ 4780 stc.peers_vtag = init_chk->init.initiate_tag; 4781 initackm_out->sh.checksum = 0; /* calculate later */ 4782 /* who are we */ 4783 memcpy(stc.identification, SCTP_VERSION_STRING, 4784 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification))); 4785 /* now the chunk header */ 4786 initackm_out->msg.ch.chunk_type = SCTP_INITIATION_ACK; 4787 initackm_out->msg.ch.chunk_flags = 0; 4788 /* fill in later from mbuf we build */ 4789 initackm_out->msg.ch.chunk_length = 0; 4790 /* place in my tag */ 4791 if ((asoc != NULL) && 4792 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 4793 
(SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) || 4794 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) { 4795 /* re-use the v-tags and init-seq here */ 4796 initackm_out->msg.init.initiate_tag = htonl(asoc->my_vtag); 4797 initackm_out->msg.init.initial_tsn = htonl(asoc->init_seq_number); 4798 } else { 4799 uint32_t vtag; 4800 4801 if (asoc) { 4802 atomic_add_int(&asoc->refcnt, 1); 4803 SCTP_TCB_UNLOCK(stcb); 4804 vtag = sctp_select_a_tag(inp); 4805 initackm_out->msg.init.initiate_tag = htonl(vtag); 4806 /* get a TSN to use too */ 4807 initackm_out->msg.init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep)); 4808 SCTP_TCB_LOCK(stcb); 4809 atomic_add_int(&asoc->refcnt, -1); 4810 } else { 4811 vtag = sctp_select_a_tag(inp); 4812 initackm_out->msg.init.initiate_tag = htonl(vtag); 4813 /* get a TSN to use too */ 4814 initackm_out->msg.init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep)); 4815 } 4816 } 4817 /* save away my tag to */ 4818 stc.my_vtag = initackm_out->msg.init.initiate_tag; 4819 4820 /* set up some of the credits. */ 4821 initackm_out->msg.init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND)); 4822 /* set what I want */ 4823 his_limit = ntohs(init_chk->init.num_inbound_streams); 4824 /* choose what I want */ 4825 if (asoc != NULL) { 4826 if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) { 4827 i_want = asoc->streamoutcnt; 4828 } else { 4829 i_want = inp->sctp_ep.pre_open_stream_count; 4830 } 4831 } else { 4832 i_want = inp->sctp_ep.pre_open_stream_count; 4833 } 4834 if (his_limit < i_want) { 4835 /* I Want more :< */ 4836 initackm_out->msg.init.num_outbound_streams = init_chk->init.num_inbound_streams; 4837 } else { 4838 /* I can have what I want :> */ 4839 initackm_out->msg.init.num_outbound_streams = htons(i_want); 4840 } 4841 /* tell him his limt. 
*/ 4842 initackm_out->msg.init.num_inbound_streams = 4843 htons(inp->sctp_ep.max_open_streams_intome); 4844 /* setup the ECN pointer */ 4845 4846 if (inp->sctp_ep.adaptation_layer_indicator) { 4847 struct sctp_adaptation_layer_indication *ali; 4848 4849 ali = (struct sctp_adaptation_layer_indication *)( 4850 (caddr_t)initackm_out + sizeof(*initackm_out)); 4851 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION); 4852 ali->ph.param_length = htons(sizeof(*ali)); 4853 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator); 4854 SCTP_BUF_LEN(m) += sizeof(*ali); 4855 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + 4856 sizeof(*ali)); 4857 } else { 4858 ecn = (struct sctp_ecn_supported_param *)( 4859 (caddr_t)initackm_out + sizeof(*initackm_out)); 4860 } 4861 4862 /* ECN parameter */ 4863 if (sctp_ecn_enable == 1) { 4864 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE); 4865 ecn->ph.param_length = htons(sizeof(*ecn)); 4866 SCTP_BUF_LEN(m) += sizeof(*ecn); 4867 4868 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn + 4869 sizeof(*ecn)); 4870 } else { 4871 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn); 4872 } 4873 /* And now tell the peer we do pr-sctp */ 4874 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED); 4875 prsctp->ph.param_length = htons(sizeof(*prsctp)); 4876 SCTP_BUF_LEN(m) += sizeof(*prsctp); 4877 4878 /* And now tell the peer we do all the extensions */ 4879 pr_supported = (struct sctp_supported_chunk_types_param *) 4880 ((caddr_t)prsctp + sizeof(*prsctp)); 4881 4882 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT); 4883 num_ext = 0; 4884 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF; 4885 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK; 4886 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN; 4887 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED; 4888 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET; 4889 if (!sctp_auth_disable) 4890 
pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION; 4891 p_len = sizeof(*pr_supported) + num_ext; 4892 pr_supported->ph.param_length = htons(p_len); 4893 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len); 4894 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 4895 4896 /* ECN nonce: And now tell the peer we support ECN nonce */ 4897 if (sctp_ecn_nonce) { 4898 ecn_nonce = (struct sctp_ecn_nonce_supported_param *) 4899 ((caddr_t)pr_supported + SCTP_SIZE32(p_len)); 4900 ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED); 4901 ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce)); 4902 SCTP_BUF_LEN(m) += sizeof(*ecn_nonce); 4903 } 4904 /* add authentication parameters */ 4905 if (!sctp_auth_disable) { 4906 struct sctp_auth_random *random; 4907 struct sctp_auth_hmac_algo *hmacs; 4908 struct sctp_auth_chunk_list *chunks; 4909 uint16_t random_len; 4910 4911 /* generate and add RANDOM parameter */ 4912 random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT; 4913 random = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 4914 random->ph.param_type = htons(SCTP_RANDOM); 4915 p_len = sizeof(*random) + random_len; 4916 random->ph.param_length = htons(p_len); 4917 SCTP_READ_RANDOM(random->random_data, random_len); 4918 /* zero out any padding required */ 4919 bzero((caddr_t)random + p_len, SCTP_SIZE32(p_len) - p_len); 4920 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 4921 4922 /* add HMAC_ALGO parameter */ 4923 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 4924 p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs, 4925 (uint8_t *) hmacs->hmac_ids); 4926 if (p_len > 0) { 4927 p_len += sizeof(*hmacs); 4928 hmacs->ph.param_type = htons(SCTP_HMAC_LIST); 4929 hmacs->ph.param_length = htons(p_len); 4930 /* zero out any padding required */ 4931 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len); 4932 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 4933 } 4934 /* add CHUNKS parameter */ 4935 chunks = (struct sctp_auth_chunk_list *)(mtod(m, 
caddr_t)+SCTP_BUF_LEN(m)); 4936 p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks, 4937 chunks->chunk_types); 4938 if (p_len > 0) { 4939 p_len += sizeof(*chunks); 4940 chunks->ph.param_type = htons(SCTP_CHUNK_LIST); 4941 chunks->ph.param_length = htons(p_len); 4942 /* zero out any padding required */ 4943 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len); 4944 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 4945 } 4946 } 4947 m_at = m; 4948 /* now the addresses */ 4949 { 4950 struct sctp_scoping scp; 4951 4952 /* 4953 * To optimize this we could put the scoping stuff into a 4954 * structure and remove the individual uint8's from the stc 4955 * structure. Then we could just sifa in the address within 4956 * the stc.. but for now this is a quick hack to get the 4957 * address stuff teased apart. 4958 */ 4959 scp.ipv4_addr_legal = stc.ipv4_addr_legal; 4960 scp.ipv6_addr_legal = stc.ipv6_addr_legal; 4961 scp.loopback_scope = stc.loopback_scope; 4962 scp.ipv4_local_scope = stc.ipv4_scope; 4963 scp.local_scope = stc.local_scope; 4964 scp.site_scope = stc.site_scope; 4965 m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to); 4966 } 4967 4968 /* tack on the operational error if present */ 4969 if (op_err) { 4970 struct mbuf *ol; 4971 int llen; 4972 4973 llen = 0; 4974 ol = op_err; 4975 while (ol) { 4976 llen += SCTP_BUF_LEN(ol); 4977 ol = SCTP_BUF_NEXT(ol); 4978 } 4979 if (llen % 4) { 4980 /* must add a pad to the param */ 4981 uint32_t cpthis = 0; 4982 int padlen; 4983 4984 padlen = 4 - (llen % 4); 4985 m_copyback(op_err, llen, padlen, (caddr_t)&cpthis); 4986 } 4987 while (SCTP_BUF_NEXT(m_at) != NULL) { 4988 m_at = SCTP_BUF_NEXT(m_at); 4989 } 4990 SCTP_BUF_NEXT(m_at) = op_err; 4991 while (SCTP_BUF_NEXT(m_at) != NULL) { 4992 m_at = SCTP_BUF_NEXT(m_at); 4993 } 4994 } 4995 /* Get total size of init packet */ 4996 sz_of = SCTP_SIZE32(ntohs(init_chk->ch.chunk_length)); 4997 /* pre-calulate the size and update pkt header and chunk header */ 4998 
p_len = 0; 4999 for (m_tmp = m; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) { 5000 p_len += SCTP_BUF_LEN(m_tmp); 5001 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5002 /* m_tmp should now point to last one */ 5003 break; 5004 } 5005 } 5006 /* 5007 * Figure now the size of the cookie. We know the size of the 5008 * INIT-ACK. The Cookie is going to be the size of INIT, INIT-ACK, 5009 * COOKIE-STRUCTURE and SIGNATURE. 5010 */ 5011 5012 /* 5013 * take our earlier INIT calc and add in the sz we just calculated 5014 * minus the size of the sctphdr (its not included in chunk size 5015 */ 5016 5017 /* add once for the INIT-ACK */ 5018 sz_of += (p_len - sizeof(struct sctphdr)); 5019 5020 /* add a second time for the INIT-ACK in the cookie */ 5021 sz_of += (p_len - sizeof(struct sctphdr)); 5022 5023 /* Now add the cookie header and cookie message struct */ 5024 sz_of += sizeof(struct sctp_state_cookie_param); 5025 /* ...and add the size of our signature */ 5026 sz_of += SCTP_SIGNATURE_SIZE; 5027 initackm_out->msg.ch.chunk_length = htons(sz_of); 5028 5029 /* Now we must build a cookie */ 5030 m_cookie = sctp_add_cookie(inp, init_pkt, offset, m, 5031 sizeof(struct sctphdr), &stc); 5032 if (m_cookie == NULL) { 5033 /* memory problem */ 5034 sctp_m_freem(m); 5035 return; 5036 } 5037 /* Now append the cookie to the end and update the space/size */ 5038 SCTP_BUF_NEXT(m_tmp) = m_cookie; 5039 for (; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) { 5040 p_len += SCTP_BUF_LEN(m_tmp); 5041 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5042 /* m_tmp should now point to last one */ 5043 mp_last = m_tmp; 5044 break; 5045 } 5046 } 5047 5048 /* 5049 * We sifa 0 here to NOT set IP_DF if its IPv4, we ignore the return 5050 * here since the timer will drive a retranmission. 
5051 */ 5052 padval = p_len % 4; 5053 if ((padval) && (mp_last)) { 5054 /* see my previous comments on mp_last */ 5055 int ret; 5056 5057 ret = sctp_add_pad_tombuf(mp_last, (4 - padval)); 5058 if (ret) { 5059 /* Houston we have a problem, no space */ 5060 sctp_m_freem(m); 5061 return; 5062 } 5063 p_len += padval; 5064 } 5065 (void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0, 5066 NULL, 0); 5067 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 5068 } 5069 5070 5071 void 5072 sctp_insert_on_wheel(struct sctp_tcb *stcb, 5073 struct sctp_association *asoc, 5074 struct sctp_stream_out *strq, int holds_lock) 5075 { 5076 struct sctp_stream_out *stre, *strn; 5077 5078 if (holds_lock == 0) { 5079 SCTP_TCB_SEND_LOCK(stcb); 5080 } 5081 if ((strq->next_spoke.tqe_next) || 5082 (strq->next_spoke.tqe_prev)) { 5083 /* already on wheel */ 5084 goto outof_here; 5085 } 5086 stre = TAILQ_FIRST(&asoc->out_wheel); 5087 if (stre == NULL) { 5088 /* only one on wheel */ 5089 TAILQ_INSERT_HEAD(&asoc->out_wheel, strq, next_spoke); 5090 goto outof_here; 5091 } 5092 for (; stre; stre = strn) { 5093 strn = TAILQ_NEXT(stre, next_spoke); 5094 if (stre->stream_no > strq->stream_no) { 5095 TAILQ_INSERT_BEFORE(stre, strq, next_spoke); 5096 goto outof_here; 5097 } else if (stre->stream_no == strq->stream_no) { 5098 /* huh, should not happen */ 5099 goto outof_here; 5100 } else if (strn == NULL) { 5101 /* next one is null */ 5102 TAILQ_INSERT_AFTER(&asoc->out_wheel, stre, strq, 5103 next_spoke); 5104 } 5105 } 5106 outof_here: 5107 if (holds_lock == 0) { 5108 SCTP_TCB_SEND_UNLOCK(stcb); 5109 } 5110 } 5111 5112 static void 5113 sctp_remove_from_wheel(struct sctp_tcb *stcb, 5114 struct sctp_association *asoc, 5115 struct sctp_stream_out *strq) 5116 { 5117 /* take off and then setup so we know it is not on the wheel */ 5118 SCTP_TCB_SEND_LOCK(stcb); 5119 if (TAILQ_FIRST(&strq->outqueue)) { 5120 /* more was added */ 5121 SCTP_TCB_SEND_UNLOCK(stcb); 5122 return; 5123 } 5124 
	TAILQ_REMOVE(&asoc->out_wheel, strq, next_spoke);
	strq->next_spoke.tqe_next = NULL;
	strq->next_spoke.tqe_prev = NULL;
	SCTP_TCB_SEND_UNLOCK(stcb);
}

/*
 * PR-SCTP: try to free at least 'dataout' bytes of queued data so a new
 * send can fit.  Scans the sent queue for buffer-space-limited chunks
 * whose priority (timetodrop.tv_sec) allows dropping, then the send
 * queue for TTL-limited chunks.  Stops as soon as enough space has been
 * released.  Caller must hold the TCB lock.
 */
static void
sctp_prune_prsctp(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_sndrcvinfo *srcv,
    int dataout)
{
	int freed_spc = 0;
	struct sctp_tmit_chunk *chk, *nchk;

	SCTP_TCB_LOCK_ASSERT(stcb);
	if ((asoc->peer_supports_prsctp) &&
	    (asoc->sent_queue_cnt_removeable > 0)) {
		TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
			/*
			 * Look for chunks marked with the PR_SCTP flag AND
			 * the buffer space flag. If the one being sent is
			 * equal or greater priority then purge the old one
			 * and free some space.
			 */
			if (PR_SCTP_BUF_ENABLED(chk->flags)) {
				/*
				 * This one is PR-SCTP AND buffer space
				 * limited type
				 */
				if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
					/*
					 * Lower numbers equates to higher
					 * priority so if the one we are
					 * looking at has a larger or equal
					 * priority we want to drop the data
					 * and NOT retransmit it.
					 */
					if (chk->data) {
						/*
						 * We release the book_size
						 * if the mbuf is here
						 */
						int ret_spc;
						int cause;

						if (chk->sent > SCTP_DATAGRAM_UNSENT)
							cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT;
						else
							cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT;
						ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
						    cause,
						    &asoc->sent_queue);
						freed_spc += ret_spc;
						if (freed_spc >= dataout) {
							return;
						}
					}	/* if chunk was present */
				}	/* if of sufficent priority */
			}	/* if chunk has enabled */
		}		/* tailqforeach */

		chk = TAILQ_FIRST(&asoc->send_queue);
		while (chk) {
			nchk = TAILQ_NEXT(chk, sctp_next);
			/* Here we must move to the sent queue and mark */
			if (PR_SCTP_TTL_ENABLED(chk->flags)) {
				if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
					if (chk->data) {
						/*
						 * We release the book_size
						 * if the mbuf is here
						 */
						int ret_spc;

						ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
						    SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT,
						    &asoc->send_queue);

						freed_spc += ret_spc;
						if (freed_spc >= dataout) {
							return;
						}
					}	/* end if chk->data */
				}	/* end if right class */
			}	/* end if chk pr-sctp */
			chk = nchk;
		}		/* end while (chk) */
	}			/* if enabled in asoc */
}

/*
 * Return the largest DATA payload (rounded down to a 4-byte boundary)
 * that fits in one packet for this association, accounting for IP/SCTP
 * overhead and, when DATA must be authenticated, the AUTH chunk.
 */
__inline int
sctp_get_frag_point(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	int siz, ovh;

	/*
	 * For endpoints that have both v6 and v4 addresses we must reserve
	 * room for the ipv6 header, for those that are only dealing with V4
	 * we use a larger frag point.
	 */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ovh = SCTP_MED_OVERHEAD;
	} else {
		ovh = SCTP_MED_V4_OVERHEAD;
	}

	/* use the smaller of the endpoint frag point and the path MTU */
	if (stcb->sctp_ep->sctp_frag_point > asoc->smallest_mtu)
		siz = asoc->smallest_mtu - ovh;
	else
		siz = (stcb->sctp_ep->sctp_frag_point - ovh);
	/*
	 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
	 */
	/* A data chunk MUST fit in a cluster */
	/* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
	/* } */

	/* adjust for an AUTH chunk if DATA requires auth */
	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
		siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);

	if (siz % 4) {
		/* make it an even word boundary please */
		siz -= (siz % 4);
	}
	return (siz);
}

/*
 * Derive the PR-SCTP policy flags and drop deadline/limit for a pending
 * stream-queue entry 'sp' from the user-supplied sinfo flags and
 * timetolive.  Only takes effect when the peer supports PR-SCTP.
 */
static void
sctp_set_prsctp_policy(struct sctp_tcb *stcb,
    struct sctp_stream_queue_pending *sp)
{
	sp->pr_sctp_on = 0;
	if (stcb->asoc.peer_supports_prsctp) {
		/*
		 * We assume that the user wants PR_SCTP_TTL if the user
		 * provides a positive lifetime but does not specify any
		 * PR_SCTP policy. This is a BAD assumption and causes
		 * problems at least with the U-Vancovers MPI folks. I will
		 * change this to be no policy means NO PR-SCTP.
		 */
		if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
			sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
			sp->pr_sctp_on = 1;
		} else {
			/* no policy requested: leave PR-SCTP off */
			return;
		}
		switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
		case CHUNK_FLAGS_PR_SCTP_BUF:
			/*
			 * Time to live is a priority stored in tv_sec when
			 * doing the buffer drop thing.
			 */
			sp->ts.tv_sec = sp->timetolive;
			sp->ts.tv_usec = 0;
			break;
		case CHUNK_FLAGS_PR_SCTP_TTL:
			{
				struct timeval tv;

				/* deadline = now + timetolive (milliseconds) */
				(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
				tv.tv_sec = sp->timetolive / 1000;
				tv.tv_usec = (sp->timetolive * 1000) % 1000000;
				timevaladd(&sp->ts, &tv);
			}
			break;
		case CHUNK_FLAGS_PR_SCTP_RTX:
			/*
			 * Time to live is a the number or retransmissions
			 * stored in tv_sec.
			 */
			sp->ts.tv_sec = sp->timetolive;
			sp->ts.tv_usec = 0;
			break;
		default:
			SCTPDBG(SCTP_DEBUG_USRREQ1,
			    "Unknown PR_SCTP policy %u.\n",
			    PR_SCTP_POLICY(sp->sinfo_flags));
			break;
		}
	}
}

/*
 * Queue the complete message in mbuf chain 'm' on stream
 * srcv->sinfo_stream and put that stream on the output wheel.
 * Ownership of 'm' passes to the stream queue on success; on failure
 * 'm' is freed here.  Returns 0 or an errno: EINVAL (bad stream),
 * EAGAIN (another stream holds an explicit-EOR lock), ECONNRESET
 * (association shutting down) or ENOMEM.
 */
static int
sctp_msg_append(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct mbuf *m,
    struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
{
	int error = 0, holds_lock;
	struct mbuf *at;
	struct sctp_stream_queue_pending *sp = NULL;
	struct sctp_stream_out *strm;

	/*
	 * Given an mbuf chain, put it into the association send queue and
	 * place it on the wheel
	 */
	holds_lock = hold_stcb_lock;
	if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
		/* Invalid stream number */
		error = EINVAL;
		goto out_now;
	}
	if ((stcb->asoc.stream_locked) &&
	    (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
		/* a partial message on another stream owns the sender */
		error = EAGAIN;
		goto out_now;
	}
	strm = &stcb->asoc.strmout[srcv->sinfo_stream];
	/* Now can we send this? */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
		/* got data while shutting down */
		error = ECONNRESET;
		goto out_now;
	}
	sctp_alloc_a_strmoq(stcb, sp);
	if (sp == NULL) {
		error = ENOMEM;
		goto out_now;
	}
	sp->sinfo_flags = srcv->sinfo_flags;
	sp->timetolive = srcv->sinfo_timetolive;
	sp->ppid = srcv->sinfo_ppid;
	sp->context = srcv->sinfo_context;
	sp->strseq = 0;
	if (sp->sinfo_flags & SCTP_ADDR_OVER) {
		/* user pinned the destination address */
		sp->net = net;
		sp->addr_over = 1;
	} else {
		sp->net = stcb->asoc.primary_destination;
		sp->addr_over = 0;
	}
	atomic_add_int(&sp->net->ref_count, 1);
	(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
	sp->stream = srcv->sinfo_stream;
	sp->msg_is_complete = 1;
	sp->sender_all_done = 1;
	sp->some_taken = 0;
	sp->data = m;
	sp->tail_mbuf = NULL;
	sp->length = 0;
	at = m;
	sctp_set_prsctp_policy(stcb, sp);
	/*
	 * We could in theory (for sendall) sifa the length in, but we would
	 * still have to hunt through the chain since we need to setup the
	 * tail_mbuf
	 */
	while (at) {
		if (SCTP_BUF_NEXT(at) == NULL)
			sp->tail_mbuf = at;
		sp->length += SCTP_BUF_LEN(at);
		at = SCTP_BUF_NEXT(at);
	}
	SCTP_TCB_SEND_LOCK(stcb);
	sctp_snd_sb_alloc(stcb, sp->length);
	atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
	TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
	if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
		sp->strseq = strm->next_sequence_sent;
		strm->next_sequence_sent++;
	}
	if ((strm->next_spoke.tqe_next == NULL) &&
	    (strm->next_spoke.tqe_prev == NULL)) {
		/* Not on wheel, insert */
		sctp_insert_on_wheel(stcb, &stcb->asoc, strm, 1);
	}
	m = NULL;		/* ownership moved to sp; don't free below */
	SCTP_TCB_SEND_UNLOCK(stcb);
out_now:
	if (m) {
		sctp_m_freem(m);
	}
	return (error);
}


/*
 * Append the data in 'clonechain' to 'outchain' (creating outchain when
 * NULL).  Three strategies: steal the chain outright (can_take_mbuf),
 * byte-copy small payloads (sizeofcpy below the mbuf-threshold and not
 * copy_by_ref) into the tail mbuf(s), or reference-copy via
 * SCTP_M_COPYM.  *endofchain always tracks the last mbuf of the result.
 * Returns the head of the combined chain, or NULL on allocation failure
 * (in which case outchain has been freed).
 */
static struct mbuf *
sctp_copy_mbufchain(struct mbuf *clonechain,
    struct mbuf *outchain,
    struct mbuf **endofchain,
    int can_take_mbuf,
    int sizeofcpy,
    uint8_t copy_by_ref)
{
	struct mbuf *m;
	struct mbuf *appendchain;
	caddr_t cp;
	int len;

	if (endofchain == NULL) {
		/* error */
error_out:
		if (outchain)
			sctp_m_freem(outchain);
		return (NULL);
	}
	if (can_take_mbuf) {
		appendchain = clonechain;
	} else {
		if (!copy_by_ref &&
		    (sizeofcpy <= ((((sctp_mbuf_threshold_count - 1) * MLEN) + MHLEN)))
		    ) {
			/* Its not in a cluster */
			if (*endofchain == NULL) {
				/* lets get a mbuf cluster */
				if (outchain == NULL) {
					/* This is the general case */
			new_mbuf:
					outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
					if (outchain == NULL) {
						goto error_out;
					}
					SCTP_BUF_LEN(outchain) = 0;
					*endofchain = outchain;
					/* get the prepend space */
					SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4));
				} else {
					/*
					 * We really should not get a NULL
					 * in endofchain
					 */
					/* find end */
					m = outchain;
					while (m) {
						if (SCTP_BUF_NEXT(m) == NULL) {
							*endofchain = m;
							break;
						}
						m = SCTP_BUF_NEXT(m);
					}
					/* sanity */
					if (*endofchain == NULL) {
						/*
						 * huh, TSNH XXX maybe we
						 * should panic
						 */
						sctp_m_freem(outchain);
						goto new_mbuf;
					}
				}
				/* get the new end of length */
				len = M_TRAILINGSPACE(*endofchain);
			} else {
				/* how much is left at the end? */
				len = M_TRAILINGSPACE(*endofchain);
			}
			/* Find the end of the data, for appending */
			cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain)));

			/* Now lets copy it out */
			if (len >= sizeofcpy) {
				/* It all fits, copy it in */
				m_copydata(clonechain, 0, sizeofcpy, cp);
				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
			} else {
				/* fill up the end of the chain */
				if (len > 0) {
					m_copydata(clonechain, 0, len, cp);
					SCTP_BUF_LEN((*endofchain)) += len;
					/* now we need another one */
					sizeofcpy -= len;
				}
				m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
				if (m == NULL) {
					/* We failed */
					goto error_out;
				}
				SCTP_BUF_NEXT((*endofchain)) = m;
				*endofchain = m;
				cp = mtod((*endofchain), caddr_t);
				m_copydata(clonechain, len, sizeofcpy, cp);
				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
			}
			return (outchain);
		} else {
			/* copy the old fashion way */
			appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_DONTWAIT);
		}
	}
	if (appendchain == NULL) {
		/* error */
		if (outchain)
			sctp_m_freem(outchain);
		return (NULL);
	}
	if (outchain) {
		/* tack on to the end */
		if (*endofchain != NULL) {
			SCTP_BUF_NEXT(((*endofchain))) = appendchain;
		} else {
			m = outchain;
			while (m) {
				if (SCTP_BUF_NEXT(m) == NULL) {
					SCTP_BUF_NEXT(m) = appendchain;
					break;
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		/*
		 * save off the end and update the end-chain postion
		 */
		m = appendchain;
		while (m) {
			if (SCTP_BUF_NEXT(m) == NULL) {
				*endofchain = m;
				break;
			}
			m = SCTP_BUF_NEXT(m);
		}
		return (outchain);
	} else {
		/* save off the end and update the end-chain postion */
		m = appendchain;
		while (m) {
			if (SCTP_BUF_NEXT(m) == NULL) {
				*endofchain = m;
				break;
			}
			m = SCTP_BUF_NEXT(m);
		}
		return (appendchain);
	}
}

int
sctp_med_chunk_output(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int *num_out,
    int *reason_code,
    int control_only, int *cwnd_full, int from_where,
    struct timeval *now, int *now_filled, int frag_point);

/*
 * Association-iterator callback for SCTP_SENDALL: replay the saved user
 * message (or its ABORT/EOF disposition) onto one association.
 */
static void
sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
    uint32_t val)
{
	struct sctp_copy_all *ca;
	struct mbuf *m;
	int ret = 0;
	int added_control = 0;
	int un_sent, do_chunk_output = 1;
	struct sctp_association *asoc;

	ca = (struct sctp_copy_all *)ptr;
	if (ca->m == NULL) {
		return;
	}
	if (ca->inp != inp) {
		/* TSNH */
		return;
	}
	if ((ca->m) && ca->sndlen) {
		/* each association gets its own copy of the message */
		m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_DONTWAIT);
		if (m == NULL) {
			/* can't copy so we are done */
			ca->cnt_failed++;
			return;
		}
	} else {
		m = NULL;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
		/* Abort this assoc with m as the user defined reason */
		if (m) {
			struct sctp_paramhdr *ph;

			SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_DONTWAIT);
			if (m) {
				ph = mtod(m, struct sctp_paramhdr *);
				ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
				/*
				 * NOTE(review): param_length here excludes
				 * the paramhdr itself — confirm against the
				 * cause-code TLV format.
				 */
				ph->param_length = htons(ca->sndlen);
			}
			/*
			 * We add one here to keep the assoc from
			 * dis-appearing on us.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			sctp_abort_an_association(inp, stcb,
			    SCTP_RESPONSE_TO_USER_REQ,
			    m);
			/*
			 * sctp_abort_an_association calls sctp_free_asoc()
			 * free association will NOT free it since we
			 * incremented the refcnt ..
			 * we do this to prevent
			 * it being freed and things getting tricky since we
			 * could end up (from free_asoc) calling inpcb_free
			 * which would get a recursive lock call to the
			 * iterator lock.. But as a consequence of that the
			 * stcb will return to us un-locked.. since
			 * free_asoc returns with either no TCB or the TCB
			 * unlocked, we must relock.. to unlock in the
			 * iterator timer :-0
			 */
			SCTP_TCB_LOCK(stcb);
			atomic_add_int(&stcb->asoc.refcnt, -1);
			goto no_chunk_output;
		}
	} else {
		if (m) {
			/* normal case: queue the copied message */
			ret = sctp_msg_append(stcb, stcb->asoc.primary_destination, m,
			    &ca->sndrcv, 1);
		}
		asoc = &stcb->asoc;
		if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
			/* shutdown this assoc */
			if (TAILQ_EMPTY(&asoc->send_queue) &&
			    TAILQ_EMPTY(&asoc->sent_queue) &&
			    (asoc->stream_queue_cnt == 0)) {
				if (asoc->locked_on_sending) {
					goto abort_anyway;
				}
				/*
				 * there is nothing queued to send, so I'm
				 * done...
				 */
				if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
					/*
					 * only send SHUTDOWN the first time
					 * through
					 */
					sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
					if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
					}
					asoc->state = SCTP_STATE_SHUTDOWN_SENT;
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
					    asoc->primary_destination);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
					    asoc->primary_destination);
					added_control = 1;
					do_chunk_output = 0;
				}
			} else {
				/*
				 * we still got (or just got) data to send,
				 * so set SHUTDOWN_PENDING
				 */
				/*
				 * XXX sockets draft says that SCTP_EOF
				 * should be sent with no data. currently,
				 * we will allow user data to be sent first
				 * and move to SHUTDOWN-PENDING
				 */
				if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
					if (asoc->locked_on_sending) {
						/*
						 * Locked to send out the
						 * data
						 */
						struct sctp_stream_queue_pending *sp;

						sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
						if (sp) {
							if ((sp->length == 0) && (sp->msg_is_complete == 0))
								asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
						}
					}
					asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
					if (TAILQ_EMPTY(&asoc->send_queue) &&
					    TAILQ_EMPTY(&asoc->sent_queue) &&
					    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
				abort_anyway:
						atomic_add_int(&stcb->asoc.refcnt, 1);
						sctp_abort_an_association(stcb->sctp_ep, stcb,
						    SCTP_RESPONSE_TO_USER_REQ,
						    NULL);
						atomic_add_int(&stcb->asoc.refcnt, -1);
						goto no_chunk_output;
					}
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
					    asoc->primary_destination);
				}
			}

		}
	}
	/* Nagle-like check: hold output while data is in flight and little is queued */
	un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
	    ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * sizeof(struct sctp_data_chunk)));

	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
	    (stcb->asoc.total_flight > 0) &&
	    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
	    ) {
		do_chunk_output = 0;
	}
	if (do_chunk_output)
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND);
	else if (added_control) {
		/* push out just the queued SHUTDOWN control chunk */
		int num_out = 0, reason = 0, cwnd_full = 0, now_filled = 0;
		struct timeval now;
		int frag_point;

		frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
		(void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
		    &reason, 1, &cwnd_full,
1, &now, &now_filled, frag_point); 5733 } 5734 no_chunk_output: 5735 if (ret) { 5736 ca->cnt_failed++; 5737 } else { 5738 ca->cnt_sent++; 5739 } 5740 } 5741 5742 static void 5743 sctp_sendall_completes(void *ptr, uint32_t val) 5744 { 5745 struct sctp_copy_all *ca; 5746 5747 ca = (struct sctp_copy_all *)ptr; 5748 /* 5749 * Do a notify here? Kacheong suggests that the notify be done at 5750 * the send time.. so you would push up a notification if any send 5751 * failed. Don't know if this is feasable since the only failures we 5752 * have is "memory" related and if you cannot get an mbuf to send 5753 * the data you surely can't get an mbuf to send up to notify the 5754 * user you can't send the data :-> 5755 */ 5756 5757 /* now free everything */ 5758 sctp_m_freem(ca->m); 5759 SCTP_FREE(ca); 5760 } 5761 5762 5763 #define MC_ALIGN(m, len) do { \ 5764 SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1)); \ 5765 } while (0) 5766 5767 5768 5769 static struct mbuf * 5770 sctp_copy_out_all(struct uio *uio, int len) 5771 { 5772 struct mbuf *ret, *at; 5773 int left, willcpy, cancpy, error; 5774 5775 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAIT, 1, MT_DATA); 5776 if (ret == NULL) { 5777 /* TSNH */ 5778 return (NULL); 5779 } 5780 left = len; 5781 SCTP_BUF_LEN(ret) = 0; 5782 /* save space for the data chunk header */ 5783 cancpy = M_TRAILINGSPACE(ret); 5784 willcpy = min(cancpy, left); 5785 at = ret; 5786 while (left > 0) { 5787 /* Align data to the end */ 5788 error = uiomove(mtod(at, caddr_t), willcpy, uio); 5789 if (error) { 5790 err_out_now: 5791 sctp_m_freem(at); 5792 return (NULL); 5793 } 5794 SCTP_BUF_LEN(at) = willcpy; 5795 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0; 5796 left -= willcpy; 5797 if (left > 0) { 5798 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 1, MT_DATA); 5799 if (SCTP_BUF_NEXT(at) == NULL) { 5800 goto err_out_now; 5801 } 5802 at = SCTP_BUF_NEXT(at); 5803 SCTP_BUF_LEN(at) = 0; 5804 cancpy = M_TRAILINGSPACE(at); 5805 willcpy 
= min(cancpy, left); 5806 } 5807 } 5808 return (ret); 5809 } 5810 5811 static int 5812 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m, 5813 struct sctp_sndrcvinfo *srcv) 5814 { 5815 int ret; 5816 struct sctp_copy_all *ca; 5817 5818 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all), 5819 "CopyAll"); 5820 if (ca == NULL) { 5821 sctp_m_freem(m); 5822 return (ENOMEM); 5823 } 5824 memset(ca, 0, sizeof(struct sctp_copy_all)); 5825 5826 ca->inp = inp; 5827 ca->sndrcv = *srcv; 5828 /* 5829 * take off the sendall flag, it would be bad if we failed to do 5830 * this :-0 5831 */ 5832 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL; 5833 /* get length and mbuf chain */ 5834 if (uio) { 5835 ca->sndlen = uio->uio_resid; 5836 ca->m = sctp_copy_out_all(uio, ca->sndlen); 5837 if (ca->m == NULL) { 5838 SCTP_FREE(ca); 5839 return (ENOMEM); 5840 } 5841 } else { 5842 /* Gather the length of the send */ 5843 struct mbuf *mat; 5844 5845 mat = m; 5846 ca->sndlen = 0; 5847 while (m) { 5848 ca->sndlen += SCTP_BUF_LEN(m); 5849 m = SCTP_BUF_NEXT(m); 5850 } 5851 ca->m = m; 5852 } 5853 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL, 5854 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES, 5855 SCTP_ASOC_ANY_STATE, 5856 (void *)ca, 0, 5857 sctp_sendall_completes, inp, 1); 5858 if (ret) { 5859 SCTP_PRINTF("Failed to initiate iterator for sendall\n"); 5860 SCTP_FREE(ca); 5861 return (EFAULT); 5862 } 5863 return (0); 5864 } 5865 5866 5867 void 5868 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc) 5869 { 5870 struct sctp_tmit_chunk *chk, *nchk; 5871 5872 chk = TAILQ_FIRST(&asoc->control_send_queue); 5873 while (chk) { 5874 nchk = TAILQ_NEXT(chk, sctp_next); 5875 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 5876 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); 5877 if (chk->data) { 5878 sctp_m_freem(chk->data); 5879 chk->data = NULL; 5880 } 5881 asoc->ctrl_queue_cnt--; 5882 if (chk->whoTo) 5883 
				sctp_free_remote_addr(chk->whoTo);
			sctp_free_a_chunk(stcb, chk);
		}
		chk = nchk;
	}
}

/*
 * Drop the queued (not yet sent) ASCONF chunk from the control queue;
 * at most one is ever queued at a time.
 */
void
sctp_toss_old_asconf(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk, *chk_tmp;

	asoc = &stcb->asoc;
	for (chk = TAILQ_FIRST(&asoc->control_send_queue); chk != NULL;
	    chk = chk_tmp) {
		/* get next chk */
		chk_tmp = TAILQ_NEXT(chk, sctp_next);
		/* find SCTP_ASCONF chunk in queue (only one ever in queue) */
		if (chk->rec.chunk_id.id == SCTP_ASCONF) {
			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			asoc->ctrl_queue_cnt--;
			if (chk->whoTo)
				sctp_free_remote_addr(chk->whoTo);
			sctp_free_a_chunk(stcb, chk);
		}
	}
}


/*
 * Move 'bundle_at' freshly transmitted DATA chunks from the send_queue
 * to the sent_queue (kept sorted by TSN), mark them SENT, update flight
 * size and shrink the peer's advertised receive window accordingly.
 */
static __inline void
sctp_clean_up_datalist(struct sctp_tcb *stcb,

    struct sctp_association *asoc,
    struct sctp_tmit_chunk **data_list,
    int bundle_at,
    struct sctp_nets *net)
{
	int i;
	struct sctp_tmit_chunk *tp1;

	for (i = 0; i < bundle_at; i++) {
		/* off of the send queue */
		if (i) {
			/*
			 * Any chunk NOT 0 you zap the time chunk 0 gets
			 * zapped or set based on if a RTO measurment is
			 * needed.
			 */
			data_list[i]->do_rtt = 0;
		}
		/* record time */
		data_list[i]->sent_rcv_time = net->last_sent_time;
		data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
		TAILQ_REMOVE(&asoc->send_queue,
		    data_list[i],
		    sctp_next);
		/* on to the sent queue */
		tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
		if ((tp1) && (compare_with_wrap(tp1->rec.data.TSN_seq,
		    data_list[i]->rec.data.TSN_seq, MAX_TSN))) {
			struct sctp_tmit_chunk *tpp;

			/* need to move back */
	back_up_more:
			tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
			if (tpp == NULL) {
				/* reached the head: insert in front */
				TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
				goto all_done;
			}
			tp1 = tpp;
			if (compare_with_wrap(tp1->rec.data.TSN_seq,
			    data_list[i]->rec.data.TSN_seq, MAX_TSN)) {
				goto back_up_more;
			}
			TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
		} else {
			TAILQ_INSERT_TAIL(&asoc->sent_queue,
			    data_list[i],
			    sctp_next);
		}
all_done:
		/* This does not lower until the cum-ack passes it */
		asoc->sent_queue_cnt++;
		asoc->send_queue_cnt--;
		if ((asoc->peers_rwnd <= 0) &&
		    (asoc->total_flight == 0) &&
		    (bundle_at == 1)) {
			/* Mark the chunk as being a window probe */
			SCTP_STAT_INCR(sctps_windowprobed);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_audit_log(0xC2, 3);
#endif
		data_list[i]->sent = SCTP_DATAGRAM_SENT;
		data_list[i]->snd_count = 1;
		data_list[i]->rec.data.chunk_was_revoked = 0;
#ifdef SCTP_FLIGHT_LOGGING
		sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
		    data_list[i]->whoTo->flight_size,
		    data_list[i]->book_size,
		    (uintptr_t) data_list[i]->whoTo,
		    data_list[i]->rec.data.TSN_seq);
#endif
		sctp_flight_size_increase(data_list[i]);
		sctp_total_flight_increase(stcb, data_list[i]);
#ifdef SCTP_LOG_RWND
		sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
		    asoc->peers_rwnd, data_list[i]->send_size, sctp_peer_chunk_oh);
#endif
		asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
		    (uint32_t) (data_list[i]->send_size + sctp_peer_chunk_oh));
		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
			/* SWS sender side engages */
			asoc->peers_rwnd = 0;
		}
	}
}

/*
 * Purge stray/one-shot control chunks from the control queue; a queued
 * STREAM-RESET is kept only if it is the association's active one.
 */
static __inline void
sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;

	for (chk = TAILQ_FIRST(&asoc->control_send_queue);
	    chk; chk = nchk) {
		nchk = TAILQ_NEXT(chk, sctp_next);
		if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
		    (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
		    (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
		    (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
			/* Stray chunks must be cleaned up */
	clean_up_anyway:
			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			asoc->ctrl_queue_cnt--;
			sctp_free_remote_addr(chk->whoTo);
			sctp_free_a_chunk(stcb, chk);
		} else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
			/* special handling, we must look into the param */
			if (chk != asoc->str_reset) {
				goto clean_up_anyway;
			}
		}
	}
}


/*
 * Decide how many bytes (0 = don't split) may be taken from the
 * incomplete message 'sp' given the room left in the packet (goal_mtu)
 * and the fragmentation point.
 */
static __inline int
sctp_can_we_split_this(struct sctp_tcb *stcb,
    struct sctp_stream_queue_pending *sp,
    int goal_mtu, int frag_point, int eeor_on)
{
	/*
	 * Make a decision on if I should split a msg into multiple parts.
	 * This is only asked of incomplete messages.
	 */
	if (eeor_on) {
		/*
		 * If we are doing EEOR we need to always send it if its the
		 * entire thing, since it might be all the guy is putting in
		 * the hopper.
		 */
		if (goal_mtu >= sp->length) {
			/*-
			 * If we have data outstanding,
			 * we get another chance when the sack
			 * arrives to transmit - wait for more data
			 */
			if (stcb->asoc.total_flight == 0) {
				/*
				 * If nothing is in flight, we zero the
				 * packet counter.
				 */
				return (sp->length);
			}
			return (0);

		} else {
			/* You can fill the rest */
			return (goal_mtu);
		}
	}
	if ((sp->length <= goal_mtu) || ((sp->length - goal_mtu) < sctp_min_residual)) {
		/* Sub-optimial residual don't split in non-eeor mode. */
		return (0);
	}
	/*
	 * If we reach here sp->length is larger than the goal_mtu. Do we
	 * wish to split it for the sake of packet putting together?
	 */
	if (goal_mtu >= min(sctp_min_split_point, frag_point)) {
		/* Its ok to split it */
		return (min(goal_mtu, frag_point));
	}
	/* Nope, can't split */
	return (0);

}

/*
 * Move data from stream 'strq's pending queue onto the association's
 * send_queue as a transmit chunk of at most min(goal_mtu, frag_point)
 * bytes.  *locked reports whether the sender stays bound to this stream
 * (explicit EOR / partial message), *giveup tells the caller to stop
 * trying this stream.  (Function continues past this view.)
 */
static int
sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net,
    struct sctp_stream_out *strq,
    int goal_mtu,
    int frag_point,
    int *locked,
    int *giveup,
    int eeor_mode)
{
	/* Move from the stream to the send_queue keeping track of the total */
	struct sctp_association *asoc;
	struct sctp_stream_queue_pending *sp;
	struct sctp_tmit_chunk *chk;
	struct sctp_data_chunk *dchkh;
	int to_move;
	uint8_t rcv_flags = 0;
	uint8_t some_taken;
	uint8_t send_lock_up = 0;

	SCTP_TCB_LOCK_ASSERT(stcb);
	asoc = &stcb->asoc;
one_more_time:
	/* sa_ignore FREED_MEMORY */
	sp = TAILQ_FIRST(&strq->outqueue);
	if (sp == NULL) {
		*locked = 0;
		/* re-check under the send lock before believing "empty" */
		SCTP_TCB_SEND_LOCK(stcb);
		sp = TAILQ_FIRST(&strq->outqueue);
		if (sp) {
			SCTP_TCB_SEND_UNLOCK(stcb);
			goto one_more_time;
		}
		if (strq->last_msg_incomplete) {
			SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
			    strq->stream_no,
			    strq->last_msg_incomplete);
			strq->last_msg_incomplete = 0;
		}
		SCTP_TCB_SEND_UNLOCK(stcb);
		return (0);
	}
	if (sp->msg_is_complete) {
		if (sp->length == 0) {
			if (sp->sender_all_done) {
				/*
				 * We are doing differed cleanup. Last time
				 * through when we took all the data the
				 * sender_all_done was not set.
				 */
				if (sp->put_last_out == 0) {
					SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
					SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
					    sp->sender_all_done,
					    sp->length,
					    sp->msg_is_complete,
					    sp->put_last_out,
					    send_lock_up);
				}
				if (TAILQ_NEXT(sp, next) == NULL) {
					/* removing the queue tail needs the send lock */
					SCTP_TCB_SEND_LOCK(stcb);
					send_lock_up = 1;
				}
				atomic_subtract_int(&asoc->stream_queue_cnt, 1);
				TAILQ_REMOVE(&strq->outqueue, sp, next);
				sctp_free_remote_addr(sp->net);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
				sctp_free_a_strmoq(stcb, sp);

				/* we can't be locked to it */
				*locked = 0;
				stcb->asoc.locked_on_sending = NULL;
				if (send_lock_up) {
					SCTP_TCB_SEND_UNLOCK(stcb);
					send_lock_up = 0;
				}
				/* back to get the next msg */
				goto one_more_time;
			} else {
				/*
				 * sender just finished this but still holds
				 * a reference
				 */
				*locked = 1;
				*giveup = 1;
				return (0);
			}
		}
	} else {
		/* is there some to get */
		if (sp->length == 0) {
			/* no */
			*locked = 1;
			*giveup = 1;
			return (0);
		}
	}
	some_taken = sp->some_taken;
	if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
		sp->msg_is_complete = 1;
	}
re_look:
	if (sp->msg_is_complete) {
		/* The message is complete */
		to_move = min(sp->length, frag_point);
		if (to_move == sp->length) {
			/* All of it fits in the MTU */
			if (sp->some_taken) {
				rcv_flags |= SCTP_DATA_LAST_FRAG;
				sp->put_last_out = 1;
			} else {
				rcv_flags |= SCTP_DATA_NOT_FRAG;
				sp->put_last_out = 1;
			}
		} else {
			/* Not all of it fits, we fragment */
			if (sp->some_taken == 0) {
				rcv_flags |= SCTP_DATA_FIRST_FRAG;
			}
			sp->some_taken = 1;
		}
	} else {
		to_move = sctp_can_we_split_this(stcb, sp, goal_mtu,
		    frag_point, eeor_mode);
		if (to_move) {
			/*-
			 * We use a snapshot of length in case it
			 * is expanding during the compare.
			 */
			uint32_t llen;

			llen = sp->length;
			if (to_move >= llen) {
				to_move = llen;
				if (send_lock_up == 0) {
					/*-
					 * We are taking all of an incomplete msg
					 * thus we need a send lock.
					 */
					SCTP_TCB_SEND_LOCK(stcb);
					send_lock_up = 1;
					if (sp->msg_is_complete) {
						/*
						 * the sender finished the
						 * msg
						 */
						goto re_look;
					}
				}
			}
			if (sp->some_taken == 0) {
				rcv_flags |= SCTP_DATA_FIRST_FRAG;
				sp->some_taken = 1;
			}
		} else {
			/* Nothing to take. */
			if (sp->some_taken) {
				*locked = 1;
			}
			*giveup = 1;
			return (0);
		}
	}

	/* If we reach here, we can copy out a chunk */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* No chunk memory */
out_gu:
		if (send_lock_up) {
			SCTP_TCB_SEND_UNLOCK(stcb);
			send_lock_up = 0;
		}
		*giveup = 1;
		return (0);
	}
	/*
	 * Setup for unordered if needed by looking at the user sent info
	 * flags.
6277 */ 6278 if (sp->sinfo_flags & SCTP_UNORDERED) { 6279 rcv_flags |= SCTP_DATA_UNORDERED; 6280 } 6281 /* clear out the chunk before setting up */ 6282 memset(chk, sizeof(*chk), 0); 6283 chk->rec.data.rcv_flags = rcv_flags; 6284 if (SCTP_BUF_IS_EXTENDED(sp->data)) { 6285 chk->copy_by_ref = 1; 6286 } else { 6287 chk->copy_by_ref = 0; 6288 } 6289 if (to_move >= sp->length) { 6290 /* we can steal the whole thing */ 6291 chk->data = sp->data; 6292 chk->last_mbuf = sp->tail_mbuf; 6293 /* register the stealing */ 6294 sp->data = sp->tail_mbuf = NULL; 6295 } else { 6296 struct mbuf *m; 6297 6298 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_DONTWAIT); 6299 chk->last_mbuf = NULL; 6300 if (chk->data == NULL) { 6301 sp->some_taken = some_taken; 6302 sctp_free_a_chunk(stcb, chk); 6303 goto out_gu; 6304 } 6305 /* Pull off the data */ 6306 m_adj(sp->data, to_move); 6307 /* Now lets work our way down and compact it */ 6308 m = sp->data; 6309 while (m && (SCTP_BUF_LEN(m) == 0)) { 6310 sp->data = SCTP_BUF_NEXT(m); 6311 SCTP_BUF_NEXT(m) = NULL; 6312 if (sp->tail_mbuf == m) { 6313 /*- 6314 * Freeing tail? TSNH since 6315 * we supposedly were taking less 6316 * than the sp->length. 6317 */ 6318 #ifdef INVARIANTS 6319 panic("Huh, freing tail? - TSNH"); 6320 #else 6321 SCTP_PRINTF("Huh, freeing tail? - TSNH\n"); 6322 sp->tail_mbuf = sp->data = NULL; 6323 sp->length = 0; 6324 #endif 6325 6326 } 6327 sctp_m_free(m); 6328 m = sp->data; 6329 } 6330 } 6331 if (to_move > sp->length) { 6332 /*- This should not happen either 6333 * since we always lower to_move to the size 6334 * of sp->length if its larger. 
6335 */ 6336 #ifdef INVARIANTS 6337 panic("Huh, how can to_move be larger?"); 6338 #else 6339 SCTP_PRINTF("Huh, how can to_move be larger?\n"); 6340 sp->length = 0; 6341 #endif 6342 } else { 6343 atomic_subtract_int(&sp->length, to_move); 6344 } 6345 if (M_LEADINGSPACE(chk->data) < sizeof(struct sctp_data_chunk)) { 6346 /* Not enough room for a chunk header, get some */ 6347 struct mbuf *m; 6348 6349 m = sctp_get_mbuf_for_msg(1, 0, M_DONTWAIT, 0, MT_DATA); 6350 if (m == NULL) { 6351 /* 6352 * we're in trouble here. _PREPEND below will free 6353 * all the data if there is no leading space, so we 6354 * must put the data back and restore. 6355 */ 6356 if (send_lock_up == 0) { 6357 SCTP_TCB_SEND_LOCK(stcb); 6358 send_lock_up = 1; 6359 } 6360 if (chk->data == NULL) { 6361 /* unsteal the data */ 6362 sp->data = chk->data; 6363 sp->tail_mbuf = chk->last_mbuf; 6364 } else { 6365 struct mbuf *m; 6366 6367 /* reassemble the data */ 6368 m = sp->data; 6369 sp->data = chk->data; 6370 SCTP_BUF_NEXT(sp->data) = m; 6371 } 6372 sp->some_taken = some_taken; 6373 atomic_add_int(&sp->length, to_move); 6374 chk->data = NULL; 6375 sctp_free_a_chunk(stcb, chk); 6376 goto out_gu; 6377 } else { 6378 SCTP_BUF_LEN(m) = 0; 6379 SCTP_BUF_NEXT(m) = chk->data; 6380 chk->data = m; 6381 M_ALIGN(chk->data, 4); 6382 } 6383 } 6384 SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_DONTWAIT); 6385 if (chk->data == NULL) { 6386 /* HELP, TSNH since we assured it would not above? */ 6387 #ifdef INVARIANTS 6388 panic("prepend failes HELP?"); 6389 #else 6390 SCTP_PRINTF("prepend fails HELP?\n"); 6391 sctp_free_a_chunk(stcb, chk); 6392 #endif 6393 goto out_gu; 6394 } 6395 sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk)); 6396 chk->book_size = chk->send_size = (to_move + 6397 sizeof(struct sctp_data_chunk)); 6398 chk->book_size_scale = 0; 6399 chk->sent = SCTP_DATAGRAM_UNSENT; 6400 6401 /* 6402 * get last_mbuf and counts of mb useage This is ugly but hopefully 6403 * its only one mbuf. 
6404 */ 6405 if (chk->last_mbuf == NULL) { 6406 chk->last_mbuf = chk->data; 6407 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) { 6408 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf); 6409 } 6410 } 6411 chk->flags = 0; 6412 chk->asoc = &stcb->asoc; 6413 chk->pad_inplace = 0; 6414 chk->no_fr_allowed = 0; 6415 chk->rec.data.stream_seq = sp->strseq; 6416 chk->rec.data.stream_number = sp->stream; 6417 chk->rec.data.payloadtype = sp->ppid; 6418 chk->rec.data.context = sp->context; 6419 chk->rec.data.doing_fast_retransmit = 0; 6420 chk->rec.data.ect_nonce = 0; /* ECN Nonce */ 6421 6422 chk->rec.data.timetodrop = sp->ts; 6423 chk->flags = sp->act_flags; 6424 chk->addr_over = sp->addr_over; 6425 6426 chk->whoTo = net; 6427 atomic_add_int(&chk->whoTo->ref_count, 1); 6428 6429 chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1); 6430 #ifdef SCTP_LOG_SENDING_STR 6431 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND, 6432 (uintptr_t) stcb, (uintptr_t) sp, 6433 (uint32_t) ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq), 6434 chk->rec.data.TSN_seq); 6435 #endif 6436 6437 dchkh = mtod(chk->data, struct sctp_data_chunk *); 6438 /* 6439 * Put the rest of the things in place now. Size was done earlier in 6440 * previous loop prior to padding. 
6441 */ 6442 6443 #ifdef SCTP_ASOCLOG_OF_TSNS 6444 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq; 6445 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number; 6446 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq; 6447 asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size; 6448 asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags; 6449 asoc->tsn_out_at++; 6450 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) { 6451 asoc->tsn_out_at = 0; 6452 asoc->tsn_out_wrapped = 1; 6453 } 6454 #endif 6455 6456 dchkh->ch.chunk_type = SCTP_DATA; 6457 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags; 6458 dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq); 6459 dchkh->dp.stream_id = htons(strq->stream_no); 6460 dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq); 6461 dchkh->dp.protocol_id = chk->rec.data.payloadtype; 6462 dchkh->ch.chunk_length = htons(chk->send_size); 6463 /* Now advance the chk->send_size by the actual pad needed. */ 6464 if (chk->send_size < SCTP_SIZE32(chk->book_size)) { 6465 /* need a pad */ 6466 struct mbuf *lm; 6467 int pads; 6468 6469 pads = SCTP_SIZE32(chk->book_size) - chk->send_size; 6470 if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) { 6471 chk->pad_inplace = 1; 6472 } 6473 if ((lm = SCTP_BUF_NEXT(chk->last_mbuf)) != NULL) { 6474 /* pad added an mbuf */ 6475 chk->last_mbuf = lm; 6476 } 6477 chk->send_size += pads; 6478 } 6479 /* We only re-set the policy if it is on */ 6480 if (sp->pr_sctp_on) { 6481 sctp_set_prsctp_policy(stcb, sp); 6482 asoc->pr_sctp_cnt++; 6483 chk->pr_sctp_on = 1; 6484 } else { 6485 chk->pr_sctp_on = 0; 6486 } 6487 if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) { 6488 /* All done pull and kill the message */ 6489 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 6490 if (sp->put_last_out == 0) { 6491 SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n"); 6492 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d 
send_lock:%d\n", 6493 sp->sender_all_done, 6494 sp->length, 6495 sp->msg_is_complete, 6496 sp->put_last_out, 6497 send_lock_up); 6498 } 6499 if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) { 6500 SCTP_TCB_SEND_LOCK(stcb); 6501 send_lock_up = 1; 6502 } 6503 TAILQ_REMOVE(&strq->outqueue, sp, next); 6504 sctp_free_remote_addr(sp->net); 6505 if (sp->data) { 6506 sctp_m_freem(sp->data); 6507 sp->data = NULL; 6508 } 6509 sctp_free_a_strmoq(stcb, sp); 6510 6511 /* we can't be locked to it */ 6512 *locked = 0; 6513 stcb->asoc.locked_on_sending = NULL; 6514 } else { 6515 /* more to go, we are locked */ 6516 *locked = 1; 6517 } 6518 asoc->chunks_on_out_queue++; 6519 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next); 6520 asoc->send_queue_cnt++; 6521 if (send_lock_up) { 6522 SCTP_TCB_SEND_UNLOCK(stcb); 6523 send_lock_up = 0; 6524 } 6525 return (to_move); 6526 } 6527 6528 6529 static struct sctp_stream_out * 6530 sctp_select_a_stream(struct sctp_tcb *stcb, struct sctp_association *asoc) 6531 { 6532 struct sctp_stream_out *strq; 6533 6534 /* Find the next stream to use */ 6535 if (asoc->last_out_stream == NULL) { 6536 strq = asoc->last_out_stream = TAILQ_FIRST(&asoc->out_wheel); 6537 if (asoc->last_out_stream == NULL) { 6538 /* huh nothing on the wheel, TSNH */ 6539 return (NULL); 6540 } 6541 goto done_it; 6542 } 6543 strq = TAILQ_NEXT(asoc->last_out_stream, next_spoke); 6544 done_it: 6545 if (strq == NULL) { 6546 strq = asoc->last_out_stream = TAILQ_FIRST(&asoc->out_wheel); 6547 } 6548 return (strq); 6549 6550 } 6551 6552 static void 6553 sctp_fill_outqueue(struct sctp_tcb *stcb, 6554 struct sctp_nets *net, int frag_point, int eeor_mode) 6555 { 6556 struct sctp_association *asoc; 6557 struct sctp_stream_out *strq, *strqn, *strqt; 6558 int goal_mtu, moved_how_much, total_moved = 0; 6559 int locked, giveup; 6560 struct sctp_stream_queue_pending *sp; 6561 6562 SCTP_TCB_LOCK_ASSERT(stcb); 6563 asoc = &stcb->asoc; 6564 #ifdef INET6 6565 if 
(net->ro._l_addr.sin6.sin6_family == AF_INET6) { 6566 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD; 6567 } else { 6568 /* ?? not sure what else to do */ 6569 goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD; 6570 } 6571 #else 6572 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD; 6573 mtu_fromwheel = 0; 6574 #endif 6575 /* Need an allowance for the data chunk header too */ 6576 goal_mtu -= sizeof(struct sctp_data_chunk); 6577 6578 /* must make even word boundary */ 6579 goal_mtu &= 0xfffffffc; 6580 if (asoc->locked_on_sending) { 6581 /* We are stuck on one stream until the message completes. */ 6582 strqn = strq = asoc->locked_on_sending; 6583 locked = 1; 6584 } else { 6585 strqn = strq = sctp_select_a_stream(stcb, asoc); 6586 locked = 0; 6587 } 6588 6589 while ((goal_mtu > 0) && strq) { 6590 sp = TAILQ_FIRST(&strq->outqueue); 6591 /* 6592 * If CMT is off, we must validate that the stream in 6593 * question has the first item pointed towards are network 6594 * destionation requested by the caller. Note that if we 6595 * turn out to be locked to a stream (assigning TSN's then 6596 * we must stop, since we cannot look for another stream 6597 * with data to send to that destination). In CMT's case, by 6598 * skipping this check, we will send one data packet towards 6599 * the requested net. 
 */
		if (sp == NULL) {
			/* nothing queued on this stream */
			break;
		}
		if ((sp->net != net) && (sctp_cmt_on_off == 0)) {
			/* none for this network */
			if (locked) {
				break;
			} else {
				strq = sctp_select_a_stream(stcb, asoc);
				if (strq == NULL)
					/* none left */
					break;
				if (strqn == strq) {
					/* I have circled */
					break;
				}
				continue;
			}
		}
		giveup = 0;
		moved_how_much = sctp_move_to_outqueue(stcb, net, strq, goal_mtu, frag_point, &locked,
		    &giveup, eeor_mode);
		/* remember where we left off for the next fill pass */
		asoc->last_out_stream = strq;
		if (locked) {
			/* still mid-message on this stream; stay on it */
			asoc->locked_on_sending = strq;
			if ((moved_how_much == 0) || (giveup))
				/* no more to move for now */
				break;
		} else {
			asoc->locked_on_sending = NULL;
			/* pick the next candidate before possibly unhooking strq */
			strqt = sctp_select_a_stream(stcb, asoc);
			if (TAILQ_FIRST(&strq->outqueue) == NULL) {
				/* stream drained; take it off the wheel */
				sctp_remove_from_wheel(stcb, asoc, strq);
			}
			if (giveup) {
				break;
			}
			strq = strqt;
			if (strq == NULL) {
				break;
			}
		}
		total_moved += moved_how_much;
		goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk));
		goal_mtu &= 0xfffffffc;
	}
	if (total_moved == 0) {
		/* moved nothing: account for the dry run in the stats */
		if ((sctp_cmt_on_off == 0) &&
		    (net == stcb->asoc.primary_destination)) {
			/* ran dry for primary network net */
			SCTP_STAT_INCR(sctps_primary_randry);
		} else if (sctp_cmt_on_off) {
			/* ran dry with CMT on */
			SCTP_STAT_INCR(sctps_cmt_randry);
		}
	}
}

/*
 * Walk the association's control chunk queue and mark every queued
 * ECN-ECHO chunk as unsent so it will go out on the next send pass.
 */
__inline void
sctp_fix_ecn_echo(struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;

	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
			chk->sent = SCTP_DATAGRAM_UNSENT;
		}
	}
}

static void
sctp_move_to_an_alt(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net)
{
	struct sctp_tmit_chunk *chk;
	struct sctp_nets *a_net;

SCTP_TCB_LOCK_ASSERT(stcb); 6680 a_net = sctp_find_alternate_net(stcb, net, 0); 6681 if ((a_net != net) && 6682 ((a_net->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE)) { 6683 /* 6684 * We only proceed if a valid alternate is found that is not 6685 * this one and is reachable. Here we must move all chunks 6686 * queued in the send queue off of the destination address 6687 * to our alternate. 6688 */ 6689 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 6690 if (chk->whoTo == net) { 6691 /* Move the chunk to our alternate */ 6692 sctp_free_remote_addr(chk->whoTo); 6693 chk->whoTo = a_net; 6694 atomic_add_int(&a_net->ref_count, 1); 6695 } 6696 } 6697 } 6698 } 6699 6700 int 6701 sctp_med_chunk_output(struct sctp_inpcb *inp, 6702 struct sctp_tcb *stcb, 6703 struct sctp_association *asoc, 6704 int *num_out, 6705 int *reason_code, 6706 int control_only, int *cwnd_full, int from_where, 6707 struct timeval *now, int *now_filled, int frag_point) 6708 { 6709 /* 6710 * Ok this is the generic chunk service queue. we must do the 6711 * following: - Service the stream queue that is next, moving any 6712 * message (note I must get a complete message i.e. FIRST/MIDDLE and 6713 * LAST to the out queue in one pass) and assigning TSN's - Check to 6714 * see if the cwnd/rwnd allows any output, if so we go ahead and 6715 * fomulate and send the low level chunks. Making sure to combine 6716 * any control in the control chunk queue also. 
6717 */ 6718 struct sctp_nets *net; 6719 struct mbuf *outchain, *endoutchain; 6720 struct sctp_tmit_chunk *chk, *nchk; 6721 struct sctphdr *shdr; 6722 6723 /* temp arrays for unlinking */ 6724 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING]; 6725 int no_fragmentflg, error; 6726 int one_chunk, hbflag, skip_data_for_this_net; 6727 int asconf, cookie, no_out_cnt; 6728 int bundle_at, ctl_cnt, no_data_chunks, cwnd_full_ind, eeor_mode; 6729 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out; 6730 struct sctp_nets *start_at, *old_startat = NULL, *send_start_at; 6731 int tsns_sent = 0; 6732 uint32_t auth_offset = 0; 6733 struct sctp_auth_chunk *auth = NULL; 6734 6735 *num_out = 0; 6736 cwnd_full_ind = 0; 6737 6738 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 6739 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) || 6740 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) { 6741 eeor_mode = 1; 6742 } else { 6743 eeor_mode = 0; 6744 } 6745 ctl_cnt = no_out_cnt = asconf = cookie = 0; 6746 /* 6747 * First lets prime the pump. For each destination, if there is room 6748 * in the flight size, attempt to pull an MTU's worth out of the 6749 * stream queues into the general send_queue 6750 */ 6751 #ifdef SCTP_AUDITING_ENABLED 6752 sctp_audit_log(0xC2, 2); 6753 #endif 6754 SCTP_TCB_LOCK_ASSERT(stcb); 6755 hbflag = 0; 6756 if ((control_only) || (asoc->stream_reset_outstanding)) 6757 no_data_chunks = 1; 6758 else 6759 no_data_chunks = 0; 6760 6761 /* Nothing to possible to send? 
*/ 6762 if (TAILQ_EMPTY(&asoc->control_send_queue) && 6763 TAILQ_EMPTY(&asoc->send_queue) && 6764 TAILQ_EMPTY(&asoc->out_wheel)) { 6765 *reason_code = 9; 6766 return (0); 6767 } 6768 if (asoc->peers_rwnd == 0) { 6769 /* No room in peers rwnd */ 6770 *cwnd_full = 1; 6771 *reason_code = 1; 6772 if (asoc->total_flight > 0) { 6773 /* we are allowed one chunk in flight */ 6774 no_data_chunks = 1; 6775 } 6776 } 6777 if ((no_data_chunks == 0) && (!TAILQ_EMPTY(&asoc->out_wheel))) { 6778 if (sctp_cmt_on_off) { 6779 /* 6780 * for CMT we start at the next one past the one we 6781 * last added data to. 6782 */ 6783 if (TAILQ_FIRST(&asoc->send_queue) != NULL) { 6784 goto skip_the_fill_from_streams; 6785 } 6786 if (asoc->last_net_data_came_from) { 6787 net = TAILQ_NEXT(asoc->last_net_data_came_from, sctp_next); 6788 if (net == NULL) { 6789 net = TAILQ_FIRST(&asoc->nets); 6790 } 6791 } else { 6792 /* back to start */ 6793 net = TAILQ_FIRST(&asoc->nets); 6794 } 6795 6796 } else { 6797 net = asoc->primary_destination; 6798 if (net == NULL) { 6799 /* TSNH */ 6800 net = TAILQ_FIRST(&asoc->nets); 6801 } 6802 } 6803 start_at = net; 6804 one_more_time: 6805 for (; net != NULL; net = TAILQ_NEXT(net, sctp_next)) { 6806 net->window_probe = 0; 6807 if (old_startat && (old_startat == net)) { 6808 break; 6809 } 6810 if ((sctp_cmt_on_off == 0) && (net->ref_count < 2)) { 6811 /* nothing can be in queue for this guy */ 6812 continue; 6813 } 6814 if (net->flight_size >= net->cwnd) { 6815 /* skip this network, no room */ 6816 cwnd_full_ind++; 6817 continue; 6818 } 6819 /* 6820 * @@@ JRI : this for loop we are in takes in each 6821 * net, if its's got space in cwnd and has data sent 6822 * to it (when CMT is off) then it calls 6823 * sctp_fill_outqueue for the net. This gets data on 6824 * the send queue for that network. 6825 * 6826 * In sctp_fill_outqueue TSN's are assigned and data is 6827 * copied out of the stream buffers. Note mostly 6828 * copy by reference (we hope). 
6829 */ 6830 #ifdef SCTP_CWND_LOGGING 6831 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FILL_OUTQ_CALLED); 6832 #endif 6833 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode); 6834 } 6835 if (start_at != TAILQ_FIRST(&asoc->nets)) { 6836 /* got to pick up the beginning stuff. */ 6837 old_startat = start_at; 6838 start_at = net = TAILQ_FIRST(&asoc->nets); 6839 goto one_more_time; 6840 } 6841 } 6842 skip_the_fill_from_streams: 6843 *cwnd_full = cwnd_full_ind; 6844 /* now service each destination and send out what we can for it */ 6845 /* Nothing to send? */ 6846 if ((TAILQ_FIRST(&asoc->control_send_queue) == NULL) && 6847 (TAILQ_FIRST(&asoc->send_queue) == NULL)) { 6848 *reason_code = 8; 6849 return (0); 6850 } 6851 chk = TAILQ_FIRST(&asoc->send_queue); 6852 if (chk) { 6853 send_start_at = chk->whoTo; 6854 } else { 6855 send_start_at = TAILQ_FIRST(&asoc->nets); 6856 } 6857 old_startat = NULL; 6858 again_one_more_time: 6859 for (net = send_start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) { 6860 /* how much can we send? */ 6861 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */ 6862 if (old_startat && (old_startat == net)) { 6863 /* through list ocmpletely. */ 6864 break; 6865 } 6866 tsns_sent = 0; 6867 if (net->ref_count < 2) { 6868 /* 6869 * Ref-count of 1 so we cannot have data or control 6870 * queued to this address. Skip it. 
6871 */ 6872 continue; 6873 } 6874 ctl_cnt = bundle_at = 0; 6875 endoutchain = outchain = NULL; 6876 no_fragmentflg = 1; 6877 one_chunk = 0; 6878 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) { 6879 skip_data_for_this_net = 1; 6880 } else { 6881 skip_data_for_this_net = 0; 6882 } 6883 if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) { 6884 /* 6885 * if we have a route and an ifp check to see if we 6886 * have room to send to this guy 6887 */ 6888 struct ifnet *ifp; 6889 6890 ifp = net->ro.ro_rt->rt_ifp; 6891 if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) { 6892 SCTP_STAT_INCR(sctps_ifnomemqueued); 6893 #ifdef SCTP_LOG_MAXBURST 6894 sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED); 6895 #endif 6896 continue; 6897 } 6898 } 6899 if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) { 6900 mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr)); 6901 } else { 6902 mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)); 6903 } 6904 mx_mtu = mtu; 6905 to_out = 0; 6906 if (mtu > asoc->peers_rwnd) { 6907 if (asoc->total_flight > 0) { 6908 /* We have a packet in flight somewhere */ 6909 r_mtu = asoc->peers_rwnd; 6910 } else { 6911 /* We are always allowed to send one MTU out */ 6912 one_chunk = 1; 6913 r_mtu = mtu; 6914 } 6915 } else { 6916 r_mtu = mtu; 6917 } 6918 /************************/ 6919 /* Control transmission */ 6920 /************************/ 6921 /* Now first lets go through the control queue */ 6922 for (chk = TAILQ_FIRST(&asoc->control_send_queue); 6923 chk; chk = nchk) { 6924 nchk = TAILQ_NEXT(chk, sctp_next); 6925 if (chk->whoTo != net) { 6926 /* 6927 * No, not sent to the network we are 6928 * looking at 6929 */ 6930 continue; 6931 } 6932 if (chk->data == NULL) { 6933 continue; 6934 } 6935 if (chk->sent != SCTP_DATAGRAM_UNSENT) { 6936 /* 6937 * It must be unsent. Cookies and ASCONF's 6938 * hang around but there timers will force 6939 * when marked for resend. 
6940 */ 6941 continue; 6942 } 6943 /* 6944 * if no AUTH is yet included and this chunk 6945 * requires it, make sure to account for it. We 6946 * don't apply the size until the AUTH chunk is 6947 * actually added below in case there is no room for 6948 * this chunk. NOTE: we overload the use of "omtu" 6949 * here 6950 */ 6951 if ((auth == NULL) && 6952 sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 6953 stcb->asoc.peer_auth_chunks)) { 6954 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 6955 } else 6956 omtu = 0; 6957 /* Here we do NOT factor the r_mtu */ 6958 if ((chk->send_size < (int)(mtu - omtu)) || 6959 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) { 6960 /* 6961 * We probably should glom the mbuf chain 6962 * from the chk->data for control but the 6963 * problem is it becomes yet one more level 6964 * of tracking to do if for some reason 6965 * output fails. Then I have got to 6966 * reconstruct the merged control chain.. el 6967 * yucko.. for now we take the easy way and 6968 * do the copy 6969 */ 6970 /* 6971 * Add an AUTH chunk, if chunk requires it 6972 * save the offset into the chain for AUTH 6973 */ 6974 if ((auth == NULL) && 6975 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 6976 stcb->asoc.peer_auth_chunks))) { 6977 outchain = sctp_add_auth_chunk(outchain, 6978 &endoutchain, 6979 &auth, 6980 &auth_offset, 6981 stcb, 6982 chk->rec.chunk_id.id); 6983 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 6984 } 6985 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 6986 (int)chk->rec.chunk_id.can_take_data, 6987 chk->send_size, chk->copy_by_ref); 6988 if (outchain == NULL) { 6989 *reason_code = 8; 6990 return (ENOMEM); 6991 } 6992 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 6993 /* update our MTU size */ 6994 if (mtu > (chk->send_size + omtu)) 6995 mtu -= (chk->send_size + omtu); 6996 else 6997 mtu = 0; 6998 to_out += (chk->send_size + omtu); 6999 /* Do clear IP_DF ? 
*/ 7000 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { 7001 no_fragmentflg = 0; 7002 } 7003 if (chk->rec.chunk_id.can_take_data) 7004 chk->data = NULL; 7005 /* Mark things to be removed, if needed */ 7006 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) || 7007 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) || 7008 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) || 7009 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) || 7010 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) || 7011 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) || 7012 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) || 7013 (chk->rec.chunk_id.id == SCTP_ECN_CWR) || 7014 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) || 7015 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) { 7016 7017 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) 7018 hbflag = 1; 7019 /* remove these chunks at the end */ 7020 if (chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) { 7021 /* turn off the timer */ 7022 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 7023 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 7024 inp, stcb, net, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1); 7025 } 7026 } 7027 ctl_cnt++; 7028 } else { 7029 /* 7030 * Other chunks, since they have 7031 * timers running (i.e. COOKIE or 7032 * ASCONF) we just "trust" that it 7033 * gets sent or retransmitted. 7034 */ 7035 ctl_cnt++; 7036 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 7037 cookie = 1; 7038 no_out_cnt = 1; 7039 } else if (chk->rec.chunk_id.id == SCTP_ASCONF) { 7040 /* 7041 * set hb flag since we can 7042 * use these for RTO 7043 */ 7044 hbflag = 1; 7045 asconf = 1; 7046 } 7047 chk->sent = SCTP_DATAGRAM_SENT; 7048 chk->snd_count++; 7049 } 7050 if (mtu == 0) { 7051 /* 7052 * Ok we are out of room but we can 7053 * output without effecting the 7054 * flight size since this little guy 7055 * is a control only packet. 
7056 */ 7057 if (asconf) { 7058 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net); 7059 asconf = 0; 7060 } 7061 if (cookie) { 7062 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net); 7063 cookie = 0; 7064 } 7065 SCTP_BUF_PREPEND(outchain, sizeof(struct sctphdr), M_DONTWAIT); 7066 if (outchain == NULL) { 7067 /* no memory */ 7068 error = ENOBUFS; 7069 goto error_out_again; 7070 } 7071 shdr = mtod(outchain, struct sctphdr *); 7072 shdr->src_port = inp->sctp_lport; 7073 shdr->dest_port = stcb->rport; 7074 shdr->v_tag = htonl(stcb->asoc.peer_vtag); 7075 shdr->checksum = 0; 7076 auth_offset += sizeof(struct sctphdr); 7077 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net, 7078 (struct sockaddr *)&net->ro._l_addr, 7079 outchain, auth_offset, auth, 7080 no_fragmentflg, 0, NULL, asconf))) { 7081 if (error == ENOBUFS) { 7082 asoc->ifp_had_enobuf = 1; 7083 SCTP_STAT_INCR(sctps_lowlevelerr); 7084 } 7085 if (from_where == 0) { 7086 SCTP_STAT_INCR(sctps_lowlevelerrusr); 7087 } 7088 error_out_again: 7089 /* error, could not output */ 7090 if (hbflag) { 7091 if (*now_filled == 0) { 7092 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 7093 *now_filled = 1; 7094 *now = net->last_sent_time; 7095 } else { 7096 net->last_sent_time = *now; 7097 } 7098 hbflag = 0; 7099 } 7100 if (error == EHOSTUNREACH) { 7101 /* 7102 * Destination went 7103 * unreachable 7104 * during this send 7105 */ 7106 sctp_move_to_an_alt(stcb, asoc, net); 7107 } 7108 *reason_code = 7; 7109 continue; 7110 } else 7111 asoc->ifp_had_enobuf = 0; 7112 /* Only HB or ASCONF advances time */ 7113 if (hbflag) { 7114 if (*now_filled == 0) { 7115 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 7116 *now_filled = 1; 7117 *now = net->last_sent_time; 7118 } else { 7119 net->last_sent_time = *now; 7120 } 7121 hbflag = 0; 7122 } 7123 /* 7124 * increase the number we sent, if a 7125 * cookie is sent we don't tell them 7126 * any was sent out. 
7127 */ 7128 outchain = endoutchain = NULL; 7129 auth = NULL; 7130 auth_offset = 0; 7131 if (!no_out_cnt) 7132 *num_out += ctl_cnt; 7133 /* recalc a clean slate and setup */ 7134 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 7135 mtu = (net->mtu - SCTP_MIN_OVERHEAD); 7136 } else { 7137 mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD); 7138 } 7139 to_out = 0; 7140 no_fragmentflg = 1; 7141 } 7142 } 7143 } 7144 /*********************/ 7145 /* Data transmission */ 7146 /*********************/ 7147 /* 7148 * if AUTH for DATA is required and no AUTH has been added 7149 * yet, account for this in the mtu now... if no data can be 7150 * bundled, this adjustment won't matter anyways since the 7151 * packet will be going out... 7152 */ 7153 if ((auth == NULL) && 7154 sctp_auth_is_required_chunk(SCTP_DATA, 7155 stcb->asoc.peer_auth_chunks)) { 7156 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 7157 } 7158 /* now lets add any data within the MTU constraints */ 7159 if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) { 7160 if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr))) 7161 omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr)); 7162 else 7163 omtu = 0; 7164 } else { 7165 if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr))) 7166 omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)); 7167 else 7168 omtu = 0; 7169 } 7170 if ((((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) && (skip_data_for_this_net == 0)) || 7171 (cookie)) { 7172 for (chk = TAILQ_FIRST(&asoc->send_queue); chk; chk = nchk) { 7173 if (no_data_chunks) { 7174 /* let only control go out */ 7175 *reason_code = 1; 7176 break; 7177 } 7178 if (net->flight_size >= net->cwnd) { 7179 /* skip this net, no room for data */ 7180 *reason_code = 2; 7181 break; 7182 } 7183 nchk = TAILQ_NEXT(chk, sctp_next); 7184 if (chk->whoTo != net) { 7185 /* No, not sent to this net */ 7186 continue; 7187 } 7188 if ((chk->send_size > omtu) && ((chk->flags & 
CHUNK_FLAGS_FRAGMENT_OK) == 0)) { 7189 /*- 7190 * strange, we have a chunk that is 7191 * to big for its destination and 7192 * yet no fragment ok flag. 7193 * Something went wrong when the 7194 * PMTU changed...we did not mark 7195 * this chunk for some reason?? I 7196 * will fix it here by letting IP 7197 * fragment it for now and printing 7198 * a warning. This really should not 7199 * happen ... 7200 */ 7201 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n", 7202 chk->send_size, mtu); 7203 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 7204 } 7205 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) || 7206 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) { 7207 /* ok we will add this one */ 7208 7209 /* 7210 * Add an AUTH chunk, if chunk 7211 * requires it, save the offset into 7212 * the chain for AUTH 7213 */ 7214 if ((auth == NULL) && 7215 (sctp_auth_is_required_chunk(SCTP_DATA, 7216 stcb->asoc.peer_auth_chunks))) { 7217 7218 outchain = sctp_add_auth_chunk(outchain, 7219 &endoutchain, 7220 &auth, 7221 &auth_offset, 7222 stcb, 7223 SCTP_DATA); 7224 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 7225 } 7226 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0, 7227 chk->send_size, chk->copy_by_ref); 7228 if (outchain == NULL) { 7229 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n"); 7230 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 7231 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 7232 } 7233 *reason_code = 3; 7234 return (ENOMEM); 7235 } 7236 /* upate our MTU size */ 7237 /* Do clear IP_DF ? 
*/ 7238 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { 7239 no_fragmentflg = 0; 7240 } 7241 /* unsigned subtraction of mtu */ 7242 if (mtu > chk->send_size) 7243 mtu -= chk->send_size; 7244 else 7245 mtu = 0; 7246 /* unsigned subtraction of r_mtu */ 7247 if (r_mtu > chk->send_size) 7248 r_mtu -= chk->send_size; 7249 else 7250 r_mtu = 0; 7251 7252 to_out += chk->send_size; 7253 if (to_out > mx_mtu) { 7254 #ifdef INVARIANTS 7255 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out); 7256 #else 7257 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n", 7258 mx_mtu, to_out); 7259 #endif 7260 } 7261 chk->window_probe = 0; 7262 data_list[bundle_at++] = chk; 7263 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) { 7264 mtu = 0; 7265 break; 7266 } 7267 if (chk->sent == SCTP_DATAGRAM_UNSENT) { 7268 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) { 7269 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks); 7270 } else { 7271 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks); 7272 } 7273 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) && 7274 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0)) 7275 /* 7276 * Count number of 7277 * user msg's that 7278 * were fragmented 7279 * we do this by 7280 * counting when we 7281 * see a LAST 7282 * fragment only. 7283 */ 7284 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs); 7285 } 7286 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) { 7287 if (one_chunk) { 7288 data_list[0]->window_probe = 1; 7289 net->window_probe = 1; 7290 } 7291 break; 7292 } 7293 } else { 7294 /* 7295 * Must be sent in order of the 7296 * TSN's (on a network) 7297 */ 7298 break; 7299 } 7300 } /* for (chunk gather loop for this net) */ 7301 } /* if asoc.state OPEN */ 7302 /* Is there something to send for this destination? 
*/ 7303 if (outchain) { 7304 /* We may need to start a control timer or two */ 7305 if (asconf) { 7306 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net); 7307 asconf = 0; 7308 } 7309 if (cookie) { 7310 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net); 7311 cookie = 0; 7312 } 7313 /* must start a send timer if data is being sent */ 7314 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) { 7315 /* 7316 * no timer running on this destination 7317 * restart it. 7318 */ 7319 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 7320 } 7321 /* Now send it, if there is anything to send :> */ 7322 SCTP_BUF_PREPEND(outchain, sizeof(struct sctphdr), M_DONTWAIT); 7323 if (outchain == NULL) { 7324 /* out of mbufs */ 7325 error = ENOBUFS; 7326 goto errored_send; 7327 } 7328 shdr = mtod(outchain, struct sctphdr *); 7329 shdr->src_port = inp->sctp_lport; 7330 shdr->dest_port = stcb->rport; 7331 shdr->v_tag = htonl(stcb->asoc.peer_vtag); 7332 shdr->checksum = 0; 7333 auth_offset += sizeof(struct sctphdr); 7334 if ((error = sctp_lowlevel_chunk_output(inp, 7335 stcb, 7336 net, 7337 (struct sockaddr *)&net->ro._l_addr, 7338 outchain, 7339 auth_offset, 7340 auth, 7341 no_fragmentflg, 7342 bundle_at, 7343 data_list[0], 7344 asconf))) { 7345 /* error, we could not output */ 7346 if (error == ENOBUFS) { 7347 SCTP_STAT_INCR(sctps_lowlevelerr); 7348 asoc->ifp_had_enobuf = 1; 7349 } 7350 if (from_where == 0) { 7351 SCTP_STAT_INCR(sctps_lowlevelerrusr); 7352 } 7353 errored_send: 7354 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error); 7355 if (hbflag) { 7356 if (*now_filled == 0) { 7357 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 7358 *now_filled = 1; 7359 *now = net->last_sent_time; 7360 } else { 7361 net->last_sent_time = *now; 7362 } 7363 hbflag = 0; 7364 } 7365 if (error == EHOSTUNREACH) { 7366 /* 7367 * Destination went unreachable 7368 * during this send 7369 */ 7370 sctp_move_to_an_alt(stcb, asoc, net); 7371 } 7372 *reason_code = 6; 7373 
/*- 7374 * I add this line to be paranoid. As far as 7375 * I can tell the continue, takes us back to 7376 * the top of the for, but just to make sure 7377 * I will reset these again here. 7378 */ 7379 ctl_cnt = bundle_at = 0; 7380 continue; /* This takes us back to the 7381 * for() for the nets. */ 7382 } else { 7383 asoc->ifp_had_enobuf = 0; 7384 } 7385 outchain = endoutchain = NULL; 7386 auth = NULL; 7387 auth_offset = 0; 7388 if (bundle_at || hbflag) { 7389 /* For data/asconf and hb set time */ 7390 if (*now_filled == 0) { 7391 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 7392 *now_filled = 1; 7393 *now = net->last_sent_time; 7394 } else { 7395 net->last_sent_time = *now; 7396 } 7397 } 7398 if (!no_out_cnt) { 7399 *num_out += (ctl_cnt + bundle_at); 7400 } 7401 if (bundle_at) { 7402 /* setup for a RTO measurement */ 7403 tsns_sent = data_list[0]->rec.data.TSN_seq; 7404 7405 data_list[0]->do_rtt = 1; 7406 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at); 7407 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net); 7408 if (sctp_early_fr) { 7409 if (net->flight_size < net->cwnd) { 7410 /* start or restart it */ 7411 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 7412 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net, 7413 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2); 7414 } 7415 SCTP_STAT_INCR(sctps_earlyfrstrout); 7416 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net); 7417 } else { 7418 /* stop it if its running */ 7419 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 7420 SCTP_STAT_INCR(sctps_earlyfrstpout); 7421 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net, 7422 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3); 7423 } 7424 } 7425 } 7426 } 7427 if (one_chunk) { 7428 break; 7429 } 7430 } 7431 #ifdef SCTP_CWND_LOGGING 7432 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND); 7433 #endif 7434 } 7435 if (old_startat == NULL) { 7436 old_startat = send_start_at; 7437 send_start_at = TAILQ_FIRST(&asoc->nets); 7438 goto again_one_more_time; 
	}
	/*
	 * At the end there should be no NON timed chunks hanging on this
	 * queue.
	 */
#ifdef SCTP_CWND_LOGGING
	sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
#endif
	/* reason_code 4: nothing was sent; 5: something was sent */
	if ((*num_out == 0) && (*reason_code == 0)) {
		*reason_code = 4;
	} else {
		*reason_code = 5;
	}
	sctp_clean_up_ctl(stcb, asoc);
	return (0);
}

/*
 * Prepend an OPERATIONAL_ERROR chunk header onto op_err and append the
 * result to the association's control chunk send queue.  Takes ownership
 * of op_err: the chain is either queued or freed on every path.
 * Caller must hold the TCB lock.
 */
void
sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
{
	/*-
	 * Prepend a OPERATIONAL_ERROR chunk header and put on the end of
	 * the control chunk queue.
	 */
	struct sctp_chunkhdr *hdr;
	struct sctp_tmit_chunk *chk;
	struct mbuf *mat;

	SCTP_TCB_LOCK_ASSERT(stcb);
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(op_err);
		return;
	}
	chk->copy_by_ref = 0;
	/*
	 * NOTE(review): on prepend failure only chk is released below —
	 * presumably SCTP_BUF_PREPEND (M_PREPEND) frees the chain itself
	 * when it fails; verify the macro's semantics.
	 */
	SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_DONTWAIT);
	if (op_err == NULL) {
		sctp_free_a_chunk(stcb, chk);
		return;
	}
	/* total the chain length so the chunk header can carry it */
	chk->send_size = 0;
	mat = op_err;
	while (mat != NULL) {
		chk->send_size += SCTP_BUF_LEN(mat);
		mat = SCTP_BUF_NEXT(mat);
	}
	chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
	chk->rec.chunk_id.can_take_data = 1;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->data = op_err;
	chk->whoTo = chk->asoc->primary_destination;
	/* hold a reference on the destination while the chunk is queued */
	atomic_add_int(&chk->whoTo->ref_count, 1);
	hdr = mtod(op_err, struct sctp_chunkhdr *);
	hdr->chunk_type = SCTP_OPERATION_ERROR;
	hdr->chunk_flags = 0;
	hdr->chunk_length = htons(chk->send_size);
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
	    chk,
	    sctp_next);
	chk->asoc->ctrl_queue_cnt++;
}

/*
 * Pull the STATE_COOKIE parameter out of a received INIT-ACK (in mbuf m at
 * offset), rewrite it as a COOKIE_ECHO chunk and insert it at the FRONT of
 * the control chunk queue.  Returns 0 on success, -2 on allocation failure
 * copying the cookie, -3 if no cookie parameter is found, -5 if no chunk
 * structure could be allocated.  Caller must hold the TCB lock.
 */
int
sctp_send_cookie_echo(struct mbuf *m,
    int offset,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	/*-
	 * pull out the cookie and put it at the front of the control chunk
	 * queue.
	 */
	int at;
	struct mbuf *cookie;
	struct sctp_paramhdr parm, *phdr;
	struct sctp_chunkhdr *hdr;
	struct sctp_tmit_chunk *chk;
	uint16_t ptype, plen;

	/* First find the cookie in the param area */
	cookie = NULL;
	at = offset + sizeof(struct sctp_init_chunk);

	SCTP_TCB_LOCK_ASSERT(stcb);
	do {
		phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
		if (phdr == NULL) {
			return (-3);
		}
		ptype = ntohs(phdr->param_type);
		plen = ntohs(phdr->param_length);
		if (ptype == SCTP_STATE_COOKIE) {
			int pad;

			/* found the cookie */
			/* round the copy length up to a 4-byte boundary */
			if ((pad = (plen % 4))) {
				plen += 4 - pad;
			}
			cookie = SCTP_M_COPYM(m, at, plen, M_DONTWAIT);
			if (cookie == NULL) {
				/* No memory */
				return (-2);
			}
			break;
		}
		at += SCTP_SIZE32(plen);
	} while (phdr);
	if (cookie == NULL) {
		/* Did not find the cookie */
		return (-3);
	}
	/* ok, we got the cookie lets change it into a cookie echo chunk */

	/*
	 * first the change from param to cookie: overwrite the parameter
	 * header in place (param and chunk headers share the same layout)
	 */
	hdr = mtod(cookie, struct sctp_chunkhdr *);
	hdr->chunk_type = SCTP_COOKIE_ECHO;
	hdr->chunk_flags = 0;
	/* get the chunk stuff now and place it in the FRONT of the queue */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(cookie);
		return (-5);
	}
	chk->copy_by_ref = 0;
	chk->send_size = plen;
	chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
	chk->rec.chunk_id.can_take_data = 0;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->data = cookie;
	chk->whoTo = chk->asoc->primary_destination;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	/* COOKIE_ECHO must go out before anything else queued */
	TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return (0);
}

void
sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
    struct mbuf *m,
    int offset,
    int chk_length,
    struct sctp_nets *net)
{
	/*
	 * take a HB request and make it into a HB ack and send it.
	 * The HB payload (chk_length bytes at offset in m) is copied,
	 * the chunk type is rewritten to HEARTBEAT_ACK, and the result
	 * is queued on the control send queue addressed back to net.
	 */
	struct mbuf *outchain;
	struct sctp_chunkhdr *chdr;
	struct sctp_tmit_chunk *chk;


	if (net == NULL)
		/* must have a net pointer */
		return;

	outchain = SCTP_M_COPYM(m, offset, chk_length, M_DONTWAIT);
	if (outchain == NULL) {
		/* gak out of memory */
		return;
	}
	chdr = mtod(outchain, struct sctp_chunkhdr *);
	chdr->chunk_type = SCTP_HEARTBEAT_ACK;
	chdr->chunk_flags = 0;
	if (chk_length % 4) {
		/* need pad: chunks are transmitted on 4-byte boundaries */
		uint32_t cpthis = 0;
		int padlen;

		padlen = 4 - (chk_length % 4);
		m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(outchain);
		return;
	}
	chk->copy_by_ref = 0;
	chk->send_size = chk_length;
	chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
	chk->rec.chunk_id.can_take_data = 1;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->data = outchain;
	chk->whoTo = net;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
}

/*
 * Build a COOKIE-ACK chunk and queue it on the control send queue.
 * It is addressed to the net the last control chunk arrived from when
 * known, otherwise to the primary destination.  Caller holds TCB lock.
 */
void
sctp_send_cookie_ack(struct sctp_tcb *stcb)
{
	/* formulate and queue a cookie-ack back to sender */
	struct mbuf *cookie_ack;
	struct sctp_chunkhdr *hdr;
	struct sctp_tmit_chunk *chk;

	cookie_ack = NULL;
	SCTP_TCB_LOCK_ASSERT(stcb);

	cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
	if (cookie_ack == NULL) {
		/* no mbuf's */
		return;
	}
	/* leave room in front for the common/IP headers added later */
	SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(cookie_ack);
		return;
	}
	chk->copy_by_ref = 0;
	chk->send_size = sizeof(struct sctp_chunkhdr);
	chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
	chk->rec.chunk_id.can_take_data = 1;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->data = cookie_ack;
	/* reply toward where the COOKIE-ECHO came from, if we know */
	if (chk->asoc->last_control_chunk_from != NULL) {
		chk->whoTo = chk->asoc->last_control_chunk_from;
	} else {
		chk->whoTo = chk->asoc->primary_destination;
	}
	atomic_add_int(&chk->whoTo->ref_count, 1);
	hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
	hdr->chunk_type = SCTP_COOKIE_ACK;
	hdr->chunk_flags = 0;
	hdr->chunk_length = htons(chk->send_size);
	SCTP_BUF_LEN(cookie_ack) = chk->send_size;
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return;
}


/*
 * Build a SHUTDOWN-ACK chunk addressed to net and queue it on the
 * control send queue.
 */
void
sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* formulate and queue a SHUTDOWN-ACK back to the sender */
	struct mbuf *m_shutdown_ack;
	struct sctp_shutdown_ack_chunk *ack_cp;
	struct sctp_tmit_chunk *chk;

	m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
	if (m_shutdown_ack == NULL) {
		/* no mbuf's */
		return;
	}
	SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(m_shutdown_ack);
		return;
	}
	chk->copy_by_ref = 0;

	/*
	 * NOTE(review): sized as a bare chunk header while the mbuf was
	 * allocated for sctp_shutdown_ack_chunk — presumably that struct is
	 * just a chunk header (SHUTDOWN-ACK carries no payload); confirm
	 * the two sizes match.
	 */
	chk->send_size = sizeof(struct sctp_chunkhdr);
	chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
	chk->rec.chunk_id.can_take_data = 1;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->data = m_shutdown_ack;
	chk->whoTo = net;
	atomic_add_int(&net->ref_count, 1);

	ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
	ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
	ack_cp->ch.chunk_flags = 0;
	ack_cp->ch.chunk_length = htons(chk->send_size);
	SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return;
}

/*
 * Build a SHUTDOWN chunk (carrying our cumulative TSN) addressed to net
 * and queue it on the control send queue.
 */
void
sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* formulate and queue a SHUTDOWN to the sender */
	struct mbuf *m_shutdown;
	struct sctp_shutdown_chunk *shutdown_cp;
	struct sctp_tmit_chunk *chk;

	m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
	if (m_shutdown == NULL) {
		/* no mbuf's */
		return;
	}
	SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(m_shutdown);
		return;
	}
	chk->copy_by_ref = 0;
	chk->send_size = sizeof(struct sctp_shutdown_chunk);
	chk->rec.chunk_id.id = SCTP_SHUTDOWN;
	chk->rec.chunk_id.can_take_data = 1;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->data = m_shutdown;
	chk->whoTo = net;
	atomic_add_int(&net->ref_count, 1);

	shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
	shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
	shutdown_cp->ch.chunk_flags = 0;
	shutdown_cp->ch.chunk_length = htons(chk->send_size);
	/* tell the peer the highest TSN we have consumed */
	shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
	SCTP_BUF_LEN(m_shutdown) = chk->send_size;
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return;
}

void
sctp_send_asconf(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	/*
	 * formulate and queue an ASCONF to the peer ASCONF parameters
	 * should be queued on the assoc queue.  The chunk body is built
	 * by sctp_compose_asconf(), which also reports its length.
	 */
	struct sctp_tmit_chunk *chk;
	struct mbuf *m_asconf;
	struct sctp_asconf_chunk *acp;
	int len;		/* filled in by sctp_compose_asconf() */


	SCTP_TCB_LOCK_ASSERT(stcb);
	/* compose an ASCONF chunk, maximum length is PMTU */
	m_asconf = sctp_compose_asconf(stcb, &len);
	if (m_asconf == NULL) {
		return;
	}
	acp = mtod(m_asconf, struct sctp_asconf_chunk *);
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(m_asconf);
		return;
	}
	chk->copy_by_ref = 0;
	chk->data = m_asconf;
	chk->send_size = len;
	chk->rec.chunk_id.id = SCTP_ASCONF;
	chk->rec.chunk_id.can_take_data = 0;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->whoTo = chk->asoc->primary_destination;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return;
}

/*
 * Re-queue the cached ASCONF-ACK (asoc.last_asconf_ack_sent) for
 * (re)transmission.  When retrans is non-zero an alternate destination is
 * tried for up to 3 attempts before falling back to the original source
 * of the ASCONF.  Caller must hold the TCB lock.
 */
void
sctp_send_asconf_ack(struct sctp_tcb *stcb, uint32_t retrans)
{
	/*
	 * formulate and queue a asconf-ack back to sender the asconf-ack
	 * must be stored in the tcb
	 */
	struct sctp_tmit_chunk *chk;
	struct mbuf *m_ack, *m;

	SCTP_TCB_LOCK_ASSERT(stcb);
	/* is there a asconf-ack mbuf chain to send? */
	if (stcb->asoc.last_asconf_ack_sent == NULL) {
		return;
	}
	/* copy the asconf_ack */
	m_ack = SCTP_M_COPYM(stcb->asoc.last_asconf_ack_sent, 0, M_COPYALL, M_DONTWAIT);
	if (m_ack == NULL) {
		/* couldn't copy it */

		return;
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		if (m_ack)
			sctp_m_freem(m_ack);
		return;
	}
	chk->copy_by_ref = 0;
	/* figure out where it goes to */
	if (retrans) {
		/* we're doing a retransmission */
		if (stcb->asoc.used_alt_asconfack > 2) {
			/* tried alternate nets already, go back */
			chk->whoTo = NULL;
		} else {
			/* need to try and alternate net */
			chk->whoTo = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
			stcb->asoc.used_alt_asconfack++;
		}
		if (chk->whoTo == NULL) {
			/* no alternate */
			if (stcb->asoc.last_control_chunk_from == NULL)
				chk->whoTo = stcb->asoc.primary_destination;
			else
				chk->whoTo = stcb->asoc.last_control_chunk_from;
			stcb->asoc.used_alt_asconfack = 0;
		}
	} else {
		/* normal case */
		if (stcb->asoc.last_control_chunk_from == NULL)
			chk->whoTo = stcb->asoc.primary_destination;
		else
			chk->whoTo = stcb->asoc.last_control_chunk_from;
		stcb->asoc.used_alt_asconfack = 0;
	}
	chk->data = m_ack;
	chk->send_size = 0;
	/* Get size: total the lengths of the copied chain */
	m = m_ack;
	while (m) {
		chk->send_size += SCTP_BUF_LEN(m);
		m = SCTP_BUF_NEXT(m);
	}
	chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
	chk->rec.chunk_id.can_take_data = 1;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	/* reference taken once the destination has been decided above */
	atomic_add_int(&chk->whoTo->ref_count, 1);
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return;
}


static int
sctp_chunk_retransmission(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int *cnt_out, struct timeval *now, int *now_filled, int *fr_done)
{
	/*-
	 * send out one MTU of retransmission. If fast_retransmit is
	 * happening we ignore the cwnd. Otherwise we obey the cwnd and
	 * rwnd. For a Cookie or Asconf in the control chunk queue we
	 * retransmit them by themselves.
	 *
	 * For data chunks we will pick out the lowest TSN's in the sent_queue
	 * marked for resend and bundle them all together (up to a MTU of
	 * destination). The address to send to should have been
	 * selected/changed where the retransmission was marked (i.e. in FR
	 * or t3-timeout routines).
	 *
	 * Returns: 0 on success (or nothing eligible), 1 when blocked
	 * (rwnd/fit/not-open), ENOBUFS/ENOMEM on allocation failure,
	 * SCTP_RETRAN_DONE when the sent queue is empty, SCTP_RETRAN_EXIT
	 * after aborting the association, or the low-level output error.
	 * *cnt_out is set to the number of chunks sent; *fr_done is set
	 * when a fast-retransmit destination was serviced.
	 */
	struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
	struct sctp_tmit_chunk *chk, *fwd;
	struct mbuf *m, *endofchain;
	struct sctphdr *shdr;
	int asconf;
	struct sctp_nets *net;
	uint32_t tsns_sent = 0;
	int no_fragmentflg, bundle_at, cnt_thru;
	unsigned int mtu;
	int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
	struct sctp_auth_chunk *auth = NULL;
	uint32_t auth_offset = 0;
	uint32_t dmtu = 0;

	SCTP_TCB_LOCK_ASSERT(stcb);
	tmr_started = ctl_cnt = bundle_at = error = 0;
	no_fragmentflg = 1;
	asconf = 0;
	fwd_tsn = 0;
	*cnt_out = 0;
	fwd = NULL;
	endofchain = m = NULL;
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xC3, 1);
#endif
	if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
	    (TAILQ_EMPTY(&asoc->control_send_queue))) {
		SCTPDBG(SCTP_DEBUG_OUTPUT1, "SCTP hits empty queue with cnt set to %d?\n",
		    asoc->sent_queue_retran_cnt);
		asoc->sent_queue_cnt = 0;
		asoc->sent_queue_cnt_removeable = 0;
		/* send back 0/0 so we enter normal transmission */
		*cnt_out = 0;
		return (0);
	}
	/*
	 * Control chunk retransmission: pick the first eligible control
	 * chunk (cookie-echo, asconf, stream-reset or fwd-tsn) and send it
	 * by itself.
	 */
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
		    (chk->rec.chunk_id.id == SCTP_ASCONF) ||
		    (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
		    (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
			if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
				if (chk != asoc->str_reset) {
					/*
					 * not eligible for retran if its
					 * not ours
					 */
					continue;
				}
			}
			ctl_cnt++;
			if (chk->rec.chunk_id.id == SCTP_ASCONF) {
				no_fragmentflg = 1;
				asconf = 1;
			}
			if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
				fwd_tsn = 1;
				fwd = chk;
			}
			/*
			 * Add an AUTH chunk, if chunk requires it save the
			 * offset into the chain for AUTH
			 */
			if ((auth == NULL) &&
			    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
			    stcb->asoc.peer_auth_chunks))) {
				m = sctp_add_auth_chunk(m, &endofchain,
				    &auth, &auth_offset,
				    stcb,
				    chk->rec.chunk_id.id);
			}
			m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
			break;
		}
	}
	one_chunk = 0;
	cnt_thru = 0;
	/* do we have control chunks to retransmit? */
	if (m != NULL) {
		/* Start a timer no matter if we succeed or fail */
		if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
			sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
		} else if (chk->rec.chunk_id.id == SCTP_ASCONF)
			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);

		SCTP_BUF_PREPEND(m, sizeof(struct sctphdr), M_DONTWAIT);
		if (m == NULL) {
			return (ENOBUFS);
		}
		shdr = mtod(m, struct sctphdr *);
		shdr->src_port = inp->sctp_lport;
		shdr->dest_port = stcb->rport;
		shdr->v_tag = htonl(stcb->asoc.peer_vtag);
		shdr->checksum = 0;
		auth_offset += sizeof(struct sctphdr);
		chk->snd_count++;	/* update our count */

		if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
		    (struct sockaddr *)&chk->whoTo->ro._l_addr, m, auth_offset,
		    auth, no_fragmentflg, 0, NULL, asconf))) {
			SCTP_STAT_INCR(sctps_lowlevelerr);
			return (error);
		}
		m = endofchain = NULL;
		auth = NULL;
		auth_offset = 0;
		/*
		 * We don't want to mark the net->sent time here since this
		 * we use this for HB and retrans cannot measure RTT
		 */
		/* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
		*cnt_out += 1;
		chk->sent = SCTP_DATAGRAM_SENT;
		sctp_ucount_decr(asoc->sent_queue_retran_cnt);
		if (fwd_tsn == 0) {
			return (0);
		} else {
			/* Clean up the fwd-tsn list */
			sctp_clean_up_ctl(stcb, asoc);
			return (0);
		}
	}
	/*
	 * Ok, it is just data retransmission we need to do or that and a
	 * fwd-tsn with it all.
	 */
	if (TAILQ_EMPTY(&asoc->sent_queue)) {
		return (SCTP_RETRAN_DONE);
	}
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
		/* not yet open, resend the cookie and that is it */
		return (1);
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_auditing(20, inp, stcb, NULL);
#endif
	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->sent != SCTP_DATAGRAM_RESEND) {
			/* No, not sent to this net or not ready for rtx */
			continue;
		}
		if ((sctp_max_retran_chunk) && (chk->snd_count >= sctp_max_retran_chunk)) {
			/* Gak, we have exceeded max unlucky retran, abort! */
			SCTP_PRINTF("Gak, chk->snd_count:%d >= max:%d - send abort\n",
			    chk->snd_count,
			    sctp_max_retran_chunk);
			sctp_send_abort_tcb(stcb, NULL);
			sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL);
			return (SCTP_RETRAN_EXIT);
		}
		/* pick up the net */
		net = chk->whoTo;
		/* available MTU depends on address-family overhead */
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
			mtu = (net->mtu - SCTP_MIN_OVERHEAD);
		} else {
			mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
		}

		if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
			/* No room in peers rwnd */
			uint32_t tsn;

			tsn = asoc->last_acked_seq + 1;
			if (tsn == chk->rec.data.TSN_seq) {
				/*
				 * we make a special exception for this
				 * case. The peer has no rwnd but is missing
				 * the lowest chunk.. which is probably what
				 * is holding up the rwnd.
				 */
				goto one_chunk_around;
			}
			return (1);
		}
one_chunk_around:
		if (asoc->peers_rwnd < mtu) {
			one_chunk = 1;
			if ((asoc->peers_rwnd == 0) &&
			    (asoc->total_flight == 0)) {
				chk->window_probe = 1;
				chk->whoTo->window_probe = 1;
			}
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_audit_log(0xC3, 2);
#endif
		bundle_at = 0;
		m = NULL;
		net->fast_retran_ip = 0;
		if (chk->rec.data.doing_fast_retransmit == 0) {
			/*
			 * if no FR in progress skip destination that have
			 * flight_size > cwnd.
			 */
			if (net->flight_size >= net->cwnd) {
				continue;
			}
		} else {
			/*
			 * Mark the destination net to have FR recovery
			 * limits put on it.
			 */
			*fr_done = 1;
			net->fast_retran_ip = 1;
		}

		/*
		 * if no AUTH is yet included and this chunk requires it,
		 * make sure to account for it. We don't apply the size
		 * until the AUTH chunk is actually added below in case
		 * there is no room for this chunk.
		 */
		if ((auth == NULL) &&
		    sctp_auth_is_required_chunk(SCTP_DATA,
		    stcb->asoc.peer_auth_chunks)) {
			dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
		} else
			dmtu = 0;

		if ((chk->send_size <= (mtu - dmtu)) ||
		    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
			/* ok we will add this one */
			if ((auth == NULL) &&
			    (sctp_auth_is_required_chunk(SCTP_DATA,
			    stcb->asoc.peer_auth_chunks))) {
				m = sctp_add_auth_chunk(m, &endofchain,
				    &auth, &auth_offset,
				    stcb, SCTP_DATA);
			}
			m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
			if (m == NULL) {
				return (ENOMEM);
			}
			/* Do clear IP_DF ? */
			if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
				no_fragmentflg = 0;
			}
			/* update our MTU size */
			if (mtu > (chk->send_size + dmtu))
				mtu -= (chk->send_size + dmtu);
			else
				mtu = 0;
			data_list[bundle_at++] = chk;
			if (one_chunk && (asoc->total_flight <= 0)) {
				SCTP_STAT_INCR(sctps_windowprobed);
			}
		}
		if (one_chunk == 0) {
			/*
			 * now are there anymore forward from chk to pick
			 * up?  Bundle further RESEND chunks headed to the
			 * same net while they still fit.
			 */
			fwd = TAILQ_NEXT(chk, sctp_next);
			while (fwd) {
				if (fwd->sent != SCTP_DATAGRAM_RESEND) {
					/* Nope, not for retran */
					fwd = TAILQ_NEXT(fwd, sctp_next);
					continue;
				}
				if (fwd->whoTo != net) {
					/* Nope, not the net in question */
					fwd = TAILQ_NEXT(fwd, sctp_next);
					continue;
				}
				if ((auth == NULL) &&
				    sctp_auth_is_required_chunk(SCTP_DATA,
				    stcb->asoc.peer_auth_chunks)) {
					dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
				} else
					dmtu = 0;
				if (fwd->send_size <= (mtu - dmtu)) {
					if ((auth == NULL) &&
					    (sctp_auth_is_required_chunk(SCTP_DATA,
					    stcb->asoc.peer_auth_chunks))) {
						m = sctp_add_auth_chunk(m,
						    &endofchain,
						    &auth, &auth_offset,
						    stcb,
						    SCTP_DATA);
					}
					m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
					if (m == NULL) {
						return (ENOMEM);
					}
					/* Do clear IP_DF ? */
					if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
						no_fragmentflg = 0;
					}
					/* update our MTU size */
					if (mtu > (fwd->send_size + dmtu))
						mtu -= (fwd->send_size + dmtu);
					else
						mtu = 0;
					data_list[bundle_at++] = fwd;
					if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
						break;
					}
					fwd = TAILQ_NEXT(fwd, sctp_next);
				} else {
					/* can't fit so we are done */
					break;
				}
			}
		}
		/* Is there something to send for this destination? */
		if (m) {
			/*
			 * No matter if we fail/or succeed we should start a
			 * timer. A failure is like a lost IP packet :-)
			 */
			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
				/*
				 * no timer running on this destination
				 * restart it.
				 */
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
				tmr_started = 1;
			}
			SCTP_BUF_PREPEND(m, sizeof(struct sctphdr), M_DONTWAIT);
			if (m == NULL) {
				return (ENOBUFS);
			}
			shdr = mtod(m, struct sctphdr *);
			shdr->src_port = inp->sctp_lport;
			shdr->dest_port = stcb->rport;
			shdr->v_tag = htonl(stcb->asoc.peer_vtag);
			shdr->checksum = 0;
			auth_offset += sizeof(struct sctphdr);
			/* Now lets send it, if there is anything to send :> */
			if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
			    (struct sockaddr *)&net->ro._l_addr, m, auth_offset,
			    auth, no_fragmentflg, 0, NULL, asconf))) {
				/* error, we could not output */
				SCTP_STAT_INCR(sctps_lowlevelerr);
				return (error);
			}
			m = endofchain = NULL;
			auth = NULL;
			auth_offset = 0;
			/* For HB's */
			/*
			 * We don't want to mark the net->sent time here
			 * since this we use this for HB and retrans cannot
			 * measure RTT
			 */
			/* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */

			/* For auto-close */
			/* cnt_thru is counted here but not consumed below */
			cnt_thru++;
			if (*now_filled == 0) {
				(void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
				*now = asoc->time_last_sent;
				*now_filled = 1;
			} else {
				asoc->time_last_sent = *now;
			}
			*cnt_out += bundle_at;
#ifdef SCTP_AUDITING_ENABLED
			sctp_audit_log(0xC4, bundle_at);
#endif
			if (bundle_at) {
				tsns_sent = data_list[0]->rec.data.TSN_seq;
			}
			/* book-keeping for every chunk in the sent packet */
			for (i = 0; i < bundle_at; i++) {
				SCTP_STAT_INCR(sctps_sendretransdata);
				data_list[i]->sent = SCTP_DATAGRAM_SENT;
				/*
				 * When we have a revoked data, and we
				 * retransmit it, then we clear the revoked
				 * flag since this flag dictates if we
				 * subtracted from the fs
				 */
				if (data_list[i]->rec.data.chunk_was_revoked) {
					/* Deflate the cwnd */
					data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
					data_list[i]->rec.data.chunk_was_revoked = 0;
				}
				data_list[i]->snd_count++;
				sctp_ucount_decr(asoc->sent_queue_retran_cnt);
				/* record the time */
				data_list[i]->sent_rcv_time = asoc->time_last_sent;
				if (data_list[i]->book_size_scale) {
					/*
					 * need to double the book size on
					 * this one
					 */
					data_list[i]->book_size_scale = 0;
					/*
					 * Since we double the booksize, we
					 * must also double the output queue
					 * size, since this get shrunk when
					 * we free by this amount.
					 */
					atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size);
					data_list[i]->book_size *= 2;


				} else {
#ifdef SCTP_LOG_RWND
					sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
					    asoc->peers_rwnd, data_list[i]->send_size, sctp_peer_chunk_oh);
#endif
					asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
					    (uint32_t) (data_list[i]->send_size +
					    sctp_peer_chunk_oh));
				}
#ifdef SCTP_FLIGHT_LOGGING
				sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
				    data_list[i]->whoTo->flight_size,
				    data_list[i]->book_size,
				    (uintptr_t) data_list[i]->whoTo,
				    data_list[i]->rec.data.TSN_seq);
#endif
				sctp_flight_size_increase(data_list[i]);
				sctp_total_flight_increase(stcb, data_list[i]);
				if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
					/* SWS sender side engages */
					asoc->peers_rwnd = 0;
				}
				if ((i == 0) &&
				    (data_list[i]->rec.data.doing_fast_retransmit)) {
					SCTP_STAT_INCR(sctps_sendfastretrans);
					if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
					    (tmr_started == 0)) {
						/*-
						 * ok we just fast-retrans'd
						 * the lowest TSN, i.e the
						 * first on the list. In
						 * this case we want to give
						 * some more time to get a
						 * SACK back without a
						 * t3-expiring.
						 */
						sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
						    SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
						sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
					}
				}
			}
#ifdef SCTP_CWND_LOGGING
			sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
#endif
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(21, inp, stcb, NULL);
#endif
		} else {
			/* None will fit */
			return (1);
		}
		if (asoc->sent_queue_retran_cnt <= 0) {
			/* all done we have no more to retran */
			asoc->sent_queue_retran_cnt = 0;
			break;
		}
		if (one_chunk) {
			/* No more room in rwnd */
			return (1);
		}
		/* stop the for loop here. we sent out a packet */
		break;
	}
	return (0);
}


/*
 * Ensure at least one retransmission timer is running on some destination;
 * if none is pending, start the send timer on the primary destination to
 * avoid a deadlocked (timer-less) retransmission state.  Returns ret
 * unchanged so callers can tail-call it.
 */
static int
sctp_timer_validation(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int ret)
{
	struct sctp_nets *net;

	/* Validate that a timer is running somewhere */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
			/* Here is a timer */
			return (ret);
		}
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	/* Gak, we did not have a timer somewhere */
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
	sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
	return (ret);
}

void
sctp_chunk_output(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int from_where)
{
	/*-
	 * Ok this is the generic chunk service queue. we must do the
	 * following:
	 * - See if there are retransmits pending, if so we must
	 * do these first.
	 * - Service the stream queue that is next, moving any
	 * message (note I must get a complete message i.e.
8411 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning 8412 * TSN's 8413 * - Check to see if the cwnd/rwnd allows any output, if so we 8414 * go ahead and fomulate and send the low level chunks. Making sure 8415 * to combine any control in the control chunk queue also. 8416 */ 8417 struct sctp_association *asoc; 8418 struct sctp_nets *net; 8419 int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0, 8420 burst_cnt = 0, burst_limit = 0; 8421 struct timeval now; 8422 int now_filled = 0; 8423 int cwnd_full = 0; 8424 int nagle_on = 0; 8425 int frag_point = sctp_get_frag_point(stcb, &stcb->asoc); 8426 int un_sent = 0; 8427 int fr_done, tot_frs = 0; 8428 8429 asoc = &stcb->asoc; 8430 if (from_where == SCTP_OUTPUT_FROM_USR_SEND) { 8431 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) { 8432 nagle_on = 0; 8433 } else { 8434 nagle_on = 1; 8435 } 8436 } 8437 SCTP_TCB_LOCK_ASSERT(stcb); 8438 8439 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight); 8440 8441 if ((un_sent <= 0) && 8442 (TAILQ_EMPTY(&asoc->control_send_queue)) && 8443 (asoc->sent_queue_retran_cnt == 0)) { 8444 /* Nothing to do unless there is something to be sent left */ 8445 return; 8446 } 8447 /* 8448 * Do we have something to send, data or control AND a sack timer 8449 * running, if so piggy-back the sack. 8450 */ 8451 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 8452 sctp_send_sack(stcb); 8453 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 8454 } 8455 while (asoc->sent_queue_retran_cnt) { 8456 /*- 8457 * Ok, it is retransmission time only, we send out only ONE 8458 * packet with a single call off to the retran code. 8459 */ 8460 if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) { 8461 /*- 8462 * Special hook for handling cookiess discarded 8463 * by peer that carried data. Send cookie-ack only 8464 * and then the next call with get the retran's. 
8465 */ 8466 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, 8467 &cwnd_full, from_where, 8468 &now, &now_filled, frag_point); 8469 return; 8470 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) { 8471 /* if its not from a HB then do it */ 8472 fr_done = 0; 8473 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done); 8474 if (fr_done) { 8475 tot_frs++; 8476 } 8477 } else { 8478 /* 8479 * its from any other place, we don't allow retran 8480 * output (only control) 8481 */ 8482 ret = 1; 8483 } 8484 if (ret > 0) { 8485 /* Can't send anymore */ 8486 /*- 8487 * now lets push out control by calling med-level 8488 * output once. this assures that we WILL send HB's 8489 * if queued too. 8490 */ 8491 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, 8492 &cwnd_full, from_where, 8493 &now, &now_filled, frag_point); 8494 #ifdef SCTP_AUDITING_ENABLED 8495 sctp_auditing(8, inp, stcb, NULL); 8496 #endif 8497 (void)sctp_timer_validation(inp, stcb, asoc, ret); 8498 return; 8499 } 8500 if (ret < 0) { 8501 /*- 8502 * The count was off.. retran is not happening so do 8503 * the normal retransmission. 
8504 */ 8505 #ifdef SCTP_AUDITING_ENABLED 8506 sctp_auditing(9, inp, stcb, NULL); 8507 #endif 8508 if (ret == SCTP_RETRAN_EXIT) { 8509 return; 8510 } 8511 break; 8512 } 8513 if (from_where == SCTP_OUTPUT_FROM_T3) { 8514 /* Only one transmission allowed out of a timeout */ 8515 #ifdef SCTP_AUDITING_ENABLED 8516 sctp_auditing(10, inp, stcb, NULL); 8517 #endif 8518 /* Push out any control */ 8519 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, &cwnd_full, from_where, 8520 &now, &now_filled, frag_point); 8521 return; 8522 } 8523 if (tot_frs > asoc->max_burst) { 8524 /* Hit FR burst limit */ 8525 return; 8526 } 8527 if ((num_out == 0) && (ret == 0)) { 8528 8529 /* No more retrans to send */ 8530 break; 8531 } 8532 } 8533 #ifdef SCTP_AUDITING_ENABLED 8534 sctp_auditing(12, inp, stcb, NULL); 8535 #endif 8536 /* Check for bad destinations, if they exist move chunks around. */ 8537 burst_limit = asoc->max_burst; 8538 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 8539 if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) == 8540 SCTP_ADDR_NOT_REACHABLE) { 8541 /*- 8542 * if possible move things off of this address we 8543 * still may send below due to the dormant state but 8544 * we try to find an alternate address to send to 8545 * and if we have one we move all queued data on the 8546 * out wheel to this alternate address. 
8547 */ 8548 if (net->ref_count > 1) 8549 sctp_move_to_an_alt(stcb, asoc, net); 8550 } else { 8551 /*- 8552 * if ((asoc->sat_network) || (net->addr_is_local)) 8553 * { burst_limit = asoc->max_burst * 8554 * SCTP_SAT_NETWORK_BURST_INCR; } 8555 */ 8556 if (sctp_use_cwnd_based_maxburst) { 8557 if ((net->flight_size + (burst_limit * net->mtu)) < net->cwnd) { 8558 int old_cwnd; 8559 8560 if (net->ssthresh < net->cwnd) 8561 net->ssthresh = net->cwnd; 8562 old_cwnd = net->cwnd; 8563 net->cwnd = (net->flight_size + (burst_limit * net->mtu)); 8564 8565 #ifdef SCTP_CWND_MONITOR 8566 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST); 8567 #endif 8568 8569 #ifdef SCTP_LOG_MAXBURST 8570 sctp_log_maxburst(stcb, net, 0, burst_limit, SCTP_MAX_BURST_APPLIED); 8571 #endif 8572 SCTP_STAT_INCR(sctps_maxburstqueued); 8573 } 8574 net->fast_retran_ip = 0; 8575 } else { 8576 if (net->flight_size == 0) { 8577 /* Should be decaying the cwnd here */ 8578 ; 8579 } 8580 } 8581 } 8582 8583 } 8584 burst_cnt = 0; 8585 cwnd_full = 0; 8586 do { 8587 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out, 8588 &reason_code, 0, &cwnd_full, from_where, 8589 &now, &now_filled, frag_point); 8590 if (error) { 8591 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error); 8592 #ifdef SCTP_LOG_MAXBURST 8593 sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP); 8594 #endif 8595 #ifdef SCTP_CWND_LOGGING 8596 sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES); 8597 sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES); 8598 #endif 8599 8600 break; 8601 } 8602 SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out); 8603 8604 tot_out += num_out; 8605 burst_cnt++; 8606 #ifdef SCTP_CWND_LOGGING 8607 sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES); 8608 if (num_out == 0) { 8609 sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES); 8610 } 8611 #endif 8612 if (nagle_on) { 8613 /*- 8614 * 
When nagle is on, we look at how much is un_sent, then 8615 * if its smaller than an MTU and we have data in 8616 * flight we stop. 8617 */ 8618 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 8619 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) 8620 * sizeof(struct sctp_data_chunk))); 8621 if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) && 8622 (stcb->asoc.total_flight > 0)) { 8623 break; 8624 } 8625 } 8626 if (TAILQ_EMPTY(&asoc->control_send_queue) && 8627 TAILQ_EMPTY(&asoc->send_queue) && 8628 TAILQ_EMPTY(&asoc->out_wheel)) { 8629 /* Nothing left to send */ 8630 break; 8631 } 8632 if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) { 8633 /* Nothing left to send */ 8634 break; 8635 } 8636 } while (num_out && (sctp_use_cwnd_based_maxburst || 8637 (burst_cnt < burst_limit))); 8638 8639 if (sctp_use_cwnd_based_maxburst == 0) { 8640 if (burst_cnt >= burst_limit) { 8641 SCTP_STAT_INCR(sctps_maxburstqueued); 8642 asoc->burst_limit_applied = 1; 8643 #ifdef SCTP_LOG_MAXBURST 8644 sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED); 8645 #endif 8646 } else { 8647 asoc->burst_limit_applied = 0; 8648 } 8649 } 8650 #ifdef SCTP_CWND_LOGGING 8651 sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES); 8652 #endif 8653 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n", 8654 tot_out); 8655 8656 /*- 8657 * Now we need to clean up the control chunk chain if a ECNE is on 8658 * it. It must be marked as UNSENT again so next call will continue 8659 * to send it until such time that we get a CWR, to remove it. 
8660 */ 8661 if (stcb->asoc.ecn_echo_cnt_onq) 8662 sctp_fix_ecn_echo(asoc); 8663 return; 8664 } 8665 8666 8667 int 8668 sctp_output(inp, m, addr, control, p, flags) 8669 struct sctp_inpcb *inp; 8670 struct mbuf *m; 8671 struct sockaddr *addr; 8672 struct mbuf *control; 8673 struct thread *p; 8674 int flags; 8675 { 8676 if (inp == NULL) { 8677 return (EINVAL); 8678 } 8679 if (inp->sctp_socket == NULL) { 8680 return (EINVAL); 8681 } 8682 return (sctp_sosend(inp->sctp_socket, 8683 addr, 8684 (struct uio *)NULL, 8685 m, 8686 control, 8687 flags, p 8688 )); 8689 } 8690 8691 void 8692 send_forward_tsn(struct sctp_tcb *stcb, 8693 struct sctp_association *asoc) 8694 { 8695 struct sctp_tmit_chunk *chk; 8696 struct sctp_forward_tsn_chunk *fwdtsn; 8697 8698 SCTP_TCB_LOCK_ASSERT(stcb); 8699 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 8700 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) { 8701 /* mark it to unsent */ 8702 chk->sent = SCTP_DATAGRAM_UNSENT; 8703 chk->snd_count = 0; 8704 /* Do we correct its output location? 
*/ 8705 if (chk->whoTo != asoc->primary_destination) { 8706 sctp_free_remote_addr(chk->whoTo); 8707 chk->whoTo = asoc->primary_destination; 8708 atomic_add_int(&chk->whoTo->ref_count, 1); 8709 } 8710 goto sctp_fill_in_rest; 8711 } 8712 } 8713 /* Ok if we reach here we must build one */ 8714 sctp_alloc_a_chunk(stcb, chk); 8715 if (chk == NULL) { 8716 return; 8717 } 8718 chk->copy_by_ref = 0; 8719 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN; 8720 chk->rec.chunk_id.can_take_data = 0; 8721 chk->asoc = asoc; 8722 chk->whoTo = NULL; 8723 8724 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 8725 if (chk->data == NULL) { 8726 sctp_free_a_chunk(stcb, chk); 8727 return; 8728 } 8729 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 8730 chk->sent = SCTP_DATAGRAM_UNSENT; 8731 chk->snd_count = 0; 8732 chk->whoTo = asoc->primary_destination; 8733 atomic_add_int(&chk->whoTo->ref_count, 1); 8734 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next); 8735 asoc->ctrl_queue_cnt++; 8736 sctp_fill_in_rest: 8737 /*- 8738 * Here we go through and fill out the part that deals with 8739 * stream/seq of the ones we skip. 
8740 */ 8741 SCTP_BUF_LEN(chk->data) = 0; 8742 { 8743 struct sctp_tmit_chunk *at, *tp1, *last; 8744 struct sctp_strseq *strseq; 8745 unsigned int cnt_of_space, i, ovh; 8746 unsigned int space_needed; 8747 unsigned int cnt_of_skipped = 0; 8748 8749 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) { 8750 if (at->sent != SCTP_FORWARD_TSN_SKIP) { 8751 /* no more to look at */ 8752 break; 8753 } 8754 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) { 8755 /* We don't report these */ 8756 continue; 8757 } 8758 cnt_of_skipped++; 8759 } 8760 space_needed = (sizeof(struct sctp_forward_tsn_chunk) + 8761 (cnt_of_skipped * sizeof(struct sctp_strseq))); 8762 8763 cnt_of_space = M_TRAILINGSPACE(chk->data); 8764 8765 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 8766 ovh = SCTP_MIN_OVERHEAD; 8767 } else { 8768 ovh = SCTP_MIN_V4_OVERHEAD; 8769 } 8770 if (cnt_of_space > (asoc->smallest_mtu - ovh)) { 8771 /* trim to a mtu size */ 8772 cnt_of_space = asoc->smallest_mtu - ovh; 8773 } 8774 if (cnt_of_space < space_needed) { 8775 /*- 8776 * ok we must trim down the chunk by lowering the 8777 * advance peer ack point. 8778 */ 8779 cnt_of_skipped = (cnt_of_space - 8780 ((sizeof(struct sctp_forward_tsn_chunk)) / 8781 sizeof(struct sctp_strseq))); 8782 /*- 8783 * Go through and find the TSN that will be the one 8784 * we report. 
8785 */ 8786 at = TAILQ_FIRST(&asoc->sent_queue); 8787 for (i = 0; i < cnt_of_skipped; i++) { 8788 tp1 = TAILQ_NEXT(at, sctp_next); 8789 at = tp1; 8790 } 8791 last = at; 8792 /*- 8793 * last now points to last one I can report, update 8794 * peer ack point 8795 */ 8796 asoc->advanced_peer_ack_point = last->rec.data.TSN_seq; 8797 space_needed -= (cnt_of_skipped * sizeof(struct sctp_strseq)); 8798 } 8799 chk->send_size = space_needed; 8800 /* Setup the chunk */ 8801 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *); 8802 fwdtsn->ch.chunk_length = htons(chk->send_size); 8803 fwdtsn->ch.chunk_flags = 0; 8804 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN; 8805 fwdtsn->new_cumulative_tsn = htonl(asoc->advanced_peer_ack_point); 8806 chk->send_size = (sizeof(struct sctp_forward_tsn_chunk) + 8807 (cnt_of_skipped * sizeof(struct sctp_strseq))); 8808 SCTP_BUF_LEN(chk->data) = chk->send_size; 8809 fwdtsn++; 8810 /*- 8811 * Move pointer to after the fwdtsn and transfer to the 8812 * strseq pointer. 8813 */ 8814 strseq = (struct sctp_strseq *)fwdtsn; 8815 /*- 8816 * Now populate the strseq list. This is done blindly 8817 * without pulling out duplicate stream info. This is 8818 * inefficent but won't harm the process since the peer will 8819 * look at these in sequence and will thus release anything. 8820 * It could mean we exceed the PMTU and chop off some that 8821 * we could have included.. but this is unlikely (aka 1432/4 8822 * would mean 300+ stream seq's would have to be reported in 8823 * one FWD-TSN. With a bit of work we can later FIX this to 8824 * optimize and pull out duplcates.. but it does add more 8825 * overhead. So for now... not! 
8826 */ 8827 at = TAILQ_FIRST(&asoc->sent_queue); 8828 for (i = 0; i < cnt_of_skipped; i++) { 8829 tp1 = TAILQ_NEXT(at, sctp_next); 8830 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) { 8831 /* We don't report these */ 8832 i--; 8833 at = tp1; 8834 continue; 8835 } 8836 strseq->stream = ntohs(at->rec.data.stream_number); 8837 strseq->sequence = ntohs(at->rec.data.stream_seq); 8838 strseq++; 8839 at = tp1; 8840 } 8841 } 8842 return; 8843 8844 } 8845 8846 void 8847 sctp_send_sack(struct sctp_tcb *stcb) 8848 { 8849 /*- 8850 * Queue up a SACK in the control queue. We must first check to see 8851 * if a SACK is somehow on the control queue. If so, we will take 8852 * and and remove the old one. 8853 */ 8854 struct sctp_association *asoc; 8855 struct sctp_tmit_chunk *chk, *a_chk; 8856 struct sctp_sack_chunk *sack; 8857 struct sctp_gap_ack_block *gap_descriptor; 8858 struct sack_track *selector; 8859 int mergeable = 0; 8860 int offset; 8861 caddr_t limit; 8862 uint32_t *dup; 8863 int limit_reached = 0; 8864 unsigned int i, jstart, siz, j; 8865 unsigned int num_gap_blocks = 0, space; 8866 int num_dups = 0; 8867 int space_req; 8868 8869 8870 a_chk = NULL; 8871 asoc = &stcb->asoc; 8872 SCTP_TCB_LOCK_ASSERT(stcb); 8873 if (asoc->last_data_chunk_from == NULL) { 8874 /* Hmm we never received anything */ 8875 return; 8876 } 8877 sctp_set_rwnd(stcb, asoc); 8878 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 8879 if (chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) { 8880 /* Hmm, found a sack already on queue, remove it */ 8881 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); 8882 asoc->ctrl_queue_cnt++; 8883 a_chk = chk; 8884 if (a_chk->data) { 8885 sctp_m_freem(a_chk->data); 8886 a_chk->data = NULL; 8887 } 8888 sctp_free_remote_addr(a_chk->whoTo); 8889 a_chk->whoTo = NULL; 8890 break; 8891 } 8892 } 8893 if (a_chk == NULL) { 8894 sctp_alloc_a_chunk(stcb, a_chk); 8895 if (a_chk == NULL) { 8896 /* No memory so we drop the idea, and set a timer */ 8897 if 
			    (stcb->asoc.delayed_ack) {
				/* restart the delayed-ack timer; we'll try again later */
				sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
				    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
				    stcb->sctp_ep, stcb, NULL);
			} else {
				stcb->asoc.send_sack = 1;
			}
			return;
		}
		a_chk->copy_by_ref = 0;
		/* a_chk->rec.chunk_id.id = SCTP_SELECTIVE_ACK; */
		a_chk->rec.chunk_id.id = SCTP_SELECTIVE_ACK;
		a_chk->rec.chunk_id.can_take_data = 1;
	}
	/* Clear our pkt counts */
	asoc->data_pkts_seen = 0;

	a_chk->asoc = asoc;
	a_chk->snd_count = 0;
	a_chk->send_size = 0;	/* fill in later */
	a_chk->sent = SCTP_DATAGRAM_UNSENT;
	a_chk->whoTo = NULL;

	if ((asoc->numduptsns) ||
	    (asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE)
	    ) {
		/*-
		 * Ok, we have some duplicates or the destination for the
		 * sack is unreachable, lets see if we can select an
		 * alternate than asoc->last_data_chunk_from
		 */
		if ((!(asoc->last_data_chunk_from->dest_state &
		    SCTP_ADDR_NOT_REACHABLE)) &&
		    (asoc->used_alt_onsack > asoc->numnets)) {
			/* We used an alt last time, don't this time */
			a_chk->whoTo = NULL;
		} else {
			asoc->used_alt_onsack++;
			a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
		}
		if (a_chk->whoTo == NULL) {
			/* Nope, no alternate */
			a_chk->whoTo = asoc->last_data_chunk_from;
			asoc->used_alt_onsack = 0;
		}
	} else {
		/*
		 * No duplicates so we use the last place we received data
		 * from.
		 */
		asoc->used_alt_onsack = 0;
		a_chk->whoTo = asoc->last_data_chunk_from;
	}
	if (a_chk->whoTo) {
		atomic_add_int(&a_chk->whoTo->ref_count, 1);
	}
	if (asoc->highest_tsn_inside_map == asoc->cumulative_tsn) {
		/* no gaps */
		space_req = sizeof(struct sctp_sack_chunk);
	} else {
		/* gaps get a cluster */
		space_req = MCLBYTES;
	}
	/* Ok now lets formulate a MBUF with our sack */
	a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_DONTWAIT, 1, MT_DATA);
	if ((a_chk->data == NULL) ||
	    (a_chk->whoTo == NULL)) {
		/* rats, no mbuf memory */
		if (a_chk->data) {
			/* was a problem with the destination */
			sctp_m_freem(a_chk->data);
			a_chk->data = NULL;
		}
		if (a_chk->whoTo)
			atomic_subtract_int(&a_chk->whoTo->ref_count, 1);
		sctp_free_a_chunk(stcb, a_chk);
		if (stcb->asoc.delayed_ack) {
			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
			sctp_timer_start(SCTP_TIMER_TYPE_RECV,
			    stcb->sctp_ep, stcb, NULL);
		} else {
			stcb->asoc.send_sack = 1;
		}
		return;
	}
	/* ok, lets go through and fill it in */
	SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
	space = M_TRAILINGSPACE(a_chk->data);
	if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
		space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
	}
	/* limit marks the end of usable space for gap blocks / dup TSNs */
	limit = mtod(a_chk->data, caddr_t);
	limit += space;

	sack = mtod(a_chk->data, struct sctp_sack_chunk *);
	sack->ch.chunk_type = SCTP_SELECTIVE_ACK;
	/* 0x01 is used by nonce for ecn */
	if ((sctp_ecn_enable) &&
	    (sctp_ecn_nonce) &&
	    (asoc->peer_supports_ecn_nonce))
		sack->ch.chunk_flags = (asoc->receiver_nonce_sum & SCTP_SACK_NONCE_SUM);
	else
		sack->ch.chunk_flags = 0;

	if (sctp_cmt_on_off && sctp_cmt_use_dac) {
		/*-
		 * CMT DAC algorithm: If 2 (i.e., 0x10) packets have been
		 * received, then set high bit to 1, else 0. Reset
		 * pkts_rcvd.
		 */
		sack->ch.chunk_flags |= (asoc->cmt_dac_pkts_rcvd << 6);
		asoc->cmt_dac_pkts_rcvd = 0;
	}
	sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
	sack->sack.a_rwnd = htonl(asoc->my_rwnd);
	asoc->my_last_reported_rwnd = asoc->my_rwnd;

	/* reset the readers interpretation */
	stcb->freed_by_sorcv_sincelast = 0;

	gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));

	/* siz = number of mapping-array bytes covering TSNs up to the highest seen */
	siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
	if (compare_with_wrap(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, MAX_TSN)) {
		offset = 1;
		/*-
		 * cum-ack behind the mapping array, so we start and use all
		 * entries.
		 */
		jstart = 0;
	} else {
		offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
		/*-
		 * we skip the first one when the cum-ack is at or above the
		 * mapping array base. Note this only works if
		 */
		jstart = 1;
	}
	if (compare_with_wrap(asoc->highest_tsn_inside_map, asoc->cumulative_tsn, MAX_TSN)) {
		/* we have a gap .. maybe */
		for (i = 0; i < siz; i++) {
			/* one table entry per possible byte value of the mapping array */
			selector = &sack_array[asoc->mapping_array[i]];
			if (mergeable && selector->right_edge) {
				/*
				 * Backup, left and right edges were ok to
				 * merge.
				 */
				num_gap_blocks--;
				gap_descriptor--;
			}
			if (selector->num_entries == 0)
				mergeable = 0;
			else {
				for (j = jstart; j < selector->num_entries; j++) {
					if (mergeable && selector->right_edge) {
						/*
						 * do a merge by NOT setting
						 * the left side
						 */
						mergeable = 0;
					} else {
						/*
						 * no merge, set the left
						 * side
						 */
						mergeable = 0;
						gap_descriptor->start = htons((selector->gaps[j].start + offset));
					}
					gap_descriptor->end = htons((selector->gaps[j].end + offset));
					num_gap_blocks++;
					gap_descriptor++;
					if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
						/* no more room */
						limit_reached = 1;
						break;
					}
				}
				if (selector->left_edge) {
					mergeable = 1;
				}
			}
			if (limit_reached) {
				/* Reached the limit stop */
				break;
			}
			jstart = 0;
			offset += 8;
		}
		if (num_gap_blocks == 0) {
			/* reneged all chunks */
			asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
		}
	}
	/* now we must add any dups we are going to report. */
	if ((limit_reached == 0) && (asoc->numduptsns)) {
		dup = (uint32_t *) gap_descriptor;
		for (i = 0; i < asoc->numduptsns; i++) {
			*dup = htonl(asoc->dup_tsns[i]);
			dup++;
			num_dups++;
			if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
				/* no more room */
				break;
			}
		}
		asoc->numduptsns = 0;
	}
	/*
	 * now that the chunk is prepared queue it to the control chunk
	 * queue.
	 */
	a_chk->send_size = (sizeof(struct sctp_sack_chunk) +
	    (num_gap_blocks * sizeof(struct sctp_gap_ack_block)) +
	    (num_dups * sizeof(int32_t)));
	SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
	sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
	sack->sack.num_dup_tsns = htons(num_dups);
	sack->ch.chunk_length = htons(a_chk->send_size);
	TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
	asoc->ctrl_queue_cnt++;
	asoc->send_sack = 0;
	SCTP_STAT_INCR(sctps_sendsacks);
	return;
}


/*
 * Build and send an ABORT chunk (with optional error-cause mbuf chain
 * "operr" linked behind it) for an existing association, prepending an
 * AUTH chunk when the peer requires ABORT to be authenticated.  Takes
 * ownership of "operr".  Caller must hold the TCB lock.
 */
void
sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr)
{
	struct mbuf *m_abort;
	struct mbuf *m_out = NULL, *m_end = NULL;
	struct sctp_abort_chunk *abort = NULL;
	int sz;
	uint32_t auth_offset = 0;
	struct sctp_auth_chunk *auth = NULL;
	struct sctphdr *shdr;

	/*-
	 * Add an AUTH chunk, if chunk requires it and save the offset into
	 * the chain for AUTH
	 */
	if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
	    stcb->asoc.peer_auth_chunks)) {
		m_out = sctp_add_auth_chunk(m_out, &m_end, &auth, &auth_offset,
		    stcb, SCTP_ABORT_ASSOCIATION);
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
	if (m_abort == NULL) {
		/* no mbuf's */
		if (m_out)
			sctp_m_freem(m_out);
		return;
	}
	/* link in any error */
	SCTP_BUF_NEXT(m_abort) = operr;
	/* total the length of the error-cause chain for the chunk header */
	sz = 0;
	if (operr) {
		struct mbuf *n;

		n = operr;
		while (n) {
			sz += SCTP_BUF_LEN(n);
			n = SCTP_BUF_NEXT(n);
		}
	}
	SCTP_BUF_LEN(m_abort) = sizeof(*abort);
	if (m_out == NULL) {
		/* NO Auth chunk prepended, so reserve space in front */
		SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
		m_out = m_abort;
	} else {
		/* Put AUTH chunk at the front of the chain */
		SCTP_BUF_NEXT(m_end) = m_abort;
	}
9174 9175 /* fill in the ABORT chunk */ 9176 abort = mtod(m_abort, struct sctp_abort_chunk *); 9177 abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION; 9178 abort->ch.chunk_flags = 0; 9179 abort->ch.chunk_length = htons(sizeof(*abort) + sz); 9180 9181 /* prepend and fill in the SCTP header */ 9182 SCTP_BUF_PREPEND(m_out, sizeof(struct sctphdr), M_DONTWAIT); 9183 if (m_out == NULL) { 9184 /* TSNH: no memory */ 9185 return; 9186 } 9187 shdr = mtod(m_out, struct sctphdr *); 9188 shdr->src_port = stcb->sctp_ep->sctp_lport; 9189 shdr->dest_port = stcb->rport; 9190 shdr->v_tag = htonl(stcb->asoc.peer_vtag); 9191 shdr->checksum = 0; 9192 auth_offset += sizeof(struct sctphdr); 9193 9194 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, 9195 stcb->asoc.primary_destination, 9196 (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr, 9197 m_out, auth_offset, auth, 1, 0, NULL, 0); 9198 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 9199 } 9200 9201 void 9202 sctp_send_shutdown_complete(struct sctp_tcb *stcb, 9203 struct sctp_nets *net) 9204 { 9205 /* formulate and SEND a SHUTDOWN-COMPLETE */ 9206 struct mbuf *m_shutdown_comp; 9207 struct sctp_shutdown_complete_msg *comp_cp; 9208 9209 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_complete_msg), 0, M_DONTWAIT, 1, MT_HEADER); 9210 if (m_shutdown_comp == NULL) { 9211 /* no mbuf's */ 9212 return; 9213 } 9214 comp_cp = mtod(m_shutdown_comp, struct sctp_shutdown_complete_msg *); 9215 comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE; 9216 comp_cp->shut_cmp.ch.chunk_flags = 0; 9217 comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk)); 9218 comp_cp->sh.src_port = stcb->sctp_ep->sctp_lport; 9219 comp_cp->sh.dest_port = stcb->rport; 9220 comp_cp->sh.v_tag = htonl(stcb->asoc.peer_vtag); 9221 comp_cp->sh.checksum = 0; 9222 9223 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_msg); 9224 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net, 
9225 (struct sockaddr *)&net->ro._l_addr, 9226 m_shutdown_comp, 0, NULL, 1, 0, NULL, 0); 9227 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 9228 return; 9229 } 9230 9231 void 9232 sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh, 9233 uint32_t vrf_id, uint32_t table_id) 9234 { 9235 /* formulate and SEND a SHUTDOWN-COMPLETE */ 9236 struct mbuf *o_pak; 9237 struct mbuf *mout; 9238 struct ip *iph, *iph_out; 9239 struct ip6_hdr *ip6, *ip6_out; 9240 int offset_out, len, mlen; 9241 struct sctp_shutdown_complete_msg *comp_cp; 9242 9243 /* Get room for the largest message */ 9244 len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_shutdown_complete_msg)); 9245 mout = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA); 9246 if (mout == NULL) { 9247 return; 9248 } 9249 SCTP_BUF_LEN(mout) = len; 9250 iph = mtod(m, struct ip *); 9251 iph_out = NULL; 9252 ip6_out = NULL; 9253 offset_out = 0; 9254 if (iph->ip_v == IPVERSION) { 9255 SCTP_BUF_LEN(mout) = sizeof(struct ip) + 9256 sizeof(struct sctp_shutdown_complete_msg); 9257 SCTP_BUF_NEXT(mout) = NULL; 9258 iph_out = mtod(mout, struct ip *); 9259 9260 /* Fill in the IP header for the ABORT */ 9261 iph_out->ip_v = IPVERSION; 9262 iph_out->ip_hl = (sizeof(struct ip) / 4); 9263 iph_out->ip_tos = (u_char)0; 9264 iph_out->ip_id = 0; 9265 iph_out->ip_off = 0; 9266 iph_out->ip_ttl = MAXTTL; 9267 iph_out->ip_p = IPPROTO_SCTP; 9268 iph_out->ip_src.s_addr = iph->ip_dst.s_addr; 9269 iph_out->ip_dst.s_addr = iph->ip_src.s_addr; 9270 9271 /* let IP layer calculate this */ 9272 iph_out->ip_sum = 0; 9273 offset_out += sizeof(*iph_out); 9274 comp_cp = (struct sctp_shutdown_complete_msg *)( 9275 (caddr_t)iph_out + offset_out); 9276 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 9277 ip6 = (struct ip6_hdr *)iph; 9278 SCTP_BUF_LEN(mout) = sizeof(struct ip6_hdr) + 9279 sizeof(struct sctp_shutdown_complete_msg); 9280 SCTP_BUF_NEXT(mout) = NULL; 9281 ip6_out = mtod(mout, struct ip6_hdr *); 9282 9283 /* Fill in the IPv6 
header for the ABORT */ 9284 ip6_out->ip6_flow = ip6->ip6_flow; 9285 ip6_out->ip6_hlim = ip6_defhlim; 9286 ip6_out->ip6_nxt = IPPROTO_SCTP; 9287 ip6_out->ip6_src = ip6->ip6_dst; 9288 ip6_out->ip6_dst = ip6->ip6_src; 9289 /* 9290 * ?? The old code had both the iph len + payload, I think 9291 * this is wrong and would never have worked 9292 */ 9293 ip6_out->ip6_plen = sizeof(struct sctp_shutdown_complete_msg); 9294 offset_out += sizeof(*ip6_out); 9295 comp_cp = (struct sctp_shutdown_complete_msg *)( 9296 (caddr_t)ip6_out + offset_out); 9297 } else { 9298 /* Currently not supported. */ 9299 return; 9300 } 9301 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) { 9302 /* no mbuf's */ 9303 sctp_m_freem(mout); 9304 return; 9305 } 9306 /* Now copy in and fill in the ABORT tags etc. */ 9307 comp_cp->sh.src_port = sh->dest_port; 9308 comp_cp->sh.dest_port = sh->src_port; 9309 comp_cp->sh.checksum = 0; 9310 comp_cp->sh.v_tag = sh->v_tag; 9311 comp_cp->shut_cmp.ch.chunk_flags = SCTP_HAD_NO_TCB; 9312 comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE; 9313 comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk)); 9314 9315 /* add checksum */ 9316 if ((sctp_no_csum_on_loopback) && SCTP_IS_IT_LOOPBACK(mout)) { 9317 comp_cp->sh.checksum = 0; 9318 } else { 9319 comp_cp->sh.checksum = sctp_calculate_sum(mout, NULL, offset_out); 9320 } 9321 if (iph_out != NULL) { 9322 sctp_route_t ro; 9323 int ret; 9324 struct sctp_tcb *stcb = NULL; 9325 9326 mlen = SCTP_BUF_LEN(mout); 9327 bzero(&ro, sizeof ro); 9328 /* set IPv4 length */ 9329 iph_out->ip_len = mlen; 9330 SCTP_ATTACH_CHAIN(o_pak, mout, mlen); 9331 9332 /* out it goes */ 9333 SCTP_IP_OUTPUT(ret, o_pak, &ro, stcb, vrf_id, table_id); 9334 9335 /* Free the route if we got one back */ 9336 if (ro.ro_rt) 9337 RTFREE(ro.ro_rt); 9338 } else if (ip6_out != NULL) { 9339 struct route_in6 ro; 9340 int ret; 9341 struct sctp_tcb *stcb = NULL; 9342 struct ifnet *ifp = NULL; 9343 9344 bzero(&ro, sizeof(ro)); 9345 mlen = 
SCTP_BUF_LEN(mout); 9346 SCTP_ATTACH_CHAIN(o_pak, mout, mlen); 9347 SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, stcb, vrf_id, table_id); 9348 9349 /* Free the route if we got one back */ 9350 if (ro.ro_rt) 9351 RTFREE(ro.ro_rt); 9352 } 9353 SCTP_STAT_INCR(sctps_sendpackets); 9354 SCTP_STAT_INCR_COUNTER64(sctps_outpackets); 9355 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 9356 return; 9357 9358 } 9359 9360 static struct sctp_nets * 9361 sctp_select_hb_destination(struct sctp_tcb *stcb, struct timeval *now) 9362 { 9363 struct sctp_nets *net, *hnet; 9364 int ms_goneby, highest_ms, state_overide = 0; 9365 9366 (void)SCTP_GETTIME_TIMEVAL(now); 9367 highest_ms = 0; 9368 hnet = NULL; 9369 SCTP_TCB_LOCK_ASSERT(stcb); 9370 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 9371 if ( 9372 ((net->dest_state & SCTP_ADDR_NOHB) && ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) || 9373 (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE) 9374 ) { 9375 /* 9376 * Skip this guy from consideration if HB is off AND 9377 * its confirmed 9378 */ 9379 continue; 9380 } 9381 if (sctp_destination_is_reachable(stcb, (struct sockaddr *)&net->ro._l_addr) == 0) { 9382 /* skip this dest net from consideration */ 9383 continue; 9384 } 9385 if (net->last_sent_time.tv_sec) { 9386 /* Sent to so we subtract */ 9387 ms_goneby = (now->tv_sec - net->last_sent_time.tv_sec) * 1000; 9388 } else 9389 /* Never been sent to */ 9390 ms_goneby = 0x7fffffff; 9391 /*- 9392 * When the address state is unconfirmed but still 9393 * considered reachable, we HB at a higher rate. Once it 9394 * goes confirmed OR reaches the "unreachable" state, thenw 9395 * we cut it back to HB at a more normal pace. 
9396 */ 9397 if ((net->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED) { 9398 state_overide = 1; 9399 } else { 9400 state_overide = 0; 9401 } 9402 9403 if ((((unsigned int)ms_goneby >= net->RTO) || (state_overide)) && 9404 (ms_goneby > highest_ms)) { 9405 highest_ms = ms_goneby; 9406 hnet = net; 9407 } 9408 } 9409 if (hnet && 9410 ((hnet->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED)) { 9411 state_overide = 1; 9412 } else { 9413 state_overide = 0; 9414 } 9415 9416 if (hnet && highest_ms && (((unsigned int)highest_ms >= hnet->RTO) || state_overide)) { 9417 /*- 9418 * Found the one with longest delay bounds OR it is 9419 * unconfirmed and still not marked unreachable. 9420 */ 9421 SCTPDBG(SCTP_DEBUG_OUTPUT4, "net:%p is the hb winner -", hnet); 9422 #ifdef SCTP_DEBUG 9423 if (hnet) { 9424 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT4, 9425 (struct sockaddr *)&hnet->ro._l_addr); 9426 } else { 9427 SCTPDBG(SCTP_DEBUG_OUTPUT4, " none\n"); 9428 } 9429 #endif 9430 /* update the timer now */ 9431 hnet->last_sent_time = *now; 9432 return (hnet); 9433 } 9434 /* Nothing to HB */ 9435 return (NULL); 9436 } 9437 9438 int 9439 sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net) 9440 { 9441 struct sctp_tmit_chunk *chk; 9442 struct sctp_nets *net; 9443 struct sctp_heartbeat_chunk *hb; 9444 struct timeval now; 9445 struct sockaddr_in *sin; 9446 struct sockaddr_in6 *sin6; 9447 9448 SCTP_TCB_LOCK_ASSERT(stcb); 9449 if (user_req == 0) { 9450 net = sctp_select_hb_destination(stcb, &now); 9451 if (net == NULL) { 9452 /*- 9453 * All our busy none to send to, just start the 9454 * timer again. 
9455 */ 9456 if (stcb->asoc.state == 0) { 9457 return (0); 9458 } 9459 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, 9460 stcb->sctp_ep, 9461 stcb, 9462 net); 9463 return (0); 9464 } 9465 } else { 9466 net = u_net; 9467 if (net == NULL) { 9468 return (0); 9469 } 9470 (void)SCTP_GETTIME_TIMEVAL(&now); 9471 } 9472 sin = (struct sockaddr_in *)&net->ro._l_addr; 9473 if (sin->sin_family != AF_INET) { 9474 if (sin->sin_family != AF_INET6) { 9475 /* huh */ 9476 return (0); 9477 } 9478 } 9479 sctp_alloc_a_chunk(stcb, chk); 9480 if (chk == NULL) { 9481 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n"); 9482 return (0); 9483 } 9484 chk->copy_by_ref = 0; 9485 chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST; 9486 chk->rec.chunk_id.can_take_data = 1; 9487 chk->asoc = &stcb->asoc; 9488 chk->send_size = sizeof(struct sctp_heartbeat_chunk); 9489 9490 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER); 9491 if (chk->data == NULL) { 9492 sctp_free_a_chunk(stcb, chk); 9493 return (0); 9494 } 9495 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 9496 SCTP_BUF_LEN(chk->data) = chk->send_size; 9497 chk->sent = SCTP_DATAGRAM_UNSENT; 9498 chk->snd_count = 0; 9499 chk->whoTo = net; 9500 atomic_add_int(&chk->whoTo->ref_count, 1); 9501 /* Now we have a mbuf that we can fill in with the details */ 9502 hb = mtod(chk->data, struct sctp_heartbeat_chunk *); 9503 9504 /* fill out chunk header */ 9505 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST; 9506 hb->ch.chunk_flags = 0; 9507 hb->ch.chunk_length = htons(chk->send_size); 9508 /* Fill out hb parameter */ 9509 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO); 9510 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param)); 9511 hb->heartbeat.hb_info.time_value_1 = now.tv_sec; 9512 hb->heartbeat.hb_info.time_value_2 = now.tv_usec; 9513 /* Did our user request this one, put it in */ 9514 hb->heartbeat.hb_info.user_req = user_req; 9515 hb->heartbeat.hb_info.addr_family = 
sin->sin_family; 9516 hb->heartbeat.hb_info.addr_len = sin->sin_len; 9517 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) { 9518 /* 9519 * we only take from the entropy pool if the address is not 9520 * confirmed. 9521 */ 9522 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 9523 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 9524 } else { 9525 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0; 9526 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0; 9527 } 9528 if (sin->sin_family == AF_INET) { 9529 memcpy(hb->heartbeat.hb_info.address, &sin->sin_addr, sizeof(sin->sin_addr)); 9530 } else if (sin->sin_family == AF_INET6) { 9531 /* We leave the scope the way it is in our lookup table. */ 9532 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 9533 memcpy(hb->heartbeat.hb_info.address, &sin6->sin6_addr, sizeof(sin6->sin6_addr)); 9534 } else { 9535 /* huh compiler bug */ 9536 return (0); 9537 } 9538 /* ok we have a destination that needs a beat */ 9539 /* lets do the theshold management Qiaobing style */ 9540 9541 if (sctp_threshold_management(stcb->sctp_ep, stcb, net, 9542 stcb->asoc.max_send_times)) { 9543 /*- 9544 * we have lost the association, in a way this is 9545 * quite bad since we really are one less time since 9546 * we really did not send yet. This is the down side 9547 * to the Q's style as defined in the RFC and not my 9548 * alternate style defined in the RFC. 
	 */
	/*
	 * NOTE(review): tail of the heartbeat-send routine that begins above
	 * this chunk.  The association was declared dead by threshold
	 * management, so drop the net ref and free the unsent HB chunk.
	 */
	atomic_subtract_int(&chk->whoTo->ref_count, 1);
	if (chk->data != NULL) {
		sctp_m_freem(chk->data);
		chk->data = NULL;
	}
	sctp_free_a_chunk(stcb, chk);
	return (-1);
	}
	/* Association still alive: queue the HEARTBEAT for transmission. */
	net->hb_responded = 0;
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
	stcb->asoc.ctrl_queue_cnt++;
	SCTP_STAT_INCR(sctps_sendheartbeat);
	/*-
	 * Call directly med level routine to put out the chunk. It will
	 * always tumble out control chunks aka HB but it may even tumble
	 * out data too.
	 */
	return (1);
}

/*
 * Queue an ECN-ECHO chunk advertising high_tsn on stcb's control send
 * queue.  If an ECN-ECHO chunk is already queued it is updated in place
 * instead of queueing a second one.  Takes a reference on net (the
 * destination the chunk will be sent to).  Caller must hold the TCB lock.
 */
void
sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
    uint32_t high_tsn)
{
	struct sctp_association *asoc;
	struct sctp_ecne_chunk *ecne;
	struct sctp_tmit_chunk *chk;

	asoc = &stcb->asoc;
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
			/* found a previous ECN_ECHO update it if needed */
			ecne = mtod(chk->data, struct sctp_ecne_chunk *);
			ecne->tsn = htonl(high_tsn);
			return;
		}
	}
	/* nope could not find one to update so we must build one */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return;
	}
	chk->copy_by_ref = 0;
	SCTP_STAT_INCR(sctps_sendecne);
	chk->rec.chunk_id.id = SCTP_ECN_ECHO;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = &stcb->asoc;
	chk->send_size = sizeof(struct sctp_ecne_chunk);
	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk);
		return;
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = net;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	stcb->asoc.ecn_echo_cnt_onq++;
	ecne = mtod(chk->data, struct sctp_ecne_chunk *);
	ecne->ch.chunk_type = SCTP_ECN_ECHO;
	ecne->ch.chunk_flags = 0;
	ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
	ecne->tsn = htonl(high_tsn);
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
	asoc->ctrl_queue_cnt++;
}

/*
 * Build and queue a PACKET-DROPPED report for the received packet m
 * (IP header at the front, SCTP payload at offset iphlen).  The dropped
 * packet's payload is copied into the chunk, truncated to at most one
 * mbuf cluster / the smallest path MTU; bad_crc marks reports triggered
 * by a checksum failure.  Only sent when the peer declared pktdrop
 * support.  Caller must hold the TCB lock.
 */
void
sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
    struct mbuf *m, int iphlen, int bad_crc)
{
	struct sctp_association *asoc;
	struct sctp_pktdrop_chunk *drp;
	struct sctp_tmit_chunk *chk;
	uint8_t *datap;
	int len;
	unsigned int small_one;
	struct ip *iph;

	long spc;

	asoc = &stcb->asoc;
	SCTP_TCB_LOCK_ASSERT(stcb);
	if (asoc->peer_supports_pktdrop == 0) {
		/*-
		 * peer must declare support before I send one.
		 */
		return;
	}
	if (stcb->sctp_socket == NULL) {
		return;
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return;
	}
	chk->copy_by_ref = 0;
	iph = mtod(m, struct ip *);
	if (iph == NULL) {
		return;
	}
	if (iph->ip_v == IPVERSION) {
		/* IPv4 */
		len = chk->send_size = iph->ip_len;
	} else {
		struct ip6_hdr *ip6h;

		/* IPv6 */
		ip6h = mtod(m, struct ip6_hdr *);
		len = chk->send_size = htons(ip6h->ip6_plen);
	}
	chk->asoc = &stcb->asoc;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
jump_out:
		sctp_free_a_chunk(stcb, chk);
		return;
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
	if (drp == NULL) {
		sctp_m_freem(chk->data);
		chk->data = NULL;
		goto jump_out;
	}
	small_one = asoc->smallest_mtu;
	if (small_one > MCLBYTES) {
		/* Only one cluster worth of data MAX */
		small_one = MCLBYTES;
	}
	chk->book_size = SCTP_SIZE32((chk->send_size +
	    sizeof(struct sctp_pktdrop_chunk) +
	    sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
	chk->book_size_scale = 0;
	if (chk->book_size > small_one) {
		/* Report won't fit: truncate and record the original length. */
		drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
		drp->trunc_len = htons(chk->send_size);
		chk->send_size = small_one - (SCTP_MED_OVERHEAD +
		    sizeof(struct sctp_pktdrop_chunk) +
		    sizeof(struct sctphdr));
		len = chk->send_size;
	} else {
		/* no truncation needed */
		drp->ch.chunk_flags = 0;
		drp->trunc_len = htons(0);
	}
	if (bad_crc) {
		drp->ch.chunk_flags |= SCTP_BADCRC;
	}
	chk->send_size += sizeof(struct sctp_pktdrop_chunk);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	if (net) {
		/* we should hit here */
		chk->whoTo = net;
	} else {
		chk->whoTo = asoc->primary_destination;
	}
	atomic_add_int(&chk->whoTo->ref_count, 1);
	chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
	chk->rec.chunk_id.can_take_data = 1;
	drp->ch.chunk_type = SCTP_PACKET_DROPPED;
	drp->ch.chunk_length = htons(chk->send_size);
	/* Advertise our receive-buffer limit as the "bottleneck bandwidth". */
	spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
	if (spc < 0) {
		spc = 0;
	}
	drp->bottle_bw = htonl(spc);
	if (asoc->my_rwnd) {
		drp->current_onq = htonl(asoc->size_on_reasm_queue +
		    asoc->size_on_all_streams +
		    asoc->my_rwnd_control_len +
		    stcb->sctp_socket->so_rcv.sb_cc);
	} else {
		/*-
		 * If my rwnd is 0, possibly from mbuf depletion as well as
		 * space used, tell the peer there is NO space aka onq == bw
		 */
		drp->current_onq = htonl(spc);
	}
	drp->reserved = 0;
	datap = drp->data;
	m_copydata(m, iphlen, len, (caddr_t)datap);
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
	asoc->ctrl_queue_cnt++;
}

/*
 * Queue a CWR (congestion window reduced) chunk for high_tsn.  If a CWR
 * is already on the control queue its TSN is advanced in place (only
 * when high_tsn is newer, per compare_with_wrap) rather than queueing a
 * duplicate.  Takes a reference on net.  Caller must hold the TCB lock.
 */
void
sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn)
{
	struct sctp_association *asoc;
	struct sctp_cwr_chunk *cwr;
	struct sctp_tmit_chunk *chk;

	asoc = &stcb->asoc;
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id == SCTP_ECN_CWR) {
			/* found a previous ECN_CWR update it if needed */
			cwr = mtod(chk->data, struct sctp_cwr_chunk *);
			if (compare_with_wrap(high_tsn, ntohl(cwr->tsn),
			    MAX_TSN)) {
				cwr->tsn = htonl(high_tsn);
			}
			return;
		}
	}
	/* nope could not find one to update so we must build one */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return;
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_ECN_CWR;
	chk->rec.chunk_id.can_take_data = 1;
	chk->asoc = &stcb->asoc;
	chk->send_size = sizeof(struct sctp_cwr_chunk);
	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk);
		return;
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = net;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	cwr = mtod(chk->data, struct sctp_cwr_chunk *);
	cwr->ch.chunk_type = SCTP_ECN_CWR;
	cwr->ch.chunk_flags = 0;
	cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
	cwr->tsn = htonl(high_tsn);
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
	asoc->ctrl_queue_cnt++;
}

/*
 * Append an outgoing-stream-reset request parameter to the stream-reset
 * chunk already started in chk->data, covering number_entries stream ids
 * from list.  seq/resp_seq are the request/response sequence numbers and
 * last_sent is the last TSN assigned.  Updates the chunk length and the
 * chunk's book-keeping sizes.
 */
void
sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
    int number_entries, uint16_t * list,
    uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
{
	int len, old_len, i;
	struct sctp_stream_reset_out_request *req_out;
	struct sctp_chunkhdr *ch;

	ch = mtod(chk->data, struct sctp_chunkhdr *);


	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new
offset for the param. */
	req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
	/* now how long will this param be? */
	len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
	req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
	req_out->ph.param_length = htons(len);
	req_out->request_seq = htonl(seq);
	req_out->response_seq = htonl(resp_seq);
	req_out->send_reset_at_tsn = htonl(last_sent);
	if (number_entries) {
		for (i = 0; i < number_entries; i++) {
			req_out->list_of_streams[i] = htons(list[i]);
		}
	}
	if (SCTP_SIZE32(len) > len) {
		/*-
		 * Need to worry about the pad we may end up adding to the
		 * end. This is easy since the struct is either aligned to 4
		 * bytes or 2 bytes off.
		 */
		req_out->list_of_streams[number_entries] = 0;
	}
	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	chk->book_size = len + old_len;
	chk->book_size_scale = 0;
	chk->send_size = SCTP_SIZE32(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	return;
}


/*
 * Append an incoming-stream-reset request parameter (request that the
 * peer reset its outgoing streams listed in list) to the stream-reset
 * chunk in chk->data.  seq is the request sequence number.  Updates the
 * chunk length and book-keeping sizes.
 */
void
sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
    int number_entries, uint16_t * list,
    uint32_t seq)
{
	int len, old_len, i;
	struct sctp_stream_reset_in_request *req_in;
	struct sctp_chunkhdr *ch;

	ch = mtod(chk->data, struct sctp_chunkhdr *);


	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
	/* now how long will this param be? */
	len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
	req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
	req_in->ph.param_length = htons(len);
	req_in->request_seq = htonl(seq);
	if (number_entries) {
		for (i = 0; i < number_entries; i++) {
			req_in->list_of_streams[i] = htons(list[i]);
		}
	}
	if (SCTP_SIZE32(len) > len) {
		/*-
		 * Need to worry about the pad we may end up adding to the
		 * end. This is easy since the struct is either aligned to 4
		 * bytes or 2 bytes off.
		 */
		req_in->list_of_streams[number_entries] = 0;
	}
	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	chk->book_size = len + old_len;
	chk->book_size_scale = 0;
	chk->send_size = SCTP_SIZE32(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	return;
}


/*
 * Append a TSN-reset request parameter to the stream-reset chunk in
 * chk->data.  seq is the request sequence number.  Updates the chunk
 * length and book-keeping sizes.
 */
void
sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
    uint32_t seq)
{
	int len, old_len;
	struct sctp_stream_reset_tsn_request *req_tsn;
	struct sctp_chunkhdr *ch;

	ch = mtod(chk->data, struct sctp_chunkhdr *);


	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
	/* now how long will this param be?
*/ 9896 len = sizeof(struct sctp_stream_reset_tsn_request); 9897 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST); 9898 req_tsn->ph.param_length = htons(len); 9899 req_tsn->request_seq = htonl(seq); 9900 9901 /* now fix the chunk length */ 9902 ch->chunk_length = htons(len + old_len); 9903 chk->send_size = len + old_len; 9904 chk->book_size = SCTP_SIZE32(chk->send_size); 9905 chk->book_size_scale = 0; 9906 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); 9907 return; 9908 } 9909 9910 void 9911 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk, 9912 uint32_t resp_seq, uint32_t result) 9913 { 9914 int len, old_len; 9915 struct sctp_stream_reset_response *resp; 9916 struct sctp_chunkhdr *ch; 9917 9918 ch = mtod(chk->data, struct sctp_chunkhdr *); 9919 9920 9921 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 9922 9923 /* get to new offset for the param. */ 9924 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len); 9925 /* now how long will this param be? */ 9926 len = sizeof(struct sctp_stream_reset_response); 9927 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE); 9928 resp->ph.param_length = htons(len); 9929 resp->response_seq = htonl(resp_seq); 9930 resp->result = ntohl(result); 9931 9932 /* now fix the chunk length */ 9933 ch->chunk_length = htons(len + old_len); 9934 chk->book_size = len + old_len; 9935 chk->book_size_scale = 0; 9936 chk->send_size = SCTP_SIZE32(chk->book_size); 9937 SCTP_BUF_LEN(chk->data) = chk->send_size; 9938 return; 9939 9940 } 9941 9942 9943 void 9944 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk, 9945 uint32_t resp_seq, uint32_t result, 9946 uint32_t send_una, uint32_t recv_next) 9947 { 9948 int len, old_len; 9949 struct sctp_stream_reset_response_tsn *resp; 9950 struct sctp_chunkhdr *ch; 9951 9952 ch = mtod(chk->data, struct sctp_chunkhdr *); 9953 9954 9955 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 9956 9957 /* get to new offset for the param. 
*/ 9958 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len); 9959 /* now how long will this param be? */ 9960 len = sizeof(struct sctp_stream_reset_response_tsn); 9961 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE); 9962 resp->ph.param_length = htons(len); 9963 resp->response_seq = htonl(resp_seq); 9964 resp->result = htonl(result); 9965 resp->senders_next_tsn = htonl(send_una); 9966 resp->receivers_next_tsn = htonl(recv_next); 9967 9968 /* now fix the chunk length */ 9969 ch->chunk_length = htons(len + old_len); 9970 chk->book_size = len + old_len; 9971 chk->send_size = SCTP_SIZE32(chk->book_size); 9972 chk->book_size_scale = 0; 9973 SCTP_BUF_LEN(chk->data) = chk->send_size; 9974 return; 9975 } 9976 9977 9978 int 9979 sctp_send_str_reset_req(struct sctp_tcb *stcb, 9980 int number_entries, uint16_t * list, 9981 uint8_t send_out_req, uint32_t resp_seq, 9982 uint8_t send_in_req, 9983 uint8_t send_tsn_req) 9984 { 9985 9986 struct sctp_association *asoc; 9987 struct sctp_tmit_chunk *chk; 9988 struct sctp_chunkhdr *ch; 9989 uint32_t seq; 9990 9991 asoc = &stcb->asoc; 9992 if (asoc->stream_reset_outstanding) { 9993 /*- 9994 * Already one pending, must get ACK back to clear the flag. 
9995 */ 9996 return (EBUSY); 9997 } 9998 if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0)) { 9999 /* nothing to do */ 10000 return (EINVAL); 10001 } 10002 if (send_tsn_req && (send_out_req || send_in_req)) { 10003 /* error, can't do that */ 10004 return (EINVAL); 10005 } 10006 sctp_alloc_a_chunk(stcb, chk); 10007 if (chk == NULL) { 10008 return (ENOMEM); 10009 } 10010 chk->copy_by_ref = 0; 10011 chk->rec.chunk_id.id = SCTP_STREAM_RESET; 10012 chk->rec.chunk_id.can_take_data = 0; 10013 chk->asoc = &stcb->asoc; 10014 chk->book_size = sizeof(struct sctp_chunkhdr); 10015 chk->send_size = SCTP_SIZE32(chk->book_size); 10016 chk->book_size_scale = 0; 10017 10018 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 10019 if (chk->data == NULL) { 10020 sctp_free_a_chunk(stcb, chk); 10021 return (ENOMEM); 10022 } 10023 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 10024 10025 /* setup chunk parameters */ 10026 chk->sent = SCTP_DATAGRAM_UNSENT; 10027 chk->snd_count = 0; 10028 chk->whoTo = asoc->primary_destination; 10029 atomic_add_int(&chk->whoTo->ref_count, 1); 10030 10031 ch = mtod(chk->data, struct sctp_chunkhdr *); 10032 ch->chunk_type = SCTP_STREAM_RESET; 10033 ch->chunk_flags = 0; 10034 ch->chunk_length = htons(chk->book_size); 10035 SCTP_BUF_LEN(chk->data) = chk->send_size; 10036 10037 seq = stcb->asoc.str_reset_seq_out; 10038 if (send_out_req) { 10039 sctp_add_stream_reset_out(chk, number_entries, list, 10040 seq, resp_seq, (stcb->asoc.sending_seq - 1)); 10041 asoc->stream_reset_out_is_outstanding = 1; 10042 seq++; 10043 asoc->stream_reset_outstanding++; 10044 } 10045 if (send_in_req) { 10046 sctp_add_stream_reset_in(chk, number_entries, list, seq); 10047 asoc->stream_reset_outstanding++; 10048 } 10049 if (send_tsn_req) { 10050 sctp_add_stream_reset_tsn(chk, seq); 10051 asoc->stream_reset_outstanding++; 10052 } 10053 asoc->str_reset = chk; 10054 10055 /* insert the chunk for sending */ 10056 
TAILQ_INSERT_TAIL(&asoc->control_send_queue, 10057 chk, 10058 sctp_next); 10059 asoc->ctrl_queue_cnt++; 10060 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo); 10061 return (0); 10062 } 10063 10064 void 10065 sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag, 10066 struct mbuf *err_cause, uint32_t vrf_id, uint32_t table_id) 10067 { 10068 /*- 10069 * Formulate the abort message, and send it back down. 10070 */ 10071 struct mbuf *o_pak; 10072 struct mbuf *mout; 10073 struct sctp_abort_msg *abm; 10074 struct ip *iph, *iph_out; 10075 struct ip6_hdr *ip6, *ip6_out; 10076 int iphlen_out, len; 10077 10078 /* don't respond to ABORT with ABORT */ 10079 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) { 10080 if (err_cause) 10081 sctp_m_freem(err_cause); 10082 return; 10083 } 10084 len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_abort_msg)); 10085 10086 mout = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA); 10087 if (mout == NULL) { 10088 if (err_cause) 10089 sctp_m_freem(err_cause); 10090 return; 10091 } 10092 iph = mtod(m, struct ip *); 10093 iph_out = NULL; 10094 ip6_out = NULL; 10095 if (iph->ip_v == IPVERSION) { 10096 iph_out = mtod(mout, struct ip *); 10097 SCTP_BUF_LEN(mout) = sizeof(*iph_out) + sizeof(*abm); 10098 SCTP_BUF_NEXT(mout) = err_cause; 10099 10100 /* Fill in the IP header for the ABORT */ 10101 iph_out->ip_v = IPVERSION; 10102 iph_out->ip_hl = (sizeof(struct ip) / 4); 10103 iph_out->ip_tos = (u_char)0; 10104 iph_out->ip_id = 0; 10105 iph_out->ip_off = 0; 10106 iph_out->ip_ttl = MAXTTL; 10107 iph_out->ip_p = IPPROTO_SCTP; 10108 iph_out->ip_src.s_addr = iph->ip_dst.s_addr; 10109 iph_out->ip_dst.s_addr = iph->ip_src.s_addr; 10110 /* let IP layer calculate this */ 10111 iph_out->ip_sum = 0; 10112 10113 iphlen_out = sizeof(*iph_out); 10114 abm = (struct sctp_abort_msg *)((caddr_t)iph_out + iphlen_out); 10115 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 10116 ip6 = (struct ip6_hdr *)iph; 10117 
ip6_out = mtod(mout, struct ip6_hdr *); 10118 SCTP_BUF_LEN(mout) = sizeof(*ip6_out) + sizeof(*abm); 10119 SCTP_BUF_NEXT(mout) = err_cause; 10120 10121 /* Fill in the IP6 header for the ABORT */ 10122 ip6_out->ip6_flow = ip6->ip6_flow; 10123 ip6_out->ip6_hlim = ip6_defhlim; 10124 ip6_out->ip6_nxt = IPPROTO_SCTP; 10125 ip6_out->ip6_src = ip6->ip6_dst; 10126 ip6_out->ip6_dst = ip6->ip6_src; 10127 10128 iphlen_out = sizeof(*ip6_out); 10129 abm = (struct sctp_abort_msg *)((caddr_t)ip6_out + iphlen_out); 10130 } else { 10131 /* Currently not supported */ 10132 return; 10133 } 10134 10135 abm->sh.src_port = sh->dest_port; 10136 abm->sh.dest_port = sh->src_port; 10137 abm->sh.checksum = 0; 10138 if (vtag == 0) { 10139 abm->sh.v_tag = sh->v_tag; 10140 abm->msg.ch.chunk_flags = SCTP_HAD_NO_TCB; 10141 } else { 10142 abm->sh.v_tag = htonl(vtag); 10143 abm->msg.ch.chunk_flags = 0; 10144 } 10145 abm->msg.ch.chunk_type = SCTP_ABORT_ASSOCIATION; 10146 10147 if (err_cause) { 10148 struct mbuf *m_tmp = err_cause; 10149 int err_len = 0; 10150 10151 /* get length of the err_cause chain */ 10152 while (m_tmp != NULL) { 10153 err_len += SCTP_BUF_LEN(m_tmp); 10154 m_tmp = SCTP_BUF_NEXT(m_tmp); 10155 } 10156 len = SCTP_BUF_LEN(mout) + err_len; 10157 if (err_len % 4) { 10158 /* need pad at end of chunk */ 10159 uint32_t cpthis = 0; 10160 int padlen; 10161 10162 padlen = 4 - (len % 4); 10163 m_copyback(mout, len, padlen, (caddr_t)&cpthis); 10164 len += padlen; 10165 } 10166 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch) + err_len); 10167 } else { 10168 len = SCTP_BUF_LEN(mout); 10169 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch)); 10170 } 10171 10172 /* add checksum */ 10173 if ((sctp_no_csum_on_loopback) && SCTP_IS_IT_LOOPBACK(m)) { 10174 abm->sh.checksum = 0; 10175 } else { 10176 abm->sh.checksum = sctp_calculate_sum(mout, NULL, iphlen_out); 10177 } 10178 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) { 10179 /* no mbuf's */ 10180 sctp_m_freem(mout); 10181 return; 10182 } 10183 if 
(iph_out != NULL) { 10184 sctp_route_t ro; 10185 struct sctp_tcb *stcb = NULL; 10186 int ret; 10187 10188 /* zap the stack pointer to the route */ 10189 bzero(&ro, sizeof ro); 10190 SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip_output:\n"); 10191 SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, iph_out, &abm->sh); 10192 /* set IPv4 length */ 10193 iph_out->ip_len = len; 10194 /* out it goes */ 10195 SCTP_ATTACH_CHAIN(o_pak, mout, len); 10196 SCTP_IP_OUTPUT(ret, o_pak, &ro, stcb, vrf_id, table_id); 10197 10198 /* Free the route if we got one back */ 10199 if (ro.ro_rt) 10200 RTFREE(ro.ro_rt); 10201 } else if (ip6_out != NULL) { 10202 struct route_in6 ro; 10203 int ret; 10204 struct sctp_tcb *stcb = NULL; 10205 struct ifnet *ifp = NULL; 10206 10207 /* zap the stack pointer to the route */ 10208 bzero(&ro, sizeof(ro)); 10209 SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip6_output:\n"); 10210 SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, (struct ip *)ip6_out, &abm->sh); 10211 ip6_out->ip6_plen = len - sizeof(*ip6_out); 10212 SCTP_ATTACH_CHAIN(o_pak, mout, len); 10213 SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, stcb, vrf_id, table_id); 10214 10215 /* Free the route if we got one back */ 10216 if (ro.ro_rt) 10217 RTFREE(ro.ro_rt); 10218 } 10219 SCTP_STAT_INCR(sctps_sendpackets); 10220 SCTP_STAT_INCR_COUNTER64(sctps_outpackets); 10221 } 10222 10223 void 10224 sctp_send_operr_to(struct mbuf *m, int iphlen, struct mbuf *scm, uint32_t vtag, 10225 uint32_t vrf_id, uint32_t table_id) 10226 { 10227 struct mbuf *o_pak; 10228 struct sctphdr *ihdr; 10229 int retcode; 10230 struct sctphdr *ohdr; 10231 struct sctp_chunkhdr *ophdr; 10232 struct ip *iph; 10233 struct mbuf *mout; 10234 10235 #ifdef SCTP_DEBUG 10236 struct sockaddr_in6 lsa6, fsa6; 10237 10238 #endif 10239 uint32_t val; 10240 struct mbuf *at; 10241 int len; 10242 10243 iph = mtod(m, struct ip *); 10244 ihdr = (struct sctphdr *)((caddr_t)iph + iphlen); 10245 10246 SCTP_BUF_PREPEND(scm, (sizeof(struct sctphdr) + sizeof(struct 
sctp_chunkhdr)), M_DONTWAIT); 10247 if (scm == NULL) { 10248 /* can't send because we can't add a mbuf */ 10249 return; 10250 } 10251 ohdr = mtod(scm, struct sctphdr *); 10252 ohdr->src_port = ihdr->dest_port; 10253 ohdr->dest_port = ihdr->src_port; 10254 ohdr->v_tag = vtag; 10255 ohdr->checksum = 0; 10256 ophdr = (struct sctp_chunkhdr *)(ohdr + 1); 10257 ophdr->chunk_type = SCTP_OPERATION_ERROR; 10258 ophdr->chunk_flags = 0; 10259 len = 0; 10260 at = scm; 10261 while (at) { 10262 len += SCTP_BUF_LEN(at); 10263 at = SCTP_BUF_NEXT(at); 10264 } 10265 ophdr->chunk_length = htons(len - sizeof(struct sctphdr)); 10266 if (len % 4) { 10267 /* need padding */ 10268 uint32_t cpthis = 0; 10269 int padlen; 10270 10271 padlen = 4 - (len % 4); 10272 m_copyback(scm, len, padlen, (caddr_t)&cpthis); 10273 len += padlen; 10274 } 10275 if ((sctp_no_csum_on_loopback) && SCTP_IS_IT_LOOPBACK(m)) { 10276 val = 0; 10277 } else { 10278 val = sctp_calculate_sum(scm, NULL, 0); 10279 } 10280 mout = sctp_get_mbuf_for_msg(sizeof(struct ip6_hdr), 1, M_DONTWAIT, 1, MT_DATA); 10281 if (mout == NULL) { 10282 sctp_m_freem(scm); 10283 return; 10284 } 10285 SCTP_BUF_NEXT(mout) = scm; 10286 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) { 10287 sctp_m_freem(mout); 10288 return; 10289 } 10290 ohdr->checksum = val; 10291 if (iph->ip_v == IPVERSION) { 10292 /* V4 */ 10293 struct ip *out; 10294 sctp_route_t ro; 10295 struct sctp_tcb *stcb = NULL; 10296 10297 SCTP_BUF_LEN(mout) = sizeof(struct ip); 10298 len += sizeof(struct ip); 10299 10300 bzero(&ro, sizeof ro); 10301 out = mtod(mout, struct ip *); 10302 out->ip_v = iph->ip_v; 10303 out->ip_hl = (sizeof(struct ip) / 4); 10304 out->ip_tos = iph->ip_tos; 10305 out->ip_id = iph->ip_id; 10306 out->ip_off = 0; 10307 out->ip_ttl = MAXTTL; 10308 out->ip_p = IPPROTO_SCTP; 10309 out->ip_sum = 0; 10310 out->ip_src = iph->ip_dst; 10311 out->ip_dst = iph->ip_src; 10312 out->ip_len = len; 10313 SCTP_ATTACH_CHAIN(o_pak, mout, len); 10314 10315 SCTP_IP_OUTPUT(retcode, o_pak, 
&ro, stcb, vrf_id, table_id); 10316 10317 SCTP_STAT_INCR(sctps_sendpackets); 10318 SCTP_STAT_INCR_COUNTER64(sctps_outpackets); 10319 /* Free the route if we got one back */ 10320 if (ro.ro_rt) 10321 RTFREE(ro.ro_rt); 10322 } else { 10323 /* V6 */ 10324 struct route_in6 ro; 10325 int ret; 10326 struct sctp_tcb *stcb = NULL; 10327 struct ifnet *ifp = NULL; 10328 struct ip6_hdr *out6, *in6; 10329 10330 SCTP_BUF_LEN(mout) = sizeof(struct ip6_hdr); 10331 len += sizeof(struct ip6_hdr); 10332 bzero(&ro, sizeof ro); 10333 in6 = mtod(m, struct ip6_hdr *); 10334 out6 = mtod(mout, struct ip6_hdr *); 10335 out6->ip6_flow = in6->ip6_flow; 10336 out6->ip6_hlim = ip6_defhlim; 10337 out6->ip6_nxt = IPPROTO_SCTP; 10338 out6->ip6_src = in6->ip6_dst; 10339 out6->ip6_dst = in6->ip6_src; 10340 out6->ip6_plen = len - sizeof(struct ip6_hdr); 10341 10342 #ifdef SCTP_DEBUG 10343 bzero(&lsa6, sizeof(lsa6)); 10344 lsa6.sin6_len = sizeof(lsa6); 10345 lsa6.sin6_family = AF_INET6; 10346 lsa6.sin6_addr = out6->ip6_src; 10347 bzero(&fsa6, sizeof(fsa6)); 10348 fsa6.sin6_len = sizeof(fsa6); 10349 fsa6.sin6_family = AF_INET6; 10350 fsa6.sin6_addr = out6->ip6_dst; 10351 #endif 10352 SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_operr_to calling ipv6 output:\n"); 10353 SCTPDBG(SCTP_DEBUG_OUTPUT2, "src: "); 10354 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&lsa6); 10355 SCTPDBG(SCTP_DEBUG_OUTPUT2, "dst "); 10356 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&fsa6); 10357 10358 SCTP_ATTACH_CHAIN(o_pak, mout, len); 10359 SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, stcb, vrf_id, table_id); 10360 10361 SCTP_STAT_INCR(sctps_sendpackets); 10362 SCTP_STAT_INCR_COUNTER64(sctps_outpackets); 10363 /* Free the route if we got one back */ 10364 if (ro.ro_rt) 10365 RTFREE(ro.ro_rt); 10366 } 10367 } 10368 10369 static struct mbuf * 10370 sctp_copy_resume(struct sctp_stream_queue_pending *sp, 10371 struct uio *uio, 10372 struct sctp_sndrcvinfo *srcv, 10373 int max_send_len, 10374 int user_marks_eor, 10375 int *error, 
10376 uint32_t * sndout, 10377 struct mbuf **new_tail) 10378 { 10379 struct mbuf *m; 10380 10381 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0, 10382 (M_PKTHDR | (user_marks_eor ? M_EOR : 0))); 10383 if (m == NULL) 10384 *error = ENOMEM; 10385 else 10386 *sndout = m_length(m, NULL); 10387 *new_tail = m_last(m); 10388 return (m); 10389 } 10390 10391 static int 10392 sctp_copy_one(struct sctp_stream_queue_pending *sp, 10393 struct uio *uio, 10394 int resv_upfront) 10395 { 10396 int left; 10397 10398 left = sp->length; 10399 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length, 10400 resv_upfront, 0); 10401 if (sp->data == NULL) 10402 return (ENOMEM); 10403 10404 sp->tail_mbuf = m_last(sp->data); 10405 return (0); 10406 } 10407 10408 10409 10410 static struct sctp_stream_queue_pending * 10411 sctp_copy_it_in(struct sctp_tcb *stcb, 10412 struct sctp_association *asoc, 10413 struct sctp_sndrcvinfo *srcv, 10414 struct uio *uio, 10415 struct sctp_nets *net, 10416 int max_send_len, 10417 int user_marks_eor, 10418 int *error, 10419 int non_blocking) 10420 { 10421 /*- 10422 * This routine must be very careful in its work. Protocol 10423 * processing is up and running so care must be taken to spl...() 10424 * when you need to do something that may effect the stcb/asoc. The 10425 * sb is locked however. When data is copied the protocol processing 10426 * should be enabled since this is a slower operation... 10427 */ 10428 struct sctp_stream_queue_pending *sp = NULL; 10429 int resv_in_first; 10430 10431 *error = 0; 10432 /* Unless E_EOR mode is on, we must make a send FIT in one call. */ 10433 if (((user_marks_eor == 0) && non_blocking) && 10434 (uio->uio_resid > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) { 10435 /* It will NEVER fit */ 10436 *error = EMSGSIZE; 10437 goto out_now; 10438 } 10439 /* Now can we send this? 
 */
	/* Reject new data once the association is shutting down. */
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
		/* got data while shutting down */
		*error = ECONNRESET;
		goto out_now;
	}
	sctp_alloc_a_strmoq(stcb, sp);
	if (sp == NULL) {
		*error = ENOMEM;
		goto out_now;
	}
	sp->act_flags = 0;
	sp->sender_all_done = 0;
	sp->sinfo_flags = srcv->sinfo_flags;
	sp->timetolive = srcv->sinfo_timetolive;
	sp->ppid = srcv->sinfo_ppid;
	sp->context = srcv->sinfo_context;
	sp->strseq = 0;
	(void)SCTP_GETTIME_TIMEVAL(&sp->ts);

	sp->stream = srcv->sinfo_stream;
	sp->length = min(uio->uio_resid, max_send_len);
	/*
	 * The message is complete if the whole remaining user data fits in
	 * this copy and either explicit-EOR mode is off or the caller
	 * flagged EOF/EOR.
	 */
	if ((sp->length == uio->uio_resid) &&
	    ((user_marks_eor == 0) ||
	    (srcv->sinfo_flags & SCTP_EOF) ||
	    (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))
	    ) {
		sp->msg_is_complete = 1;
	} else {
		sp->msg_is_complete = 0;
	}
	sp->sender_all_done = 0;
	sp->some_taken = 0;
	sp->put_last_out = 0;
	/* leave room up front for the DATA chunk header */
	resv_in_first = sizeof(struct sctp_data_chunk);
	sp->data = sp->tail_mbuf = NULL;
	*error = sctp_copy_one(sp, uio, resv_in_first);
	if (*error) {
		sctp_free_a_strmoq(stcb, sp);
		sp = NULL;
	} else {
		if (sp->sinfo_flags & SCTP_ADDR_OVER) {
			sp->net = net;
			sp->addr_over = 1;
		} else {
			sp->net = asoc->primary_destination;
			sp->addr_over = 0;
		}
		atomic_add_int(&sp->net->ref_count, 1);
		sctp_set_prsctp_policy(stcb, sp);
	}
out_now:
	return (sp);
}


/*
 * Socket-layer send entry point: extracts an SCTP_SNDRCV cmsg from
 * control (if present) and hands everything to sctp_lower_sosend().
 */
int
sctp_sosend(struct socket *so,
    struct sockaddr *addr,
    struct uio *uio,
    struct mbuf *top,
    struct mbuf *control,
    int flags
    ,
    struct thread *p
)
{
	struct sctp_inpcb *inp;
	int error, use_rcvinfo = 0;
	struct sctp_sndrcvinfo srcv;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (control) {
		/* process cmsg snd/rcv info (maybe a assoc-id) */
		if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&srcv, control,
		    sizeof(srcv))) {
			/* got one */
			use_rcvinfo = 1;
		}
	}
	/* srcv is only read by the callee when use_rcvinfo is set */
	error = sctp_lower_sosend(so, addr, uio, top,
	    control,
	    flags,
	    use_rcvinfo, &srcv
	    ,p
	    );
	return (error);
}


/*
 * Workhorse behind sctp_sosend(): locates/creates the association and
 * queues the user data.
 * NOTE(review): this function continues past the end of this chunk.
 */
int
sctp_lower_sosend(struct socket *so,
    struct sockaddr *addr,
    struct uio *uio,
    struct mbuf *i_pak,
    struct mbuf *control,
    int flags,
    int use_rcvinfo,
    struct sctp_sndrcvinfo *srcv
    ,
    struct thread *p
)
{
	unsigned int sndlen, max_len;
	int error, len;
	struct mbuf *top = NULL;

	/* NOTE(review): "__OpenBSD_" looks like a typo for "__OpenBSD__" — confirm */
#if defined(__NetBSD__) || defined(__OpenBSD_)
	int s;

#endif
	int queue_only = 0, queue_only_for_init = 0;
	int free_cnt_applied = 0;
	int un_sent = 0;
	int now_filled = 0;
	struct sctp_block_entry be;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb = NULL;
	struct timeval now;
	struct sctp_nets *net;
	struct sctp_association *asoc;
	struct sctp_inpcb *t_inp;
	int create_lock_applied = 0;
	int nagle_applies = 0;
	int some_on_control = 0;
	int got_all_of_the_send = 0;
	int hold_tcblock = 0;
	int non_blocking = 0;
	int temp_flags = 0;

	error = 0;
	net = NULL;
	stcb = NULL;
	asoc = NULL;
	t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == NULL) {
		error = EFAULT;
		goto out_unlocked;
	}
	if ((uio == NULL) && (i_pak == NULL)) {
		return (EINVAL);
	}
	atomic_add_int(&inp->total_sends, 1);
	if (uio)
		sndlen = uio->uio_resid;
	else {
		sndlen = SCTP_HEADER_LEN(i_pak);
		top =
SCTP_HEADER_TO_CHAIN(i_pak); 10590 } 10591 10592 hold_tcblock = 0; 10593 10594 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 10595 (inp->sctp_socket->so_qlimit)) { 10596 /* The listener can NOT send */ 10597 error = EFAULT; 10598 goto out_unlocked; 10599 } 10600 if ((use_rcvinfo) && srcv) { 10601 if (INVALID_SINFO_FLAG(srcv->sinfo_flags) || PR_SCTP_INVALID_POLICY(srcv->sinfo_flags)) { 10602 error = EINVAL; 10603 goto out_unlocked; 10604 } 10605 if (srcv->sinfo_flags) 10606 SCTP_STAT_INCR(sctps_sends_with_flags); 10607 10608 if (srcv->sinfo_flags & SCTP_SENDALL) { 10609 /* its a sendall */ 10610 error = sctp_sendall(inp, uio, top, srcv); 10611 top = NULL; 10612 goto out_unlocked; 10613 } 10614 } 10615 /* now we must find the assoc */ 10616 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 10617 SCTP_INP_RLOCK(inp); 10618 stcb = LIST_FIRST(&inp->sctp_asoc_list); 10619 if (stcb == NULL) { 10620 SCTP_INP_RUNLOCK(inp); 10621 error = ENOTCONN; 10622 goto out_unlocked; 10623 } 10624 hold_tcblock = 0; 10625 SCTP_INP_RUNLOCK(inp); 10626 if (addr) { 10627 /* Must locate the net structure if addr given */ 10628 net = sctp_findnet(stcb, addr); 10629 if (net) { 10630 /* validate port was 0 or correct */ 10631 struct sockaddr_in *sin; 10632 10633 sin = (struct sockaddr_in *)addr; 10634 if ((sin->sin_port != 0) && 10635 (sin->sin_port != stcb->rport)) { 10636 net = NULL; 10637 } 10638 } 10639 temp_flags |= SCTP_ADDR_OVER; 10640 } else 10641 net = stcb->asoc.primary_destination; 10642 if (addr && (net == NULL)) { 10643 /* Could not find address, was it legal */ 10644 if (addr->sa_family == AF_INET) { 10645 struct sockaddr_in *sin; 10646 10647 sin = (struct sockaddr_in *)addr; 10648 if (sin->sin_addr.s_addr == 0) { 10649 if ((sin->sin_port == 0) || 10650 (sin->sin_port == stcb->rport)) { 10651 net = stcb->asoc.primary_destination; 10652 } 10653 } 10654 } else { 10655 struct sockaddr_in6 *sin6; 10656 10657 sin6 = (struct sockaddr_in6 *)addr; 10658 if 
(IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 10659 if ((sin6->sin6_port == 0) || 10660 (sin6->sin6_port == stcb->rport)) { 10661 net = stcb->asoc.primary_destination; 10662 } 10663 } 10664 } 10665 } 10666 if (net == NULL) { 10667 error = EINVAL; 10668 goto out_unlocked; 10669 } 10670 } else if (use_rcvinfo && srcv && srcv->sinfo_assoc_id) { 10671 stcb = sctp_findassociation_ep_asocid(inp, srcv->sinfo_assoc_id, 0); 10672 if (stcb) { 10673 if (addr) 10674 /* 10675 * Must locate the net structure if addr 10676 * given 10677 */ 10678 net = sctp_findnet(stcb, addr); 10679 else 10680 net = stcb->asoc.primary_destination; 10681 if ((srcv->sinfo_flags & SCTP_ADDR_OVER) && 10682 ((net == NULL) || (addr == NULL))) { 10683 struct sockaddr_in *sin; 10684 10685 if (addr == NULL) { 10686 error = EINVAL; 10687 goto out_unlocked; 10688 } 10689 sin = (struct sockaddr_in *)addr; 10690 /* Validate port is 0 or correct */ 10691 if ((sin->sin_port != 0) && 10692 (sin->sin_port != stcb->rport)) { 10693 net = NULL; 10694 } 10695 } 10696 } 10697 hold_tcblock = 0; 10698 } else if (addr) { 10699 /*- 10700 * Since we did not use findep we must 10701 * increment it, and if we don't find a tcb 10702 * decrement it. 10703 */ 10704 SCTP_INP_WLOCK(inp); 10705 SCTP_INP_INCR_REF(inp); 10706 SCTP_INP_WUNLOCK(inp); 10707 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL); 10708 if (stcb == NULL) { 10709 SCTP_INP_WLOCK(inp); 10710 SCTP_INP_DECR_REF(inp); 10711 SCTP_INP_WUNLOCK(inp); 10712 } else { 10713 hold_tcblock = 1; 10714 } 10715 } 10716 if ((stcb == NULL) && (addr)) { 10717 /* Possible implicit send? */ 10718 SCTP_ASOC_CREATE_LOCK(inp); 10719 create_lock_applied = 1; 10720 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 10721 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 10722 /* Should I really unlock ? 
*/ 10723 error = EFAULT; 10724 goto out_unlocked; 10725 10726 } 10727 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && 10728 (addr->sa_family == AF_INET6)) { 10729 error = EINVAL; 10730 goto out_unlocked; 10731 } 10732 SCTP_INP_WLOCK(inp); 10733 SCTP_INP_INCR_REF(inp); 10734 SCTP_INP_WUNLOCK(inp); 10735 /* With the lock applied look again */ 10736 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL); 10737 if (stcb == NULL) { 10738 SCTP_INP_WLOCK(inp); 10739 SCTP_INP_DECR_REF(inp); 10740 SCTP_INP_WUNLOCK(inp); 10741 } else { 10742 hold_tcblock = 1; 10743 } 10744 } 10745 if (stcb == NULL) { 10746 if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 10747 error = ENOTCONN; 10748 goto out_unlocked; 10749 } else if (addr == NULL) { 10750 error = ENOENT; 10751 goto out_unlocked; 10752 } else { 10753 /* 10754 * UDP style, we must go ahead and start the INIT 10755 * process 10756 */ 10757 uint32_t vrf_id; 10758 10759 if ((use_rcvinfo) && (srcv) && 10760 ((srcv->sinfo_flags & SCTP_ABORT) || 10761 ((srcv->sinfo_flags & SCTP_EOF) && 10762 (uio) && 10763 (uio->uio_resid == 0)))) { 10764 /*- 10765 * User asks to abort a non-existant assoc, 10766 * or EOF a non-existant assoc with no data 10767 */ 10768 error = ENOENT; 10769 goto out_unlocked; 10770 } 10771 /* get an asoc/stcb struct */ 10772 10773 vrf_id = inp->def_vrf_id; 10774 stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0, vrf_id); 10775 if (stcb == NULL) { 10776 /* Error is setup for us in the call */ 10777 goto out_unlocked; 10778 } 10779 if (create_lock_applied) { 10780 SCTP_ASOC_CREATE_UNLOCK(inp); 10781 create_lock_applied = 0; 10782 } else { 10783 SCTP_PRINTF("Huh-3? 
create lock should have been on??\n"); 10784 } 10785 /* 10786 * Turn on queue only flag to prevent data from 10787 * being sent 10788 */ 10789 queue_only = 1; 10790 asoc = &stcb->asoc; 10791 asoc->state = SCTP_STATE_COOKIE_WAIT; 10792 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered); 10793 10794 /* initialize authentication params for the assoc */ 10795 sctp_initialize_auth_params(inp, stcb); 10796 10797 if (control) { 10798 /* 10799 * see if a init structure exists in cmsg 10800 * headers 10801 */ 10802 struct sctp_initmsg initm; 10803 int i; 10804 10805 if (sctp_find_cmsg(SCTP_INIT, (void *)&initm, control, 10806 sizeof(initm))) { 10807 /* 10808 * we have an INIT override of the 10809 * default 10810 */ 10811 if (initm.sinit_max_attempts) 10812 asoc->max_init_times = initm.sinit_max_attempts; 10813 if (initm.sinit_num_ostreams) 10814 asoc->pre_open_streams = initm.sinit_num_ostreams; 10815 if (initm.sinit_max_instreams) 10816 asoc->max_inbound_streams = initm.sinit_max_instreams; 10817 if (initm.sinit_max_init_timeo) 10818 asoc->initial_init_rto_max = initm.sinit_max_init_timeo; 10819 if (asoc->streamoutcnt < asoc->pre_open_streams) { 10820 /* Default is NOT correct */ 10821 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, defout:%d pre_open:%d\n", 10822 asoc->streamoutcnt, asoc->pre_open_streams); 10823 /* 10824 * What happens if this 10825 * fails? we panic ... 
10826 */ 10827 { 10828 struct sctp_stream_out *tmp_str; 10829 int had_lock = 0; 10830 10831 if (hold_tcblock) { 10832 had_lock = 1; 10833 SCTP_TCB_UNLOCK(stcb); 10834 } 10835 SCTP_MALLOC(tmp_str, 10836 struct sctp_stream_out *, 10837 (asoc->pre_open_streams * 10838 sizeof(struct sctp_stream_out)), 10839 "StreamsOut"); 10840 if (had_lock) { 10841 SCTP_TCB_LOCK(stcb); 10842 } 10843 if (tmp_str != NULL) { 10844 SCTP_FREE(asoc->strmout); 10845 asoc->strmout = tmp_str; 10846 asoc->streamoutcnt = asoc->pre_open_streams; 10847 } else { 10848 asoc->pre_open_streams = asoc->streamoutcnt; 10849 } 10850 } 10851 for (i = 0; i < asoc->streamoutcnt; i++) { 10852 /*- 10853 * inbound side must be set 10854 * to 0xffff, also NOTE when 10855 * we get the INIT-ACK back 10856 * (for INIT sender) we MUST 10857 * reduce the count 10858 * (streamoutcnt) but first 10859 * check if we sent to any 10860 * of the upper streams that 10861 * were dropped (if some 10862 * were). Those that were 10863 * dropped must be notified 10864 * to the upper layer as 10865 * failed to send. 10866 */ 10867 asoc->strmout[i].next_sequence_sent = 0x0; 10868 TAILQ_INIT(&asoc->strmout[i].outqueue); 10869 asoc->strmout[i].stream_no = i; 10870 asoc->strmout[i].last_msg_incomplete = 0; 10871 asoc->strmout[i].next_spoke.tqe_next = 0; 10872 asoc->strmout[i].next_spoke.tqe_prev = 0; 10873 } 10874 } 10875 } 10876 } 10877 hold_tcblock = 1; 10878 /* out with the INIT */ 10879 queue_only_for_init = 1; 10880 /*- 10881 * we may want to dig in after this call and adjust the MTU 10882 * value. It defaulted to 1500 (constant) but the ro 10883 * structure may now have an update and thus we may need to 10884 * change it BEFORE we append the message. 
10885 */ 10886 net = stcb->asoc.primary_destination; 10887 asoc = &stcb->asoc; 10888 } 10889 } 10890 if ((SCTP_SO_IS_NBIO(so) 10891 || (flags & MSG_NBIO) 10892 )) { 10893 non_blocking = 1; 10894 } 10895 asoc = &stcb->asoc; 10896 10897 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) { 10898 if (sndlen > asoc->smallest_mtu) { 10899 error = EMSGSIZE; 10900 goto out_unlocked; 10901 } 10902 } 10903 /* would we block? */ 10904 if (non_blocking) { 10905 if ((SCTP_SB_LIMIT_SND(so) < 10906 (sndlen + stcb->asoc.total_output_queue_size)) || 10907 (stcb->asoc.chunks_on_out_queue > 10908 sctp_max_chunks_on_queue)) { 10909 error = EWOULDBLOCK; 10910 atomic_add_int(&stcb->sctp_ep->total_nospaces, 1); 10911 goto out_unlocked; 10912 } 10913 } 10914 /* Keep the stcb from being freed under our feet */ 10915 atomic_add_int(&stcb->asoc.refcnt, 1); 10916 free_cnt_applied = 1; 10917 10918 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 10919 error = ECONNRESET; 10920 goto out_unlocked; 10921 } 10922 if (create_lock_applied) { 10923 SCTP_ASOC_CREATE_UNLOCK(inp); 10924 create_lock_applied = 0; 10925 } 10926 if (asoc->stream_reset_outstanding) { 10927 /* 10928 * Can't queue any data while stream reset is underway. 
10929 */ 10930 error = EAGAIN; 10931 goto out_unlocked; 10932 } 10933 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 10934 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) { 10935 queue_only = 1; 10936 } 10937 if ((use_rcvinfo == 0) || (srcv == NULL)) { 10938 /* Grab the default stuff from the asoc */ 10939 srcv = &stcb->asoc.def_send; 10940 } 10941 /* we are now done with all control */ 10942 if (control) { 10943 sctp_m_freem(control); 10944 control = NULL; 10945 } 10946 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) || 10947 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) || 10948 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 10949 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) { 10950 if ((use_rcvinfo) && 10951 (srcv->sinfo_flags & SCTP_ABORT)) { 10952 ; 10953 } else { 10954 error = ECONNRESET; 10955 goto out_unlocked; 10956 } 10957 } 10958 /* Ok, we will attempt a msgsnd :> */ 10959 if (p) { 10960 p->td_proc->p_stats->p_ru.ru_msgsnd++; 10961 } 10962 if (stcb) { 10963 if (((srcv->sinfo_flags | temp_flags) & SCTP_ADDR_OVER) == 0) { 10964 net = stcb->asoc.primary_destination; 10965 } 10966 } 10967 if (net == NULL) { 10968 error = EINVAL; 10969 goto out_unlocked; 10970 } 10971 if ((net->flight_size > net->cwnd) && (sctp_cmt_on_off == 0)) { 10972 /*- 10973 * CMT: Added check for CMT above. net above is the primary 10974 * dest. If CMT is ON, sender should always attempt to send 10975 * with the output routine sctp_fill_outqueue() that loops 10976 * through all destination addresses. Therefore, if CMT is 10977 * ON, queue_only is NOT set to 1 here, so that 10978 * sctp_chunk_output() can be called below. 
10979 */ 10980 queue_only = 1; 10981 10982 } else if (asoc->ifp_had_enobuf) { 10983 SCTP_STAT_INCR(sctps_ifnomemqueued); 10984 if (net->flight_size > (net->mtu * 2)) 10985 queue_only = 1; 10986 asoc->ifp_had_enobuf = 0; 10987 } else { 10988 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 10989 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * sizeof(struct sctp_data_chunk))); 10990 } 10991 /* Are we aborting? */ 10992 if (srcv->sinfo_flags & SCTP_ABORT) { 10993 struct mbuf *mm; 10994 int tot_demand, tot_out = 0, max; 10995 10996 SCTP_STAT_INCR(sctps_sends_with_abort); 10997 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 10998 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) { 10999 /* It has to be up before we abort */ 11000 /* how big is the user initiated abort? */ 11001 error = EINVAL; 11002 goto out; 11003 } 11004 if (hold_tcblock) { 11005 SCTP_TCB_UNLOCK(stcb); 11006 hold_tcblock = 0; 11007 } 11008 if (top) { 11009 struct mbuf *cntm; 11010 11011 mm = sctp_get_mbuf_for_msg(1, 0, M_WAIT, 1, MT_DATA); 11012 11013 cntm = top; 11014 while (cntm) { 11015 tot_out += SCTP_BUF_LEN(cntm); 11016 cntm = SCTP_BUF_NEXT(cntm); 11017 } 11018 tot_demand = (tot_out + sizeof(struct sctp_paramhdr)); 11019 } else { 11020 /* Must fit in a MTU */ 11021 if (uio) 11022 tot_out = uio->uio_resid; 11023 tot_demand = (tot_out + sizeof(struct sctp_paramhdr)); 11024 mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAIT, 1, MT_DATA); 11025 } 11026 if (mm == NULL) { 11027 error = ENOMEM; 11028 goto out; 11029 } 11030 max = asoc->smallest_mtu - sizeof(struct sctp_paramhdr); 11031 max -= sizeof(struct sctp_abort_msg); 11032 if (tot_out > max) { 11033 tot_out = max; 11034 } 11035 if (mm) { 11036 struct sctp_paramhdr *ph; 11037 11038 /* now move forward the data pointer */ 11039 ph = mtod(mm, struct sctp_paramhdr *); 11040 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 11041 ph->param_length = htons((sizeof(struct sctp_paramhdr) + 
tot_out)); 11042 ph++; 11043 SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr); 11044 if (top == NULL) { 11045 error = uiomove((caddr_t)ph, (int)tot_out, uio); 11046 if (error) { 11047 /*- 11048 * Here if we can't get his data we 11049 * still abort we just don't get to 11050 * send the users note :-0 11051 */ 11052 sctp_m_freem(mm); 11053 mm = NULL; 11054 } 11055 } else { 11056 SCTP_BUF_NEXT(mm) = top; 11057 } 11058 } 11059 if (hold_tcblock == 0) { 11060 SCTP_TCB_LOCK(stcb); 11061 hold_tcblock = 1; 11062 } 11063 atomic_add_int(&stcb->asoc.refcnt, -1); 11064 free_cnt_applied = 0; 11065 /* release this lock, otherwise we hang on ourselves */ 11066 sctp_abort_an_association(stcb->sctp_ep, stcb, 11067 SCTP_RESPONSE_TO_USER_REQ, 11068 mm); 11069 /* now relock the stcb so everything is sane */ 11070 hold_tcblock = 0; 11071 stcb = NULL; 11072 goto out_unlocked; 11073 } 11074 /* Calculate the maximum we can send */ 11075 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size) { 11076 max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size; 11077 } else { 11078 max_len = 0; 11079 } 11080 if (hold_tcblock) { 11081 SCTP_TCB_UNLOCK(stcb); 11082 hold_tcblock = 0; 11083 } 11084 /* Is the stream no. valid? */ 11085 if (srcv->sinfo_stream >= asoc->streamoutcnt) { 11086 /* Invalid stream number */ 11087 error = EINVAL; 11088 goto out_unlocked; 11089 } 11090 if (asoc->strmout == NULL) { 11091 /* huh? software error */ 11092 error = EFAULT; 11093 goto out_unlocked; 11094 } 11095 len = 0; 11096 if (max_len < sctp_add_more_threshold) { 11097 /* No room right no ! 
*/ 11098 SOCKBUF_LOCK(&so->so_snd); 11099 while (SCTP_SB_LIMIT_SND(so) < (stcb->asoc.total_output_queue_size + sctp_add_more_threshold)) { 11100 #ifdef SCTP_BLK_LOGGING 11101 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, 11102 so, asoc, uio->uio_resid); 11103 #endif 11104 be.error = 0; 11105 stcb->block_entry = &be; 11106 error = sbwait(&so->so_snd); 11107 stcb->block_entry = NULL; 11108 if (error || so->so_error || be.error) { 11109 if (error == 0) { 11110 if (so->so_error) 11111 error = so->so_error; 11112 if (be.error) { 11113 error = be.error; 11114 } 11115 } 11116 SOCKBUF_UNLOCK(&so->so_snd); 11117 goto out_unlocked; 11118 } 11119 #ifdef SCTP_BLK_LOGGING 11120 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK, 11121 so, asoc, stcb->asoc.total_output_queue_size); 11122 #endif 11123 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 11124 goto out_unlocked; 11125 } 11126 } 11127 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size) { 11128 max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size; 11129 } else { 11130 max_len = 0; 11131 } 11132 SOCKBUF_UNLOCK(&so->so_snd); 11133 } 11134 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 11135 goto out_unlocked; 11136 } 11137 atomic_add_int(&stcb->total_sends, 1); 11138 if (top == NULL) { 11139 struct sctp_stream_queue_pending *sp; 11140 11141 #ifdef INVARIANTS 11142 struct sctp_stream_queue_pending *msp; 11143 11144 #endif 11145 struct sctp_stream_out *strm; 11146 uint32_t sndout, initial_out; 11147 int user_marks_eor; 11148 11149 if (uio->uio_resid == 0) { 11150 if (srcv->sinfo_flags & SCTP_EOF) { 11151 got_all_of_the_send = 1; 11152 goto dataless_eof; 11153 } else { 11154 error = EINVAL; 11155 goto out; 11156 } 11157 } 11158 initial_out = uio->uio_resid; 11159 11160 SCTP_TCB_SEND_LOCK(stcb); 11161 if ((asoc->stream_locked) && 11162 (asoc->stream_locked_on != srcv->sinfo_stream)) { 11163 SCTP_TCB_SEND_UNLOCK(stcb); 11164 error = EAGAIN; 11165 goto out; 11166 } 11167 SCTP_TCB_SEND_UNLOCK(stcb); 
11168 11169 strm = &stcb->asoc.strmout[srcv->sinfo_stream]; 11170 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 11171 if (strm->last_msg_incomplete == 0) { 11172 do_a_copy_in: 11173 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error, non_blocking); 11174 if ((sp == NULL) || (error)) { 11175 goto out; 11176 } 11177 SCTP_TCB_SEND_LOCK(stcb); 11178 #ifdef INVARIANTS 11179 msp = TAILQ_LAST(&strm->outqueue, sctp_streamhead); 11180 if (msp && (msp->msg_is_complete == 0)) 11181 panic("Huh, new mesg and old not done?"); 11182 #endif 11183 if (sp->msg_is_complete) { 11184 strm->last_msg_incomplete = 0; 11185 asoc->stream_locked = 0; 11186 } else { 11187 /* 11188 * Just got locked to this guy in case of an 11189 * interupt. 11190 */ 11191 strm->last_msg_incomplete = 1; 11192 asoc->stream_locked = 1; 11193 asoc->stream_locked_on = srcv->sinfo_stream; 11194 sp->sender_all_done = 0; 11195 } 11196 sctp_snd_sb_alloc(stcb, sp->length); 11197 atomic_add_int(&asoc->stream_queue_cnt, 1); 11198 if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) { 11199 sp->strseq = strm->next_sequence_sent; 11200 #ifdef SCTP_LOG_SENDING_STR 11201 sctp_misc_ints(SCTP_STRMOUT_LOG_ASSIGN, 11202 (uintptr_t) stcb, (uintptr_t) sp, 11203 (uint32_t) ((srcv->sinfo_stream << 16) | sp->strseq), 0); 11204 #endif 11205 strm->next_sequence_sent++; 11206 } else { 11207 SCTP_STAT_INCR(sctps_sends_with_unord); 11208 } 11209 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next); 11210 if ((strm->next_spoke.tqe_next == NULL) && 11211 (strm->next_spoke.tqe_prev == NULL)) { 11212 /* Not on wheel, insert */ 11213 sctp_insert_on_wheel(stcb, asoc, strm, 1); 11214 } 11215 SCTP_TCB_SEND_UNLOCK(stcb); 11216 } else { 11217 SCTP_TCB_SEND_LOCK(stcb); 11218 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead); 11219 SCTP_TCB_SEND_UNLOCK(stcb); 11220 if (sp == NULL) { 11221 /* ???? Huh ??? 
last msg is gone */ 11222 #ifdef INVARIANTS 11223 panic("Warning: Last msg marked incomplete, yet nothing left?"); 11224 #else 11225 SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n"); 11226 strm->last_msg_incomplete = 0; 11227 #endif 11228 goto do_a_copy_in; 11229 11230 } 11231 } 11232 while (uio->uio_resid > 0) { 11233 /* How much room do we have? */ 11234 struct mbuf *new_tail, *mm; 11235 11236 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size) 11237 max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size; 11238 else 11239 max_len = 0; 11240 11241 if ((max_len > sctp_add_more_threshold) || 11242 (uio->uio_resid && (uio->uio_resid < max_len))) { 11243 sndout = 0; 11244 new_tail = NULL; 11245 if (hold_tcblock) { 11246 SCTP_TCB_UNLOCK(stcb); 11247 hold_tcblock = 0; 11248 } 11249 mm = sctp_copy_resume(sp, uio, srcv, max_len, user_marks_eor, &error, &sndout, &new_tail); 11250 if ((mm == NULL) || error) { 11251 if (mm) { 11252 sctp_m_freem(mm); 11253 } 11254 goto out; 11255 } 11256 /* Update the mbuf and count */ 11257 SCTP_TCB_SEND_LOCK(stcb); 11258 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 11259 /* 11260 * we need to get out. Peer probably 11261 * aborted. 11262 */ 11263 sctp_m_freem(mm); 11264 if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) 11265 error = ECONNRESET; 11266 SCTP_TCB_SEND_UNLOCK(stcb); 11267 goto out; 11268 } 11269 if (sp->tail_mbuf) { 11270 /* tack it to the end */ 11271 SCTP_BUF_NEXT(sp->tail_mbuf) = mm; 11272 sp->tail_mbuf = new_tail; 11273 } else { 11274 /* A stolen mbuf */ 11275 sp->data = mm; 11276 sp->tail_mbuf = new_tail; 11277 } 11278 sctp_snd_sb_alloc(stcb, sndout); 11279 atomic_add_int(&sp->length, sndout); 11280 len += sndout; 11281 11282 /* Did we reach EOR? 
*/ 11283 if ((uio->uio_resid == 0) && 11284 ((user_marks_eor == 0) || 11285 (srcv->sinfo_flags & SCTP_EOF) || 11286 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR))) 11287 ) { 11288 sp->msg_is_complete = 1; 11289 } else { 11290 sp->msg_is_complete = 0; 11291 } 11292 SCTP_TCB_SEND_UNLOCK(stcb); 11293 } 11294 if (uio->uio_resid == 0) { 11295 /* got it all? */ 11296 continue; 11297 } 11298 /* PR-SCTP? */ 11299 if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) { 11300 /* 11301 * This is ugly but we must assure locking 11302 * order 11303 */ 11304 if (hold_tcblock == 0) { 11305 SCTP_TCB_LOCK(stcb); 11306 hold_tcblock = 1; 11307 } 11308 sctp_prune_prsctp(stcb, asoc, srcv, sndlen); 11309 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size) 11310 max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size; 11311 else 11312 max_len = 0; 11313 if (max_len > 0) { 11314 continue; 11315 } 11316 SCTP_TCB_UNLOCK(stcb); 11317 hold_tcblock = 0; 11318 } 11319 /* wait for space now */ 11320 if (non_blocking) { 11321 /* Non-blocking io in place out */ 11322 goto skip_out_eof; 11323 } 11324 if ((net->flight_size > net->cwnd) && 11325 (sctp_cmt_on_off == 0)) { 11326 queue_only = 1; 11327 11328 } else if (asoc->ifp_had_enobuf) { 11329 SCTP_STAT_INCR(sctps_ifnomemqueued); 11330 if (net->flight_size > (net->mtu * 2)) { 11331 queue_only = 1; 11332 } else { 11333 queue_only = 0; 11334 } 11335 asoc->ifp_had_enobuf = 0; 11336 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 11337 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * 11338 sizeof(struct sctp_data_chunk))); 11339 } else { 11340 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 11341 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * 11342 sizeof(struct sctp_data_chunk))); 11343 if (net->flight_size > (net->mtu * stcb->asoc.max_burst)) { 11344 queue_only = 1; 11345 
SCTP_STAT_INCR(sctps_send_burst_avoid); 11346 } else if (net->flight_size > net->cwnd) { 11347 queue_only = 1; 11348 SCTP_STAT_INCR(sctps_send_cwnd_avoid); 11349 } else { 11350 queue_only = 0; 11351 } 11352 } 11353 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && 11354 (stcb->asoc.total_flight > 0) && 11355 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) 11356 ) { 11357 11358 /*- 11359 * Ok, Nagle is set on and we have data outstanding. 11360 * Don't send anything and let SACKs drive out the 11361 * data unless wen have a "full" segment to send. 11362 */ 11363 #ifdef SCTP_NAGLE_LOGGING 11364 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED); 11365 #endif 11366 SCTP_STAT_INCR(sctps_naglequeued); 11367 nagle_applies = 1; 11368 } else { 11369 #ifdef SCTP_NAGLE_LOGGING 11370 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) 11371 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED); 11372 #endif 11373 SCTP_STAT_INCR(sctps_naglesent); 11374 nagle_applies = 0; 11375 } 11376 /* What about the INIT, send it maybe */ 11377 #ifdef SCTP_BLK_LOGGING 11378 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only, nagle_applies, un_sent); 11379 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size, stcb->asoc.total_flight, 11380 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count); 11381 #endif 11382 if (queue_only_for_init) { 11383 if (hold_tcblock == 0) { 11384 SCTP_TCB_LOCK(stcb); 11385 hold_tcblock = 1; 11386 } 11387 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) { 11388 /* a collision took us forward? */ 11389 queue_only_for_init = 0; 11390 queue_only = 0; 11391 } else { 11392 sctp_send_initiate(inp, stcb); 11393 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT; 11394 queue_only_for_init = 0; 11395 queue_only = 1; 11396 } 11397 } 11398 if ((queue_only == 0) && (nagle_applies == 0) 11399 ) { 11400 /*- 11401 * need to start chunk output 11402 * before blocking.. 
note that if 11403 * a lock is already applied, then 11404 * the input via the net is happening 11405 * and I don't need to start output :-D 11406 */ 11407 if (hold_tcblock == 0) { 11408 if (SCTP_TCB_TRYLOCK(stcb)) { 11409 hold_tcblock = 1; 11410 sctp_chunk_output(inp, 11411 stcb, 11412 SCTP_OUTPUT_FROM_USR_SEND); 11413 11414 } 11415 } else { 11416 sctp_chunk_output(inp, 11417 stcb, 11418 SCTP_OUTPUT_FROM_USR_SEND); 11419 } 11420 if (hold_tcblock == 1) { 11421 SCTP_TCB_UNLOCK(stcb); 11422 hold_tcblock = 0; 11423 } 11424 } 11425 SOCKBUF_LOCK(&so->so_snd); 11426 /*- 11427 * This is a bit strange, but I think it will 11428 * work. The total_output_queue_size is locked and 11429 * protected by the TCB_LOCK, which we just released. 11430 * There is a race that can occur between releasing it 11431 * above, and me getting the socket lock, where sacks 11432 * come in but we have not put the SB_WAIT on the 11433 * so_snd buffer to get the wakeup. After the LOCK 11434 * is applied the sack_processing will also need to 11435 * LOCK the so->so_snd to do the actual sowwakeup(). So 11436 * once we have the socket buffer lock if we recheck the 11437 * size we KNOW we will get to sleep safely with the 11438 * wakeup flag in place. 
11439 */ 11440 if (SCTP_SB_LIMIT_SND(so) < (stcb->asoc.total_output_queue_size + sctp_add_more_threshold)) { 11441 #ifdef SCTP_BLK_LOGGING 11442 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK, 11443 so, asoc, uio->uio_resid); 11444 #endif 11445 be.error = 0; 11446 stcb->block_entry = &be; 11447 error = sbwait(&so->so_snd); 11448 stcb->block_entry = NULL; 11449 11450 if (error || so->so_error || be.error) { 11451 if (error == 0) { 11452 if (so->so_error) 11453 error = so->so_error; 11454 if (be.error) { 11455 error = be.error; 11456 } 11457 } 11458 SOCKBUF_UNLOCK(&so->so_snd); 11459 goto out_unlocked; 11460 } 11461 #ifdef SCTP_BLK_LOGGING 11462 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK, 11463 so, asoc, stcb->asoc.total_output_queue_size); 11464 #endif 11465 } 11466 SOCKBUF_UNLOCK(&so->so_snd); 11467 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 11468 goto out_unlocked; 11469 } 11470 } 11471 SCTP_TCB_SEND_LOCK(stcb); 11472 if (sp) { 11473 if (sp->msg_is_complete == 0) { 11474 strm->last_msg_incomplete = 1; 11475 asoc->stream_locked = 1; 11476 asoc->stream_locked_on = srcv->sinfo_stream; 11477 } else { 11478 sp->sender_all_done = 1; 11479 strm->last_msg_incomplete = 0; 11480 asoc->stream_locked = 0; 11481 } 11482 } else { 11483 SCTP_PRINTF("Huh no sp TSNH?\n"); 11484 strm->last_msg_incomplete = 0; 11485 asoc->stream_locked = 0; 11486 } 11487 SCTP_TCB_SEND_UNLOCK(stcb); 11488 if (uio->uio_resid == 0) { 11489 got_all_of_the_send = 1; 11490 } 11491 } else if (top) { 11492 /* We send in a 0, since we do NOT have any locks */ 11493 error = sctp_msg_append(stcb, net, top, srcv, 0); 11494 top = NULL; 11495 } 11496 if (error) { 11497 goto out; 11498 } 11499 dataless_eof: 11500 /* EOF thing ? 
*/ 11501 if ((srcv->sinfo_flags & SCTP_EOF) && 11502 (got_all_of_the_send == 1) && 11503 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) 11504 ) { 11505 SCTP_STAT_INCR(sctps_sends_with_eof); 11506 error = 0; 11507 if (hold_tcblock == 0) { 11508 SCTP_TCB_LOCK(stcb); 11509 hold_tcblock = 1; 11510 } 11511 if (TAILQ_EMPTY(&asoc->send_queue) && 11512 TAILQ_EMPTY(&asoc->sent_queue) && 11513 (asoc->stream_queue_cnt == 0)) { 11514 if (asoc->locked_on_sending) { 11515 goto abort_anyway; 11516 } 11517 /* there is nothing queued to send, so I'm done... */ 11518 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 11519 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 11520 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 11521 /* only send SHUTDOWN the first time through */ 11522 sctp_send_shutdown(stcb, stcb->asoc.primary_destination); 11523 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 11524 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 11525 } 11526 asoc->state = SCTP_STATE_SHUTDOWN_SENT; 11527 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, 11528 asoc->primary_destination); 11529 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 11530 asoc->primary_destination); 11531 } 11532 } else { 11533 /*- 11534 * we still got (or just got) data to send, so set 11535 * SHUTDOWN_PENDING 11536 */ 11537 /*- 11538 * XXX sockets draft says that SCTP_EOF should be 11539 * sent with no data. 
currently, we will allow user 11540 * data to be sent first and move to 11541 * SHUTDOWN-PENDING 11542 */ 11543 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 11544 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 11545 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 11546 if (hold_tcblock == 0) { 11547 SCTP_TCB_LOCK(stcb); 11548 hold_tcblock = 1; 11549 } 11550 if (asoc->locked_on_sending) { 11551 /* Locked to send out the data */ 11552 struct sctp_stream_queue_pending *sp; 11553 11554 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); 11555 if (sp) { 11556 if ((sp->length == 0) && (sp->msg_is_complete == 0)) 11557 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 11558 } 11559 } 11560 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; 11561 if (TAILQ_EMPTY(&asoc->send_queue) && 11562 TAILQ_EMPTY(&asoc->sent_queue) && 11563 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 11564 abort_anyway: 11565 if (free_cnt_applied) { 11566 atomic_add_int(&stcb->asoc.refcnt, -1); 11567 free_cnt_applied = 0; 11568 } 11569 sctp_abort_an_association(stcb->sctp_ep, stcb, 11570 SCTP_RESPONSE_TO_USER_REQ, 11571 NULL); 11572 /* 11573 * now relock the stcb so everything 11574 * is sane 11575 */ 11576 hold_tcblock = 0; 11577 stcb = NULL; 11578 goto out; 11579 } 11580 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 11581 asoc->primary_destination); 11582 } 11583 } 11584 } 11585 skip_out_eof: 11586 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) { 11587 some_on_control = 1; 11588 } 11589 if ((net->flight_size > net->cwnd) && 11590 (sctp_cmt_on_off == 0)) { 11591 queue_only = 1; 11592 } else if (asoc->ifp_had_enobuf) { 11593 SCTP_STAT_INCR(sctps_ifnomemqueued); 11594 if (net->flight_size > (net->mtu * 2)) { 11595 queue_only = 1; 11596 } else { 11597 queue_only = 0; 11598 } 11599 asoc->ifp_had_enobuf = 0; 11600 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 11601 ((stcb->asoc.chunks_on_out_queue - 
stcb->asoc.total_flight_count) * 11602 sizeof(struct sctp_data_chunk))); 11603 } else { 11604 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 11605 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * 11606 sizeof(struct sctp_data_chunk))); 11607 if (net->flight_size > (net->mtu * stcb->asoc.max_burst)) { 11608 queue_only = 1; 11609 SCTP_STAT_INCR(sctps_send_burst_avoid); 11610 } else if (net->flight_size > net->cwnd) { 11611 queue_only = 1; 11612 SCTP_STAT_INCR(sctps_send_cwnd_avoid); 11613 } else { 11614 queue_only = 0; 11615 } 11616 } 11617 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && 11618 (stcb->asoc.total_flight > 0) && 11619 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) 11620 ) { 11621 11622 /*- 11623 * Ok, Nagle is set on and we have data outstanding. 11624 * Don't send anything and let SACKs drive out the 11625 * data unless wen have a "full" segment to send. 11626 */ 11627 #ifdef SCTP_NAGLE_LOGGING 11628 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED); 11629 #endif 11630 SCTP_STAT_INCR(sctps_naglequeued); 11631 nagle_applies = 1; 11632 } else { 11633 #ifdef SCTP_NAGLE_LOGGING 11634 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) 11635 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED); 11636 #endif 11637 SCTP_STAT_INCR(sctps_naglesent); 11638 nagle_applies = 0; 11639 } 11640 if (queue_only_for_init) { 11641 if (hold_tcblock == 0) { 11642 SCTP_TCB_LOCK(stcb); 11643 hold_tcblock = 1; 11644 } 11645 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) { 11646 /* a collision took us forward? 
*/ 11647 queue_only_for_init = 0; 11648 queue_only = 0; 11649 } else { 11650 sctp_send_initiate(inp, stcb); 11651 if (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING) 11652 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT | 11653 SCTP_STATE_SHUTDOWN_PENDING; 11654 else 11655 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT; 11656 queue_only_for_init = 0; 11657 queue_only = 1; 11658 } 11659 } 11660 if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) { 11661 /* we can attempt to send too. */ 11662 if (hold_tcblock == 0) { 11663 /* 11664 * If there is activity recv'ing sacks no need to 11665 * send 11666 */ 11667 if (SCTP_TCB_TRYLOCK(stcb)) { 11668 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND); 11669 hold_tcblock = 1; 11670 } 11671 } else { 11672 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND); 11673 } 11674 } else if ((queue_only == 0) && 11675 (stcb->asoc.peers_rwnd == 0) && 11676 (stcb->asoc.total_flight == 0)) { 11677 /* We get to have a probe outstanding */ 11678 if (hold_tcblock == 0) { 11679 hold_tcblock = 1; 11680 SCTP_TCB_LOCK(stcb); 11681 } 11682 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND); 11683 } else if (some_on_control) { 11684 int num_out, reason, cwnd_full, frag_point; 11685 11686 /* Here we do control only */ 11687 if (hold_tcblock == 0) { 11688 hold_tcblock = 1; 11689 SCTP_TCB_LOCK(stcb); 11690 } 11691 frag_point = sctp_get_frag_point(stcb, &stcb->asoc); 11692 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out, 11693 &reason, 1, &cwnd_full, 1, &now, &now_filled, frag_point); 11694 } 11695 SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d \n", 11696 queue_only, stcb->asoc.peers_rwnd, un_sent, 11697 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue, 11698 stcb->asoc.total_output_queue_size); 11699 11700 out: 11701 out_unlocked: 11702 11703 if (create_lock_applied) { 11704 SCTP_ASOC_CREATE_UNLOCK(inp); 11705 create_lock_applied = 0; 11706 } 11707 if 
((stcb) && hold_tcblock) {
	SCTP_TCB_UNLOCK(stcb);
}
if (stcb && free_cnt_applied) {
	/* release the association reference taken earlier in this call */
	atomic_add_int(&stcb->asoc.refcnt, -1);
}
#ifdef INVARIANTS
if (stcb) {
	/* sanity check: never return to the caller with a tcb mutex held */
	if (mtx_owned(&stcb->tcb_mtx)) {
		panic("Leaving with tcb mtx owned?");
	}
	if (mtx_owned(&stcb->tcb_send_mtx)) {
		panic("Leaving with tcb send mtx owned?");
	}
}
#endif
if (top) {
	/* free any unconsumed user-data mbufs on the exit path */
	sctp_m_freem(top);
}
if (control) {
	/* free any unconsumed ancillary-data mbufs on the exit path */
	sctp_m_freem(control);
}
return (error);
}


/*
 * Generate an AUTHentication chunk, if required, and add it to the
 * outgoing mbuf chain 'm' (via sctp_copy_mbufchain).
 *
 * m        - existing outgoing chunk chain; may be NULL to start a new one
 * m_end    - in/out: pointer to the last mbuf of the chain
 * auth_ret - out: pointer to the AUTH chunk header placed in the chain
 * offset   - out: byte offset of the AUTH chunk within the chain
 * stcb     - association control block supplying peer AUTH parameters
 * chunk    - type of the chunk that is about to be sent
 *
 * Returns the resulting mbuf chain.  When AUTH is disabled, not supported
 * by the peer, not required for 'chunk', on bad arguments, or on mbuf
 * allocation failure, the chain is returned unchanged.  The HMAC digest
 * itself is NOT computed here; only the chunk header is laid out (see the
 * note below).
 */
struct mbuf *
sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
    struct sctp_auth_chunk **auth_ret, uint32_t * offset,
    struct sctp_tcb *stcb, uint8_t chunk)
{
	struct mbuf *m_auth;
	struct sctp_auth_chunk *auth;
	int chunk_len;

	/* bad output pointers or no association: nothing we can do */
	if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
	    (stcb == NULL))
		return (m);

	/* sysctl disabled auth? */
	if (sctp_auth_disable)
		return (m);

	/* peer doesn't do auth... */
	if (!stcb->asoc.peer_supports_auth) {
		return (m);
	}
	/* does the requested chunk require auth? */
	if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
		return (m);
	}
	m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_DONTWAIT, 1, MT_HEADER);
	if (m_auth == NULL) {
		/* no mbuf's */
		return (m);
	}
	/* reserve some space if this will be the first mbuf */
	if (m == NULL)
		SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
	/* fill in the AUTH chunk details */
	auth = mtod(m_auth, struct sctp_auth_chunk *);
	bzero(auth, sizeof(*auth));
	auth->ch.chunk_type = SCTP_AUTHENTICATION;
	auth->ch.chunk_flags = 0;
	/*
	 * chunk length covers the AUTH header plus the digest length of
	 * the peer's negotiated HMAC algorithm
	 */
	chunk_len = sizeof(*auth) +
	    sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
	auth->ch.chunk_length = htons(chunk_len);
	auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
	/* key id and hmac digest will be computed and filled in upon send */

	/* save the offset where the auth was inserted into the chain */
	if (m != NULL) {
		struct mbuf *cn;

		/*
		 * the AUTH chunk lands after the existing chain, so its
		 * offset is the chain's current total length
		 */
		*offset = 0;
		cn = m;
		while (cn) {
			*offset += SCTP_BUF_LEN(cn);
			cn = SCTP_BUF_NEXT(cn);
		}
	} else
		*offset = 0;

	/* update length and return pointer to the auth chunk */
	SCTP_BUF_LEN(m_auth) = chunk_len;
	/*
	 * NOTE(review): auth_ret was already checked non-NULL at entry,
	 * so this second guard is redundant but harmless
	 */
	m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
	if (auth_ret != NULL)
		*auth_ret = auth;

	return (m);
}