1 /*- 2 * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * a) Redistributions of source code must retain the above copyright notice, 8 * this list of conditions and the following disclaimer. 9 * 10 * b) Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the distribution. 13 * 14 * c) Neither the name of Cisco Systems, Inc. nor the names of its 15 * contributors may be used to endorse or promote products derived 16 * from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 
29 */ 30 31 /* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $ */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include <netinet/sctp_os.h> 37 #include <sys/proc.h> 38 #include <netinet/sctp_var.h> 39 #include <netinet/sctp_header.h> 40 #include <netinet/sctp_pcb.h> 41 #include <netinet/sctputil.h> 42 #include <netinet/sctp_output.h> 43 #include <netinet/sctp_uio.h> 44 #include <netinet/sctputil.h> 45 #include <netinet/sctp_auth.h> 46 #include <netinet/sctp_timer.h> 47 #include <netinet/sctp_asconf.h> 48 #include <netinet/sctp_indata.h> 49 #include <netinet/sctp_bsd_addr.h> 50 51 #ifdef SCTP_DEBUG 52 extern uint32_t sctp_debug_on; 53 54 #endif 55 56 57 58 #define SCTP_MAX_GAPS_INARRAY 4 59 struct sack_track { 60 uint8_t right_edge; /* mergable on the right edge */ 61 uint8_t left_edge; /* mergable on the left edge */ 62 uint8_t num_entries; 63 uint8_t spare; 64 struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY]; 65 }; 66 67 struct sack_track sack_array[256] = { 68 {0, 0, 0, 0, /* 0x00 */ 69 {{0, 0}, 70 {0, 0}, 71 {0, 0}, 72 {0, 0} 73 } 74 }, 75 {1, 0, 1, 0, /* 0x01 */ 76 {{0, 0}, 77 {0, 0}, 78 {0, 0}, 79 {0, 0} 80 } 81 }, 82 {0, 0, 1, 0, /* 0x02 */ 83 {{1, 1}, 84 {0, 0}, 85 {0, 0}, 86 {0, 0} 87 } 88 }, 89 {1, 0, 1, 0, /* 0x03 */ 90 {{0, 1}, 91 {0, 0}, 92 {0, 0}, 93 {0, 0} 94 } 95 }, 96 {0, 0, 1, 0, /* 0x04 */ 97 {{2, 2}, 98 {0, 0}, 99 {0, 0}, 100 {0, 0} 101 } 102 }, 103 {1, 0, 2, 0, /* 0x05 */ 104 {{0, 0}, 105 {2, 2}, 106 {0, 0}, 107 {0, 0} 108 } 109 }, 110 {0, 0, 1, 0, /* 0x06 */ 111 {{1, 2}, 112 {0, 0}, 113 {0, 0}, 114 {0, 0} 115 } 116 }, 117 {1, 0, 1, 0, /* 0x07 */ 118 {{0, 2}, 119 {0, 0}, 120 {0, 0}, 121 {0, 0} 122 } 123 }, 124 {0, 0, 1, 0, /* 0x08 */ 125 {{3, 3}, 126 {0, 0}, 127 {0, 0}, 128 {0, 0} 129 } 130 }, 131 {1, 0, 2, 0, /* 0x09 */ 132 {{0, 0}, 133 {3, 3}, 134 {0, 0}, 135 {0, 0} 136 } 137 }, 138 {0, 0, 2, 0, /* 0x0a */ 139 {{1, 1}, 140 {3, 3}, 141 {0, 0}, 142 {0, 0} 143 } 144 }, 145 {1, 0, 2, 0, /* 0x0b */ 146 {{0, 1}, 147 
{3, 3}, 148 {0, 0}, 149 {0, 0} 150 } 151 }, 152 {0, 0, 1, 0, /* 0x0c */ 153 {{2, 3}, 154 {0, 0}, 155 {0, 0}, 156 {0, 0} 157 } 158 }, 159 {1, 0, 2, 0, /* 0x0d */ 160 {{0, 0}, 161 {2, 3}, 162 {0, 0}, 163 {0, 0} 164 } 165 }, 166 {0, 0, 1, 0, /* 0x0e */ 167 {{1, 3}, 168 {0, 0}, 169 {0, 0}, 170 {0, 0} 171 } 172 }, 173 {1, 0, 1, 0, /* 0x0f */ 174 {{0, 3}, 175 {0, 0}, 176 {0, 0}, 177 {0, 0} 178 } 179 }, 180 {0, 0, 1, 0, /* 0x10 */ 181 {{4, 4}, 182 {0, 0}, 183 {0, 0}, 184 {0, 0} 185 } 186 }, 187 {1, 0, 2, 0, /* 0x11 */ 188 {{0, 0}, 189 {4, 4}, 190 {0, 0}, 191 {0, 0} 192 } 193 }, 194 {0, 0, 2, 0, /* 0x12 */ 195 {{1, 1}, 196 {4, 4}, 197 {0, 0}, 198 {0, 0} 199 } 200 }, 201 {1, 0, 2, 0, /* 0x13 */ 202 {{0, 1}, 203 {4, 4}, 204 {0, 0}, 205 {0, 0} 206 } 207 }, 208 {0, 0, 2, 0, /* 0x14 */ 209 {{2, 2}, 210 {4, 4}, 211 {0, 0}, 212 {0, 0} 213 } 214 }, 215 {1, 0, 3, 0, /* 0x15 */ 216 {{0, 0}, 217 {2, 2}, 218 {4, 4}, 219 {0, 0} 220 } 221 }, 222 {0, 0, 2, 0, /* 0x16 */ 223 {{1, 2}, 224 {4, 4}, 225 {0, 0}, 226 {0, 0} 227 } 228 }, 229 {1, 0, 2, 0, /* 0x17 */ 230 {{0, 2}, 231 {4, 4}, 232 {0, 0}, 233 {0, 0} 234 } 235 }, 236 {0, 0, 1, 0, /* 0x18 */ 237 {{3, 4}, 238 {0, 0}, 239 {0, 0}, 240 {0, 0} 241 } 242 }, 243 {1, 0, 2, 0, /* 0x19 */ 244 {{0, 0}, 245 {3, 4}, 246 {0, 0}, 247 {0, 0} 248 } 249 }, 250 {0, 0, 2, 0, /* 0x1a */ 251 {{1, 1}, 252 {3, 4}, 253 {0, 0}, 254 {0, 0} 255 } 256 }, 257 {1, 0, 2, 0, /* 0x1b */ 258 {{0, 1}, 259 {3, 4}, 260 {0, 0}, 261 {0, 0} 262 } 263 }, 264 {0, 0, 1, 0, /* 0x1c */ 265 {{2, 4}, 266 {0, 0}, 267 {0, 0}, 268 {0, 0} 269 } 270 }, 271 {1, 0, 2, 0, /* 0x1d */ 272 {{0, 0}, 273 {2, 4}, 274 {0, 0}, 275 {0, 0} 276 } 277 }, 278 {0, 0, 1, 0, /* 0x1e */ 279 {{1, 4}, 280 {0, 0}, 281 {0, 0}, 282 {0, 0} 283 } 284 }, 285 {1, 0, 1, 0, /* 0x1f */ 286 {{0, 4}, 287 {0, 0}, 288 {0, 0}, 289 {0, 0} 290 } 291 }, 292 {0, 0, 1, 0, /* 0x20 */ 293 {{5, 5}, 294 {0, 0}, 295 {0, 0}, 296 {0, 0} 297 } 298 }, 299 {1, 0, 2, 0, /* 0x21 */ 300 {{0, 0}, 301 {5, 5}, 302 {0, 0}, 303 {0, 0} 304 } 305 
}, 306 {0, 0, 2, 0, /* 0x22 */ 307 {{1, 1}, 308 {5, 5}, 309 {0, 0}, 310 {0, 0} 311 } 312 }, 313 {1, 0, 2, 0, /* 0x23 */ 314 {{0, 1}, 315 {5, 5}, 316 {0, 0}, 317 {0, 0} 318 } 319 }, 320 {0, 0, 2, 0, /* 0x24 */ 321 {{2, 2}, 322 {5, 5}, 323 {0, 0}, 324 {0, 0} 325 } 326 }, 327 {1, 0, 3, 0, /* 0x25 */ 328 {{0, 0}, 329 {2, 2}, 330 {5, 5}, 331 {0, 0} 332 } 333 }, 334 {0, 0, 2, 0, /* 0x26 */ 335 {{1, 2}, 336 {5, 5}, 337 {0, 0}, 338 {0, 0} 339 } 340 }, 341 {1, 0, 2, 0, /* 0x27 */ 342 {{0, 2}, 343 {5, 5}, 344 {0, 0}, 345 {0, 0} 346 } 347 }, 348 {0, 0, 2, 0, /* 0x28 */ 349 {{3, 3}, 350 {5, 5}, 351 {0, 0}, 352 {0, 0} 353 } 354 }, 355 {1, 0, 3, 0, /* 0x29 */ 356 {{0, 0}, 357 {3, 3}, 358 {5, 5}, 359 {0, 0} 360 } 361 }, 362 {0, 0, 3, 0, /* 0x2a */ 363 {{1, 1}, 364 {3, 3}, 365 {5, 5}, 366 {0, 0} 367 } 368 }, 369 {1, 0, 3, 0, /* 0x2b */ 370 {{0, 1}, 371 {3, 3}, 372 {5, 5}, 373 {0, 0} 374 } 375 }, 376 {0, 0, 2, 0, /* 0x2c */ 377 {{2, 3}, 378 {5, 5}, 379 {0, 0}, 380 {0, 0} 381 } 382 }, 383 {1, 0, 3, 0, /* 0x2d */ 384 {{0, 0}, 385 {2, 3}, 386 {5, 5}, 387 {0, 0} 388 } 389 }, 390 {0, 0, 2, 0, /* 0x2e */ 391 {{1, 3}, 392 {5, 5}, 393 {0, 0}, 394 {0, 0} 395 } 396 }, 397 {1, 0, 2, 0, /* 0x2f */ 398 {{0, 3}, 399 {5, 5}, 400 {0, 0}, 401 {0, 0} 402 } 403 }, 404 {0, 0, 1, 0, /* 0x30 */ 405 {{4, 5}, 406 {0, 0}, 407 {0, 0}, 408 {0, 0} 409 } 410 }, 411 {1, 0, 2, 0, /* 0x31 */ 412 {{0, 0}, 413 {4, 5}, 414 {0, 0}, 415 {0, 0} 416 } 417 }, 418 {0, 0, 2, 0, /* 0x32 */ 419 {{1, 1}, 420 {4, 5}, 421 {0, 0}, 422 {0, 0} 423 } 424 }, 425 {1, 0, 2, 0, /* 0x33 */ 426 {{0, 1}, 427 {4, 5}, 428 {0, 0}, 429 {0, 0} 430 } 431 }, 432 {0, 0, 2, 0, /* 0x34 */ 433 {{2, 2}, 434 {4, 5}, 435 {0, 0}, 436 {0, 0} 437 } 438 }, 439 {1, 0, 3, 0, /* 0x35 */ 440 {{0, 0}, 441 {2, 2}, 442 {4, 5}, 443 {0, 0} 444 } 445 }, 446 {0, 0, 2, 0, /* 0x36 */ 447 {{1, 2}, 448 {4, 5}, 449 {0, 0}, 450 {0, 0} 451 } 452 }, 453 {1, 0, 2, 0, /* 0x37 */ 454 {{0, 2}, 455 {4, 5}, 456 {0, 0}, 457 {0, 0} 458 } 459 }, 460 {0, 0, 1, 0, /* 0x38 */ 461 {{3, 
5}, 462 {0, 0}, 463 {0, 0}, 464 {0, 0} 465 } 466 }, 467 {1, 0, 2, 0, /* 0x39 */ 468 {{0, 0}, 469 {3, 5}, 470 {0, 0}, 471 {0, 0} 472 } 473 }, 474 {0, 0, 2, 0, /* 0x3a */ 475 {{1, 1}, 476 {3, 5}, 477 {0, 0}, 478 {0, 0} 479 } 480 }, 481 {1, 0, 2, 0, /* 0x3b */ 482 {{0, 1}, 483 {3, 5}, 484 {0, 0}, 485 {0, 0} 486 } 487 }, 488 {0, 0, 1, 0, /* 0x3c */ 489 {{2, 5}, 490 {0, 0}, 491 {0, 0}, 492 {0, 0} 493 } 494 }, 495 {1, 0, 2, 0, /* 0x3d */ 496 {{0, 0}, 497 {2, 5}, 498 {0, 0}, 499 {0, 0} 500 } 501 }, 502 {0, 0, 1, 0, /* 0x3e */ 503 {{1, 5}, 504 {0, 0}, 505 {0, 0}, 506 {0, 0} 507 } 508 }, 509 {1, 0, 1, 0, /* 0x3f */ 510 {{0, 5}, 511 {0, 0}, 512 {0, 0}, 513 {0, 0} 514 } 515 }, 516 {0, 0, 1, 0, /* 0x40 */ 517 {{6, 6}, 518 {0, 0}, 519 {0, 0}, 520 {0, 0} 521 } 522 }, 523 {1, 0, 2, 0, /* 0x41 */ 524 {{0, 0}, 525 {6, 6}, 526 {0, 0}, 527 {0, 0} 528 } 529 }, 530 {0, 0, 2, 0, /* 0x42 */ 531 {{1, 1}, 532 {6, 6}, 533 {0, 0}, 534 {0, 0} 535 } 536 }, 537 {1, 0, 2, 0, /* 0x43 */ 538 {{0, 1}, 539 {6, 6}, 540 {0, 0}, 541 {0, 0} 542 } 543 }, 544 {0, 0, 2, 0, /* 0x44 */ 545 {{2, 2}, 546 {6, 6}, 547 {0, 0}, 548 {0, 0} 549 } 550 }, 551 {1, 0, 3, 0, /* 0x45 */ 552 {{0, 0}, 553 {2, 2}, 554 {6, 6}, 555 {0, 0} 556 } 557 }, 558 {0, 0, 2, 0, /* 0x46 */ 559 {{1, 2}, 560 {6, 6}, 561 {0, 0}, 562 {0, 0} 563 } 564 }, 565 {1, 0, 2, 0, /* 0x47 */ 566 {{0, 2}, 567 {6, 6}, 568 {0, 0}, 569 {0, 0} 570 } 571 }, 572 {0, 0, 2, 0, /* 0x48 */ 573 {{3, 3}, 574 {6, 6}, 575 {0, 0}, 576 {0, 0} 577 } 578 }, 579 {1, 0, 3, 0, /* 0x49 */ 580 {{0, 0}, 581 {3, 3}, 582 {6, 6}, 583 {0, 0} 584 } 585 }, 586 {0, 0, 3, 0, /* 0x4a */ 587 {{1, 1}, 588 {3, 3}, 589 {6, 6}, 590 {0, 0} 591 } 592 }, 593 {1, 0, 3, 0, /* 0x4b */ 594 {{0, 1}, 595 {3, 3}, 596 {6, 6}, 597 {0, 0} 598 } 599 }, 600 {0, 0, 2, 0, /* 0x4c */ 601 {{2, 3}, 602 {6, 6}, 603 {0, 0}, 604 {0, 0} 605 } 606 }, 607 {1, 0, 3, 0, /* 0x4d */ 608 {{0, 0}, 609 {2, 3}, 610 {6, 6}, 611 {0, 0} 612 } 613 }, 614 {0, 0, 2, 0, /* 0x4e */ 615 {{1, 3}, 616 {6, 6}, 617 {0, 0}, 618 {0, 0} 
619 } 620 }, 621 {1, 0, 2, 0, /* 0x4f */ 622 {{0, 3}, 623 {6, 6}, 624 {0, 0}, 625 {0, 0} 626 } 627 }, 628 {0, 0, 2, 0, /* 0x50 */ 629 {{4, 4}, 630 {6, 6}, 631 {0, 0}, 632 {0, 0} 633 } 634 }, 635 {1, 0, 3, 0, /* 0x51 */ 636 {{0, 0}, 637 {4, 4}, 638 {6, 6}, 639 {0, 0} 640 } 641 }, 642 {0, 0, 3, 0, /* 0x52 */ 643 {{1, 1}, 644 {4, 4}, 645 {6, 6}, 646 {0, 0} 647 } 648 }, 649 {1, 0, 3, 0, /* 0x53 */ 650 {{0, 1}, 651 {4, 4}, 652 {6, 6}, 653 {0, 0} 654 } 655 }, 656 {0, 0, 3, 0, /* 0x54 */ 657 {{2, 2}, 658 {4, 4}, 659 {6, 6}, 660 {0, 0} 661 } 662 }, 663 {1, 0, 4, 0, /* 0x55 */ 664 {{0, 0}, 665 {2, 2}, 666 {4, 4}, 667 {6, 6} 668 } 669 }, 670 {0, 0, 3, 0, /* 0x56 */ 671 {{1, 2}, 672 {4, 4}, 673 {6, 6}, 674 {0, 0} 675 } 676 }, 677 {1, 0, 3, 0, /* 0x57 */ 678 {{0, 2}, 679 {4, 4}, 680 {6, 6}, 681 {0, 0} 682 } 683 }, 684 {0, 0, 2, 0, /* 0x58 */ 685 {{3, 4}, 686 {6, 6}, 687 {0, 0}, 688 {0, 0} 689 } 690 }, 691 {1, 0, 3, 0, /* 0x59 */ 692 {{0, 0}, 693 {3, 4}, 694 {6, 6}, 695 {0, 0} 696 } 697 }, 698 {0, 0, 3, 0, /* 0x5a */ 699 {{1, 1}, 700 {3, 4}, 701 {6, 6}, 702 {0, 0} 703 } 704 }, 705 {1, 0, 3, 0, /* 0x5b */ 706 {{0, 1}, 707 {3, 4}, 708 {6, 6}, 709 {0, 0} 710 } 711 }, 712 {0, 0, 2, 0, /* 0x5c */ 713 {{2, 4}, 714 {6, 6}, 715 {0, 0}, 716 {0, 0} 717 } 718 }, 719 {1, 0, 3, 0, /* 0x5d */ 720 {{0, 0}, 721 {2, 4}, 722 {6, 6}, 723 {0, 0} 724 } 725 }, 726 {0, 0, 2, 0, /* 0x5e */ 727 {{1, 4}, 728 {6, 6}, 729 {0, 0}, 730 {0, 0} 731 } 732 }, 733 {1, 0, 2, 0, /* 0x5f */ 734 {{0, 4}, 735 {6, 6}, 736 {0, 0}, 737 {0, 0} 738 } 739 }, 740 {0, 0, 1, 0, /* 0x60 */ 741 {{5, 6}, 742 {0, 0}, 743 {0, 0}, 744 {0, 0} 745 } 746 }, 747 {1, 0, 2, 0, /* 0x61 */ 748 {{0, 0}, 749 {5, 6}, 750 {0, 0}, 751 {0, 0} 752 } 753 }, 754 {0, 0, 2, 0, /* 0x62 */ 755 {{1, 1}, 756 {5, 6}, 757 {0, 0}, 758 {0, 0} 759 } 760 }, 761 {1, 0, 2, 0, /* 0x63 */ 762 {{0, 1}, 763 {5, 6}, 764 {0, 0}, 765 {0, 0} 766 } 767 }, 768 {0, 0, 2, 0, /* 0x64 */ 769 {{2, 2}, 770 {5, 6}, 771 {0, 0}, 772 {0, 0} 773 } 774 }, 775 {1, 0, 3, 0, /* 0x65 */ 
776 {{0, 0}, 777 {2, 2}, 778 {5, 6}, 779 {0, 0} 780 } 781 }, 782 {0, 0, 2, 0, /* 0x66 */ 783 {{1, 2}, 784 {5, 6}, 785 {0, 0}, 786 {0, 0} 787 } 788 }, 789 {1, 0, 2, 0, /* 0x67 */ 790 {{0, 2}, 791 {5, 6}, 792 {0, 0}, 793 {0, 0} 794 } 795 }, 796 {0, 0, 2, 0, /* 0x68 */ 797 {{3, 3}, 798 {5, 6}, 799 {0, 0}, 800 {0, 0} 801 } 802 }, 803 {1, 0, 3, 0, /* 0x69 */ 804 {{0, 0}, 805 {3, 3}, 806 {5, 6}, 807 {0, 0} 808 } 809 }, 810 {0, 0, 3, 0, /* 0x6a */ 811 {{1, 1}, 812 {3, 3}, 813 {5, 6}, 814 {0, 0} 815 } 816 }, 817 {1, 0, 3, 0, /* 0x6b */ 818 {{0, 1}, 819 {3, 3}, 820 {5, 6}, 821 {0, 0} 822 } 823 }, 824 {0, 0, 2, 0, /* 0x6c */ 825 {{2, 3}, 826 {5, 6}, 827 {0, 0}, 828 {0, 0} 829 } 830 }, 831 {1, 0, 3, 0, /* 0x6d */ 832 {{0, 0}, 833 {2, 3}, 834 {5, 6}, 835 {0, 0} 836 } 837 }, 838 {0, 0, 2, 0, /* 0x6e */ 839 {{1, 3}, 840 {5, 6}, 841 {0, 0}, 842 {0, 0} 843 } 844 }, 845 {1, 0, 2, 0, /* 0x6f */ 846 {{0, 3}, 847 {5, 6}, 848 {0, 0}, 849 {0, 0} 850 } 851 }, 852 {0, 0, 1, 0, /* 0x70 */ 853 {{4, 6}, 854 {0, 0}, 855 {0, 0}, 856 {0, 0} 857 } 858 }, 859 {1, 0, 2, 0, /* 0x71 */ 860 {{0, 0}, 861 {4, 6}, 862 {0, 0}, 863 {0, 0} 864 } 865 }, 866 {0, 0, 2, 0, /* 0x72 */ 867 {{1, 1}, 868 {4, 6}, 869 {0, 0}, 870 {0, 0} 871 } 872 }, 873 {1, 0, 2, 0, /* 0x73 */ 874 {{0, 1}, 875 {4, 6}, 876 {0, 0}, 877 {0, 0} 878 } 879 }, 880 {0, 0, 2, 0, /* 0x74 */ 881 {{2, 2}, 882 {4, 6}, 883 {0, 0}, 884 {0, 0} 885 } 886 }, 887 {1, 0, 3, 0, /* 0x75 */ 888 {{0, 0}, 889 {2, 2}, 890 {4, 6}, 891 {0, 0} 892 } 893 }, 894 {0, 0, 2, 0, /* 0x76 */ 895 {{1, 2}, 896 {4, 6}, 897 {0, 0}, 898 {0, 0} 899 } 900 }, 901 {1, 0, 2, 0, /* 0x77 */ 902 {{0, 2}, 903 {4, 6}, 904 {0, 0}, 905 {0, 0} 906 } 907 }, 908 {0, 0, 1, 0, /* 0x78 */ 909 {{3, 6}, 910 {0, 0}, 911 {0, 0}, 912 {0, 0} 913 } 914 }, 915 {1, 0, 2, 0, /* 0x79 */ 916 {{0, 0}, 917 {3, 6}, 918 {0, 0}, 919 {0, 0} 920 } 921 }, 922 {0, 0, 2, 0, /* 0x7a */ 923 {{1, 1}, 924 {3, 6}, 925 {0, 0}, 926 {0, 0} 927 } 928 }, 929 {1, 0, 2, 0, /* 0x7b */ 930 {{0, 1}, 931 {3, 6}, 932 {0, 0}, 933 
{0, 0} 934 } 935 }, 936 {0, 0, 1, 0, /* 0x7c */ 937 {{2, 6}, 938 {0, 0}, 939 {0, 0}, 940 {0, 0} 941 } 942 }, 943 {1, 0, 2, 0, /* 0x7d */ 944 {{0, 0}, 945 {2, 6}, 946 {0, 0}, 947 {0, 0} 948 } 949 }, 950 {0, 0, 1, 0, /* 0x7e */ 951 {{1, 6}, 952 {0, 0}, 953 {0, 0}, 954 {0, 0} 955 } 956 }, 957 {1, 0, 1, 0, /* 0x7f */ 958 {{0, 6}, 959 {0, 0}, 960 {0, 0}, 961 {0, 0} 962 } 963 }, 964 {0, 1, 1, 0, /* 0x80 */ 965 {{7, 7}, 966 {0, 0}, 967 {0, 0}, 968 {0, 0} 969 } 970 }, 971 {1, 1, 2, 0, /* 0x81 */ 972 {{0, 0}, 973 {7, 7}, 974 {0, 0}, 975 {0, 0} 976 } 977 }, 978 {0, 1, 2, 0, /* 0x82 */ 979 {{1, 1}, 980 {7, 7}, 981 {0, 0}, 982 {0, 0} 983 } 984 }, 985 {1, 1, 2, 0, /* 0x83 */ 986 {{0, 1}, 987 {7, 7}, 988 {0, 0}, 989 {0, 0} 990 } 991 }, 992 {0, 1, 2, 0, /* 0x84 */ 993 {{2, 2}, 994 {7, 7}, 995 {0, 0}, 996 {0, 0} 997 } 998 }, 999 {1, 1, 3, 0, /* 0x85 */ 1000 {{0, 0}, 1001 {2, 2}, 1002 {7, 7}, 1003 {0, 0} 1004 } 1005 }, 1006 {0, 1, 2, 0, /* 0x86 */ 1007 {{1, 2}, 1008 {7, 7}, 1009 {0, 0}, 1010 {0, 0} 1011 } 1012 }, 1013 {1, 1, 2, 0, /* 0x87 */ 1014 {{0, 2}, 1015 {7, 7}, 1016 {0, 0}, 1017 {0, 0} 1018 } 1019 }, 1020 {0, 1, 2, 0, /* 0x88 */ 1021 {{3, 3}, 1022 {7, 7}, 1023 {0, 0}, 1024 {0, 0} 1025 } 1026 }, 1027 {1, 1, 3, 0, /* 0x89 */ 1028 {{0, 0}, 1029 {3, 3}, 1030 {7, 7}, 1031 {0, 0} 1032 } 1033 }, 1034 {0, 1, 3, 0, /* 0x8a */ 1035 {{1, 1}, 1036 {3, 3}, 1037 {7, 7}, 1038 {0, 0} 1039 } 1040 }, 1041 {1, 1, 3, 0, /* 0x8b */ 1042 {{0, 1}, 1043 {3, 3}, 1044 {7, 7}, 1045 {0, 0} 1046 } 1047 }, 1048 {0, 1, 2, 0, /* 0x8c */ 1049 {{2, 3}, 1050 {7, 7}, 1051 {0, 0}, 1052 {0, 0} 1053 } 1054 }, 1055 {1, 1, 3, 0, /* 0x8d */ 1056 {{0, 0}, 1057 {2, 3}, 1058 {7, 7}, 1059 {0, 0} 1060 } 1061 }, 1062 {0, 1, 2, 0, /* 0x8e */ 1063 {{1, 3}, 1064 {7, 7}, 1065 {0, 0}, 1066 {0, 0} 1067 } 1068 }, 1069 {1, 1, 2, 0, /* 0x8f */ 1070 {{0, 3}, 1071 {7, 7}, 1072 {0, 0}, 1073 {0, 0} 1074 } 1075 }, 1076 {0, 1, 2, 0, /* 0x90 */ 1077 {{4, 4}, 1078 {7, 7}, 1079 {0, 0}, 1080 {0, 0} 1081 } 1082 }, 1083 {1, 1, 3, 0, /* 0x91 
*/ 1084 {{0, 0}, 1085 {4, 4}, 1086 {7, 7}, 1087 {0, 0} 1088 } 1089 }, 1090 {0, 1, 3, 0, /* 0x92 */ 1091 {{1, 1}, 1092 {4, 4}, 1093 {7, 7}, 1094 {0, 0} 1095 } 1096 }, 1097 {1, 1, 3, 0, /* 0x93 */ 1098 {{0, 1}, 1099 {4, 4}, 1100 {7, 7}, 1101 {0, 0} 1102 } 1103 }, 1104 {0, 1, 3, 0, /* 0x94 */ 1105 {{2, 2}, 1106 {4, 4}, 1107 {7, 7}, 1108 {0, 0} 1109 } 1110 }, 1111 {1, 1, 4, 0, /* 0x95 */ 1112 {{0, 0}, 1113 {2, 2}, 1114 {4, 4}, 1115 {7, 7} 1116 } 1117 }, 1118 {0, 1, 3, 0, /* 0x96 */ 1119 {{1, 2}, 1120 {4, 4}, 1121 {7, 7}, 1122 {0, 0} 1123 } 1124 }, 1125 {1, 1, 3, 0, /* 0x97 */ 1126 {{0, 2}, 1127 {4, 4}, 1128 {7, 7}, 1129 {0, 0} 1130 } 1131 }, 1132 {0, 1, 2, 0, /* 0x98 */ 1133 {{3, 4}, 1134 {7, 7}, 1135 {0, 0}, 1136 {0, 0} 1137 } 1138 }, 1139 {1, 1, 3, 0, /* 0x99 */ 1140 {{0, 0}, 1141 {3, 4}, 1142 {7, 7}, 1143 {0, 0} 1144 } 1145 }, 1146 {0, 1, 3, 0, /* 0x9a */ 1147 {{1, 1}, 1148 {3, 4}, 1149 {7, 7}, 1150 {0, 0} 1151 } 1152 }, 1153 {1, 1, 3, 0, /* 0x9b */ 1154 {{0, 1}, 1155 {3, 4}, 1156 {7, 7}, 1157 {0, 0} 1158 } 1159 }, 1160 {0, 1, 2, 0, /* 0x9c */ 1161 {{2, 4}, 1162 {7, 7}, 1163 {0, 0}, 1164 {0, 0} 1165 } 1166 }, 1167 {1, 1, 3, 0, /* 0x9d */ 1168 {{0, 0}, 1169 {2, 4}, 1170 {7, 7}, 1171 {0, 0} 1172 } 1173 }, 1174 {0, 1, 2, 0, /* 0x9e */ 1175 {{1, 4}, 1176 {7, 7}, 1177 {0, 0}, 1178 {0, 0} 1179 } 1180 }, 1181 {1, 1, 2, 0, /* 0x9f */ 1182 {{0, 4}, 1183 {7, 7}, 1184 {0, 0}, 1185 {0, 0} 1186 } 1187 }, 1188 {0, 1, 2, 0, /* 0xa0 */ 1189 {{5, 5}, 1190 {7, 7}, 1191 {0, 0}, 1192 {0, 0} 1193 } 1194 }, 1195 {1, 1, 3, 0, /* 0xa1 */ 1196 {{0, 0}, 1197 {5, 5}, 1198 {7, 7}, 1199 {0, 0} 1200 } 1201 }, 1202 {0, 1, 3, 0, /* 0xa2 */ 1203 {{1, 1}, 1204 {5, 5}, 1205 {7, 7}, 1206 {0, 0} 1207 } 1208 }, 1209 {1, 1, 3, 0, /* 0xa3 */ 1210 {{0, 1}, 1211 {5, 5}, 1212 {7, 7}, 1213 {0, 0} 1214 } 1215 }, 1216 {0, 1, 3, 0, /* 0xa4 */ 1217 {{2, 2}, 1218 {5, 5}, 1219 {7, 7}, 1220 {0, 0} 1221 } 1222 }, 1223 {1, 1, 4, 0, /* 0xa5 */ 1224 {{0, 0}, 1225 {2, 2}, 1226 {5, 5}, 1227 {7, 7} 1228 } 1229 }, 1230 {0, 
1, 3, 0, /* 0xa6 */ 1231 {{1, 2}, 1232 {5, 5}, 1233 {7, 7}, 1234 {0, 0} 1235 } 1236 }, 1237 {1, 1, 3, 0, /* 0xa7 */ 1238 {{0, 2}, 1239 {5, 5}, 1240 {7, 7}, 1241 {0, 0} 1242 } 1243 }, 1244 {0, 1, 3, 0, /* 0xa8 */ 1245 {{3, 3}, 1246 {5, 5}, 1247 {7, 7}, 1248 {0, 0} 1249 } 1250 }, 1251 {1, 1, 4, 0, /* 0xa9 */ 1252 {{0, 0}, 1253 {3, 3}, 1254 {5, 5}, 1255 {7, 7} 1256 } 1257 }, 1258 {0, 1, 4, 0, /* 0xaa */ 1259 {{1, 1}, 1260 {3, 3}, 1261 {5, 5}, 1262 {7, 7} 1263 } 1264 }, 1265 {1, 1, 4, 0, /* 0xab */ 1266 {{0, 1}, 1267 {3, 3}, 1268 {5, 5}, 1269 {7, 7} 1270 } 1271 }, 1272 {0, 1, 3, 0, /* 0xac */ 1273 {{2, 3}, 1274 {5, 5}, 1275 {7, 7}, 1276 {0, 0} 1277 } 1278 }, 1279 {1, 1, 4, 0, /* 0xad */ 1280 {{0, 0}, 1281 {2, 3}, 1282 {5, 5}, 1283 {7, 7} 1284 } 1285 }, 1286 {0, 1, 3, 0, /* 0xae */ 1287 {{1, 3}, 1288 {5, 5}, 1289 {7, 7}, 1290 {0, 0} 1291 } 1292 }, 1293 {1, 1, 3, 0, /* 0xaf */ 1294 {{0, 3}, 1295 {5, 5}, 1296 {7, 7}, 1297 {0, 0} 1298 } 1299 }, 1300 {0, 1, 2, 0, /* 0xb0 */ 1301 {{4, 5}, 1302 {7, 7}, 1303 {0, 0}, 1304 {0, 0} 1305 } 1306 }, 1307 {1, 1, 3, 0, /* 0xb1 */ 1308 {{0, 0}, 1309 {4, 5}, 1310 {7, 7}, 1311 {0, 0} 1312 } 1313 }, 1314 {0, 1, 3, 0, /* 0xb2 */ 1315 {{1, 1}, 1316 {4, 5}, 1317 {7, 7}, 1318 {0, 0} 1319 } 1320 }, 1321 {1, 1, 3, 0, /* 0xb3 */ 1322 {{0, 1}, 1323 {4, 5}, 1324 {7, 7}, 1325 {0, 0} 1326 } 1327 }, 1328 {0, 1, 3, 0, /* 0xb4 */ 1329 {{2, 2}, 1330 {4, 5}, 1331 {7, 7}, 1332 {0, 0} 1333 } 1334 }, 1335 {1, 1, 4, 0, /* 0xb5 */ 1336 {{0, 0}, 1337 {2, 2}, 1338 {4, 5}, 1339 {7, 7} 1340 } 1341 }, 1342 {0, 1, 3, 0, /* 0xb6 */ 1343 {{1, 2}, 1344 {4, 5}, 1345 {7, 7}, 1346 {0, 0} 1347 } 1348 }, 1349 {1, 1, 3, 0, /* 0xb7 */ 1350 {{0, 2}, 1351 {4, 5}, 1352 {7, 7}, 1353 {0, 0} 1354 } 1355 }, 1356 {0, 1, 2, 0, /* 0xb8 */ 1357 {{3, 5}, 1358 {7, 7}, 1359 {0, 0}, 1360 {0, 0} 1361 } 1362 }, 1363 {1, 1, 3, 0, /* 0xb9 */ 1364 {{0, 0}, 1365 {3, 5}, 1366 {7, 7}, 1367 {0, 0} 1368 } 1369 }, 1370 {0, 1, 3, 0, /* 0xba */ 1371 {{1, 1}, 1372 {3, 5}, 1373 {7, 7}, 1374 {0, 0} 1375 } 
1376 }, 1377 {1, 1, 3, 0, /* 0xbb */ 1378 {{0, 1}, 1379 {3, 5}, 1380 {7, 7}, 1381 {0, 0} 1382 } 1383 }, 1384 {0, 1, 2, 0, /* 0xbc */ 1385 {{2, 5}, 1386 {7, 7}, 1387 {0, 0}, 1388 {0, 0} 1389 } 1390 }, 1391 {1, 1, 3, 0, /* 0xbd */ 1392 {{0, 0}, 1393 {2, 5}, 1394 {7, 7}, 1395 {0, 0} 1396 } 1397 }, 1398 {0, 1, 2, 0, /* 0xbe */ 1399 {{1, 5}, 1400 {7, 7}, 1401 {0, 0}, 1402 {0, 0} 1403 } 1404 }, 1405 {1, 1, 2, 0, /* 0xbf */ 1406 {{0, 5}, 1407 {7, 7}, 1408 {0, 0}, 1409 {0, 0} 1410 } 1411 }, 1412 {0, 1, 1, 0, /* 0xc0 */ 1413 {{6, 7}, 1414 {0, 0}, 1415 {0, 0}, 1416 {0, 0} 1417 } 1418 }, 1419 {1, 1, 2, 0, /* 0xc1 */ 1420 {{0, 0}, 1421 {6, 7}, 1422 {0, 0}, 1423 {0, 0} 1424 } 1425 }, 1426 {0, 1, 2, 0, /* 0xc2 */ 1427 {{1, 1}, 1428 {6, 7}, 1429 {0, 0}, 1430 {0, 0} 1431 } 1432 }, 1433 {1, 1, 2, 0, /* 0xc3 */ 1434 {{0, 1}, 1435 {6, 7}, 1436 {0, 0}, 1437 {0, 0} 1438 } 1439 }, 1440 {0, 1, 2, 0, /* 0xc4 */ 1441 {{2, 2}, 1442 {6, 7}, 1443 {0, 0}, 1444 {0, 0} 1445 } 1446 }, 1447 {1, 1, 3, 0, /* 0xc5 */ 1448 {{0, 0}, 1449 {2, 2}, 1450 {6, 7}, 1451 {0, 0} 1452 } 1453 }, 1454 {0, 1, 2, 0, /* 0xc6 */ 1455 {{1, 2}, 1456 {6, 7}, 1457 {0, 0}, 1458 {0, 0} 1459 } 1460 }, 1461 {1, 1, 2, 0, /* 0xc7 */ 1462 {{0, 2}, 1463 {6, 7}, 1464 {0, 0}, 1465 {0, 0} 1466 } 1467 }, 1468 {0, 1, 2, 0, /* 0xc8 */ 1469 {{3, 3}, 1470 {6, 7}, 1471 {0, 0}, 1472 {0, 0} 1473 } 1474 }, 1475 {1, 1, 3, 0, /* 0xc9 */ 1476 {{0, 0}, 1477 {3, 3}, 1478 {6, 7}, 1479 {0, 0} 1480 } 1481 }, 1482 {0, 1, 3, 0, /* 0xca */ 1483 {{1, 1}, 1484 {3, 3}, 1485 {6, 7}, 1486 {0, 0} 1487 } 1488 }, 1489 {1, 1, 3, 0, /* 0xcb */ 1490 {{0, 1}, 1491 {3, 3}, 1492 {6, 7}, 1493 {0, 0} 1494 } 1495 }, 1496 {0, 1, 2, 0, /* 0xcc */ 1497 {{2, 3}, 1498 {6, 7}, 1499 {0, 0}, 1500 {0, 0} 1501 } 1502 }, 1503 {1, 1, 3, 0, /* 0xcd */ 1504 {{0, 0}, 1505 {2, 3}, 1506 {6, 7}, 1507 {0, 0} 1508 } 1509 }, 1510 {0, 1, 2, 0, /* 0xce */ 1511 {{1, 3}, 1512 {6, 7}, 1513 {0, 0}, 1514 {0, 0} 1515 } 1516 }, 1517 {1, 1, 2, 0, /* 0xcf */ 1518 {{0, 3}, 1519 {6, 7}, 1520 {0, 0}, 
1521 {0, 0} 1522 } 1523 }, 1524 {0, 1, 2, 0, /* 0xd0 */ 1525 {{4, 4}, 1526 {6, 7}, 1527 {0, 0}, 1528 {0, 0} 1529 } 1530 }, 1531 {1, 1, 3, 0, /* 0xd1 */ 1532 {{0, 0}, 1533 {4, 4}, 1534 {6, 7}, 1535 {0, 0} 1536 } 1537 }, 1538 {0, 1, 3, 0, /* 0xd2 */ 1539 {{1, 1}, 1540 {4, 4}, 1541 {6, 7}, 1542 {0, 0} 1543 } 1544 }, 1545 {1, 1, 3, 0, /* 0xd3 */ 1546 {{0, 1}, 1547 {4, 4}, 1548 {6, 7}, 1549 {0, 0} 1550 } 1551 }, 1552 {0, 1, 3, 0, /* 0xd4 */ 1553 {{2, 2}, 1554 {4, 4}, 1555 {6, 7}, 1556 {0, 0} 1557 } 1558 }, 1559 {1, 1, 4, 0, /* 0xd5 */ 1560 {{0, 0}, 1561 {2, 2}, 1562 {4, 4}, 1563 {6, 7} 1564 } 1565 }, 1566 {0, 1, 3, 0, /* 0xd6 */ 1567 {{1, 2}, 1568 {4, 4}, 1569 {6, 7}, 1570 {0, 0} 1571 } 1572 }, 1573 {1, 1, 3, 0, /* 0xd7 */ 1574 {{0, 2}, 1575 {4, 4}, 1576 {6, 7}, 1577 {0, 0} 1578 } 1579 }, 1580 {0, 1, 2, 0, /* 0xd8 */ 1581 {{3, 4}, 1582 {6, 7}, 1583 {0, 0}, 1584 {0, 0} 1585 } 1586 }, 1587 {1, 1, 3, 0, /* 0xd9 */ 1588 {{0, 0}, 1589 {3, 4}, 1590 {6, 7}, 1591 {0, 0} 1592 } 1593 }, 1594 {0, 1, 3, 0, /* 0xda */ 1595 {{1, 1}, 1596 {3, 4}, 1597 {6, 7}, 1598 {0, 0} 1599 } 1600 }, 1601 {1, 1, 3, 0, /* 0xdb */ 1602 {{0, 1}, 1603 {3, 4}, 1604 {6, 7}, 1605 {0, 0} 1606 } 1607 }, 1608 {0, 1, 2, 0, /* 0xdc */ 1609 {{2, 4}, 1610 {6, 7}, 1611 {0, 0}, 1612 {0, 0} 1613 } 1614 }, 1615 {1, 1, 3, 0, /* 0xdd */ 1616 {{0, 0}, 1617 {2, 4}, 1618 {6, 7}, 1619 {0, 0} 1620 } 1621 }, 1622 {0, 1, 2, 0, /* 0xde */ 1623 {{1, 4}, 1624 {6, 7}, 1625 {0, 0}, 1626 {0, 0} 1627 } 1628 }, 1629 {1, 1, 2, 0, /* 0xdf */ 1630 {{0, 4}, 1631 {6, 7}, 1632 {0, 0}, 1633 {0, 0} 1634 } 1635 }, 1636 {0, 1, 1, 0, /* 0xe0 */ 1637 {{5, 7}, 1638 {0, 0}, 1639 {0, 0}, 1640 {0, 0} 1641 } 1642 }, 1643 {1, 1, 2, 0, /* 0xe1 */ 1644 {{0, 0}, 1645 {5, 7}, 1646 {0, 0}, 1647 {0, 0} 1648 } 1649 }, 1650 {0, 1, 2, 0, /* 0xe2 */ 1651 {{1, 1}, 1652 {5, 7}, 1653 {0, 0}, 1654 {0, 0} 1655 } 1656 }, 1657 {1, 1, 2, 0, /* 0xe3 */ 1658 {{0, 1}, 1659 {5, 7}, 1660 {0, 0}, 1661 {0, 0} 1662 } 1663 }, 1664 {0, 1, 2, 0, /* 0xe4 */ 1665 {{2, 2}, 1666 {5, 
7}, 1667 {0, 0}, 1668 {0, 0} 1669 } 1670 }, 1671 {1, 1, 3, 0, /* 0xe5 */ 1672 {{0, 0}, 1673 {2, 2}, 1674 {5, 7}, 1675 {0, 0} 1676 } 1677 }, 1678 {0, 1, 2, 0, /* 0xe6 */ 1679 {{1, 2}, 1680 {5, 7}, 1681 {0, 0}, 1682 {0, 0} 1683 } 1684 }, 1685 {1, 1, 2, 0, /* 0xe7 */ 1686 {{0, 2}, 1687 {5, 7}, 1688 {0, 0}, 1689 {0, 0} 1690 } 1691 }, 1692 {0, 1, 2, 0, /* 0xe8 */ 1693 {{3, 3}, 1694 {5, 7}, 1695 {0, 0}, 1696 {0, 0} 1697 } 1698 }, 1699 {1, 1, 3, 0, /* 0xe9 */ 1700 {{0, 0}, 1701 {3, 3}, 1702 {5, 7}, 1703 {0, 0} 1704 } 1705 }, 1706 {0, 1, 3, 0, /* 0xea */ 1707 {{1, 1}, 1708 {3, 3}, 1709 {5, 7}, 1710 {0, 0} 1711 } 1712 }, 1713 {1, 1, 3, 0, /* 0xeb */ 1714 {{0, 1}, 1715 {3, 3}, 1716 {5, 7}, 1717 {0, 0} 1718 } 1719 }, 1720 {0, 1, 2, 0, /* 0xec */ 1721 {{2, 3}, 1722 {5, 7}, 1723 {0, 0}, 1724 {0, 0} 1725 } 1726 }, 1727 {1, 1, 3, 0, /* 0xed */ 1728 {{0, 0}, 1729 {2, 3}, 1730 {5, 7}, 1731 {0, 0} 1732 } 1733 }, 1734 {0, 1, 2, 0, /* 0xee */ 1735 {{1, 3}, 1736 {5, 7}, 1737 {0, 0}, 1738 {0, 0} 1739 } 1740 }, 1741 {1, 1, 2, 0, /* 0xef */ 1742 {{0, 3}, 1743 {5, 7}, 1744 {0, 0}, 1745 {0, 0} 1746 } 1747 }, 1748 {0, 1, 1, 0, /* 0xf0 */ 1749 {{4, 7}, 1750 {0, 0}, 1751 {0, 0}, 1752 {0, 0} 1753 } 1754 }, 1755 {1, 1, 2, 0, /* 0xf1 */ 1756 {{0, 0}, 1757 {4, 7}, 1758 {0, 0}, 1759 {0, 0} 1760 } 1761 }, 1762 {0, 1, 2, 0, /* 0xf2 */ 1763 {{1, 1}, 1764 {4, 7}, 1765 {0, 0}, 1766 {0, 0} 1767 } 1768 }, 1769 {1, 1, 2, 0, /* 0xf3 */ 1770 {{0, 1}, 1771 {4, 7}, 1772 {0, 0}, 1773 {0, 0} 1774 } 1775 }, 1776 {0, 1, 2, 0, /* 0xf4 */ 1777 {{2, 2}, 1778 {4, 7}, 1779 {0, 0}, 1780 {0, 0} 1781 } 1782 }, 1783 {1, 1, 3, 0, /* 0xf5 */ 1784 {{0, 0}, 1785 {2, 2}, 1786 {4, 7}, 1787 {0, 0} 1788 } 1789 }, 1790 {0, 1, 2, 0, /* 0xf6 */ 1791 {{1, 2}, 1792 {4, 7}, 1793 {0, 0}, 1794 {0, 0} 1795 } 1796 }, 1797 {1, 1, 2, 0, /* 0xf7 */ 1798 {{0, 2}, 1799 {4, 7}, 1800 {0, 0}, 1801 {0, 0} 1802 } 1803 }, 1804 {0, 1, 1, 0, /* 0xf8 */ 1805 {{3, 7}, 1806 {0, 0}, 1807 {0, 0}, 1808 {0, 0} 1809 } 1810 }, 1811 {1, 1, 2, 0, /* 0xf9 */ 1812 
{{0, 0}, 1813 {3, 7}, 1814 {0, 0}, 1815 {0, 0} 1816 } 1817 }, 1818 {0, 1, 2, 0, /* 0xfa */ 1819 {{1, 1}, 1820 {3, 7}, 1821 {0, 0}, 1822 {0, 0} 1823 } 1824 }, 1825 {1, 1, 2, 0, /* 0xfb */ 1826 {{0, 1}, 1827 {3, 7}, 1828 {0, 0}, 1829 {0, 0} 1830 } 1831 }, 1832 {0, 1, 1, 0, /* 0xfc */ 1833 {{2, 7}, 1834 {0, 0}, 1835 {0, 0}, 1836 {0, 0} 1837 } 1838 }, 1839 {1, 1, 2, 0, /* 0xfd */ 1840 {{0, 0}, 1841 {2, 7}, 1842 {0, 0}, 1843 {0, 0} 1844 } 1845 }, 1846 {0, 1, 1, 0, /* 0xfe */ 1847 {{1, 7}, 1848 {0, 0}, 1849 {0, 0}, 1850 {0, 0} 1851 } 1852 }, 1853 {1, 1, 1, 0, /* 0xff */ 1854 {{0, 7}, 1855 {0, 0}, 1856 {0, 0}, 1857 {0, 0} 1858 } 1859 } 1860 }; 1861 1862 1863 1864 1865 extern int sctp_peer_chunk_oh; 1866 1867 static int 1868 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, int cpsize) 1869 { 1870 struct cmsghdr cmh; 1871 int tlen, at; 1872 1873 tlen = SCTP_BUF_LEN(control); 1874 at = 0; 1875 /* 1876 * Independent of how many mbufs, find the c_type inside the control 1877 * structure and copy out the data. 1878 */ 1879 while (at < tlen) { 1880 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) { 1881 /* not enough room for one more we are done. */ 1882 return (0); 1883 } 1884 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh); 1885 if ((cmh.cmsg_len + at) > tlen) { 1886 /* 1887 * this is real messed up since there is not enough 1888 * data here to cover the cmsg header. We are done. 
1889 */ 1890 return (0); 1891 } 1892 if ((cmh.cmsg_level == IPPROTO_SCTP) && 1893 (c_type == cmh.cmsg_type)) { 1894 /* found the one we want, copy it out */ 1895 at += CMSG_ALIGN(sizeof(struct cmsghdr)); 1896 if ((int)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < cpsize) { 1897 /* 1898 * space of cmsg_len after header not big 1899 * enough 1900 */ 1901 return (0); 1902 } 1903 m_copydata(control, at, cpsize, data); 1904 return (1); 1905 } else { 1906 at += CMSG_ALIGN(cmh.cmsg_len); 1907 if (cmh.cmsg_len == 0) { 1908 break; 1909 } 1910 } 1911 } 1912 /* not found */ 1913 return (0); 1914 } 1915 1916 1917 extern int sctp_mbuf_threshold_count; 1918 1919 1920 __inline struct mbuf * 1921 sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header, 1922 int how, int allonebuf, int type) 1923 { 1924 struct mbuf *m = NULL; 1925 int aloc_size; 1926 int index = 0; 1927 int mbuf_threshold; 1928 1929 if (want_header) { 1930 MGETHDR(m, how, type); 1931 } else { 1932 MGET(m, how, type); 1933 } 1934 if (m == NULL) { 1935 return (NULL); 1936 } 1937 if (allonebuf == 0) 1938 mbuf_threshold = sctp_mbuf_threshold_count; 1939 else 1940 mbuf_threshold = 1; 1941 1942 1943 if (space_needed > (((mbuf_threshold - 1) * MLEN) + MHLEN)) { 1944 try_again: 1945 index = 4; 1946 if (space_needed <= MCLBYTES) { 1947 aloc_size = MCLBYTES; 1948 } else if (space_needed <= MJUMPAGESIZE) { 1949 aloc_size = MJUMPAGESIZE; 1950 index = 5; 1951 } else if (space_needed <= MJUM9BYTES) { 1952 aloc_size = MJUM9BYTES; 1953 index = 6; 1954 } else { 1955 aloc_size = MJUM16BYTES; 1956 index = 7; 1957 } 1958 m_cljget(m, how, aloc_size); 1959 if (m == NULL) { 1960 return (NULL); 1961 } 1962 if (SCTP_BUF_IS_EXTENDED(m) == 0) { 1963 if ((aloc_size != MCLBYTES) && 1964 (allonebuf == 0)) { 1965 aloc_size -= 10; 1966 goto try_again; 1967 } 1968 sctp_m_freem(m); 1969 return (NULL); 1970 } 1971 } 1972 SCTP_BUF_LEN(m) = 0; 1973 SCTP_BUF_NEXT(m) = SCTP_BUF_NEXT_PKT(m) = NULL; 1974 #ifdef SCTP_MBUF_LOGGING 1975 if 
	(SCTP_BUF_IS_EXTENDED(m)) {
		sctp_log_mb(m, SCTP_MBUF_IALLOC);
	}
#endif
	return (m);
}


/*
 * Build a STATE-COOKIE parameter as an mbuf chain:
 *   [paramhdr + state cookie 'stc_in'] -> [copy of the peer's INIT]
 *   -> [copy of our INIT-ACK] -> [HMAC signature mbuf]
 * The signature is computed with SCTP_HMAC over the chain (skipping the
 * leading paramhdr) using the endpoint's current secret key, and the
 * paramhdr length is filled in last with the accumulated cookie size.
 * Returns the head of the chain, or NULL on any allocation/copy failure
 * (anything already built is freed).
 */
static struct mbuf *
sctp_add_cookie(struct sctp_inpcb *inp, struct mbuf *init, int init_offset,
    struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in)
{
	struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
	struct sctp_state_cookie *stc;
	struct sctp_paramhdr *ph;
	uint8_t *signature;
	int sig_offset;
	uint16_t cookie_sz;

	mret = NULL;


	mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
	    sizeof(struct sctp_paramhdr)), 0, M_DONTWAIT, 1, MT_DATA);
	if (mret == NULL) {
		return (NULL);
	}
	copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_DONTWAIT);
	if (copy_init == NULL) {
		sctp_m_freem(mret);
		return (NULL);
	}
	copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
	    M_DONTWAIT);
	if (copy_initack == NULL) {
		sctp_m_freem(mret);
		sctp_m_freem(copy_init);
		return (NULL);
	}
	/* easy side we just drop it on the end */
	ph = mtod(mret, struct sctp_paramhdr *);
	SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
	    sizeof(struct sctp_paramhdr);
	stc = (struct sctp_state_cookie *)((caddr_t)ph +
	    sizeof(struct sctp_paramhdr));
	ph->param_type = htons(SCTP_STATE_COOKIE);
	ph->param_length = 0;	/* fill in at the end */
	/* Fill in the stc cookie data */
	*stc = *stc_in;

	/* tack the INIT and then the INIT-ACK onto the chain */
	cookie_sz = 0;
	m_at = mret;
	/* walk to the tail of the cookie mbuf(s), summing lengths as we go */
	for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			SCTP_BUF_NEXT(m_at) = copy_init;
			break;
		}
	}

	/* same for the INIT copy; append the INIT-ACK copy at its tail */
	for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			SCTP_BUF_NEXT(m_at) = copy_initack;
			break;
		}
	}

	/*
	 * Sum the INIT-ACK copy and leave m_at at the last mbuf so the
	 * signature mbuf can be hung off it below.
	 */
	for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			break;
		}
	}
	sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_DONTWAIT, 1, MT_DATA);
	if (sig == NULL) {
		/* no space, so free the entire chain */
		sctp_m_freem(mret);
		return (NULL);
	}
	SCTP_BUF_LEN(sig) = 0;
	SCTP_BUF_NEXT(m_at) = sig;
	sig_offset = 0;
	signature = (uint8_t *) (mtod(sig, caddr_t)+sig_offset);
	/* Time to sign the cookie */
	sctp_hmac_m(SCTP_HMAC,
	    (uint8_t *) inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
	    SCTP_SECRET_SIZE, mret, sizeof(struct sctp_paramhdr),
	    (uint8_t *) signature);
	SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
	cookie_sz += SCTP_SIGNATURE_SIZE;

	ph->param_length = htons(cookie_sz);
	return (mret);
}


/*
 * Select the ECT codepoint for an outgoing chunk.  When ECN nonce is in
 * use (enabled locally and supported by the peer) the choice alternates
 * pseudo-randomly between ECT0 and ECT1, consuming bits from the
 * association's hb_random_values pool; otherwise ECT0 is returned.
 */
static __inline uint8_t
sctp_get_ect(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	uint8_t this_random;

	/* Huh?
*/ 2080 if (sctp_ecn_enable == 0) 2081 return (0); 2082 2083 if (sctp_ecn_nonce == 0) 2084 /* no nonce, always return ECT0 */ 2085 return (SCTP_ECT0_BIT); 2086 2087 if (stcb->asoc.peer_supports_ecn_nonce == 0) { 2088 /* Peer does NOT support it, so we send a ECT0 only */ 2089 return (SCTP_ECT0_BIT); 2090 } 2091 if (chk == NULL) 2092 return (SCTP_ECT0_BIT); 2093 2094 if (((stcb->asoc.hb_random_idx == 3) && 2095 (stcb->asoc.hb_ect_randombit > 7)) || 2096 (stcb->asoc.hb_random_idx > 3)) { 2097 uint32_t rndval; 2098 2099 rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 2100 memcpy(stcb->asoc.hb_random_values, &rndval, 2101 sizeof(stcb->asoc.hb_random_values)); 2102 this_random = stcb->asoc.hb_random_values[0]; 2103 stcb->asoc.hb_random_idx = 0; 2104 stcb->asoc.hb_ect_randombit = 0; 2105 } else { 2106 if (stcb->asoc.hb_ect_randombit > 7) { 2107 stcb->asoc.hb_ect_randombit = 0; 2108 stcb->asoc.hb_random_idx++; 2109 } 2110 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx]; 2111 } 2112 if ((this_random >> stcb->asoc.hb_ect_randombit) & 0x01) { 2113 if (chk != NULL) 2114 /* ECN Nonce stuff */ 2115 chk->rec.data.ect_nonce = SCTP_ECT1_BIT; 2116 stcb->asoc.hb_ect_randombit++; 2117 return (SCTP_ECT1_BIT); 2118 } else { 2119 stcb->asoc.hb_ect_randombit++; 2120 return (SCTP_ECT0_BIT); 2121 } 2122 } 2123 2124 extern int sctp_no_csum_on_loopback; 2125 2126 static int 2127 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, 2128 struct sctp_tcb *stcb, /* may be NULL */ 2129 struct sctp_nets *net, 2130 struct sockaddr *to, 2131 struct mbuf *m, 2132 uint32_t auth_offset, 2133 struct sctp_auth_chunk *auth, 2134 int nofragment_flag, 2135 int ecn_ok, 2136 struct sctp_tmit_chunk *chk, 2137 int out_of_asoc_ok) 2138 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */ 2139 { 2140 /* 2141 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet 2142 * header WITH a SCTPHDR but no IP header, endpoint inp and sa 2143 * structure. 
- fill in the HMAC digest of any AUTH chunk in the 2144 * packet - calculate SCTP checksum and fill in - prepend a IP 2145 * address header - if boundall use INADDR_ANY - if boundspecific do 2146 * source address selection - set fragmentation option for ipV4 - On 2147 * return from IP output, check/adjust mtu size - of output 2148 * interface and smallest_mtu size as well. 2149 */ 2150 /* Will need ifdefs around this */ 2151 struct mbuf *o_pak; 2152 2153 struct sctphdr *sctphdr; 2154 int packet_length; 2155 int o_flgs; 2156 uint32_t csum; 2157 int ret; 2158 unsigned int have_mtu; 2159 struct route *ro; 2160 2161 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) { 2162 sctp_m_freem(m); 2163 return (EFAULT); 2164 } 2165 /* fill in the HMAC digest for any AUTH chunk in the packet */ 2166 if ((auth != NULL) && (stcb != NULL)) { 2167 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb); 2168 } 2169 /* Calculate the csum and fill in the length of the packet */ 2170 sctphdr = mtod(m, struct sctphdr *); 2171 have_mtu = 0; 2172 if (sctp_no_csum_on_loopback && 2173 (stcb) && 2174 (stcb->asoc.loopback_scope)) { 2175 sctphdr->checksum = 0; 2176 /* 2177 * This can probably now be taken out since my audit shows 2178 * no more bad pktlen's coming in. But we will wait a while 2179 * yet. 
2180 */ 2181 packet_length = sctp_calculate_len(m); 2182 } else { 2183 sctphdr->checksum = 0; 2184 csum = sctp_calculate_sum(m, &packet_length, 0); 2185 sctphdr->checksum = csum; 2186 } 2187 2188 if (to->sa_family == AF_INET) { 2189 struct ip *ip; 2190 struct route iproute; 2191 uint8_t tos_value; 2192 2193 o_pak = SCTP_GET_HEADER_FOR_OUTPUT(sizeof(struct ip)); 2194 if (o_pak == NULL) { 2195 /* failed to prepend data, give up */ 2196 sctp_m_freem(m); 2197 return (ENOMEM); 2198 } 2199 SCTP_BUF_LEN(SCTP_HEADER_TO_CHAIN(o_pak)) = sizeof(struct ip); 2200 packet_length += sizeof(struct ip); 2201 SCTP_ATTACH_CHAIN(o_pak, m, packet_length); 2202 ip = mtod(SCTP_HEADER_TO_CHAIN(o_pak), struct ip *); 2203 ip->ip_v = IPVERSION; 2204 ip->ip_hl = (sizeof(struct ip) >> 2); 2205 if (net) { 2206 tos_value = net->tos_flowlabel & 0x000000ff; 2207 } else { 2208 tos_value = inp->ip_inp.inp.inp_ip_tos; 2209 } 2210 if (nofragment_flag) { 2211 #if defined(WITH_CONVERT_IP_OFF) || defined(__FreeBSD__) || defined(__APPLE__) 2212 ip->ip_off = IP_DF; 2213 #else 2214 ip->ip_off = htons(IP_DF); 2215 #endif 2216 } else 2217 ip->ip_off = 0; 2218 2219 2220 /* FreeBSD has a function for ip_id's */ 2221 ip->ip_id = ip_newid(); 2222 2223 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl; 2224 ip->ip_len = SCTP_HEADER_LEN(o_pak); 2225 if (stcb) { 2226 if ((stcb->asoc.ecn_allowed) && ecn_ok) { 2227 /* Enable ECN */ 2228 ip->ip_tos = ((u_char)(tos_value & 0xfc) | sctp_get_ect(stcb, chk)); 2229 } else { 2230 /* No ECN */ 2231 ip->ip_tos = (u_char)(tos_value & 0xfc); 2232 } 2233 } else { 2234 /* no association at all */ 2235 ip->ip_tos = (tos_value & 0xfc); 2236 } 2237 ip->ip_p = IPPROTO_SCTP; 2238 ip->ip_sum = 0; 2239 if (net == NULL) { 2240 ro = &iproute; 2241 memset(&iproute, 0, sizeof(iproute)); 2242 memcpy(&ro->ro_dst, to, to->sa_len); 2243 } else { 2244 ro = (struct route *)&net->ro; 2245 } 2246 /* Now the address selection part */ 2247 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr; 2248 
2249 /* call the routine to select the src address */ 2250 if (net) { 2251 if (net->src_addr_selected == 0) { 2252 /* Cache the source address */ 2253 ((struct sockaddr_in *)&net->ro._s_addr)->sin_addr = sctp_ipv4_source_address_selection(inp, 2254 stcb, 2255 ro, net, out_of_asoc_ok); 2256 if (ro->ro_rt) 2257 net->src_addr_selected = 1; 2258 } 2259 ip->ip_src = ((struct sockaddr_in *)&net->ro._s_addr)->sin_addr; 2260 } else { 2261 ip->ip_src = sctp_ipv4_source_address_selection(inp, 2262 stcb, ro, net, out_of_asoc_ok); 2263 } 2264 2265 /* 2266 * If source address selection fails and we find no route 2267 * then the ip_output should fail as well with a 2268 * NO_ROUTE_TO_HOST type error. We probably should catch 2269 * that somewhere and abort the association right away 2270 * (assuming this is an INIT being sent). 2271 */ 2272 if ((ro->ro_rt == NULL)) { 2273 /* 2274 * src addr selection failed to find a route (or 2275 * valid source addr), so we can't get there from 2276 * here! 2277 */ 2278 #ifdef SCTP_DEBUG 2279 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) { 2280 printf("low_level_output: dropped v4 packet- no valid source addr\n"); 2281 printf("Destination was %x\n", (uint32_t) (ntohl(ip->ip_dst.s_addr))); 2282 } 2283 #endif /* SCTP_DEBUG */ 2284 if (net) { 2285 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) 2286 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, 2287 stcb, 2288 SCTP_FAILED_THRESHOLD, 2289 (void *)net); 2290 net->dest_state &= ~SCTP_ADDR_REACHABLE; 2291 net->dest_state |= SCTP_ADDR_NOT_REACHABLE; 2292 if (stcb) { 2293 if (net == stcb->asoc.primary_destination) { 2294 /* need a new primary */ 2295 struct sctp_nets *alt; 2296 2297 alt = sctp_find_alternate_net(stcb, net, 0); 2298 if (alt != net) { 2299 if (sctp_set_primary_addr(stcb, 2300 (struct sockaddr *)NULL, 2301 alt) == 0) { 2302 net->dest_state |= SCTP_ADDR_WAS_PRIMARY; 2303 net->src_addr_selected = 0; 2304 } 2305 } 2306 } 2307 } 2308 } 2309 sctp_m_freem(o_pak); 2310 return (EHOSTUNREACH); 2311 } 
else { 2312 have_mtu = ro->ro_rt->rt_ifp->if_mtu; 2313 } 2314 if (inp->sctp_socket) { 2315 o_flgs = (IP_RAWOUTPUT | (inp->sctp_socket->so_options & (SO_DONTROUTE | SO_BROADCAST))); 2316 } else { 2317 o_flgs = IP_RAWOUTPUT; 2318 } 2319 #ifdef SCTP_DEBUG 2320 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) { 2321 printf("Calling ipv4 output routine from low level src addr:%x\n", 2322 (uint32_t) (ntohl(ip->ip_src.s_addr))); 2323 printf("Destination is %x\n", (uint32_t) (ntohl(ip->ip_dst.s_addr))); 2324 printf("RTP route is %p through\n", ro->ro_rt); 2325 } 2326 #endif 2327 2328 if ((have_mtu) && (net) && (have_mtu > net->mtu)) { 2329 ro->ro_rt->rt_ifp->if_mtu = net->mtu; 2330 } 2331 if (ro != &iproute) { 2332 memcpy(&iproute, ro, sizeof(*ro)); 2333 } 2334 ret = ip_output(o_pak, inp->ip_inp.inp.inp_options, 2335 ro, o_flgs, inp->ip_inp.inp.inp_moptions 2336 ,(struct inpcb *)NULL 2337 ); 2338 if ((ro->ro_rt) && (have_mtu) && (net) && (have_mtu > net->mtu)) { 2339 ro->ro_rt->rt_ifp->if_mtu = have_mtu; 2340 } 2341 SCTP_STAT_INCR(sctps_sendpackets); 2342 SCTP_STAT_INCR_COUNTER64(sctps_outpackets); 2343 if (ret) 2344 SCTP_STAT_INCR(sctps_senderrors); 2345 #ifdef SCTP_DEBUG 2346 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) { 2347 printf("Ip output returns %d\n", ret); 2348 } 2349 #endif 2350 if (net == NULL) { 2351 /* free tempy routes */ 2352 if (ro->ro_rt) 2353 RTFREE(ro->ro_rt); 2354 } else { 2355 /* PMTU check versus smallest asoc MTU goes here */ 2356 if (ro->ro_rt != NULL) { 2357 if (ro->ro_rt->rt_rmx.rmx_mtu && 2358 (stcb->asoc.smallest_mtu > ro->ro_rt->rt_rmx.rmx_mtu)) { 2359 sctp_mtu_size_reset(inp, &stcb->asoc, 2360 ro->ro_rt->rt_rmx.rmx_mtu); 2361 } 2362 } else { 2363 /* route was freed */ 2364 net->src_addr_selected = 0; 2365 } 2366 } 2367 return (ret); 2368 } 2369 #ifdef INET6 2370 else if (to->sa_family == AF_INET6) { 2371 uint32_t flowlabel; 2372 struct ip6_hdr *ip6h; 2373 2374 struct route_in6 ip6route; 2375 2376 struct ifnet *ifp; 2377 u_char flowTop; 2378 uint16_t 
flowBottom; 2379 u_char tosBottom, tosTop; 2380 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp; 2381 struct sockaddr_in6 lsa6_storage; 2382 int prev_scope = 0; 2383 int error; 2384 u_short prev_port = 0; 2385 2386 if (net != NULL) { 2387 flowlabel = net->tos_flowlabel; 2388 } else { 2389 flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo; 2390 } 2391 o_pak = SCTP_GET_HEADER_FOR_OUTPUT(sizeof(struct ip6_hdr)); 2392 if (o_pak == NULL) { 2393 /* failed to prepend data, give up */ 2394 sctp_m_freem(m); 2395 return (ENOMEM); 2396 } 2397 SCTP_BUF_LEN(SCTP_HEADER_TO_CHAIN(o_pak)) = sizeof(struct ip6_hdr); 2398 packet_length += sizeof(struct ip6_hdr); 2399 SCTP_ATTACH_CHAIN(o_pak, m, packet_length); 2400 ip6h = mtod(SCTP_HEADER_TO_CHAIN(o_pak), struct ip6_hdr *); 2401 /* 2402 * We assume here that inp_flow is in host byte order within 2403 * the TCB! 2404 */ 2405 flowBottom = flowlabel & 0x0000ffff; 2406 flowTop = ((flowlabel & 0x000f0000) >> 16); 2407 tosTop = (((flowlabel & 0xf0) >> 4) | IPV6_VERSION); 2408 /* protect *sin6 from overwrite */ 2409 sin6 = (struct sockaddr_in6 *)to; 2410 tmp = *sin6; 2411 sin6 = &tmp; 2412 2413 /* KAME hack: embed scopeid */ 2414 if (sa6_embedscope(sin6, ip6_use_defzone) != 0) 2415 return (EINVAL); 2416 if (net == NULL) { 2417 memset(&ip6route, 0, sizeof(ip6route)); 2418 ro = (struct route *)&ip6route; 2419 memcpy(&ro->ro_dst, sin6, sin6->sin6_len); 2420 } else { 2421 ro = (struct route *)&net->ro; 2422 } 2423 if (stcb != NULL) { 2424 if ((stcb->asoc.ecn_allowed) && ecn_ok) { 2425 /* Enable ECN */ 2426 tosBottom = (((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) | sctp_get_ect(stcb, chk)) << 4); 2427 } else { 2428 /* No ECN */ 2429 tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4); 2430 } 2431 } else { 2432 /* we could get no asoc if it is a O-O-T-B packet */ 2433 tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4); 2434 } 2435 ip6h->ip6_flow = htonl(((tosTop << 24) | ((tosBottom | flowTop) << 16) | 
flowBottom)); 2436 ip6h->ip6_nxt = IPPROTO_SCTP; 2437 ip6h->ip6_plen = (SCTP_HEADER_LEN(o_pak) - sizeof(struct ip6_hdr)); 2438 ip6h->ip6_dst = sin6->sin6_addr; 2439 2440 /* 2441 * Add SRC address selection here: we can only reuse to a 2442 * limited degree the kame src-addr-sel, since we can try 2443 * their selection but it may not be bound. 2444 */ 2445 bzero(&lsa6_tmp, sizeof(lsa6_tmp)); 2446 lsa6_tmp.sin6_family = AF_INET6; 2447 lsa6_tmp.sin6_len = sizeof(lsa6_tmp); 2448 lsa6 = &lsa6_tmp; 2449 if (net) { 2450 if (net->src_addr_selected == 0) { 2451 /* Cache the source address */ 2452 ((struct sockaddr_in6 *)&net->ro._s_addr)->sin6_addr = sctp_ipv6_source_address_selection(inp, 2453 stcb, ro, net, out_of_asoc_ok); 2454 2455 if (ro->ro_rt) 2456 net->src_addr_selected = 1; 2457 } 2458 lsa6->sin6_addr = ((struct sockaddr_in6 *)&net->ro._s_addr)->sin6_addr; 2459 } else { 2460 lsa6->sin6_addr = sctp_ipv6_source_address_selection( 2461 inp, stcb, ro, net, out_of_asoc_ok); 2462 } 2463 lsa6->sin6_port = inp->sctp_lport; 2464 2465 if ((ro->ro_rt == NULL)) { 2466 /* 2467 * src addr selection failed to find a route (or 2468 * valid source addr), so we can't get there from 2469 * here! 
2470 */ 2471 #ifdef SCTP_DEBUG 2472 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) { 2473 printf("low_level_output: dropped v6 pkt- no valid source addr\n"); 2474 } 2475 #endif 2476 sctp_m_freem(o_pak); 2477 if (net) { 2478 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) 2479 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, 2480 stcb, 2481 SCTP_FAILED_THRESHOLD, 2482 (void *)net); 2483 net->dest_state &= ~SCTP_ADDR_REACHABLE; 2484 net->dest_state |= SCTP_ADDR_NOT_REACHABLE; 2485 if (stcb) { 2486 if (net == stcb->asoc.primary_destination) { 2487 /* need a new primary */ 2488 struct sctp_nets *alt; 2489 2490 alt = sctp_find_alternate_net(stcb, net, 0); 2491 if (alt != net) { 2492 if (sctp_set_primary_addr(stcb, 2493 (struct sockaddr *)NULL, 2494 alt) == 0) { 2495 net->dest_state |= SCTP_ADDR_WAS_PRIMARY; 2496 net->src_addr_selected = 0; 2497 } 2498 } 2499 } 2500 } 2501 } 2502 return (EHOSTUNREACH); 2503 } 2504 /* 2505 * XXX: sa6 may not have a valid sin6_scope_id in the 2506 * non-SCOPEDROUTING case. 2507 */ 2508 bzero(&lsa6_storage, sizeof(lsa6_storage)); 2509 lsa6_storage.sin6_family = AF_INET6; 2510 lsa6_storage.sin6_len = sizeof(lsa6_storage); 2511 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) { 2512 sctp_m_freem(o_pak); 2513 return (error); 2514 } 2515 /* XXX */ 2516 lsa6_storage.sin6_addr = lsa6->sin6_addr; 2517 lsa6_storage.sin6_port = inp->sctp_lport; 2518 lsa6 = &lsa6_storage; 2519 ip6h->ip6_src = lsa6->sin6_addr; 2520 2521 /* 2522 * We set the hop limit now since there is a good chance 2523 * that our ro pointer is now filled 2524 */ 2525 ip6h->ip6_hlim = in6_selecthlim((struct in6pcb *)&inp->ip_inp.inp, 2526 (ro ? 2527 (ro->ro_rt ? 
(ro->ro_rt->rt_ifp) : (NULL)) : 2528 (NULL))); 2529 o_flgs = 0; 2530 ifp = ro->ro_rt->rt_ifp; 2531 #ifdef SCTP_DEBUG 2532 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) { 2533 /* Copy to be sure something bad is not happening */ 2534 sin6->sin6_addr = ip6h->ip6_dst; 2535 lsa6->sin6_addr = ip6h->ip6_src; 2536 2537 printf("Calling ipv6 output routine from low level\n"); 2538 printf("src: "); 2539 sctp_print_address((struct sockaddr *)lsa6); 2540 printf("dst: "); 2541 sctp_print_address((struct sockaddr *)sin6); 2542 } 2543 #endif /* SCTP_DEBUG */ 2544 if (net) { 2545 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 2546 /* preserve the port and scope for link local send */ 2547 prev_scope = sin6->sin6_scope_id; 2548 prev_port = sin6->sin6_port; 2549 } 2550 ret = ip6_output(o_pak, ((struct in6pcb *)inp)->in6p_outputopts, 2551 (struct route_in6 *)ro, 2552 o_flgs, 2553 ((struct in6pcb *)inp)->in6p_moptions, 2554 &ifp 2555 ,NULL 2556 ); 2557 if (net) { 2558 /* for link local this must be done */ 2559 sin6->sin6_scope_id = prev_scope; 2560 sin6->sin6_port = prev_port; 2561 } 2562 #ifdef SCTP_DEBUG 2563 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) { 2564 printf("return from send is %d\n", ret); 2565 } 2566 #endif /* SCTP_DEBUG_OUTPUT */ 2567 SCTP_STAT_INCR(sctps_sendpackets); 2568 SCTP_STAT_INCR_COUNTER64(sctps_outpackets); 2569 if (ret) 2570 SCTP_STAT_INCR(sctps_senderrors); 2571 if (net == NULL) { 2572 /* Now if we had a temp route free it */ 2573 if (ro->ro_rt) { 2574 RTFREE(ro->ro_rt); 2575 } 2576 } else { 2577 /* PMTU check versus smallest asoc MTU goes here */ 2578 if (ro->ro_rt == NULL) { 2579 /* Route was freed */ 2580 net->src_addr_selected = 0; 2581 } 2582 if (ro->ro_rt != NULL) { 2583 if (ro->ro_rt->rt_rmx.rmx_mtu && 2584 (stcb->asoc.smallest_mtu > ro->ro_rt->rt_rmx.rmx_mtu)) { 2585 sctp_mtu_size_reset(inp, 2586 &stcb->asoc, 2587 ro->ro_rt->rt_rmx.rmx_mtu); 2588 } 2589 } else if (ifp) { 2590 if (ND_IFINFO(ifp)->linkmtu && 2591 (stcb->asoc.smallest_mtu > 
ND_IFINFO(ifp)->linkmtu)) { 2592 sctp_mtu_size_reset(inp, 2593 &stcb->asoc, 2594 ND_IFINFO(ifp)->linkmtu); 2595 } 2596 } 2597 } 2598 return (ret); 2599 } 2600 #endif 2601 else { 2602 #ifdef SCTP_DEBUG 2603 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) { 2604 printf("Unknown protocol (TSNH) type %d\n", ((struct sockaddr *)to)->sa_family); 2605 } 2606 #endif 2607 sctp_m_freem(m); 2608 return (EFAULT); 2609 } 2610 } 2611 2612 2613 void 2614 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb) 2615 { 2616 struct mbuf *m, *m_at, *m_last; 2617 struct sctp_nets *net; 2618 struct sctp_init_msg *initm; 2619 struct sctp_supported_addr_param *sup_addr; 2620 struct sctp_ecn_supported_param *ecn; 2621 struct sctp_prsctp_supported_param *prsctp; 2622 struct sctp_ecn_nonce_supported_param *ecn_nonce; 2623 struct sctp_supported_chunk_types_param *pr_supported; 2624 int cnt_inits_to = 0; 2625 int padval, ret; 2626 int num_ext; 2627 int p_len; 2628 2629 /* INIT's always go to the primary (and usually ONLY address) */ 2630 m_last = NULL; 2631 net = stcb->asoc.primary_destination; 2632 if (net == NULL) { 2633 net = TAILQ_FIRST(&stcb->asoc.nets); 2634 if (net == NULL) { 2635 /* TSNH */ 2636 return; 2637 } 2638 /* we confirm any address we send an INIT to */ 2639 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 2640 sctp_set_primary_addr(stcb, NULL, net); 2641 } else { 2642 /* we confirm any address we send an INIT to */ 2643 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 2644 } 2645 #ifdef SCTP_DEBUG 2646 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) { 2647 printf("Sending INIT\n"); 2648 } 2649 #endif 2650 if (((struct sockaddr *)&(net->ro._l_addr))->sa_family == AF_INET6) { 2651 /* 2652 * special hook, if we are sending to link local it will not 2653 * show up in our private address count. 
2654 */ 2655 struct sockaddr_in6 *sin6l; 2656 2657 sin6l = &net->ro._l_addr.sin6; 2658 if (IN6_IS_ADDR_LINKLOCAL(&sin6l->sin6_addr)) 2659 cnt_inits_to = 1; 2660 } 2661 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 2662 /* This case should not happen */ 2663 return; 2664 } 2665 /* start the INIT timer */ 2666 if (sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net)) { 2667 /* we are hosed since I can't start the INIT timer? */ 2668 return; 2669 } 2670 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA); 2671 if (m == NULL) { 2672 /* No memory, INIT timer will re-attempt. */ 2673 return; 2674 } 2675 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_msg); 2676 /* Now lets put the SCTP header in place */ 2677 initm = mtod(m, struct sctp_init_msg *); 2678 initm->sh.src_port = inp->sctp_lport; 2679 initm->sh.dest_port = stcb->rport; 2680 initm->sh.v_tag = 0; 2681 initm->sh.checksum = 0; /* calculate later */ 2682 /* now the chunk header */ 2683 initm->msg.ch.chunk_type = SCTP_INITIATION; 2684 initm->msg.ch.chunk_flags = 0; 2685 /* fill in later from mbuf we build */ 2686 initm->msg.ch.chunk_length = 0; 2687 /* place in my tag */ 2688 initm->msg.init.initiate_tag = htonl(stcb->asoc.my_vtag); 2689 /* set up some of the credits. 
*/ 2690 initm->msg.init.a_rwnd = htonl(max(inp->sctp_socket->so_rcv.sb_hiwat, 2691 SCTP_MINIMAL_RWND)); 2692 2693 initm->msg.init.num_outbound_streams = htons(stcb->asoc.pre_open_streams); 2694 initm->msg.init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams); 2695 initm->msg.init.initial_tsn = htonl(stcb->asoc.init_seq_number); 2696 /* now the address restriction */ 2697 sup_addr = (struct sctp_supported_addr_param *)((caddr_t)initm + 2698 sizeof(*initm)); 2699 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE); 2700 /* we support 2 types IPv6/IPv4 */ 2701 sup_addr->ph.param_length = htons(sizeof(*sup_addr) + 2702 sizeof(uint16_t)); 2703 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS); 2704 sup_addr->addr_type[1] = htons(SCTP_IPV6_ADDRESS); 2705 SCTP_BUF_LEN(m) += sizeof(*sup_addr) + sizeof(uint16_t); 2706 2707 if (inp->sctp_ep.adaptation_layer_indicator) { 2708 struct sctp_adaptation_layer_indication *ali; 2709 2710 ali = (struct sctp_adaptation_layer_indication *)( 2711 (caddr_t)sup_addr + sizeof(*sup_addr) + sizeof(uint16_t)); 2712 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION); 2713 ali->ph.param_length = htons(sizeof(*ali)); 2714 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator); 2715 SCTP_BUF_LEN(m) += sizeof(*ali); 2716 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + 2717 sizeof(*ali)); 2718 } else { 2719 ecn = (struct sctp_ecn_supported_param *)((caddr_t)sup_addr + 2720 sizeof(*sup_addr) + sizeof(uint16_t)); 2721 } 2722 2723 /* now any cookie time extensions */ 2724 if (stcb->asoc.cookie_preserve_req) { 2725 struct sctp_cookie_perserve_param *cookie_preserve; 2726 2727 cookie_preserve = (struct sctp_cookie_perserve_param *)(ecn); 2728 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE); 2729 cookie_preserve->ph.param_length = htons( 2730 sizeof(*cookie_preserve)); 2731 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req); 2732 SCTP_BUF_LEN(m) += sizeof(*cookie_preserve); 2733 ecn = (struct 
sctp_ecn_supported_param *)( 2734 (caddr_t)cookie_preserve + sizeof(*cookie_preserve)); 2735 stcb->asoc.cookie_preserve_req = 0; 2736 } 2737 /* ECN parameter */ 2738 if (sctp_ecn_enable == 1) { 2739 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE); 2740 ecn->ph.param_length = htons(sizeof(*ecn)); 2741 SCTP_BUF_LEN(m) += sizeof(*ecn); 2742 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn + 2743 sizeof(*ecn)); 2744 } else { 2745 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn); 2746 } 2747 /* And now tell the peer we do pr-sctp */ 2748 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED); 2749 prsctp->ph.param_length = htons(sizeof(*prsctp)); 2750 SCTP_BUF_LEN(m) += sizeof(*prsctp); 2751 2752 /* And now tell the peer we do all the extensions */ 2753 pr_supported = (struct sctp_supported_chunk_types_param *) 2754 ((caddr_t)prsctp + sizeof(*prsctp)); 2755 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT); 2756 num_ext = 0; 2757 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF; 2758 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK; 2759 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN; 2760 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED; 2761 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET; 2762 if (!sctp_auth_disable) 2763 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION; 2764 p_len = sizeof(*pr_supported) + num_ext; 2765 pr_supported->ph.param_length = htons(p_len); 2766 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len); 2767 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 2768 2769 /* ECN nonce: And now tell the peer we support ECN nonce */ 2770 if (sctp_ecn_nonce) { 2771 ecn_nonce = (struct sctp_ecn_nonce_supported_param *) 2772 ((caddr_t)pr_supported + SCTP_SIZE32(p_len)); 2773 ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED); 2774 ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce)); 2775 SCTP_BUF_LEN(m) += sizeof(*ecn_nonce); 2776 } 2777 /* add authentication 
parameters */ 2778 if (!sctp_auth_disable) { 2779 struct sctp_auth_random *random; 2780 struct sctp_auth_hmac_algo *hmacs; 2781 struct sctp_auth_chunk_list *chunks; 2782 2783 /* attach RANDOM parameter, if available */ 2784 if (stcb->asoc.authinfo.random != NULL) { 2785 random = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 2786 random->ph.param_type = htons(SCTP_RANDOM); 2787 p_len = sizeof(*random) + stcb->asoc.authinfo.random_len; 2788 random->ph.param_length = htons(p_len); 2789 bcopy(stcb->asoc.authinfo.random->key, random->random_data, 2790 stcb->asoc.authinfo.random_len); 2791 /* zero out any padding required */ 2792 bzero((caddr_t)random + p_len, SCTP_SIZE32(p_len) - p_len); 2793 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 2794 } 2795 /* add HMAC_ALGO parameter */ 2796 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 2797 p_len = sctp_serialize_hmaclist(stcb->asoc.local_hmacs, 2798 (uint8_t *) hmacs->hmac_ids); 2799 if (p_len > 0) { 2800 p_len += sizeof(*hmacs); 2801 hmacs->ph.param_type = htons(SCTP_HMAC_LIST); 2802 hmacs->ph.param_length = htons(p_len); 2803 /* zero out any padding required */ 2804 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len); 2805 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 2806 } 2807 /* add CHUNKS parameter */ 2808 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 2809 p_len = sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, 2810 chunks->chunk_types); 2811 if (p_len > 0) { 2812 p_len += sizeof(*chunks); 2813 chunks->ph.param_type = htons(SCTP_CHUNK_LIST); 2814 chunks->ph.param_length = htons(p_len); 2815 /* zero out any padding required */ 2816 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len); 2817 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 2818 } 2819 } 2820 m_at = m; 2821 /* now the addresses */ 2822 { 2823 struct sctp_scoping scp; 2824 2825 /* 2826 * To optimize this we could put the scoping stuff into a 2827 * structure and remove the individual 
uint8's from the 2828 * assoc structure. Then we could just pass in the address 2829 * within the stcb.. but for now this is a quick hack to get 2830 * the address stuff teased apart. 2831 */ 2832 scp.ipv4_addr_legal = stcb->asoc.ipv4_addr_legal; 2833 scp.ipv6_addr_legal = stcb->asoc.ipv6_addr_legal; 2834 scp.loopback_scope = stcb->asoc.loopback_scope; 2835 scp.ipv4_local_scope = stcb->asoc.ipv4_local_scope; 2836 scp.local_scope = stcb->asoc.local_scope; 2837 scp.site_scope = stcb->asoc.site_scope; 2838 2839 m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to); 2840 } 2841 2842 2843 /* calulate the size and update pkt header and chunk header */ 2844 p_len = 0; 2845 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 2846 if (SCTP_BUF_NEXT(m_at) == NULL) 2847 m_last = m_at; 2848 p_len += SCTP_BUF_LEN(m_at); 2849 } 2850 initm->msg.ch.chunk_length = htons((p_len - sizeof(struct sctphdr))); 2851 /* 2852 * We pass 0 here to NOT set IP_DF if its IPv4, we ignore the return 2853 * here since the timer will drive a retranmission. 2854 */ 2855 2856 /* I don't expect this to execute but we will be safe here */ 2857 padval = p_len % 4; 2858 if ((padval) && (m_last)) { 2859 /* 2860 * The compiler worries that m_last may not be set even 2861 * though I think it is impossible :-> however we add m_last 2862 * here just in case. 
2863 */ 2864 int ret; 2865 2866 ret = sctp_add_pad_tombuf(m_last, (4 - padval)); 2867 if (ret) { 2868 /* Houston we have a problem, no space */ 2869 sctp_m_freem(m); 2870 return; 2871 } 2872 p_len += padval; 2873 } 2874 ret = sctp_lowlevel_chunk_output(inp, stcb, net, 2875 (struct sockaddr *)&net->ro._l_addr, 2876 m, 0, NULL, 0, 0, NULL, 0); 2877 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 2878 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net); 2879 SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 2880 } 2881 2882 struct mbuf * 2883 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt, 2884 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp) 2885 { 2886 /* 2887 * Given a mbuf containing an INIT or INIT-ACK with the param_offset 2888 * being equal to the beginning of the params i.e. (iphlen + 2889 * sizeof(struct sctp_init_msg) parse through the parameters to the 2890 * end of the mbuf verifying that all parameters are known. 2891 * 2892 * For unknown parameters build and return a mbuf with 2893 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop 2894 * processing this chunk stop, and set *abort_processing to 1. 2895 * 2896 * By having param_offset be pre-set to where parameters begin it is 2897 * hoped that this routine may be reused in the future by new 2898 * features. 
2899 */ 2900 struct sctp_paramhdr *phdr, params; 2901 2902 struct mbuf *mat, *op_err; 2903 char tempbuf[2048]; 2904 int at, limit, pad_needed; 2905 uint16_t ptype, plen; 2906 int err_at; 2907 2908 *abort_processing = 0; 2909 mat = in_initpkt; 2910 err_at = 0; 2911 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk); 2912 at = param_offset; 2913 op_err = NULL; 2914 2915 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params)); 2916 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) { 2917 ptype = ntohs(phdr->param_type); 2918 plen = ntohs(phdr->param_length); 2919 limit -= SCTP_SIZE32(plen); 2920 if (plen < sizeof(struct sctp_paramhdr)) { 2921 #ifdef SCTP_DEBUG 2922 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) { 2923 printf("sctp_output.c:Impossible length in parameter < %d\n", plen); 2924 } 2925 #endif 2926 *abort_processing = 1; 2927 break; 2928 } 2929 /* 2930 * All parameters for all chunks that we know/understand are 2931 * listed here. We process them other places and make 2932 * appropriate stop actions per the upper bits. However this 2933 * is the generic routine processor's can call to get back 2934 * an operr.. to either incorporate (init-ack) or send. 
2935 */ 2936 if ((ptype == SCTP_HEARTBEAT_INFO) || 2937 (ptype == SCTP_IPV4_ADDRESS) || 2938 (ptype == SCTP_IPV6_ADDRESS) || 2939 (ptype == SCTP_STATE_COOKIE) || 2940 (ptype == SCTP_UNRECOG_PARAM) || 2941 (ptype == SCTP_COOKIE_PRESERVE) || 2942 (ptype == SCTP_SUPPORTED_ADDRTYPE) || 2943 (ptype == SCTP_PRSCTP_SUPPORTED) || 2944 (ptype == SCTP_ADD_IP_ADDRESS) || 2945 (ptype == SCTP_DEL_IP_ADDRESS) || 2946 (ptype == SCTP_ECN_CAPABLE) || 2947 (ptype == SCTP_ULP_ADAPTATION) || 2948 (ptype == SCTP_ERROR_CAUSE_IND) || 2949 (ptype == SCTP_RANDOM) || 2950 (ptype == SCTP_CHUNK_LIST) || 2951 (ptype == SCTP_CHUNK_LIST) || 2952 (ptype == SCTP_SET_PRIM_ADDR) || 2953 (ptype == SCTP_SUCCESS_REPORT) || 2954 (ptype == SCTP_ULP_ADAPTATION) || 2955 (ptype == SCTP_SUPPORTED_CHUNK_EXT) || 2956 (ptype == SCTP_ECN_NONCE_SUPPORTED) 2957 ) { 2958 /* no skip it */ 2959 at += SCTP_SIZE32(plen); 2960 } else if (ptype == SCTP_HOSTNAME_ADDRESS) { 2961 /* We can NOT handle HOST NAME addresses!! */ 2962 int l_len; 2963 2964 #ifdef SCTP_DEBUG 2965 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) { 2966 printf("Can't handle hostname addresses.. 
abort processing\n"); 2967 } 2968 #endif 2969 *abort_processing = 1; 2970 if (op_err == NULL) { 2971 /* Ok need to try to get a mbuf */ 2972 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 2973 l_len += plen; 2974 l_len += sizeof(struct sctp_paramhdr); 2975 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA); 2976 if (op_err) { 2977 SCTP_BUF_LEN(op_err) = 0; 2978 /* 2979 * pre-reserve space for ip and sctp 2980 * header and chunk hdr 2981 */ 2982 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 2983 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 2984 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 2985 } 2986 } 2987 if (op_err) { 2988 /* If we have space */ 2989 struct sctp_paramhdr s; 2990 2991 if (err_at % 4) { 2992 uint32_t cpthis = 0; 2993 2994 pad_needed = 4 - (err_at % 4); 2995 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis); 2996 err_at += pad_needed; 2997 } 2998 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR); 2999 s.param_length = htons(sizeof(s) + plen); 3000 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s); 3001 err_at += sizeof(s); 3002 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, plen); 3003 if (phdr == NULL) { 3004 sctp_m_freem(op_err); 3005 /* 3006 * we are out of memory but we still 3007 * need to have a look at what to do 3008 * (the system is in trouble 3009 * though). 3010 */ 3011 return (NULL); 3012 } 3013 m_copyback(op_err, err_at, plen, (caddr_t)phdr); 3014 err_at += plen; 3015 } 3016 return (op_err); 3017 } else { 3018 /* 3019 * we do not recognize the parameter figure out what 3020 * we do. 3021 */ 3022 if ((ptype & 0x4000) == 0x4000) { 3023 /* Report bit is set?? 
*/ 3024 if (op_err == NULL) { 3025 int l_len; 3026 3027 /* Ok need to try to get an mbuf */ 3028 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 3029 l_len += plen; 3030 l_len += sizeof(struct sctp_paramhdr); 3031 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA); 3032 if (op_err) { 3033 SCTP_BUF_LEN(op_err) = 0; 3034 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 3035 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 3036 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 3037 } 3038 } 3039 if (op_err) { 3040 /* If we have space */ 3041 struct sctp_paramhdr s; 3042 3043 if (err_at % 4) { 3044 uint32_t cpthis = 0; 3045 3046 pad_needed = 4 - (err_at % 4); 3047 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis); 3048 err_at += pad_needed; 3049 } 3050 s.param_type = htons(SCTP_UNRECOG_PARAM); 3051 s.param_length = htons(sizeof(s) + plen); 3052 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s); 3053 err_at += sizeof(s); 3054 if (plen > sizeof(tempbuf)) { 3055 plen = sizeof(tempbuf); 3056 } 3057 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, plen); 3058 if (phdr == NULL) { 3059 sctp_m_freem(op_err); 3060 /* 3061 * we are out of memory but 3062 * we still need to have a 3063 * look at what to do (the 3064 * system is in trouble 3065 * though). 
3066 */ 3067 goto more_processing; 3068 } 3069 m_copyback(op_err, err_at, plen, (caddr_t)phdr); 3070 err_at += plen; 3071 } 3072 } 3073 more_processing: 3074 if ((ptype & 0x8000) == 0x0000) { 3075 return (op_err); 3076 } else { 3077 /* skip this chunk and continue processing */ 3078 at += SCTP_SIZE32(plen); 3079 } 3080 3081 } 3082 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params)); 3083 } 3084 return (op_err); 3085 } 3086 3087 static int 3088 sctp_are_there_new_addresses(struct sctp_association *asoc, 3089 struct mbuf *in_initpkt, int iphlen, int offset) 3090 { 3091 /* 3092 * Given a INIT packet, look through the packet to verify that there 3093 * are NO new addresses. As we go through the parameters add reports 3094 * of any un-understood parameters that require an error. Also we 3095 * must return (1) to drop the packet if we see a un-understood 3096 * parameter that tells us to drop the chunk. 3097 */ 3098 struct sockaddr_in sin4, *sa4; 3099 struct sockaddr_in6 sin6, *sa6; 3100 struct sockaddr *sa_touse; 3101 struct sockaddr *sa; 3102 struct sctp_paramhdr *phdr, params; 3103 struct ip *iph; 3104 struct mbuf *mat; 3105 uint16_t ptype, plen; 3106 int err_at; 3107 uint8_t fnd; 3108 struct sctp_nets *net; 3109 3110 memset(&sin4, 0, sizeof(sin4)); 3111 memset(&sin6, 0, sizeof(sin6)); 3112 sin4.sin_family = AF_INET; 3113 sin4.sin_len = sizeof(sin4); 3114 sin6.sin6_family = AF_INET6; 3115 sin6.sin6_len = sizeof(sin6); 3116 3117 sa_touse = NULL; 3118 /* First what about the src address of the pkt ? 
*/ 3119 iph = mtod(in_initpkt, struct ip *); 3120 if (iph->ip_v == IPVERSION) { 3121 /* source addr is IPv4 */ 3122 sin4.sin_addr = iph->ip_src; 3123 sa_touse = (struct sockaddr *)&sin4; 3124 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 3125 /* source addr is IPv6 */ 3126 struct ip6_hdr *ip6h; 3127 3128 ip6h = mtod(in_initpkt, struct ip6_hdr *); 3129 sin6.sin6_addr = ip6h->ip6_src; 3130 sa_touse = (struct sockaddr *)&sin6; 3131 } else { 3132 return (1); 3133 } 3134 3135 fnd = 0; 3136 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3137 sa = (struct sockaddr *)&net->ro._l_addr; 3138 if (sa->sa_family == sa_touse->sa_family) { 3139 if (sa->sa_family == AF_INET) { 3140 sa4 = (struct sockaddr_in *)sa; 3141 if (sa4->sin_addr.s_addr == 3142 sin4.sin_addr.s_addr) { 3143 fnd = 1; 3144 break; 3145 } 3146 } else if (sa->sa_family == AF_INET6) { 3147 sa6 = (struct sockaddr_in6 *)sa; 3148 if (SCTP6_ARE_ADDR_EQUAL(&sa6->sin6_addr, 3149 &sin6.sin6_addr)) { 3150 fnd = 1; 3151 break; 3152 } 3153 } 3154 } 3155 } 3156 if (fnd == 0) { 3157 /* New address added! no need to look futher. 
*/ 3158 return (1); 3159 } 3160 /* Ok so far lets munge through the rest of the packet */ 3161 mat = in_initpkt; 3162 err_at = 0; 3163 sa_touse = NULL; 3164 offset += sizeof(struct sctp_init_chunk); 3165 phdr = sctp_get_next_param(mat, offset, ¶ms, sizeof(params)); 3166 while (phdr) { 3167 ptype = ntohs(phdr->param_type); 3168 plen = ntohs(phdr->param_length); 3169 if (ptype == SCTP_IPV4_ADDRESS) { 3170 struct sctp_ipv4addr_param *p4, p4_buf; 3171 3172 phdr = sctp_get_next_param(mat, offset, 3173 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf)); 3174 if (plen != sizeof(struct sctp_ipv4addr_param) || 3175 phdr == NULL) { 3176 return (1); 3177 } 3178 p4 = (struct sctp_ipv4addr_param *)phdr; 3179 sin4.sin_addr.s_addr = p4->addr; 3180 sa_touse = (struct sockaddr *)&sin4; 3181 } else if (ptype == SCTP_IPV6_ADDRESS) { 3182 struct sctp_ipv6addr_param *p6, p6_buf; 3183 3184 phdr = sctp_get_next_param(mat, offset, 3185 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf)); 3186 if (plen != sizeof(struct sctp_ipv6addr_param) || 3187 phdr == NULL) { 3188 return (1); 3189 } 3190 p6 = (struct sctp_ipv6addr_param *)phdr; 3191 memcpy((caddr_t)&sin6.sin6_addr, p6->addr, 3192 sizeof(p6->addr)); 3193 sa_touse = (struct sockaddr *)&sin4; 3194 } 3195 if (sa_touse) { 3196 /* ok, sa_touse points to one to check */ 3197 fnd = 0; 3198 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3199 sa = (struct sockaddr *)&net->ro._l_addr; 3200 if (sa->sa_family != sa_touse->sa_family) { 3201 continue; 3202 } 3203 if (sa->sa_family == AF_INET) { 3204 sa4 = (struct sockaddr_in *)sa; 3205 if (sa4->sin_addr.s_addr == 3206 sin4.sin_addr.s_addr) { 3207 fnd = 1; 3208 break; 3209 } 3210 } else if (sa->sa_family == AF_INET6) { 3211 sa6 = (struct sockaddr_in6 *)sa; 3212 if (SCTP6_ARE_ADDR_EQUAL( 3213 &sa6->sin6_addr, &sin6.sin6_addr)) { 3214 fnd = 1; 3215 break; 3216 } 3217 } 3218 } 3219 if (!fnd) { 3220 /* New addr added! 
no need to look further */ 3221 return (1); 3222 } 3223 } 3224 offset += SCTP_SIZE32(plen); 3225 phdr = sctp_get_next_param(mat, offset, ¶ms, sizeof(params)); 3226 } 3227 return (0); 3228 } 3229 3230 /* 3231 * Given a MBUF chain that was sent into us containing an INIT. Build a 3232 * INIT-ACK with COOKIE and send back. We assume that the in_initpkt has done 3233 * a pullup to include IPv6/4header, SCTP header and initial part of INIT 3234 * message (i.e. the struct sctp_init_msg). 3235 */ 3236 void 3237 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 3238 struct mbuf *init_pkt, int iphlen, int offset, struct sctphdr *sh, 3239 struct sctp_init_chunk *init_chk) 3240 { 3241 struct sctp_association *asoc; 3242 struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *m_last; 3243 struct sctp_init_msg *initackm_out; 3244 struct sctp_ecn_supported_param *ecn; 3245 struct sctp_prsctp_supported_param *prsctp; 3246 struct sctp_ecn_nonce_supported_param *ecn_nonce; 3247 struct sctp_supported_chunk_types_param *pr_supported; 3248 struct sockaddr_storage store; 3249 struct sockaddr_in *sin; 3250 struct sockaddr_in6 *sin6; 3251 struct route *ro; 3252 struct ip *iph; 3253 struct ip6_hdr *ip6; 3254 struct sockaddr *to; 3255 struct sctp_state_cookie stc; 3256 struct sctp_nets *net = NULL; 3257 int cnt_inits_to = 0; 3258 uint16_t his_limit, i_want; 3259 int abort_flag, padval, sz_of; 3260 int num_ext; 3261 int p_len; 3262 3263 if (stcb) { 3264 asoc = &stcb->asoc; 3265 } else { 3266 asoc = NULL; 3267 } 3268 m_last = NULL; 3269 if ((asoc != NULL) && 3270 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) && 3271 (sctp_are_there_new_addresses(asoc, init_pkt, iphlen, offset))) { 3272 /* new addresses, out of here in non-cookie-wait states */ 3273 /* 3274 * Send a ABORT, we don't add the new address error clause 3275 * though we even set the T bit and copy in the 0 tag.. this 3276 * looks no different than if no listener was present. 
		 */
		sctp_send_abort(init_pkt, iphlen, sh, 0, NULL);
		return;
	}
	abort_flag = 0;
	/* collect any unrecognized-parameter error causes to echo back */
	op_err = sctp_arethere_unrecognized_parameters(init_pkt,
	    (offset + sizeof(struct sctp_init_chunk)),
	    &abort_flag, (struct sctp_chunkhdr *)init_chk);
	if (abort_flag) {
		sctp_send_abort(init_pkt, iphlen, sh, init_chk->init.initiate_tag, op_err);
		return;
	}
	m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (m == NULL) {
		/* No memory, INIT timer will re-attempt. */
		if (op_err)
			sctp_m_freem(op_err);
		return;
	}
	SCTP_BUF_LEN(m) = sizeof(struct sctp_init_msg);

	/* the time I built cookie */
	SCTP_GETTIME_TIMEVAL(&stc.time_entered);

	/* populate any tie tags */
	if (asoc != NULL) {
		/* unlock before tag selections */
		stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
		stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
		stc.cookie_life = asoc->cookie_life;
		net = asoc->primary_destination;
	} else {
		stc.tie_tag_my_vtag = 0;
		stc.tie_tag_peer_vtag = 0;
		/* life I will award this cookie */
		stc.cookie_life = inp->sctp_ep.def_cookie_life;
	}

	/* copy in the ports for later check */
	stc.myport = sh->dest_port;
	stc.peerport = sh->src_port;

	/*
	 * If we wanted to honor cookie life extensions, we would add to
	 * stc.cookie_life. For now we should NOT honor any extension
	 */
	stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		struct inpcb *in_inp;

		/* Its a V6 socket */
		in_inp = (struct inpcb *)inp;
		stc.ipv6_addr_legal = 1;
		/* Now look at the binding flag to see if V4 will be legal */
		if (SCTP_IPV6_V6ONLY(in_inp) == 0) {
			stc.ipv4_addr_legal = 1;
		} else {
			/* V4 addresses are NOT legal on the association */
			stc.ipv4_addr_legal = 0;
		}
	} else {
		/* Its a V4 socket, no - V6 */
		stc.ipv4_addr_legal = 1;
		stc.ipv6_addr_legal = 0;
	}

#ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
	stc.ipv4_scope = 1;
#else
	stc.ipv4_scope = 0;
#endif
	/* now for scope setup */
	memset((caddr_t)&store, 0, sizeof(store));
	sin = (struct sockaddr_in *)&store;
	sin6 = (struct sockaddr_in6 *)&store;
	if (net == NULL) {
		/* no TCB yet: derive destination/scopes from the packet */
		to = (struct sockaddr *)&store;
		iph = mtod(init_pkt, struct ip *);
		if (iph->ip_v == IPVERSION) {
			struct in_addr addr;
			struct route iproute;

			sin->sin_family = AF_INET;
			sin->sin_len = sizeof(struct sockaddr_in);
			sin->sin_port = sh->src_port;
			sin->sin_addr = iph->ip_src;
			/* lookup address */
			stc.address[0] = sin->sin_addr.s_addr;
			stc.address[1] = 0;
			stc.address[2] = 0;
			stc.address[3] = 0;
			stc.addr_type = SCTP_IPV4_ADDRESS;
			/* local from address */
			memset(&iproute, 0, sizeof(iproute));
			ro = &iproute;
			memcpy(&ro->ro_dst, sin, sizeof(*sin));
			addr = sctp_ipv4_source_address_selection(inp, NULL,
			    ro, NULL, 0);
			if (ro->ro_rt) {
				/* drop the route reference taken by the lookup */
				RTFREE(ro->ro_rt);
			}
			stc.laddress[0] = addr.s_addr;
			stc.laddress[1] = 0;
			stc.laddress[2] = 0;
			stc.laddress[3] = 0;
			stc.laddr_type = SCTP_IPV4_ADDRESS;
			/* scope_id is only for v6 */
			stc.scope_id = 0;
#ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
			if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
				stc.ipv4_scope = 1;
			}
#else
			stc.ipv4_scope = 1;
#endif				/* SCTP_DONT_DO_PRIVADDR_SCOPE */
			/* Must use the address in this case */
			if (sctp_is_address_on_local_host((struct sockaddr *)sin)) {
				stc.loopback_scope = 1;
				stc.ipv4_scope = 1;
				stc.site_scope = 1;
				stc.local_scope = 1;
			}
		} else if (iph->ip_v == (IPV6_VERSION >> 4)) {
			struct in6_addr addr;

			struct route_in6 iproute6;

			ip6 = mtod(init_pkt, struct ip6_hdr *);
			sin6->sin6_family = AF_INET6;
			sin6->sin6_len = sizeof(struct sockaddr_in6);
			sin6->sin6_port = sh->src_port;
			sin6->sin6_addr = ip6->ip6_src;
			/* lookup address */
			memcpy(&stc.address, &sin6->sin6_addr,
			    sizeof(struct in6_addr));
			sin6->sin6_scope_id = 0;
			stc.addr_type = SCTP_IPV6_ADDRESS;
			stc.scope_id = 0;
			if (sctp_is_address_on_local_host((struct sockaddr *)sin6)) {
				stc.loopback_scope = 1;
				stc.local_scope = 1;
				stc.site_scope = 1;
				stc.ipv4_scope = 1;
			} else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
				/*
				 * If the new destination is a LINK_LOCAL we
				 * must have common both site and local
				 * scope. Don't set local scope though since
				 * we must depend on the source to be added
				 * implicitly. We cannot assure just because
				 * we share one link that all links are
				 * common.
				 */
				stc.local_scope = 0;
				stc.site_scope = 1;
				stc.ipv4_scope = 1;
				/*
				 * we start counting for the private address
				 * stuff at 1. since the link local we
				 * source from won't show up in our scoped
				 * count.
				 */
				cnt_inits_to = 1;
				/* pull out the scope_id from incoming pkt */
				/* FIX ME: does this have scope from rcvif? */
				(void)sa6_recoverscope(sin6);

				sa6_embedscope(sin6, ip6_use_defzone);
				stc.scope_id = sin6->sin6_scope_id;
			} else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
				/*
				 * If the new destination is SITE_LOCAL then
				 * we must have site scope in common.
				 */
				stc.site_scope = 1;
			}
			/* local from address */
			memset(&iproute6, 0, sizeof(iproute6));
			ro = (struct route *)&iproute6;
			memcpy(&ro->ro_dst, sin6, sizeof(*sin6));
			addr = sctp_ipv6_source_address_selection(inp, NULL,
			    ro, NULL, 0);
			if (ro->ro_rt) {
				RTFREE(ro->ro_rt);
			}
			memcpy(&stc.laddress, &addr, sizeof(struct in6_addr));
			stc.laddr_type = SCTP_IPV6_ADDRESS;
		}
	} else {
		/* set the scope per the existing tcb */
		struct sctp_nets *lnet;

		stc.loopback_scope = asoc->loopback_scope;
		stc.ipv4_scope = asoc->ipv4_local_scope;
		stc.site_scope = asoc->site_scope;
		stc.local_scope = asoc->local_scope;
		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
			if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
				if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
					/*
					 * if we have a LL address, start
					 * counting at 1.
					 */
					cnt_inits_to = 1;
				}
			}
		}

		/* use the net pointer */
		to = (struct sockaddr *)&net->ro._l_addr;
		if (to->sa_family == AF_INET) {
			sin = (struct sockaddr_in *)to;
			stc.address[0] = sin->sin_addr.s_addr;
			stc.address[1] = 0;
			stc.address[2] = 0;
			stc.address[3] = 0;
			stc.addr_type = SCTP_IPV4_ADDRESS;
			if (net->src_addr_selected == 0) {
				/*
				 * strange case here, the INIT should have
				 * did the selection.
				 */
				net->ro._s_addr.sin.sin_addr =
				    sctp_ipv4_source_address_selection(inp,
				    stcb, (struct route *)&net->ro, net, 0);
				net->src_addr_selected = 1;

			}
			stc.laddress[0] = net->ro._s_addr.sin.sin_addr.s_addr;
			stc.laddress[1] = 0;
			stc.laddress[2] = 0;
			stc.laddress[3] = 0;
			stc.laddr_type = SCTP_IPV4_ADDRESS;
		} else if (to->sa_family == AF_INET6) {
			sin6 = (struct sockaddr_in6 *)to;
			memcpy(&stc.address, &sin6->sin6_addr,
			    sizeof(struct in6_addr));
			stc.addr_type = SCTP_IPV6_ADDRESS;
			if (net->src_addr_selected == 0) {
				/*
				 * strange case here, the INIT should have
				 * did the selection.
				 */
				net->ro._s_addr.sin6.sin6_addr =
				    sctp_ipv6_source_address_selection(inp,
				    stcb, (struct route *)&net->ro, net, 0);
				net->src_addr_selected = 1;
			}
			memcpy(&stc.laddress, &net->ro._l_addr.sin6.sin6_addr,
			    sizeof(struct in6_addr));
			stc.laddr_type = SCTP_IPV6_ADDRESS;
		}
	}
	/* Now lets put the SCTP header in place */
	initackm_out = mtod(m, struct sctp_init_msg *);
	initackm_out->sh.src_port = inp->sctp_lport;
	initackm_out->sh.dest_port = sh->src_port;
	initackm_out->sh.v_tag = init_chk->init.initiate_tag;
	/* Save it off for quick ref */
	stc.peers_vtag = init_chk->init.initiate_tag;
	initackm_out->sh.checksum = 0;	/* calculate later */
	/* who are we */
	memcpy(stc.identification, SCTP_VERSION_STRING,
	    min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
	/* now the chunk header */
	initackm_out->msg.ch.chunk_type = SCTP_INITIATION_ACK;
	initackm_out->msg.ch.chunk_flags = 0;
	/* fill in later from mbuf we build */
	initackm_out->msg.ch.chunk_length = 0;
	/* place in my tag */
	if ((asoc != NULL) &&
	    ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
		/* re-use the v-tags and init-seq here */
		initackm_out->msg.init.initiate_tag = htonl(asoc->my_vtag);
		initackm_out->msg.init.initial_tsn = htonl(asoc->init_seq_number);
	} else {
		uint32_t vtag;

		if (asoc) {
			/*
			 * Drop the TCB lock around tag selection; hold a
			 * refcount so the asoc cannot go away meanwhile.
			 */
			atomic_add_int(&asoc->refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			vtag = sctp_select_a_tag(inp);
			initackm_out->msg.init.initiate_tag = htonl(vtag);
			/* get a TSN to use too */
			initackm_out->msg.init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
			SCTP_TCB_LOCK(stcb);
			atomic_add_int(&asoc->refcnt, -1);
		} else {
			vtag = sctp_select_a_tag(inp);
			initackm_out->msg.init.initiate_tag = htonl(vtag);
			/* get a TSN to use too */
			initackm_out->msg.init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
		}
	}
	/* save away my tag to */
	stc.my_vtag = initackm_out->msg.init.initiate_tag;

	/* set up some of the credits. */
	initackm_out->msg.init.a_rwnd = htonl(max(inp->sctp_socket->so_rcv.sb_hiwat, SCTP_MINIMAL_RWND));
	/* set what I want */
	his_limit = ntohs(init_chk->init.num_inbound_streams);
	/* choose what I want */
	if (asoc != NULL) {
		if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) {
			i_want = asoc->streamoutcnt;
		} else {
			i_want = inp->sctp_ep.pre_open_stream_count;
		}
	} else {
		i_want = inp->sctp_ep.pre_open_stream_count;
	}
	if (his_limit < i_want) {
		/* I Want more :< */
		initackm_out->msg.init.num_outbound_streams = init_chk->init.num_inbound_streams;
	} else {
		/* I can have what I want :> */
		initackm_out->msg.init.num_outbound_streams = htons(i_want);
	}
	/* tell him his limit. */
	initackm_out->msg.init.num_inbound_streams =
	    htons(inp->sctp_ep.max_open_streams_intome);
	/* setup the ECN pointer */

	if (inp->sctp_ep.adaptation_layer_indicator) {
		struct sctp_adaptation_layer_indication *ali;

		ali = (struct sctp_adaptation_layer_indication *)(
		    (caddr_t)initackm_out + sizeof(*initackm_out));
		ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
		ali->ph.param_length = htons(sizeof(*ali));
		ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
		SCTP_BUF_LEN(m) += sizeof(*ali);
		ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali +
		    sizeof(*ali));
	} else {
		ecn = (struct sctp_ecn_supported_param *)(
		    (caddr_t)initackm_out + sizeof(*initackm_out));
	}

	/* ECN parameter */
	if (sctp_ecn_enable == 1) {
		ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
		ecn->ph.param_length = htons(sizeof(*ecn));
		SCTP_BUF_LEN(m) += sizeof(*ecn);

		prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
		    sizeof(*ecn));
	} else {
		prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
	}
	/* And now tell the peer we do pr-sctp */
	prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
	prsctp->ph.param_length = htons(sizeof(*prsctp));
	SCTP_BUF_LEN(m) += sizeof(*prsctp);

	/* And now tell the peer we do all the extensions */
	pr_supported = (struct sctp_supported_chunk_types_param *)
	    ((caddr_t)prsctp + sizeof(*prsctp));

	pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
	num_ext = 0;
	pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
	pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
	pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
	pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
	pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
	if (!sctp_auth_disable)
		pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
	p_len = sizeof(*pr_supported) + num_ext;
	pr_supported->ph.param_length = htons(p_len);
	bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
	SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);

	/* ECN nonce: And now tell the peer we support ECN nonce */
	if (sctp_ecn_nonce) {
		ecn_nonce = (struct sctp_ecn_nonce_supported_param *)
		    ((caddr_t)pr_supported + SCTP_SIZE32(p_len));
		ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED);
		ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce));
		SCTP_BUF_LEN(m) += sizeof(*ecn_nonce);
	}
	/* add authentication parameters */
	if (!sctp_auth_disable) {
		struct sctp_auth_random *random;
		struct sctp_auth_hmac_algo *hmacs;
		struct sctp_auth_chunk_list *chunks;
		uint16_t random_len;

		/* generate and add RANDOM parameter */
		random_len = sctp_auth_random_len;
		random = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
		random->ph.param_type = htons(SCTP_RANDOM);
		p_len = sizeof(*random) + random_len;
		random->ph.param_length = htons(p_len);
		SCTP_READ_RANDOM(random->random_data, random_len);
		/* zero out any padding required */
		bzero((caddr_t)random + p_len, SCTP_SIZE32(p_len) - p_len);
		SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);

		/* add HMAC_ALGO parameter */
		hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
		p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
		    (uint8_t *) hmacs->hmac_ids);
		if (p_len > 0) {
			p_len += sizeof(*hmacs);
			hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
			hmacs->ph.param_length = htons(p_len);
			/* zero out any padding required */
			bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
			SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
		}
		/* add CHUNKS parameter */
		chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
		p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
		    chunks->chunk_types);
		if (p_len > 0) {
			p_len += sizeof(*chunks);
			chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
			chunks->ph.param_length = htons(p_len);
			/* zero out any padding required */
			bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
			SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
		}
	}
	m_at = m;
	/* now the addresses */
	{
		struct sctp_scoping scp;

		/*
		 * To optimize this we could put the scoping stuff into a
		 * structure and remove the individual uint8's from the stc
		 * structure. Then we could just pass in the address within
		 * the stc.. but for now this is a quick hack to get the
		 * address stuff teased apart.
		 */
		scp.ipv4_addr_legal = stc.ipv4_addr_legal;
		scp.ipv6_addr_legal = stc.ipv6_addr_legal;
		scp.loopback_scope = stc.loopback_scope;
		scp.ipv4_local_scope = stc.ipv4_scope;
		scp.local_scope = stc.local_scope;
		scp.site_scope = stc.site_scope;
		m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to);
	}

	/* tack on the operational error if present */
	if (op_err) {
		struct mbuf *ol;
		int llen;

		llen = 0;
		ol = op_err;
		while (ol) {
			llen += SCTP_BUF_LEN(ol);
			ol = SCTP_BUF_NEXT(ol);
		}
		if (llen % 4) {
			/* must add a pad to the param */
			uint32_t cpthis = 0;
			int padlen;

			padlen = 4 - (llen % 4);
			m_copyback(op_err, llen, padlen, (caddr_t)&cpthis);
		}
		while (SCTP_BUF_NEXT(m_at) != NULL) {
			m_at = SCTP_BUF_NEXT(m_at);
		}
		SCTP_BUF_NEXT(m_at) = op_err;
		while (SCTP_BUF_NEXT(m_at) != NULL) {
			m_at = SCTP_BUF_NEXT(m_at);
		}
	}
	/* Get total size of init packet */
	sz_of = SCTP_SIZE32(ntohs(init_chk->ch.chunk_length));
	/* pre-calculate the size and update pkt header and chunk header */
	p_len = 0;
	for (m_tmp = m; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
		p_len += SCTP_BUF_LEN(m_tmp);
		if (SCTP_BUF_NEXT(m_tmp) == NULL) {
			/* m_tmp should now point to last one */
			break;
		}
	}
	/*
	 * Figure now the size of the cookie. We know the size of the
	 * INIT-ACK. The Cookie is going to be the size of INIT, INIT-ACK,
	 * COOKIE-STRUCTURE and SIGNATURE.
	 */

	/*
	 * take our earlier INIT calc and add in the sz we just calculated
	 * minus the size of the sctphdr (its not included in chunk size
	 */

	/* add once for the INIT-ACK */
	sz_of += (p_len - sizeof(struct sctphdr));

	/* add a second time for the INIT-ACK in the cookie */
	sz_of += (p_len - sizeof(struct sctphdr));

	/* Now add the cookie header and cookie message struct */
	sz_of += sizeof(struct sctp_state_cookie_param);
	/* ...and add the size of our signature */
	sz_of += SCTP_SIGNATURE_SIZE;
	initackm_out->msg.ch.chunk_length = htons(sz_of);

	/* Now we must build a cookie */
	m_cookie = sctp_add_cookie(inp, init_pkt, offset, m,
	    sizeof(struct sctphdr), &stc);
	if (m_cookie == NULL) {
		/* memory problem */
		sctp_m_freem(m);
		return;
	}
	/* Now append the cookie to the end and update the space/size */
	SCTP_BUF_NEXT(m_tmp) = m_cookie;
	/*
	 * NOTE(review): this second walk starts at the old tail mbuf, so
	 * its SCTP_BUF_LEN appears to be added to p_len a second time here
	 * -- confirm whether p_len is intentionally over-counted before the
	 * padding computation below.
	 */
	for (; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
		p_len += SCTP_BUF_LEN(m_tmp);
		if (SCTP_BUF_NEXT(m_tmp) == NULL) {
			/* m_tmp should now point to last one */
			m_last = m_tmp;
			break;
		}
	}

	/*
	 * We pass 0 here to NOT set IP_DF if its IPv4, we ignore the return
	 * here since the timer will drive a retransmission.
3807 */ 3808 padval = p_len % 4; 3809 if ((padval) && (m_last)) { 3810 /* see my previous comments on m_last */ 3811 int ret; 3812 3813 ret = sctp_add_pad_tombuf(m_last, (4 - padval)); 3814 if (ret) { 3815 /* Houston we have a problem, no space */ 3816 sctp_m_freem(m); 3817 return; 3818 } 3819 p_len += padval; 3820 } 3821 sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0, 3822 NULL, 0); 3823 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 3824 } 3825 3826 3827 void 3828 sctp_insert_on_wheel(struct sctp_tcb *stcb, 3829 struct sctp_association *asoc, 3830 struct sctp_stream_out *strq, int holds_lock) 3831 { 3832 struct sctp_stream_out *stre, *strn; 3833 3834 if (holds_lock == 0) 3835 SCTP_TCB_SEND_LOCK(stcb); 3836 if ((strq->next_spoke.tqe_next) || 3837 (strq->next_spoke.tqe_prev)) { 3838 /* already on wheel */ 3839 goto outof_here; 3840 } 3841 stre = TAILQ_FIRST(&asoc->out_wheel); 3842 if (stre == NULL) { 3843 /* only one on wheel */ 3844 TAILQ_INSERT_HEAD(&asoc->out_wheel, strq, next_spoke); 3845 goto outof_here; 3846 } 3847 for (; stre; stre = strn) { 3848 strn = TAILQ_NEXT(stre, next_spoke); 3849 if (stre->stream_no > strq->stream_no) { 3850 TAILQ_INSERT_BEFORE(stre, strq, next_spoke); 3851 goto outof_here; 3852 } else if (stre->stream_no == strq->stream_no) { 3853 /* huh, should not happen */ 3854 goto outof_here; 3855 } else if (strn == NULL) { 3856 /* next one is null */ 3857 TAILQ_INSERT_AFTER(&asoc->out_wheel, stre, strq, 3858 next_spoke); 3859 } 3860 } 3861 outof_here: 3862 if (holds_lock == 0) 3863 SCTP_TCB_SEND_UNLOCK(stcb); 3864 3865 3866 } 3867 3868 static void 3869 sctp_remove_from_wheel(struct sctp_tcb *stcb, 3870 struct sctp_association *asoc, 3871 struct sctp_stream_out *strq) 3872 { 3873 /* take off and then setup so we know it is not on the wheel */ 3874 SCTP_TCB_SEND_LOCK(stcb); 3875 if (TAILQ_FIRST(&strq->outqueue)) { 3876 /* more was added */ 3877 SCTP_TCB_SEND_UNLOCK(stcb); 3878 return; 3879 } 3880 
	TAILQ_REMOVE(&asoc->out_wheel, strq, next_spoke);
	/* NULL linkage marks the stream as off-wheel for later inserts */
	strq->next_spoke.tqe_next = NULL;
	strq->next_spoke.tqe_prev = NULL;
	SCTP_TCB_SEND_UNLOCK(stcb);
}

/*
 * Free enough PR-SCTP chunks from the sent and send queues to make room for
 * 'dataout' bytes of new data, honoring the priority carried in
 * srcv->sinfo_timetolive. Returns as soon as enough space has been freed.
 * Caller must hold the TCB lock (asserted below).
 */
static void
sctp_prune_prsctp(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_sndrcvinfo *srcv,
    int dataout)
{
	int freed_spc = 0;
	struct sctp_tmit_chunk *chk, *nchk;

	SCTP_TCB_LOCK_ASSERT(stcb);
	if ((asoc->peer_supports_prsctp) &&
	    (asoc->sent_queue_cnt_removeable > 0)) {
		/*
		 * NOTE(review): sctp_release_pr_sctp_chunk() is handed
		 * &asoc->sent_queue, so entries appear to be removed while
		 * TAILQ_FOREACH is walking the list -- confirm the release
		 * keeps chk's sctp_next linkage valid for the iteration.
		 */
		TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
			/*
			 * Look for chunks marked with the PR_SCTP flag AND
			 * the buffer space flag. If the one being sent is
			 * equal or greater priority then purge the old one
			 * and free some space.
			 */
			if (PR_SCTP_BUF_ENABLED(chk->flags)) {
				/*
				 * This one is PR-SCTP AND buffer space
				 * limited type
				 */
				if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
					/*
					 * Lower numbers equates to higher
					 * priority so if the one we are
					 * looking at has a larger or equal
					 * priority we want to drop the data
					 * and NOT retransmit it.
					 */
					if (chk->data) {
						/*
						 * We release the book_size
						 * if the mbuf is here
						 */
						int ret_spc;
						int cause;

						/* pick the notification matching the chunk's send state */
						if (chk->sent > SCTP_DATAGRAM_UNSENT)
							cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT;
						else
							cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT;
						ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
						    cause,
						    &asoc->sent_queue);
						freed_spc += ret_spc;
						if (freed_spc >= dataout) {
							/* freed enough room for the new data */
							return;
						}
					}	/* if chunk was present */
				}	/* if of sufficient priority */
			}	/* if chunk has enabled */
		}		/* tailqforeach */

		/* second pass: TTL-policy chunks still on the send queue */
		chk = TAILQ_FIRST(&asoc->send_queue);
		while (chk) {
			nchk = TAILQ_NEXT(chk, sctp_next);
			/* Here we must move to the sent queue and mark */
			if (PR_SCTP_TTL_ENABLED(chk->flags)) {
				if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
					if (chk->data) {
						/*
						 * We release the book_size
						 * if the mbuf is here
						 */
						int ret_spc;

						ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
						    SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT,
						    &asoc->send_queue);

						freed_spc += ret_spc;
						if (freed_spc >= dataout) {
							return;
						}
					}	/* end if chk->data */
				}	/* end if right class */
			}	/* end if chk pr-sctp */
			chk = nchk;
		}		/* end while (chk) */
	}			/* if enabled in asoc */
}

/*
 * Compute the largest DATA chunk payload usable on this association:
 * smallest path MTU (or the endpoint's configured frag point, whichever is
 * lower) minus protocol overhead and any AUTH chunk cost, rounded down to a
 * 4-byte boundary.
 */
__inline int
sctp_get_frag_point(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	int siz, ovh;

	/*
	 * For endpoints that have both v6 and v4 addresses we must reserve
	 * room for the ipv6 header, for those that are only dealing with V4
	 * we use a larger frag point.
	 */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ovh = SCTP_MED_OVERHEAD;
	} else {
		ovh = SCTP_MED_V4_OVERHEAD;
	}

	/* never exceed the smallest known path MTU */
	if (stcb->sctp_ep->sctp_frag_point > asoc->smallest_mtu)
		siz = asoc->smallest_mtu - ovh;
	else
		siz = (stcb->sctp_ep->sctp_frag_point - ovh);
	/*
	 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
	 */
	/* A data chunk MUST fit in a cluster */
	/* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
	/* } */

	/* adjust for an AUTH chunk if DATA requires auth */
	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
		siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);

	if (siz % 4) {
		/* make it an even word boundary please */
		siz -= (siz % 4);
	}
	return (siz);
}
extern unsigned int sctp_max_chunks_on_queue;

/*
 * Derive the PR-SCTP policy flags and expiry/priority timestamp (sp->ts)
 * for a pending stream-queue entry from its sinfo flags. No-op unless the
 * peer supports PR-SCTP and a policy is explicitly set.
 */
static void
sctp_set_prsctp_policy(struct sctp_tcb *stcb,
    struct sctp_stream_queue_pending *sp)
{
	sp->pr_sctp_on = 0;
	if (stcb->asoc.peer_supports_prsctp) {
		/*
		 * We assume that the user wants PR_SCTP_TTL if the user
		 * provides a positive lifetime but does not specify any
		 * PR_SCTP policy. This is a BAD assumption and causes
		 * problems at least with the U-Vancovers MPI folks. I will
		 * change this to be no policy means NO PR-SCTP.
		 */
		if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
			sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
			sp->pr_sctp_on = 1;
		} else {
			/* no policy requested - leave PR-SCTP off */
			return;
		}
		switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
		case CHUNK_FLAGS_PR_SCTP_BUF:
			/*
			 * Time to live is a priority stored in tv_sec when
			 * doing the buffer drop thing.
			 */
			sp->ts.tv_sec = sp->timetolive;
			sp->ts.tv_usec = 0;
			break;
		case CHUNK_FLAGS_PR_SCTP_TTL:
			{
				struct timeval tv;

				/* absolute expiry = now + timetolive (milliseconds) */
				SCTP_GETTIME_TIMEVAL(&sp->ts);
				tv.tv_sec = sp->timetolive / 1000;
				tv.tv_usec = (sp->timetolive * 1000) % 1000000;
				timevaladd(&sp->ts, &tv);
			}
			break;
		case CHUNK_FLAGS_PR_SCTP_RTX:
			/*
			 * Time to live is the number of retransmissions
			 * stored in tv_sec.
			 */
			sp->ts.tv_sec = sp->timetolive;
			sp->ts.tv_usec = 0;
			break;
		default:
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
				printf("Unknown PR_SCTP policy %u.\n", PR_SCTP_POLICY(sp->sinfo_flags));
			}
#endif
			break;
		}
	}
}

/*
 * Given an mbuf chain, wrap it in a stream-queue-pending entry, queue it on
 * the requested stream's outqueue, and put the stream on the output wheel.
 * On success the mbuf chain is consumed; on error it is freed and an errno
 * value is returned.
 */
static int
sctp_msg_append(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct mbuf *m,
    struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
{
	int error = 0, holds_lock;
	struct mbuf *at;
	struct sctp_stream_queue_pending *sp = NULL;
	struct sctp_stream_out *strm;

	/*
	 * Given an mbuf chain, put it into the association send queue and
	 * place it on the wheel
	 */
	/*
	 * NOTE(review): holds_lock is assigned here but never consulted in
	 * this function (the send lock below is taken unconditionally) --
	 * confirm whether hold_stcb_lock is meant to gate that lock.
	 */
	holds_lock = hold_stcb_lock;
	if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
		/* Invalid stream number */
		error = EINVAL;
		goto out_now;
	}
	if ((stcb->asoc.stream_locked) &&
	    (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
		/* another stream holds the explicit-EOR lock */
		error = EAGAIN;
		goto out_now;
	}
	strm = &stcb->asoc.strmout[srcv->sinfo_stream];
	/* Now can we send this? */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
		/* got data while shutting down */
		error = ECONNRESET;
		goto out_now;
	}
	sp = (struct sctp_stream_queue_pending *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_strmoq);
	if (sp == NULL) {
		error = ENOMEM;
		goto out_now;
	}
	SCTP_INCR_STRMOQ_COUNT();
	/* copy the send info the caller supplied */
	sp->sinfo_flags = srcv->sinfo_flags;
	sp->timetolive = srcv->sinfo_timetolive;
	sp->ppid = srcv->sinfo_ppid;
	sp->context = srcv->sinfo_context;
	sp->strseq = 0;
	if (sp->sinfo_flags & SCTP_ADDR_OVER) {
		/* caller pinned a specific destination */
		sp->net = net;
		sp->addr_over = 1;
	} else {
		sp->net = stcb->asoc.primary_destination;
		sp->addr_over = 0;
	}
	atomic_add_int(&sp->net->ref_count, 1);
	SCTP_GETTIME_TIMEVAL(&sp->ts);
	sp->stream = srcv->sinfo_stream;
	sp->msg_is_complete = 1;
	sp->some_taken = 0;
	sp->data = m;
	sp->tail_mbuf = NULL;
	sp->length = 0;
	at = m;
	sctp_set_prsctp_policy(stcb, sp);
	/*
	 * We could in theory (for sendall) pass the length in, but we would
	 * still have to hunt through the chain since we need to setup the
	 * tail_mbuf
	 */
	while (at) {
		if (SCTP_BUF_NEXT(at) == NULL)
			sp->tail_mbuf = at;
		sp->length += SCTP_BUF_LEN(at);
		at = SCTP_BUF_NEXT(at);
	}
	SCTP_TCB_SEND_LOCK(stcb);
	sctp_snd_sb_alloc(stcb, sp->length);
	stcb->asoc.stream_queue_cnt++;
	TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
	if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
		/* ordered delivery: stamp and advance the stream sequence */
		sp->strseq = strm->next_sequence_sent;
		strm->next_sequence_sent++;
	}
	if ((strm->next_spoke.tqe_next == NULL) &&
	    (strm->next_spoke.tqe_prev == NULL)) {
		/* Not on wheel, insert */
		sctp_insert_on_wheel(stcb,
&stcb->asoc, strm, 1); 4155 } 4156 m = NULL; 4157 SCTP_TCB_SEND_UNLOCK(stcb); 4158 out_now: 4159 if (m) { 4160 sctp_m_freem(m); 4161 } 4162 return (error); 4163 } 4164 4165 4166 static struct mbuf * 4167 sctp_copy_mbufchain(struct mbuf *clonechain, 4168 struct mbuf *outchain, 4169 struct mbuf **endofchain, 4170 int can_take_mbuf, 4171 int sizeofcpy, 4172 uint8_t copy_by_ref) 4173 { 4174 struct mbuf *m; 4175 struct mbuf *appendchain; 4176 caddr_t cp; 4177 int len; 4178 4179 if (endofchain == NULL) { 4180 /* error */ 4181 error_out: 4182 if (outchain) 4183 sctp_m_freem(outchain); 4184 return (NULL); 4185 } 4186 if (can_take_mbuf) { 4187 appendchain = clonechain; 4188 } else { 4189 if (!copy_by_ref && (sizeofcpy <= ((((sctp_mbuf_threshold_count - 1) * MLEN) + MHLEN)))) { 4190 /* Its not in a cluster */ 4191 if (*endofchain == NULL) { 4192 /* lets get a mbuf cluster */ 4193 if (outchain == NULL) { 4194 /* This is the general case */ 4195 new_mbuf: 4196 outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER); 4197 if (outchain == NULL) { 4198 goto error_out; 4199 } 4200 SCTP_BUF_LEN(outchain) = 0; 4201 *endofchain = outchain; 4202 /* get the prepend space */ 4203 SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4)); 4204 } else { 4205 /* 4206 * We really should not get a NULL 4207 * in endofchain 4208 */ 4209 /* find end */ 4210 m = outchain; 4211 while (m) { 4212 if (SCTP_BUF_NEXT(m) == NULL) { 4213 *endofchain = m; 4214 break; 4215 } 4216 m = SCTP_BUF_NEXT(m); 4217 } 4218 /* sanity */ 4219 if (*endofchain == NULL) { 4220 /* 4221 * huh, TSNH XXX maybe we 4222 * should panic 4223 */ 4224 sctp_m_freem(outchain); 4225 goto new_mbuf; 4226 } 4227 } 4228 /* get the new end of length */ 4229 len = M_TRAILINGSPACE(*endofchain); 4230 } else { 4231 /* how much is left at the end? 
*/ 4232 len = M_TRAILINGSPACE(*endofchain); 4233 } 4234 /* Find the end of the data, for appending */ 4235 cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain))); 4236 4237 /* Now lets copy it out */ 4238 if (len >= sizeofcpy) { 4239 /* It all fits, copy it in */ 4240 m_copydata(clonechain, 0, sizeofcpy, cp); 4241 SCTP_BUF_LEN((*endofchain)) += sizeofcpy; 4242 } else { 4243 /* fill up the end of the chain */ 4244 if (len > 0) { 4245 m_copydata(clonechain, 0, len, cp); 4246 SCTP_BUF_LEN((*endofchain)) += len; 4247 /* now we need another one */ 4248 sizeofcpy -= len; 4249 } 4250 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER); 4251 if (m == NULL) { 4252 /* We failed */ 4253 goto error_out; 4254 } 4255 SCTP_BUF_NEXT((*endofchain)) = m; 4256 *endofchain = m; 4257 cp = mtod((*endofchain), caddr_t); 4258 m_copydata(clonechain, len, sizeofcpy, cp); 4259 SCTP_BUF_LEN((*endofchain)) += sizeofcpy; 4260 } 4261 return (outchain); 4262 } else { 4263 /* copy the old fashion way */ 4264 appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_DONTWAIT); 4265 } 4266 } 4267 if (appendchain == NULL) { 4268 /* error */ 4269 if (outchain) 4270 sctp_m_freem(outchain); 4271 return (NULL); 4272 } 4273 if (outchain) { 4274 /* tack on to the end */ 4275 if (*endofchain != NULL) { 4276 SCTP_BUF_NEXT(((*endofchain))) = appendchain; 4277 } else { 4278 m = outchain; 4279 while (m) { 4280 if (SCTP_BUF_NEXT(m) == NULL) { 4281 SCTP_BUF_NEXT(m) = appendchain; 4282 break; 4283 } 4284 m = SCTP_BUF_NEXT(m); 4285 } 4286 } 4287 /* 4288 * save off the end and update the end-chain postion 4289 */ 4290 m = appendchain; 4291 while (m) { 4292 if (SCTP_BUF_NEXT(m) == NULL) { 4293 *endofchain = m; 4294 break; 4295 } 4296 m = SCTP_BUF_NEXT(m); 4297 } 4298 return (outchain); 4299 } else { 4300 /* save off the end and update the end-chain postion */ 4301 m = appendchain; 4302 while (m) { 4303 if (SCTP_BUF_NEXT(m) == NULL) { 4304 *endofchain = m; 4305 break; 4306 } 4307 m = 
SCTP_BUF_NEXT(m); 4308 } 4309 return (appendchain); 4310 } 4311 } 4312 4313 int 4314 sctp_med_chunk_output(struct sctp_inpcb *inp, 4315 struct sctp_tcb *stcb, 4316 struct sctp_association *asoc, 4317 int *num_out, 4318 int *reason_code, 4319 int control_only, int *cwnd_full, int from_where, 4320 struct timeval *now, int *now_filled, int frag_point); 4321 4322 static void 4323 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr, 4324 uint32_t val) 4325 { 4326 struct sctp_copy_all *ca; 4327 struct mbuf *m; 4328 int ret = 0; 4329 int added_control = 0; 4330 int un_sent, do_chunk_output = 1; 4331 struct sctp_association *asoc; 4332 4333 ca = (struct sctp_copy_all *)ptr; 4334 if (ca->m == NULL) { 4335 return; 4336 } 4337 if (ca->inp != inp) { 4338 /* TSNH */ 4339 return; 4340 } 4341 if ((ca->m) && ca->sndlen) { 4342 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_DONTWAIT); 4343 if (m == NULL) { 4344 /* can't copy so we are done */ 4345 ca->cnt_failed++; 4346 return; 4347 } 4348 } else { 4349 m = NULL; 4350 } 4351 SCTP_TCB_LOCK_ASSERT(stcb); 4352 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) { 4353 /* Abort this assoc with m as the user defined reason */ 4354 if (m) { 4355 struct sctp_paramhdr *ph; 4356 4357 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_DONTWAIT); 4358 if (m) { 4359 ph = mtod(m, struct sctp_paramhdr *); 4360 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 4361 ph->param_length = htons(ca->sndlen); 4362 } 4363 /* 4364 * We add one here to keep the assoc from 4365 * dis-appearing on us. 4366 */ 4367 atomic_add_int(&stcb->asoc.refcnt, 1); 4368 sctp_abort_an_association(inp, stcb, 4369 SCTP_RESPONSE_TO_USER_REQ, 4370 m); 4371 /* 4372 * sctp_abort_an_association calls sctp_free_asoc() 4373 * free association will NOT free it since we 4374 * incremented the refcnt .. 
we do this to prevent 4375 * it being freed and things getting tricky since we 4376 * could end up (from free_asoc) calling inpcb_free 4377 * which would get a recursive lock call to the 4378 * iterator lock.. But as a consequence of that the 4379 * stcb will return to us un-locked.. since 4380 * free_asoc returns with either no TCB or the TCB 4381 * unlocked, we must relock.. to unlock in the 4382 * iterator timer :-0 4383 */ 4384 SCTP_TCB_LOCK(stcb); 4385 atomic_add_int(&stcb->asoc.refcnt, -1); 4386 goto no_chunk_output; 4387 } 4388 } else { 4389 if (m) { 4390 ret = sctp_msg_append(stcb, stcb->asoc.primary_destination, m, 4391 &ca->sndrcv, 1); 4392 } 4393 asoc = &stcb->asoc; 4394 if (ca->sndrcv.sinfo_flags & SCTP_EOF) { 4395 /* shutdown this assoc */ 4396 if (TAILQ_EMPTY(&asoc->send_queue) && 4397 TAILQ_EMPTY(&asoc->sent_queue) && 4398 (asoc->stream_queue_cnt == 0)) { 4399 if (asoc->locked_on_sending) { 4400 goto abort_anyway; 4401 } 4402 /* 4403 * there is nothing queued to send, so I'm 4404 * done... 4405 */ 4406 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 4407 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 4408 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 4409 /* 4410 * only send SHUTDOWN the first time 4411 * through 4412 */ 4413 sctp_send_shutdown(stcb, stcb->asoc.primary_destination); 4414 asoc->state = SCTP_STATE_SHUTDOWN_SENT; 4415 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4416 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, 4417 asoc->primary_destination); 4418 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 4419 asoc->primary_destination); 4420 added_control = 1; 4421 do_chunk_output = 0; 4422 } 4423 } else { 4424 /* 4425 * we still got (or just got) data to send, 4426 * so set SHUTDOWN_PENDING 4427 */ 4428 /* 4429 * XXX sockets draft says that SCTP_EOF 4430 * should be sent with no data. 
currently, 4431 * we will allow user data to be sent first 4432 * and move to SHUTDOWN-PENDING 4433 */ 4434 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 4435 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 4436 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 4437 if (asoc->locked_on_sending) { 4438 /* 4439 * Locked to send out the 4440 * data 4441 */ 4442 struct sctp_stream_queue_pending *sp; 4443 4444 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); 4445 if (sp) { 4446 if ((sp->length == 0) && (sp->msg_is_complete == 0)) 4447 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4448 } 4449 } 4450 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; 4451 if (TAILQ_EMPTY(&asoc->send_queue) && 4452 TAILQ_EMPTY(&asoc->sent_queue) && 4453 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 4454 abort_anyway: 4455 atomic_add_int(&stcb->asoc.refcnt, 1); 4456 sctp_abort_an_association(stcb->sctp_ep, stcb, 4457 SCTP_RESPONSE_TO_USER_REQ, 4458 NULL); 4459 atomic_add_int(&stcb->asoc.refcnt, -1); 4460 goto no_chunk_output; 4461 } 4462 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 4463 asoc->primary_destination); 4464 } 4465 } 4466 4467 } 4468 } 4469 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 4470 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * sizeof(struct sctp_data_chunk))); 4471 4472 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && 4473 (stcb->asoc.total_flight > 0) && 4474 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) 4475 ) { 4476 do_chunk_output = 0; 4477 } 4478 if (do_chunk_output) 4479 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND); 4480 else if (added_control) { 4481 int num_out = 0, reason = 0, cwnd_full = 0, now_filled = 0; 4482 struct timeval now; 4483 int frag_point; 4484 4485 frag_point = sctp_get_frag_point(stcb, &stcb->asoc); 4486 sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out, 4487 &reason, 1, &cwnd_full, 1, 
&now, &now_filled, frag_point); 4488 } 4489 no_chunk_output: 4490 if (ret) { 4491 ca->cnt_failed++; 4492 } else { 4493 ca->cnt_sent++; 4494 } 4495 } 4496 4497 static void 4498 sctp_sendall_completes(void *ptr, uint32_t val) 4499 { 4500 struct sctp_copy_all *ca; 4501 4502 ca = (struct sctp_copy_all *)ptr; 4503 /* 4504 * Do a notify here? Kacheong suggests that the notify be done at 4505 * the send time.. so you would push up a notification if any send 4506 * failed. Don't know if this is feasable since the only failures we 4507 * have is "memory" related and if you cannot get an mbuf to send 4508 * the data you surely can't get an mbuf to send up to notify the 4509 * user you can't send the data :-> 4510 */ 4511 4512 /* now free everything */ 4513 sctp_m_freem(ca->m); 4514 SCTP_FREE(ca); 4515 } 4516 4517 4518 #define MC_ALIGN(m, len) do { \ 4519 SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1)); \ 4520 } while (0) 4521 4522 4523 4524 static struct mbuf * 4525 sctp_copy_out_all(struct uio *uio, int len) 4526 { 4527 struct mbuf *ret, *at; 4528 int left, willcpy, cancpy, error; 4529 4530 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAIT, 1, MT_DATA); 4531 if (ret == NULL) { 4532 /* TSNH */ 4533 return (NULL); 4534 } 4535 left = len; 4536 SCTP_BUF_LEN(ret) = 0; 4537 /* save space for the data chunk header */ 4538 cancpy = M_TRAILINGSPACE(ret); 4539 willcpy = min(cancpy, left); 4540 at = ret; 4541 while (left > 0) { 4542 /* Align data to the end */ 4543 error = uiomove(mtod(at, caddr_t), willcpy, uio); 4544 if (error) { 4545 err_out_now: 4546 sctp_m_freem(at); 4547 return (NULL); 4548 } 4549 SCTP_BUF_LEN(at) = willcpy; 4550 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0; 4551 left -= willcpy; 4552 if (left > 0) { 4553 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 1, MT_DATA); 4554 if (SCTP_BUF_NEXT(at) == NULL) { 4555 goto err_out_now; 4556 } 4557 at = SCTP_BUF_NEXT(at); 4558 SCTP_BUF_LEN(at) = 0; 4559 cancpy = M_TRAILINGSPACE(at); 4560 willcpy = 
min(cancpy, left); 4561 } 4562 } 4563 return (ret); 4564 } 4565 4566 static int 4567 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m, 4568 struct sctp_sndrcvinfo *srcv) 4569 { 4570 int ret; 4571 struct sctp_copy_all *ca; 4572 4573 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all), 4574 "CopyAll"); 4575 if (ca == NULL) { 4576 sctp_m_freem(m); 4577 return (ENOMEM); 4578 } 4579 memset(ca, 0, sizeof(struct sctp_copy_all)); 4580 4581 ca->inp = inp; 4582 ca->sndrcv = *srcv; 4583 /* 4584 * take off the sendall flag, it would be bad if we failed to do 4585 * this :-0 4586 */ 4587 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL; 4588 /* get length and mbuf chain */ 4589 if (uio) { 4590 ca->sndlen = uio->uio_resid; 4591 ca->m = sctp_copy_out_all(uio, ca->sndlen); 4592 if (ca->m == NULL) { 4593 SCTP_FREE(ca); 4594 return (ENOMEM); 4595 } 4596 } else { 4597 /* Gather the length of the send */ 4598 struct mbuf *mat; 4599 4600 mat = m; 4601 ca->sndlen = 0; 4602 while (m) { 4603 ca->sndlen += SCTP_BUF_LEN(m); 4604 m = SCTP_BUF_NEXT(m); 4605 } 4606 ca->m = m; 4607 } 4608 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, 4609 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES, SCTP_ASOC_ANY_STATE, 4610 (void *)ca, 0, 4611 sctp_sendall_completes, inp, 1); 4612 if (ret) { 4613 #ifdef SCTP_DEBUG 4614 printf("Failed to initiate iterator for sendall\n"); 4615 #endif 4616 SCTP_FREE(ca); 4617 return (EFAULT); 4618 } 4619 return (0); 4620 } 4621 4622 4623 void 4624 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc) 4625 { 4626 struct sctp_tmit_chunk *chk, *nchk; 4627 4628 chk = TAILQ_FIRST(&asoc->control_send_queue); 4629 while (chk) { 4630 nchk = TAILQ_NEXT(chk, sctp_next); 4631 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 4632 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); 4633 if (chk->data) { 4634 sctp_m_freem(chk->data); 4635 chk->data = NULL; 4636 } 4637 asoc->ctrl_queue_cnt--; 4638 if (chk->whoTo) 4639 
sctp_free_remote_addr(chk->whoTo); 4640 sctp_free_a_chunk(stcb, chk); 4641 } 4642 chk = nchk; 4643 } 4644 } 4645 4646 void 4647 sctp_toss_old_asconf(struct sctp_tcb *stcb) 4648 { 4649 struct sctp_association *asoc; 4650 struct sctp_tmit_chunk *chk, *chk_tmp; 4651 4652 asoc = &stcb->asoc; 4653 for (chk = TAILQ_FIRST(&asoc->control_send_queue); chk != NULL; 4654 chk = chk_tmp) { 4655 /* get next chk */ 4656 chk_tmp = TAILQ_NEXT(chk, sctp_next); 4657 /* find SCTP_ASCONF chunk in queue (only one ever in queue) */ 4658 if (chk->rec.chunk_id.id == SCTP_ASCONF) { 4659 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); 4660 if (chk->data) { 4661 sctp_m_freem(chk->data); 4662 chk->data = NULL; 4663 } 4664 asoc->ctrl_queue_cnt--; 4665 if (chk->whoTo) 4666 sctp_free_remote_addr(chk->whoTo); 4667 sctp_free_a_chunk(stcb, chk); 4668 } 4669 } 4670 } 4671 4672 4673 static __inline void 4674 sctp_clean_up_datalist(struct sctp_tcb *stcb, 4675 4676 struct sctp_association *asoc, 4677 struct sctp_tmit_chunk **data_list, 4678 int bundle_at, 4679 struct sctp_nets *net) 4680 { 4681 int i; 4682 struct sctp_tmit_chunk *tp1; 4683 4684 for (i = 0; i < bundle_at; i++) { 4685 /* off of the send queue */ 4686 if (i) { 4687 /* 4688 * Any chunk NOT 0 you zap the time chunk 0 gets 4689 * zapped or set based on if a RTO measurment is 4690 * needed. 
4691 */ 4692 data_list[i]->do_rtt = 0; 4693 } 4694 /* record time */ 4695 data_list[i]->sent_rcv_time = net->last_sent_time; 4696 data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq; 4697 TAILQ_REMOVE(&asoc->send_queue, 4698 data_list[i], 4699 sctp_next); 4700 /* on to the sent queue */ 4701 tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead); 4702 if ((tp1) && (compare_with_wrap(tp1->rec.data.TSN_seq, 4703 data_list[i]->rec.data.TSN_seq, MAX_TSN))) { 4704 struct sctp_tmit_chunk *tpp; 4705 4706 /* need to move back */ 4707 back_up_more: 4708 tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next); 4709 if (tpp == NULL) { 4710 TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next); 4711 goto all_done; 4712 } 4713 tp1 = tpp; 4714 if (compare_with_wrap(tp1->rec.data.TSN_seq, 4715 data_list[i]->rec.data.TSN_seq, MAX_TSN)) { 4716 goto back_up_more; 4717 } 4718 TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next); 4719 } else { 4720 TAILQ_INSERT_TAIL(&asoc->sent_queue, 4721 data_list[i], 4722 sctp_next); 4723 } 4724 all_done: 4725 /* This does not lower until the cum-ack passes it */ 4726 asoc->sent_queue_cnt++; 4727 asoc->send_queue_cnt--; 4728 if ((asoc->peers_rwnd <= 0) && 4729 (asoc->total_flight == 0) && 4730 (bundle_at == 1)) { 4731 /* Mark the chunk as being a window probe */ 4732 SCTP_STAT_INCR(sctps_windowprobed); 4733 data_list[i]->rec.data.state_flags |= SCTP_WINDOW_PROBE; 4734 } else { 4735 data_list[i]->rec.data.state_flags &= ~SCTP_WINDOW_PROBE; 4736 } 4737 #ifdef SCTP_AUDITING_ENABLED 4738 sctp_audit_log(0xC2, 3); 4739 #endif 4740 data_list[i]->sent = SCTP_DATAGRAM_SENT; 4741 data_list[i]->snd_count = 1; 4742 data_list[i]->rec.data.chunk_was_revoked = 0; 4743 #ifdef SCTP_FLIGHT_LOGGING 4744 sctp_misc_ints(SCTP_FLIGHT_LOG_UP, 4745 data_list[i]->whoTo->flight_size, 4746 data_list[i]->book_size, 4747 (uintptr_t) stcb, 4748 data_list[i]->rec.data.TSN_seq); 4749 #endif 4750 net->flight_size += data_list[i]->book_size; 4751 
asoc->total_flight += data_list[i]->book_size; 4752 asoc->total_flight_count++; 4753 #ifdef SCTP_LOG_RWND 4754 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND, 4755 asoc->peers_rwnd, data_list[i]->send_size, sctp_peer_chunk_oh); 4756 #endif 4757 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd, 4758 (uint32_t) (data_list[i]->send_size + sctp_peer_chunk_oh)); 4759 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4760 /* SWS sender side engages */ 4761 asoc->peers_rwnd = 0; 4762 } 4763 } 4764 } 4765 4766 static __inline void 4767 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc) 4768 { 4769 struct sctp_tmit_chunk *chk, *nchk; 4770 4771 for (chk = TAILQ_FIRST(&asoc->control_send_queue); 4772 chk; chk = nchk) { 4773 nchk = TAILQ_NEXT(chk, sctp_next); 4774 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) || 4775 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) || 4776 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) || 4777 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) || 4778 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) || 4779 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) || 4780 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) || 4781 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) || 4782 (chk->rec.chunk_id.id == SCTP_ECN_CWR) || 4783 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) { 4784 /* Stray chunks must be cleaned up */ 4785 clean_up_anyway: 4786 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); 4787 if (chk->data) { 4788 sctp_m_freem(chk->data); 4789 chk->data = NULL; 4790 } 4791 asoc->ctrl_queue_cnt--; 4792 sctp_free_remote_addr(chk->whoTo); 4793 sctp_free_a_chunk(stcb, chk); 4794 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) { 4795 /* special handling, we must look into the param */ 4796 if (chk != asoc->str_reset) { 4797 goto clean_up_anyway; 4798 } 4799 } 4800 } 4801 } 4802 4803 extern int sctp_min_split_point; 4804 4805 static __inline int 4806 sctp_can_we_split_this(struct sctp_tcb *stcb, 4807 struct sctp_stream_queue_pending *sp, 
4808 int goal_mtu, int frag_point, int eeor_on) 4809 { 4810 /* 4811 * Make a decision on if I should split a msg into multiple parts. 4812 */ 4813 if (goal_mtu < sctp_min_split_point) { 4814 /* you don't want enough */ 4815 return (0); 4816 } 4817 if (sp->msg_is_complete == 0) { 4818 if (eeor_on) { 4819 /* 4820 * If we are doing EEOR we need to always send it if 4821 * its the entire thing. 4822 */ 4823 if (goal_mtu >= sp->length) 4824 return (sp->length); 4825 } else { 4826 if (goal_mtu >= sp->length) { 4827 /* 4828 * If we cannot fill the amount needed there 4829 * is no sense of splitting the chunk. 4830 */ 4831 return (0); 4832 } 4833 } 4834 /* 4835 * If we reach here sp->length is larger than the goal_mtu. 4836 * Do we wish to split it for the sake of packet putting 4837 * together? 4838 */ 4839 if (goal_mtu >= min(sctp_min_split_point, stcb->asoc.smallest_mtu)) { 4840 /* Its ok to split it */ 4841 return (min(goal_mtu, frag_point)); 4842 } 4843 } else { 4844 /* We can always split a complete message to make it fit */ 4845 if (goal_mtu >= sp->length) 4846 /* Take it all */ 4847 return (sp->length); 4848 4849 return (min(goal_mtu, frag_point)); 4850 } 4851 /* Nope, can't split */ 4852 return (0); 4853 4854 } 4855 4856 static int 4857 sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net, 4858 struct sctp_stream_out *strq, 4859 int goal_mtu, 4860 int frag_point, 4861 int *locked, 4862 int *giveup, 4863 int eeor_mode) 4864 { 4865 /* Move from the stream to the send_queue keeping track of the total */ 4866 struct sctp_association *asoc; 4867 struct sctp_stream_queue_pending *sp; 4868 struct sctp_tmit_chunk *chk; 4869 struct sctp_data_chunk *dchkh; 4870 int to_move; 4871 uint8_t rcv_flags = 0; 4872 uint8_t some_taken; 4873 uint8_t took_all = 0; 4874 4875 SCTP_TCB_LOCK_ASSERT(stcb); 4876 asoc = &stcb->asoc; 4877 sp = TAILQ_FIRST(&strq->outqueue); 4878 if (sp == NULL) { 4879 *locked = 0; 4880 SCTP_TCB_SEND_LOCK(stcb); 4881 if (strq->last_msg_incomplete) 
{ 4882 printf("Huh? Stream:%d lm_in_c=%d but queue is NULL\n", 4883 strq->stream_no, strq->last_msg_incomplete); 4884 strq->last_msg_incomplete = 0; 4885 } 4886 SCTP_TCB_SEND_UNLOCK(stcb); 4887 return (0); 4888 } 4889 SCTP_TCB_SEND_LOCK(stcb); 4890 if ((sp->length == 0) && (sp->msg_is_complete == 0)) { 4891 /* Must wait for more data, must be last msg */ 4892 *locked = 1; 4893 *giveup = 1; 4894 SCTP_TCB_SEND_UNLOCK(stcb); 4895 return (0); 4896 } else if (sp->length == 0) { 4897 /* This should not happen */ 4898 panic("sp length is 0?"); 4899 } 4900 some_taken = sp->some_taken; 4901 if ((goal_mtu >= sp->length) && (sp->msg_is_complete)) { 4902 /* It all fits and its a complete msg, no brainer */ 4903 to_move = min(sp->length, frag_point); 4904 if (to_move == sp->length) { 4905 /* Getting it all */ 4906 if (sp->some_taken) { 4907 rcv_flags |= SCTP_DATA_LAST_FRAG; 4908 } else { 4909 rcv_flags |= SCTP_DATA_NOT_FRAG; 4910 } 4911 } else { 4912 /* Not getting it all, frag point overrides */ 4913 if (sp->some_taken == 0) { 4914 rcv_flags |= SCTP_DATA_FIRST_FRAG; 4915 } 4916 sp->some_taken = 1; 4917 } 4918 } else { 4919 to_move = sctp_can_we_split_this(stcb, sp, goal_mtu, 4920 frag_point, eeor_mode); 4921 if (to_move) { 4922 if (to_move >= sp->length) { 4923 to_move = sp->length; 4924 } 4925 if (sp->some_taken == 0) { 4926 rcv_flags |= SCTP_DATA_FIRST_FRAG; 4927 } 4928 sp->some_taken = 1; 4929 } else { 4930 if (sp->some_taken) { 4931 *locked = 1; 4932 } 4933 *giveup = 1; 4934 SCTP_TCB_SEND_UNLOCK(stcb); 4935 return (0); 4936 } 4937 } 4938 SCTP_TCB_SEND_UNLOCK(stcb); 4939 /* If we reach here, we can copy out a chunk */ 4940 sctp_alloc_a_chunk(stcb, chk); 4941 if (chk == NULL) { 4942 /* No chunk memory */ 4943 out_gu: 4944 *giveup = 1; 4945 return (0); 4946 } 4947 /* 4948 * Setup for unordered if needed by looking at the user sent info 4949 * flags. 
4950 */ 4951 if (sp->sinfo_flags & SCTP_UNORDERED) { 4952 rcv_flags |= SCTP_DATA_UNORDERED; 4953 } 4954 /* clear out the chunk before setting up */ 4955 memset(chk, sizeof(*chk), 0); 4956 chk->rec.data.rcv_flags = rcv_flags; 4957 SCTP_TCB_SEND_LOCK(stcb); 4958 if (SCTP_BUF_IS_EXTENDED(sp->data)) { 4959 chk->copy_by_ref = 1; 4960 } else { 4961 chk->copy_by_ref = 0; 4962 } 4963 if (to_move >= sp->length) { 4964 /* we can steal the whole thing */ 4965 chk->data = sp->data; 4966 chk->last_mbuf = sp->tail_mbuf; 4967 /* register the stealing */ 4968 sp->data = sp->tail_mbuf = NULL; 4969 took_all = 1; 4970 } else { 4971 struct mbuf *m; 4972 4973 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_DONTWAIT); 4974 chk->last_mbuf = NULL; 4975 if (chk->data == NULL) { 4976 sp->some_taken = some_taken; 4977 sctp_free_a_chunk(stcb, chk); 4978 SCTP_TCB_SEND_UNLOCK(stcb); 4979 goto out_gu; 4980 } 4981 /* Pull off the data */ 4982 m_adj(sp->data, to_move); 4983 /* Now lets work our way down and compact it */ 4984 m = sp->data; 4985 while (m && (SCTP_BUF_LEN(m) == 0)) { 4986 sp->data = SCTP_BUF_NEXT(m); 4987 SCTP_BUF_NEXT(m) = NULL; 4988 if (sp->tail_mbuf == m) { 4989 /* freeing tail */ 4990 sp->tail_mbuf = sp->data; 4991 } 4992 sctp_m_free(m); 4993 m = sp->data; 4994 } 4995 } 4996 if (to_move > sp->length) { 4997 panic("Huh, how can to_move be larger?"); 4998 } else { 4999 sp->length -= to_move; 5000 } 5001 5002 if (M_LEADINGSPACE(chk->data) < sizeof(struct sctp_data_chunk)) { 5003 /* Not enough room for a chunk header, get some */ 5004 struct mbuf *m; 5005 5006 m = sctp_get_mbuf_for_msg(1, 0, M_DONTWAIT, 0, MT_DATA); 5007 if (m == NULL) { 5008 /* 5009 * we're in trouble here. _PREPEND below will free 5010 * all the data if there is no leading space, so we 5011 * must put the data back and restore. 
5012 */ 5013 if (took_all) { 5014 /* unsteal the data */ 5015 sp->data = chk->data; 5016 sp->tail_mbuf = chk->last_mbuf; 5017 } else { 5018 struct mbuf *m; 5019 5020 /* reassemble the data */ 5021 m = sp->data; 5022 sp->data = chk->data; 5023 SCTP_BUF_NEXT(sp->data) = m; 5024 } 5025 sp->some_taken = some_taken; 5026 sp->length += to_move; 5027 chk->data = NULL; 5028 sctp_free_a_chunk(stcb, chk); 5029 SCTP_TCB_SEND_UNLOCK(stcb); 5030 goto out_gu; 5031 } else { 5032 SCTP_BUF_LEN(m) = 0; 5033 SCTP_BUF_NEXT(m) = chk->data; 5034 chk->data = m; 5035 M_ALIGN(chk->data, 4); 5036 } 5037 } 5038 SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_DONTWAIT); 5039 if (chk->data == NULL) { 5040 /* HELP */ 5041 sctp_free_a_chunk(stcb, chk); 5042 SCTP_TCB_SEND_UNLOCK(stcb); 5043 goto out_gu; 5044 } 5045 sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk)); 5046 chk->book_size = chk->send_size = (to_move + 5047 sizeof(struct sctp_data_chunk)); 5048 chk->book_size_scale = 0; 5049 chk->sent = SCTP_DATAGRAM_UNSENT; 5050 5051 /* 5052 * get last_mbuf and counts of mb useage This is ugly but hopefully 5053 * its only one mbuf. 
5054 */ 5055 if (chk->last_mbuf == NULL) { 5056 chk->last_mbuf = chk->data; 5057 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) { 5058 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf); 5059 } 5060 } 5061 chk->flags = 0; 5062 chk->asoc = &stcb->asoc; 5063 chk->pad_inplace = 0; 5064 chk->no_fr_allowed = 0; 5065 chk->rec.data.stream_seq = sp->strseq; 5066 chk->rec.data.stream_number = sp->stream; 5067 chk->rec.data.payloadtype = sp->ppid; 5068 chk->rec.data.context = sp->context; 5069 chk->rec.data.doing_fast_retransmit = 0; 5070 chk->rec.data.ect_nonce = 0; /* ECN Nonce */ 5071 5072 chk->rec.data.timetodrop = sp->ts; 5073 chk->flags = sp->act_flags; 5074 chk->addr_over = sp->addr_over; 5075 5076 chk->whoTo = net; 5077 atomic_add_int(&chk->whoTo->ref_count, 1); 5078 5079 chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1); 5080 #ifdef SCTP_LOG_SENDING_STR 5081 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND, 5082 (uintptr_t) stcb, (uintptr_t) sp, 5083 (uint32_t) ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq), 5084 chk->rec.data.TSN_seq); 5085 #endif 5086 5087 dchkh = mtod(chk->data, struct sctp_data_chunk *); 5088 /* 5089 * Put the rest of the things in place now. Size was done earlier in 5090 * previous loop prior to padding. 5091 */ 5092 dchkh->ch.chunk_type = SCTP_DATA; 5093 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags; 5094 dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq); 5095 dchkh->dp.stream_id = htons(strq->stream_no); 5096 dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq); 5097 dchkh->dp.protocol_id = chk->rec.data.payloadtype; 5098 dchkh->ch.chunk_length = htons(chk->send_size); 5099 /* Now advance the chk->send_size by the actual pad needed. 
*/ 5100 if (chk->send_size < SCTP_SIZE32(chk->book_size)) { 5101 /* need a pad */ 5102 struct mbuf *lm; 5103 int pads; 5104 5105 pads = SCTP_SIZE32(chk->book_size) - chk->send_size; 5106 if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) { 5107 chk->pad_inplace = 1; 5108 } 5109 if ((lm = SCTP_BUF_NEXT(chk->last_mbuf)) != NULL) { 5110 /* pad added an mbuf */ 5111 chk->last_mbuf = lm; 5112 } 5113 chk->send_size += pads; 5114 } 5115 /* We only re-set the policy if it is on */ 5116 if (sp->pr_sctp_on) { 5117 sctp_set_prsctp_policy(stcb, sp); 5118 } 5119 if (sp->msg_is_complete && (sp->length == 0)) { 5120 /* All done pull and kill the message */ 5121 asoc->stream_queue_cnt--; 5122 TAILQ_REMOVE(&strq->outqueue, sp, next); 5123 sctp_free_remote_addr(sp->net); 5124 if (sp->data) { 5125 sctp_m_freem(sp->data); 5126 sp->data = NULL; 5127 } 5128 sctp_free_a_strmoq(stcb, sp); 5129 5130 /* we can't be locked to it */ 5131 *locked = 0; 5132 stcb->asoc.locked_on_sending = NULL; 5133 } else { 5134 /* more to go, we are locked */ 5135 *locked = 1; 5136 } 5137 asoc->chunks_on_out_queue++; 5138 if (sp->pr_sctp_on) { 5139 asoc->pr_sctp_cnt++; 5140 chk->pr_sctp_on = 1; 5141 } else { 5142 chk->pr_sctp_on = 0; 5143 } 5144 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next); 5145 asoc->send_queue_cnt++; 5146 SCTP_TCB_SEND_UNLOCK(stcb); 5147 return (to_move); 5148 } 5149 5150 5151 static struct sctp_stream_out * 5152 sctp_select_a_stream(struct sctp_tcb *stcb, struct sctp_association *asoc) 5153 { 5154 struct sctp_stream_out *strq; 5155 5156 /* Find the next stream to use */ 5157 if (asoc->last_out_stream == NULL) { 5158 strq = asoc->last_out_stream = TAILQ_FIRST(&asoc->out_wheel); 5159 if (asoc->last_out_stream == NULL) { 5160 /* huh nothing on the wheel, TSNH */ 5161 return (NULL); 5162 } 5163 goto done_it; 5164 } 5165 strq = TAILQ_NEXT(asoc->last_out_stream, next_spoke); 5166 done_it: 5167 if (strq == NULL) { 5168 strq = asoc->last_out_stream = TAILQ_FIRST(&asoc->out_wheel); 
5169 } 5170 return (strq); 5171 5172 } 5173 5174 static void 5175 sctp_fill_outqueue(struct sctp_tcb *stcb, 5176 struct sctp_nets *net, int frag_point, int eeor_mode) 5177 { 5178 struct sctp_association *asoc; 5179 struct sctp_stream_out *strq, *strqn; 5180 int goal_mtu, moved_how_much, total_moved = 0; 5181 int locked, giveup; 5182 struct sctp_stream_queue_pending *sp; 5183 5184 SCTP_TCB_LOCK_ASSERT(stcb); 5185 asoc = &stcb->asoc; 5186 #ifdef AF_INET6 5187 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) { 5188 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD; 5189 } else { 5190 /* ?? not sure what else to do */ 5191 goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD; 5192 } 5193 #else 5194 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD; 5195 mtu_fromwheel = 0; 5196 #endif 5197 /* Need an allowance for the data chunk header too */ 5198 goal_mtu -= sizeof(struct sctp_data_chunk); 5199 5200 /* must make even word boundary */ 5201 goal_mtu &= 0xfffffffc; 5202 if (asoc->locked_on_sending) { 5203 /* We are stuck on one stream until the message completes. */ 5204 strqn = strq = asoc->locked_on_sending; 5205 locked = 1; 5206 } else { 5207 strqn = strq = sctp_select_a_stream(stcb, asoc); 5208 locked = 0; 5209 } 5210 5211 while ((goal_mtu > 0) && strq) { 5212 sp = TAILQ_FIRST(&strq->outqueue); 5213 /* 5214 * If CMT is off, we must validate that the stream in 5215 * question has the first item pointed towards are network 5216 * destionation requested by the caller. Note that if we 5217 * turn out to be locked to a stream (assigning TSN's then 5218 * we must stop, since we cannot look for another stream 5219 * with data to send to that destination). In CMT's case, by 5220 * skipping this check, we will send one data packet towards 5221 * the requested net. 
5222 */ 5223 if (sp == NULL) { 5224 break; 5225 } 5226 if ((sp->net != net) && (sctp_cmt_on_off == 0)) { 5227 /* none for this network */ 5228 if (locked) { 5229 break; 5230 } else { 5231 strq = sctp_select_a_stream(stcb, asoc); 5232 if (strq == NULL) 5233 /* none left */ 5234 break; 5235 if (strqn == strq) { 5236 /* I have circled */ 5237 break; 5238 } 5239 continue; 5240 } 5241 } 5242 giveup = 0; 5243 moved_how_much = sctp_move_to_outqueue(stcb, net, strq, goal_mtu, frag_point, &locked, 5244 &giveup, eeor_mode); 5245 asoc->last_out_stream = strq; 5246 if (locked) { 5247 asoc->locked_on_sending = strq; 5248 if ((moved_how_much == 0) || (giveup)) 5249 /* no more to move for now */ 5250 break; 5251 } else { 5252 asoc->locked_on_sending = NULL; 5253 if (TAILQ_FIRST(&strq->outqueue) == NULL) { 5254 sctp_remove_from_wheel(stcb, asoc, strq); 5255 } 5256 if (giveup) { 5257 break; 5258 } 5259 strq = sctp_select_a_stream(stcb, asoc); 5260 if (strq == NULL) { 5261 break; 5262 } 5263 } 5264 total_moved += moved_how_much; 5265 goal_mtu -= moved_how_much; 5266 goal_mtu &= 0xfffffffc; 5267 } 5268 if (total_moved == 0) { 5269 if ((sctp_cmt_on_off == 0) && 5270 (net == stcb->asoc.primary_destination)) { 5271 /* ran dry for primary network net */ 5272 SCTP_STAT_INCR(sctps_primary_randry); 5273 } else if (sctp_cmt_on_off) { 5274 /* ran dry with CMT on */ 5275 SCTP_STAT_INCR(sctps_cmt_randry); 5276 } 5277 } 5278 } 5279 5280 __inline void 5281 sctp_fix_ecn_echo(struct sctp_association *asoc) 5282 { 5283 struct sctp_tmit_chunk *chk; 5284 5285 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 5286 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) { 5287 chk->sent = SCTP_DATAGRAM_UNSENT; 5288 } 5289 } 5290 } 5291 5292 static void 5293 sctp_move_to_an_alt(struct sctp_tcb *stcb, 5294 struct sctp_association *asoc, 5295 struct sctp_nets *net) 5296 { 5297 struct sctp_tmit_chunk *chk; 5298 struct sctp_nets *a_net; 5299 5300 SCTP_TCB_LOCK_ASSERT(stcb); 5301 a_net = 
	    sctp_find_alternate_net(stcb, net, 0);
	if ((a_net != net) &&
	    ((a_net->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE)) {
		/*
		 * We only proceed if a valid alternate is found that is not
		 * this one and is reachable. Here we must move all chunks
		 * queued in the send queue off of the destination address
		 * to our alternate.
		 */
		TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
			if (chk->whoTo == net) {
				/* Move the chunk to our alternate */
				sctp_free_remote_addr(chk->whoTo);
				chk->whoTo = a_net;
				atomic_add_int(&a_net->ref_count, 1);
			}
		}
	}
}

extern int sctp_early_fr;

/*
 * Generic chunk service queue: pull stream data onto the send queue,
 * then, per destination, bundle control and data chunks into packets
 * and hand them to sctp_lowlevel_chunk_output().  *reason_code reports
 * why output stopped; returns 0 or an errno from the low-level send.
 */
int
sctp_med_chunk_output(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int *num_out,
    int *reason_code,
    int control_only, int *cwnd_full, int from_where,
    struct timeval *now, int *now_filled, int frag_point)
{
	/*
	 * Ok this is the generic chunk service queue. we must do the
	 * following: - Service the stream queue that is next, moving any
	 * message (note I must get a complete message i.e. FIRST/MIDDLE and
	 * LAST to the out queue in one pass) and assigning TSN's - Check to
	 * see if the cwnd/rwnd allows any output, if so we go ahead and
	 * fomulate and send the low level chunks. Making sure to combine
	 * any control in the control chunk queue also.
	 */
	struct sctp_nets *net;
	struct mbuf *outchain, *endoutchain;	/* packet under construction */
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctphdr *shdr;

	/* temp arrays for unlinking */
	struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
	int no_fragmentflg, error;
	int one_chunk, hbflag;
	int asconf, cookie, no_out_cnt;
	int bundle_at, ctl_cnt, no_data_chunks, cwnd_full_ind, eeor_mode;
	unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;

	/* NOTE: declarations are interleaved with statements below (C99) */
	*num_out = 0;
	struct sctp_nets *start_at, *old_startat = NULL, *send_start_at;

	cwnd_full_ind = 0;
	int tsns_sent = 0;
	uint32_t auth_offset = 0;
	struct sctp_auth_chunk *auth = NULL;

	/* explicit-EOR mode is forced while shutting down */
	if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
	    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
		eeor_mode = 1;
	} else {
		eeor_mode = 0;
	}
	ctl_cnt = no_out_cnt = asconf = cookie = 0;
	/*
	 * First lets prime the pump. For each destination, if there is room
	 * in the flight size, attempt to pull an MTU's worth out of the
	 * stream queues into the general send_queue
	 */
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xC2, 2);
#endif
	SCTP_TCB_LOCK_ASSERT(stcb);
	hbflag = 0;
	if ((control_only) || (asoc->stream_reset_outstanding))
		no_data_chunks = 1;
	else
		no_data_chunks = 0;

	/* Nothing to possible to send?
 */
	if (TAILQ_EMPTY(&asoc->control_send_queue) &&
	    TAILQ_EMPTY(&asoc->send_queue) &&
	    TAILQ_EMPTY(&asoc->out_wheel)) {
		*reason_code = 9;
		return (0);
	}
	if (asoc->peers_rwnd == 0) {
		/* No room in peers rwnd */
		*cwnd_full = 1;
		*reason_code = 1;
		if (asoc->total_flight > 0) {
			/* we are allowed one chunk in flight */
			no_data_chunks = 1;
		}
	}
	if ((no_data_chunks == 0) && (!TAILQ_EMPTY(&asoc->out_wheel))) {
		if (sctp_cmt_on_off) {
			/*
			 * for CMT we start at the next one past the one we
			 * last added data to.
			 */
			if (TAILQ_FIRST(&asoc->send_queue) != NULL) {
				goto skip_the_fill_from_streams;
			}
			if (asoc->last_net_data_came_from) {
				net = TAILQ_NEXT(asoc->last_net_data_came_from, sctp_next);
				if (net == NULL) {
					net = TAILQ_FIRST(&asoc->nets);
				}
			} else {
				/* back to start */
				net = TAILQ_FIRST(&asoc->nets);
			}

		} else {
			net = asoc->primary_destination;
			if (net == NULL) {
				/* TSNH */
				net = TAILQ_FIRST(&asoc->nets);
			}
		}
		start_at = net;
		/*
		 * The one_more_time label lets a second pass pick up the
		 * nets preceding start_at, so the whole list is visited
		 * exactly once regardless of where we began.
		 */
one_more_time:
		for (; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
			if (old_startat && (old_startat == net)) {
				break;
			}
			if ((sctp_cmt_on_off == 0) && (net->ref_count < 2)) {
				/* nothing can be in queue for this guy */
				continue;
			}
			if (net->flight_size >= net->cwnd) {
				/* skip this network, no room */
				cwnd_full_ind++;
				continue;
			}
			/*
			 * @@@ JRI : this for loop we are in takes in each
			 * net, if its's got space in cwnd and has data sent
			 * to it (when CMT is off) then it calls
			 * sctp_fill_outqueue for the net. This gets data on
			 * the send queue for that network.
			 *
			 * In sctp_fill_outqueue TSN's are assigned and data is
			 * copied out of the stream buffers. Note mostly
			 * copy by reference (we hope).
			 */
#ifdef SCTP_CWND_LOGGING
			sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
#endif
			sctp_fill_outqueue(stcb, net, frag_point, eeor_mode);
		}
		if (start_at != TAILQ_FIRST(&asoc->nets)) {
			/* got to pick up the beginning stuff. */
			old_startat = start_at;
			start_at = net = TAILQ_FIRST(&asoc->nets);
			goto one_more_time;
		}
	}
skip_the_fill_from_streams:
	*cwnd_full = cwnd_full_ind;
	/* now service each destination and send out what we can for it */
	/* Nothing to send? */
	if ((TAILQ_FIRST(&asoc->control_send_queue) == NULL) &&
	    (TAILQ_FIRST(&asoc->send_queue) == NULL)) {
		*reason_code = 8;
		return (0);
	}
	chk = TAILQ_FIRST(&asoc->send_queue);
	if (chk) {
		send_start_at = chk->whoTo;
	} else {
		send_start_at = TAILQ_FIRST(&asoc->nets);
	}
	old_startat = NULL;
	/* same two-pass wrap-around pattern as the fill loop above */
again_one_more_time:
	for (net = send_start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
		/* how much can we send? */
		/* printf("Examine for sending net:%x\n", (uint32_t)net); */
		if (old_startat && (old_startat == net)) {
			/* through list ocmpletely. */
			break;
		}
		tsns_sent = 0;
		if (net->ref_count < 2) {
			/*
			 * Ref-count of 1 so we cannot have data or control
			 * queued to this address. Skip it.
 */
			continue;
		}
		ctl_cnt = bundle_at = 0;
		endoutchain = outchain = NULL;
		no_fragmentflg = 1;
		one_chunk = 0;

		if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) {
			/*
			 * if we have a route and an ifp check to see if we
			 * have room to send to this guy
			 */
			struct ifnet *ifp;

			ifp = net->ro.ro_rt->rt_ifp;
			if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) {
				SCTP_STAT_INCR(sctps_ifnomemqueued);
#ifdef SCTP_LOG_MAXBURST
				sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED);
#endif
				continue;
			}
		}
		/* mtu = path MTU less IP + SCTP common header overhead */
		if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
			mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
		} else {
			mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
		}
		mx_mtu = mtu;
		to_out = 0;
		if (mtu > asoc->peers_rwnd) {
			if (asoc->total_flight > 0) {
				/* We have a packet in flight somewhere */
				r_mtu = asoc->peers_rwnd;
			} else {
				/* We are always allowed to send one MTU out */
				one_chunk = 1;
				r_mtu = mtu;
			}
		} else {
			r_mtu = mtu;
		}
		/************************/
		/* Control transmission */
		/************************/
		/* Now first lets go through the control queue */
		for (chk = TAILQ_FIRST(&asoc->control_send_queue);
		    chk; chk = nchk) {
			nchk = TAILQ_NEXT(chk, sctp_next);
			if (chk->whoTo != net) {
				/*
				 * No, not sent to the network we are
				 * looking at
				 */
				continue;
			}
			if (chk->data == NULL) {
				continue;
			}
			if (chk->sent != SCTP_DATAGRAM_UNSENT) {
				/*
				 * It must be unsent. Cookies and ASCONF's
				 * hang around but there timers will force
				 * when marked for resend.
				 */
				continue;
			}
			/*
			 * if no AUTH is yet included and this chunk
			 * requires it, make sure to account for it.  We
			 * don't apply the size until the AUTH chunk is
			 * actually added below in case there is no room for
			 * this chunk. NOTE: we overload the use of "omtu"
			 * here
			 */
			if ((auth == NULL) &&
			    sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
			    stcb->asoc.peer_auth_chunks)) {
				omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
			} else
				omtu = 0;
			/* Here we do NOT factor the r_mtu */
			if ((chk->send_size < (int)(mtu - omtu)) ||
			    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
				/*
				 * We probably should glom the mbuf chain
				 * from the chk->data for control but the
				 * problem is it becomes yet one more level
				 * of tracking to do if for some reason
				 * output fails. Then I have got to
				 * reconstruct the merged control chain.. el
				 * yucko.. for now we take the easy way and
				 * do the copy
				 */
				/*
				 * Add an AUTH chunk, if chunk requires it
				 * save the offset into the chain for AUTH
				 */
				if ((auth == NULL) &&
				    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
				    stcb->asoc.peer_auth_chunks))) {
					outchain = sctp_add_auth_chunk(outchain,
					    &endoutchain,
					    &auth,
					    &auth_offset,
					    stcb,
					    chk->rec.chunk_id.id);
					SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
				}
				outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
				    (int)chk->rec.chunk_id.can_take_data,
				    chk->send_size, chk->copy_by_ref);
				if (outchain == NULL) {
					*reason_code = 8;
					return (ENOMEM);
				}
				SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
				/* update our MTU size */
				if (mtu > (chk->send_size + omtu))
					mtu -= (chk->send_size + omtu);
				else
					mtu = 0;
				to_out += (chk->send_size + omtu);
				/* Do clear IP_DF ?
 */
				if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
					no_fragmentflg = 0;
				}
				if (chk->rec.chunk_id.can_take_data)
					chk->data = NULL;
				/* Mark things to be removed, if needed */
				if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
				    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
				    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
				    (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
				    (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
				    (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
				    (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
				    (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
				    (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
				    (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {

					if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST)
						hbflag = 1;
					/* remove these chunks at the end */
					if (chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) {
						/* turn off the timer */
						if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
							sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
							    inp, stcb, net, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
						}
					}
					ctl_cnt++;
				} else {
					/*
					 * Other chunks, since they have
					 * timers running (i.e. COOKIE or
					 * ASCONF) we just "trust" that it
					 * gets sent or retransmitted.
					 */
					ctl_cnt++;
					if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
						/*
						 * no_out_cnt: don't report
						 * chunks sent when a cookie
						 * goes out (see below).
						 */
						cookie = 1;
						no_out_cnt = 1;
					} else if (chk->rec.chunk_id.id == SCTP_ASCONF) {
						/*
						 * set hb flag since we can
						 * use these for RTO
						 */
						hbflag = 1;
						asconf = 1;
					}
					chk->sent = SCTP_DATAGRAM_SENT;
					chk->snd_count++;
				}
				if (mtu == 0) {
					/*
					 * Ok we are out of room but we can
					 * output without effecting the
					 * flight size since this little guy
					 * is a control only packet.
 */
					if (asconf) {
						sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
						asconf = 0;
					}
					if (cookie) {
						sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
						cookie = 0;
					}
					/* prepend the SCTP common header */
					SCTP_BUF_PREPEND(outchain, sizeof(struct sctphdr), M_DONTWAIT);
					if (outchain == NULL) {
						/* no memory */
						error = ENOBUFS;
						goto error_out_again;
					}
					shdr = mtod(outchain, struct sctphdr *);
					shdr->src_port = inp->sctp_lport;
					shdr->dest_port = stcb->rport;
					shdr->v_tag = htonl(stcb->asoc.peer_vtag);
					shdr->checksum = 0;
					auth_offset += sizeof(struct sctphdr);
					if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
					    (struct sockaddr *)&net->ro._l_addr,
					    outchain, auth_offset, auth,
					    no_fragmentflg, 0, NULL, asconf))) {
						if (error == ENOBUFS) {
							asoc->ifp_had_enobuf = 1;
						}
						SCTP_STAT_INCR(sctps_lowlevelerr);
						if (from_where == 0) {
							SCTP_STAT_INCR(sctps_lowlevelerrusr);
						}
						/*
						 * error_out_again is also
						 * entered from the ENOBUFS
						 * path above (prepend
						 * failure).
						 */
				error_out_again:
						/* error, could not output */
						if (hbflag) {
							if (*now_filled == 0) {
								SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
								*now_filled = 1;
								*now = net->last_sent_time;
							} else {
								net->last_sent_time = *now;
							}
							hbflag = 0;
						}
						if (error == EHOSTUNREACH) {
							/*
							 * Destination went
							 * unreachable
							 * during this send
							 */
							sctp_move_to_an_alt(stcb, asoc, net);
						}
						sctp_clean_up_ctl(stcb, asoc);
						*reason_code = 7;
						return (error);
					} else
						asoc->ifp_had_enobuf = 0;
					/* Only HB or ASCONF advances time */
					if (hbflag) {
						if (*now_filled == 0) {
							SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
							*now_filled = 1;
							*now = net->last_sent_time;
						} else {
							net->last_sent_time = *now;
						}
						hbflag = 0;
					}
					/*
					 * increase the number we sent, if a
					 * cookie is sent we don't tell them
					 * any was sent out.
 */
					outchain = endoutchain = NULL;
					auth = NULL;
					auth_offset = 0;
					if (!no_out_cnt)
						*num_out += ctl_cnt;
					/* recalc a clean slate and setup */
					if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
						mtu = (net->mtu - SCTP_MIN_OVERHEAD);
					} else {
						mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD);
					}
					to_out = 0;
					no_fragmentflg = 1;
				}
			}
		}
		/*********************/
		/* Data transmission */
		/*********************/
		/*
		 * if AUTH for DATA is required and no AUTH has been added
		 * yet, account for this in the mtu now... if no data can be
		 * bundled, this adjustment won't matter anyways since the
		 * packet will be going out...
		 */
		if ((auth == NULL) &&
		    sctp_auth_is_required_chunk(SCTP_DATA,
		    stcb->asoc.peer_auth_chunks)) {
			mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
		}
		/* now lets add any data within the MTU constraints */
		if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
			if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr)))
				omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
			else
				omtu = 0;
		} else {
			if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)))
				omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
			else
				omtu = 0;
		}
		/* data may go only in OPEN state, or bundled with a cookie */
		if (((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) ||
		    (cookie)) {
			for (chk = TAILQ_FIRST(&asoc->send_queue); chk; chk = nchk) {
				if (no_data_chunks) {
					/* let only control go out */
					*reason_code = 1;
					break;
				}
				if (net->flight_size >= net->cwnd) {
					/* skip this net, no room for data */
					*reason_code = 2;
					break;
				}
				nchk = TAILQ_NEXT(chk, sctp_next);
				if (chk->whoTo != net) {
					/* No, not sent to this net */
					continue;
				}
				if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {

					/*
					 * strange, we have a chunk that is
					 * to bit for its destination and
					 * yet no fragment ok flag.
					 * Something went wrong when the
					 * PMTU changed...we did not mark
					 * this chunk for some reason?? I
					 * will fix it here by letting IP
					 * fragment it for now and printing
					 * a warning. This really should not
					 * happen ...
					 */
#ifdef SCTP_DEBUG
					printf("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
					    chk->send_size, mtu);
#endif
					chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
				}
				if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
				    ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
					/* ok we will add this one */

					/*
					 * Add an AUTH chunk, if chunk
					 * requires it, save the offset into
					 * the chain for AUTH
					 */
					if ((auth == NULL) &&
					    (sctp_auth_is_required_chunk(SCTP_DATA,
					    stcb->asoc.peer_auth_chunks))) {

						outchain = sctp_add_auth_chunk(outchain,
						    &endoutchain,
						    &auth,
						    &auth_offset,
						    stcb,
						    SCTP_DATA);
						SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
					}
					outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
					    chk->send_size, chk->copy_by_ref);
					if (outchain == NULL) {
#ifdef SCTP_DEBUG
						if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
							printf("No memory?\n");
						}
#endif
						if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
							sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
						}
						*reason_code = 3;
						return (ENOMEM);
					}
					/* upate our MTU size */
					/* Do clear IP_DF ?
 */
					if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
						no_fragmentflg = 0;
					}
					/* unsigned subtraction of mtu */
					if (mtu > chk->send_size)
						mtu -= chk->send_size;
					else
						mtu = 0;
					/* unsigned subtraction of r_mtu */
					if (r_mtu > chk->send_size)
						r_mtu -= chk->send_size;
					else
						r_mtu = 0;

					to_out += chk->send_size;
					if (to_out > mx_mtu) {
#ifdef INVARIANTS
						panic("gag");
#else
						printf("Exceeding mtu of %d out size is %d\n",
						    mx_mtu, to_out);
#endif
					}
					data_list[bundle_at++] = chk;
					if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
						mtu = 0;
						break;
					}
					if (chk->sent == SCTP_DATAGRAM_UNSENT) {
						if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
							SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
						} else {
							SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
						}
						if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
						    ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
							/*
							 * Count number of
							 * user msg's that
							 * were fragmented
							 * we do this by
							 * counting when we
							 * see a LAST
							 * fragment only.
							 */
							SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
					}
					if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
						break;
					}
				} else {
					/*
					 * Must be sent in order of the
					 * TSN's (on a network)
					 */
					break;
				}
			}	/* for () */
		}		/* if asoc.state OPEN */
		/* Is there something to send for this destination?
 */
		if (outchain) {
			/* We may need to start a control timer or two */
			if (asconf) {
				sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
				asconf = 0;
			}
			if (cookie) {
				sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
				cookie = 0;
			}
			/* must start a send timer if data is being sent */
			if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
				/*
				 * no timer running on this destination
				 * restart it.
				 */
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
			}
			/* Now send it, if there is anything to send :> */
			SCTP_BUF_PREPEND(outchain, sizeof(struct sctphdr), M_DONTWAIT);
			if (outchain == NULL) {
				/* out of mbufs */
				error = ENOBUFS;
				goto errored_send;
			}
			shdr = mtod(outchain, struct sctphdr *);
			shdr->src_port = inp->sctp_lport;
			shdr->dest_port = stcb->rport;
			shdr->v_tag = htonl(stcb->asoc.peer_vtag);
			shdr->checksum = 0;
			auth_offset += sizeof(struct sctphdr);
			if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
			    (struct sockaddr *)&net->ro._l_addr,
			    outchain,
			    auth_offset,
			    auth,
			    no_fragmentflg,
			    bundle_at,
			    data_list[0],
			    asconf))) {
				/* error, we could not output */
				if (error == ENOBUFS) {
					asoc->ifp_had_enobuf = 1;
				}
				SCTP_STAT_INCR(sctps_lowlevelerr);
				if (from_where == 0) {
					SCTP_STAT_INCR(sctps_lowlevelerrusr);
				}
				/* also entered on prepend failure above */
		errored_send:
#ifdef SCTP_DEBUG
				if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
					printf("Gak send error %d\n", error);
				}
#endif
				if (hbflag) {
					if (*now_filled == 0) {
						SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
						*now_filled = 1;
						*now = net->last_sent_time;
					} else {
						net->last_sent_time = *now;
					}
					hbflag = 0;
				}
				if (error == EHOSTUNREACH) {
					/*
					 * Destination went unreachable
					 * during this send
					 */
					sctp_move_to_an_alt(stcb, asoc, net);
				}
				sctp_clean_up_ctl(stcb, asoc);
				*reason_code = 6;
				return (error);
			} else {
				asoc->ifp_had_enobuf = 0;
			}
			outchain = endoutchain = NULL;
			auth = NULL;
			auth_offset = 0;
			if (bundle_at || hbflag) {
				/* For data/asconf and hb set time */
				if (*now_filled == 0) {
					SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
					*now_filled = 1;
					*now = net->last_sent_time;
				} else {
					net->last_sent_time = *now;
				}
			}
			if (!no_out_cnt) {
				*num_out += (ctl_cnt + bundle_at);
			}
			if (bundle_at) {
				/* if (!net->rto_pending) { */
				/* setup for a RTO measurement */
				/* net->rto_pending = 1; */
				tsns_sent = data_list[0]->rec.data.TSN_seq;

				data_list[0]->do_rtt = 1;
				/* } else { */
				/* data_list[0]->do_rtt = 0; */
				/* } */
				SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
				sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
				if (sctp_early_fr) {
					if (net->flight_size < net->cwnd) {
						/* start or restart it */
						if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
							sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net,
							    SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2);
						}
						SCTP_STAT_INCR(sctps_earlyfrstrout);
						sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net);
					} else {
						/* stop it if its running */
						if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
							SCTP_STAT_INCR(sctps_earlyfrstpout);
							sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net,
							    SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3);
						}
					}
				}
			}
			if (one_chunk) {
				break;
			}
		}
#ifdef SCTP_CWND_LOGGING
		sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
#endif
	}
	if (old_startat == NULL) {
		old_startat = send_start_at;
		send_start_at = TAILQ_FIRST(&asoc->nets);
		goto again_one_more_time;
	}
	/*
	 * At the end there should
be no NON timed chunks hanging on this 6063 * queue. 6064 */ 6065 #ifdef SCTP_CWND_LOGGING 6066 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND); 6067 #endif 6068 if ((*num_out == 0) && (*reason_code == 0)) { 6069 *reason_code = 4; 6070 } else { 6071 *reason_code = 5; 6072 } 6073 sctp_clean_up_ctl(stcb, asoc); 6074 return (0); 6075 } 6076 6077 void 6078 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err) 6079 { 6080 /* 6081 * Prepend a OPERATIONAL_ERROR chunk header and put on the end of 6082 * the control chunk queue. 6083 */ 6084 struct sctp_chunkhdr *hdr; 6085 struct sctp_tmit_chunk *chk; 6086 struct mbuf *mat; 6087 6088 SCTP_TCB_LOCK_ASSERT(stcb); 6089 sctp_alloc_a_chunk(stcb, chk); 6090 if (chk == NULL) { 6091 /* no memory */ 6092 sctp_m_freem(op_err); 6093 return; 6094 } 6095 chk->copy_by_ref = 0; 6096 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_DONTWAIT); 6097 if (op_err == NULL) { 6098 sctp_free_a_chunk(stcb, chk); 6099 return; 6100 } 6101 chk->send_size = 0; 6102 mat = op_err; 6103 while (mat != NULL) { 6104 chk->send_size += SCTP_BUF_LEN(mat); 6105 mat = SCTP_BUF_NEXT(mat); 6106 } 6107 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR; 6108 chk->rec.chunk_id.can_take_data = 1; 6109 chk->sent = SCTP_DATAGRAM_UNSENT; 6110 chk->snd_count = 0; 6111 chk->flags = 0; 6112 chk->asoc = &stcb->asoc; 6113 chk->data = op_err; 6114 chk->whoTo = chk->asoc->primary_destination; 6115 atomic_add_int(&chk->whoTo->ref_count, 1); 6116 hdr = mtod(op_err, struct sctp_chunkhdr *); 6117 hdr->chunk_type = SCTP_OPERATION_ERROR; 6118 hdr->chunk_flags = 0; 6119 hdr->chunk_length = htons(chk->send_size); 6120 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, 6121 chk, 6122 sctp_next); 6123 chk->asoc->ctrl_queue_cnt++; 6124 } 6125 6126 int 6127 sctp_send_cookie_echo(struct mbuf *m, 6128 int offset, 6129 struct sctp_tcb *stcb, 6130 struct sctp_nets *net) 6131 { 6132 /* 6133 * pull out the cookie and put it at the front of the control chunk 6134 * queue. 
6135 */ 6136 int at; 6137 struct mbuf *cookie; 6138 struct sctp_paramhdr parm, *phdr; 6139 struct sctp_chunkhdr *hdr; 6140 struct sctp_tmit_chunk *chk; 6141 uint16_t ptype, plen; 6142 6143 /* First find the cookie in the param area */ 6144 cookie = NULL; 6145 at = offset + sizeof(struct sctp_init_chunk); 6146 6147 SCTP_TCB_LOCK_ASSERT(stcb); 6148 do { 6149 phdr = sctp_get_next_param(m, at, &parm, sizeof(parm)); 6150 if (phdr == NULL) { 6151 return (-3); 6152 } 6153 ptype = ntohs(phdr->param_type); 6154 plen = ntohs(phdr->param_length); 6155 if (ptype == SCTP_STATE_COOKIE) { 6156 int pad; 6157 6158 /* found the cookie */ 6159 if ((pad = (plen % 4))) { 6160 plen += 4 - pad; 6161 } 6162 cookie = SCTP_M_COPYM(m, at, plen, M_DONTWAIT); 6163 if (cookie == NULL) { 6164 /* No memory */ 6165 return (-2); 6166 } 6167 break; 6168 } 6169 at += SCTP_SIZE32(plen); 6170 } while (phdr); 6171 if (cookie == NULL) { 6172 /* Did not find the cookie */ 6173 return (-3); 6174 } 6175 /* ok, we got the cookie lets change it into a cookie echo chunk */ 6176 6177 /* first the change from param to cookie */ 6178 hdr = mtod(cookie, struct sctp_chunkhdr *); 6179 hdr->chunk_type = SCTP_COOKIE_ECHO; 6180 hdr->chunk_flags = 0; 6181 /* get the chunk stuff now and place it in the FRONT of the queue */ 6182 sctp_alloc_a_chunk(stcb, chk); 6183 if (chk == NULL) { 6184 /* no memory */ 6185 sctp_m_freem(cookie); 6186 return (-5); 6187 } 6188 chk->copy_by_ref = 0; 6189 chk->send_size = plen; 6190 chk->rec.chunk_id.id = SCTP_COOKIE_ECHO; 6191 chk->rec.chunk_id.can_take_data = 0; 6192 chk->sent = SCTP_DATAGRAM_UNSENT; 6193 chk->snd_count = 0; 6194 chk->flags = 0; 6195 chk->asoc = &stcb->asoc; 6196 chk->data = cookie; 6197 chk->whoTo = chk->asoc->primary_destination; 6198 atomic_add_int(&chk->whoTo->ref_count, 1); 6199 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next); 6200 chk->asoc->ctrl_queue_cnt++; 6201 return (0); 6202 } 6203 6204 void 6205 sctp_send_heartbeat_ack(struct sctp_tcb *stcb, 
6206 struct mbuf *m, 6207 int offset, 6208 int chk_length, 6209 struct sctp_nets *net) 6210 { 6211 /* 6212 * take a HB request and make it into a HB ack and send it. 6213 */ 6214 struct mbuf *outchain; 6215 struct sctp_chunkhdr *chdr; 6216 struct sctp_tmit_chunk *chk; 6217 6218 6219 if (net == NULL) 6220 /* must have a net pointer */ 6221 return; 6222 6223 outchain = SCTP_M_COPYM(m, offset, chk_length, M_DONTWAIT); 6224 if (outchain == NULL) { 6225 /* gak out of memory */ 6226 return; 6227 } 6228 chdr = mtod(outchain, struct sctp_chunkhdr *); 6229 chdr->chunk_type = SCTP_HEARTBEAT_ACK; 6230 chdr->chunk_flags = 0; 6231 if (chk_length % 4) { 6232 /* need pad */ 6233 uint32_t cpthis = 0; 6234 int padlen; 6235 6236 padlen = 4 - (chk_length % 4); 6237 m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis); 6238 } 6239 sctp_alloc_a_chunk(stcb, chk); 6240 if (chk == NULL) { 6241 /* no memory */ 6242 sctp_m_freem(outchain); 6243 return; 6244 } 6245 chk->copy_by_ref = 0; 6246 chk->send_size = chk_length; 6247 chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK; 6248 chk->rec.chunk_id.can_take_data = 1; 6249 chk->sent = SCTP_DATAGRAM_UNSENT; 6250 chk->snd_count = 0; 6251 chk->flags = 0; 6252 chk->asoc = &stcb->asoc; 6253 chk->data = outchain; 6254 chk->whoTo = net; 6255 atomic_add_int(&chk->whoTo->ref_count, 1); 6256 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 6257 chk->asoc->ctrl_queue_cnt++; 6258 } 6259 6260 int 6261 sctp_send_cookie_ack(struct sctp_tcb *stcb) 6262 { 6263 /* formulate and queue a cookie-ack back to sender */ 6264 struct mbuf *cookie_ack; 6265 struct sctp_chunkhdr *hdr; 6266 struct sctp_tmit_chunk *chk; 6267 6268 cookie_ack = NULL; 6269 SCTP_TCB_LOCK_ASSERT(stcb); 6270 6271 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER); 6272 if (cookie_ack == NULL) { 6273 /* no mbuf's */ 6274 return (-1); 6275 } 6276 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD); 6277 sctp_alloc_a_chunk(stcb, chk); 6278 if 
(chk == NULL) { 6279 /* no memory */ 6280 sctp_m_freem(cookie_ack); 6281 return (-1); 6282 } 6283 chk->copy_by_ref = 0; 6284 chk->send_size = sizeof(struct sctp_chunkhdr); 6285 chk->rec.chunk_id.id = SCTP_COOKIE_ACK; 6286 chk->rec.chunk_id.can_take_data = 1; 6287 chk->sent = SCTP_DATAGRAM_UNSENT; 6288 chk->snd_count = 0; 6289 chk->flags = 0; 6290 chk->asoc = &stcb->asoc; 6291 chk->data = cookie_ack; 6292 if (chk->asoc->last_control_chunk_from != NULL) { 6293 chk->whoTo = chk->asoc->last_control_chunk_from; 6294 } else { 6295 chk->whoTo = chk->asoc->primary_destination; 6296 } 6297 atomic_add_int(&chk->whoTo->ref_count, 1); 6298 hdr = mtod(cookie_ack, struct sctp_chunkhdr *); 6299 hdr->chunk_type = SCTP_COOKIE_ACK; 6300 hdr->chunk_flags = 0; 6301 hdr->chunk_length = htons(chk->send_size); 6302 SCTP_BUF_LEN(cookie_ack) = chk->send_size; 6303 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 6304 chk->asoc->ctrl_queue_cnt++; 6305 return (0); 6306 } 6307 6308 6309 int 6310 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net) 6311 { 6312 /* formulate and queue a SHUTDOWN-ACK back to the sender */ 6313 struct mbuf *m_shutdown_ack; 6314 struct sctp_shutdown_ack_chunk *ack_cp; 6315 struct sctp_tmit_chunk *chk; 6316 6317 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_DONTWAIT, 1, MT_HEADER); 6318 if (m_shutdown_ack == NULL) { 6319 /* no mbuf's */ 6320 return (-1); 6321 } 6322 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD); 6323 sctp_alloc_a_chunk(stcb, chk); 6324 if (chk == NULL) { 6325 /* no memory */ 6326 sctp_m_freem(m_shutdown_ack); 6327 return (-1); 6328 } 6329 chk->copy_by_ref = 0; 6330 6331 chk->send_size = sizeof(struct sctp_chunkhdr); 6332 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK; 6333 chk->rec.chunk_id.can_take_data = 1; 6334 chk->sent = SCTP_DATAGRAM_UNSENT; 6335 chk->snd_count = 0; 6336 chk->flags = 0; 6337 chk->asoc = &stcb->asoc; 6338 chk->data = m_shutdown_ack; 6339 chk->whoTo = net; 
	/* the queued chunk holds a reference on its destination net */
	atomic_add_int(&net->ref_count, 1);

	ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
	ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
	ack_cp->ch.chunk_flags = 0;
	ack_cp->ch.chunk_length = htons(chk->send_size);
	SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return (0);
}

/*
 * Formulate and queue a SHUTDOWN chunk to 'net'.  The chunk carries our
 * current cumulative TSN ack so the peer can release acked data.  Returns
 * 0 on success or -1 on allocation failure.
 */
int
sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* formulate and queue a SHUTDOWN to the sender */
	struct mbuf *m_shutdown;
	struct sctp_shutdown_chunk *shutdown_cp;
	struct sctp_tmit_chunk *chk;

	m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
	if (m_shutdown == NULL) {
		/* no mbuf's */
		return (-1);
	}
	/* leave room for the SCTP/IP headers to be prepended at send time */
	SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(m_shutdown);
		return (-1);
	}
	chk->copy_by_ref = 0;
	chk->send_size = sizeof(struct sctp_shutdown_chunk);
	chk->rec.chunk_id.id = SCTP_SHUTDOWN;
	chk->rec.chunk_id.can_take_data = 1;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->data = m_shutdown;
	chk->whoTo = net;
	/* the queued chunk holds a reference on its destination net */
	atomic_add_int(&net->ref_count, 1);

	shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
	shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
	shutdown_cp->ch.chunk_flags = 0;
	shutdown_cp->ch.chunk_length = htons(chk->send_size);
	shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
	SCTP_BUF_LEN(m_shutdown) = chk->send_size;
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return (0);
}

/*
 * Compose an ASCONF chunk from the queued ASCONF parameters and place it
 * on the control queue, addressed to the primary destination.  Returns 0
 * on success or -1 on failure.  Caller must hold the TCB lock.
 */
int
sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*
	 * formulate and queue an ASCONF to the peer; ASCONF parameters
	 * should already be queued on the assoc queue.
	 * NOTE(review): the 'net' parameter is unused -- the chunk always
	 * goes to the primary destination.  'acp' below is set but never
	 * read; left in place as this edit is documentation-only.
	 */
	struct sctp_tmit_chunk *chk;
	struct mbuf *m_asconf;
	struct sctp_asconf_chunk *acp;
	int len;


	SCTP_TCB_LOCK_ASSERT(stcb);
	/* compose an ASCONF chunk, maximum length is PMTU */
	m_asconf = sctp_compose_asconf(stcb, &len);
	if (m_asconf == NULL) {
		return (-1);
	}
	acp = mtod(m_asconf, struct sctp_asconf_chunk *);
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(m_asconf);
		return (-1);
	}
	chk->copy_by_ref = 0;
	chk->data = m_asconf;
	chk->send_size = len;
	chk->rec.chunk_id.id = SCTP_ASCONF;
	chk->rec.chunk_id.can_take_data = 0;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->whoTo = chk->asoc->primary_destination;
	/* the queued chunk holds a reference on its destination net */
	atomic_add_int(&chk->whoTo->ref_count, 1);
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return (0);
}

/*
 * Re-queue the cached ASCONF-ACK (stored in the TCB) for transmission.
 * 'retrans' non-zero means we are retransmitting; in that case an
 * alternate net is tried up to 3 times before falling back to the net the
 * original ASCONF came from (or the primary destination).  Returns 0 on
 * success or -1 if there is nothing to send or allocation fails.
 */
int
sctp_send_asconf_ack(struct sctp_tcb *stcb, uint32_t retrans)
{
	/*
	 * formulate and queue a asconf-ack back to sender; the asconf-ack
	 * must be stored in the tcb.
	 */
	struct sctp_tmit_chunk *chk;
	struct mbuf *m_ack, *m;

	SCTP_TCB_LOCK_ASSERT(stcb);
	/* is there a asconf-ack mbuf chain to send? */
	if (stcb->asoc.last_asconf_ack_sent == NULL) {
		return (-1);
	}
	/* copy the asconf_ack (a retransmit may need the original later) */
	m_ack = SCTP_M_COPYM(stcb->asoc.last_asconf_ack_sent, 0, M_COPYALL, M_DONTWAIT);
	if (m_ack == NULL) {
		/* couldn't copy it */

		return (-1);
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		/* NOTE(review): m_ack is known non-NULL here; check is redundant */
		if (m_ack)
			sctp_m_freem(m_ack);
		return (-1);
	}
	chk->copy_by_ref = 0;
	/* figure out where it goes to */
	if (retrans) {
		/* we're doing a retransmission */
		if (stcb->asoc.used_alt_asconfack > 2) {
			/* tried alternate nets already, go back */
			chk->whoTo = NULL;
		} else {
			/* need to try and alternate net */
			chk->whoTo = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
			stcb->asoc.used_alt_asconfack++;
		}
		if (chk->whoTo == NULL) {
			/* no alternate */
			if (stcb->asoc.last_control_chunk_from == NULL)
				chk->whoTo = stcb->asoc.primary_destination;
			else
				chk->whoTo = stcb->asoc.last_control_chunk_from;
			stcb->asoc.used_alt_asconfack = 0;
		}
	} else {
		/* normal case */
		if (stcb->asoc.last_control_chunk_from == NULL)
			chk->whoTo = stcb->asoc.primary_destination;
		else
			chk->whoTo = stcb->asoc.last_control_chunk_from;
		stcb->asoc.used_alt_asconfack = 0;
	}
	chk->data = m_ack;
	chk->send_size = 0;
	/* Get size: sum the lengths of the whole mbuf chain */
	m = m_ack;
	while (m) {
		chk->send_size += SCTP_BUF_LEN(m);
		m = SCTP_BUF_NEXT(m);
	}
	chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
	chk->rec.chunk_id.can_take_data = 1;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	/* the queued chunk holds a reference on its destination net */
	atomic_add_int(&chk->whoTo->ref_count, 1);
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return (0);
}


static int
sctp_chunk_retransmission(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int *cnt_out, struct timeval *now, int *now_filled, int *fr_done)
{
	/*
	 * send out one MTU of retransmission. If fast_retransmit is
	 * happening we ignore the cwnd. Otherwise we obey the cwnd and
	 * rwnd. For a Cookie or Asconf in the control chunk queue we
	 * retransmit them by themselves.
	 *
	 * For data chunks we will pick out the lowest TSN's in the sent_queue
	 * marked for resend and bundle them all together (up to a MTU of
	 * destination). The address to send to should have been
	 * selected/changed where the retransmission was marked (i.e. in FR
	 * or t3-timeout routines).
	 *
	 * Returns: 0 on success (one packet emitted, or nothing eligible),
	 * 1 when output is blocked (rwnd/cwnd limited, or still in cookie
	 * handshake), -1 when the retran count was set but sent_queue is
	 * empty, or an errno from the lower-level output path.
	 * *cnt_out is set to the number of chunks emitted; *fr_done is set
	 * when a fast-retransmit destination was serviced; *now/*now_filled
	 * cache a single gettime for the caller.
	 */
	struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
	struct sctp_tmit_chunk *chk, *fwd;
	struct mbuf *m, *endofchain;
	struct sctphdr *shdr;
	int asconf;
	struct sctp_nets *net;
	uint32_t tsns_sent = 0;
	int no_fragmentflg, bundle_at, cnt_thru;
	unsigned int mtu;
	int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
	struct sctp_auth_chunk *auth = NULL;
	uint32_t auth_offset = 0;
	uint32_t dmtu = 0;

	SCTP_TCB_LOCK_ASSERT(stcb);
	tmr_started = ctl_cnt = bundle_at = error = 0;
	no_fragmentflg = 1;
	asconf = 0;
	fwd_tsn = 0;
	*cnt_out = 0;
	fwd = NULL;
	endofchain = m = NULL;
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xC3, 1);
#endif
	if (TAILQ_EMPTY(&asoc->sent_queue)) {
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
			printf("SCTP hits empty queue with cnt set to %d?\n",
			    asoc->sent_queue_retran_cnt);
		}
#endif
		/* keep the bookkeeping consistent with the empty queue */
		asoc->sent_queue_cnt = 0;
		asoc->sent_queue_cnt_removeable = 0;
	}
	/*
	 * First service the control queue: COOKIE-ECHO, ASCONF, our own
	 * STREAM-RESET and FWD-TSN chunks are retransmitted alone (only
	 * the first one found is taken -- note the break below).
	 */
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
		    (chk->rec.chunk_id.id == SCTP_ASCONF) ||
		    (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
		    (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
			if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
				if (chk != asoc->str_reset) {
					/*
					 * not eligible for retran if its
					 * not ours
					 */
					continue;
				}
			}
			ctl_cnt++;
			if (chk->rec.chunk_id.id == SCTP_ASCONF) {
				no_fragmentflg = 1;
				asconf = 1;
			}
			if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
				fwd_tsn = 1;
				fwd = chk;
			}
			/*
			 * Add an AUTH chunk, if chunk requires it save the
			 * offset into the chain for AUTH
			 */
			if ((auth == NULL) &&
			    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
			    stcb->asoc.peer_auth_chunks))) {
				m = sctp_add_auth_chunk(m, &endofchain,
				    &auth, &auth_offset,
				    stcb,
				    chk->rec.chunk_id.id);
			}
			m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
			break;
		}
	}
	one_chunk = 0;
	cnt_thru = 0;
	/* do we have control chunks to retransmit? */
	if (m != NULL) {
		/*
		 * Start a timer no matter if we suceed or fail.  'chk'
		 * still points at the control chunk found by the loop
		 * above (m != NULL implies the break was taken).
		 */
		if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
			sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
		} else if (chk->rec.chunk_id.id == SCTP_ASCONF)
			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);

		/* prepend the common SCTP header */
		SCTP_BUF_PREPEND(m, sizeof(struct sctphdr), M_DONTWAIT);
		if (m == NULL) {
			return (ENOBUFS);
		}
		shdr = mtod(m, struct sctphdr *);
		shdr->src_port = inp->sctp_lport;
		shdr->dest_port = stcb->rport;
		shdr->v_tag = htonl(stcb->asoc.peer_vtag);
		shdr->checksum = 0;
		auth_offset += sizeof(struct sctphdr);
		chk->snd_count++;	/* update our count */

		if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
		    (struct sockaddr *)&chk->whoTo->ro._l_addr, m, auth_offset,
		    auth, no_fragmentflg, 0, NULL, asconf))) {
			SCTP_STAT_INCR(sctps_lowlevelerr);
			return (error);
		}
		m = endofchain = NULL;
		auth = NULL;
		auth_offset = 0;
		/*
		 * We don't want to mark the net->sent time here since this
		 * we use this for HB and retrans cannot measure RTT
		 */
		/* SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
		*cnt_out += 1;
		chk->sent = SCTP_DATAGRAM_SENT;
		sctp_ucount_decr(asoc->sent_queue_retran_cnt);
		if (fwd_tsn == 0) {
			return (0);
		} else {
			/* Clean up the fwd-tsn list */
			sctp_clean_up_ctl(stcb, asoc);
			return (0);
		}
	}
	/*
	 * Ok, it is just data retransmission we need to do or that and a
	 * fwd-tsn with it all.
	 */
	if (TAILQ_EMPTY(&asoc->sent_queue)) {
		return (-1);
	}
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
		/* not yet open, resend the cookie and that is it */
		return (1);
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_auditing(20, inp, stcb, NULL);
#endif
	/*
	 * Walk the sent queue for the first chunk marked for resend; its
	 * destination determines the packet, and further RESEND chunks to
	 * the same net are bundled behind it below.
	 */
	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->sent != SCTP_DATAGRAM_RESEND) {
			/* No, not sent to this net or not ready for rtx */
			continue;

		}
		/* pick up the net */
		net = chk->whoTo;
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
			mtu = (net->mtu - SCTP_MIN_OVERHEAD);
		} else {
			mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
		}

		if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
			/* No room in peers rwnd */
			uint32_t tsn;

			tsn = asoc->last_acked_seq + 1;
			if (tsn == chk->rec.data.TSN_seq) {
				/*
				 * we make a special exception for this
				 * case. The peer has no rwnd but is missing
				 * the lowest chunk.. which is probably what
				 * is holding up the rwnd.
				 */
				goto one_chunk_around;
			}
			return (1);
		}
one_chunk_around:
		if (asoc->peers_rwnd < mtu) {
			/* window-probe mode: at most one chunk goes out */
			one_chunk = 1;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_audit_log(0xC3, 2);
#endif
		bundle_at = 0;
		m = NULL;
		net->fast_retran_ip = 0;
		if (chk->rec.data.doing_fast_retransmit == 0) {
			/*
			 * if no FR in progress skip destination that have
			 * flight_size > cwnd.
			 */
			if (net->flight_size >= net->cwnd) {
				continue;
			}
		} else {
			/*
			 * Mark the destination net to have FR recovery
			 * limits put on it.
			 */
			*fr_done = 1;
			net->fast_retran_ip = 1;
		}

		/*
		 * if no AUTH is yet included and this chunk requires it,
		 * make sure to account for it.  We don't apply the size
		 * until the AUTH chunk is actually added below in case
		 * there is no room for this chunk.
		 */
		if ((auth == NULL) &&
		    sctp_auth_is_required_chunk(SCTP_DATA,
		    stcb->asoc.peer_auth_chunks)) {
			dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
		} else
			dmtu = 0;

		if ((chk->send_size <= (mtu - dmtu)) ||
		    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
			/* ok we will add this one */
			if ((auth == NULL) &&
			    (sctp_auth_is_required_chunk(SCTP_DATA,
			    stcb->asoc.peer_auth_chunks))) {
				m = sctp_add_auth_chunk(m, &endofchain,
				    &auth, &auth_offset,
				    stcb, SCTP_DATA);
			}
			m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
			if (m == NULL) {
				return (ENOMEM);
			}
			/* Do clear IP_DF ? */
			if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
				no_fragmentflg = 0;
			}
			/* upate our MTU size */
			if (mtu > (chk->send_size + dmtu))
				mtu -= (chk->send_size + dmtu);
			else
				mtu = 0;
			data_list[bundle_at++] = chk;
			if (one_chunk && (asoc->total_flight <= 0)) {
				SCTP_STAT_INCR(sctps_windowprobed);
				chk->rec.data.state_flags |= SCTP_WINDOW_PROBE;
			}
		}
		if (one_chunk == 0) {
			/*
			 * now are there anymore forward from chk to pick
			 * up?
			 */
			fwd = TAILQ_NEXT(chk, sctp_next);
			while (fwd) {
				if (fwd->sent != SCTP_DATAGRAM_RESEND) {
					/* Nope, not for retran */
					fwd = TAILQ_NEXT(fwd, sctp_next);
					continue;
				}
				if (fwd->whoTo != net) {
					/* Nope, not the net in question */
					fwd = TAILQ_NEXT(fwd, sctp_next);
					continue;
				}
				if ((auth == NULL) &&
				    sctp_auth_is_required_chunk(SCTP_DATA,
				    stcb->asoc.peer_auth_chunks)) {
					dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
				} else
					dmtu = 0;
				if (fwd->send_size <= (mtu - dmtu)) {
					if ((auth == NULL) &&
					    (sctp_auth_is_required_chunk(SCTP_DATA,
					    stcb->asoc.peer_auth_chunks))) {
						m = sctp_add_auth_chunk(m,
						    &endofchain,
						    &auth, &auth_offset,
						    stcb,
						    SCTP_DATA);
					}
					m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
					if (m == NULL) {
						return (ENOMEM);
					}
					/* Do clear IP_DF ? */
					if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
						no_fragmentflg = 0;
					}
					/* upate our MTU size */
					if (mtu > (fwd->send_size + dmtu))
						mtu -= (fwd->send_size + dmtu);
					else
						mtu = 0;
					data_list[bundle_at++] = fwd;
					if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
						break;
					}
					fwd = TAILQ_NEXT(fwd, sctp_next);
				} else {
					/* can't fit so we are done */
					break;
				}
			}
		}
		/* Is there something to send for this destination? */
		if (m) {
			/*
			 * No matter if we fail/or suceed we should start a
			 * timer. A failure is like a lost IP packet :-)
			 */
			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
				/*
				 * no timer running on this destination
				 * restart it.
				 */
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
				tmr_started = 1;
			}
			/* prepend the common SCTP header */
			SCTP_BUF_PREPEND(m, sizeof(struct sctphdr), M_DONTWAIT);
			if (m == NULL) {
				return (ENOBUFS);
			}
			shdr = mtod(m, struct sctphdr *);
			shdr->src_port = inp->sctp_lport;
			shdr->dest_port = stcb->rport;
			shdr->v_tag = htonl(stcb->asoc.peer_vtag);
			shdr->checksum = 0;
			auth_offset += sizeof(struct sctphdr);
			/* Now lets send it, if there is anything to send :> */
			if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
			    (struct sockaddr *)&net->ro._l_addr, m, auth_offset,
			    auth, no_fragmentflg, 0, NULL, asconf))) {
				/* error, we could not output */
				SCTP_STAT_INCR(sctps_lowlevelerr);
				return (error);
			}
			m = endofchain = NULL;
			auth = NULL;
			auth_offset = 0;
			/* For HB's */
			/*
			 * We don't want to mark the net->sent time here
			 * since this we use this for HB and retrans cannot
			 * measure RTT
			 */
			/* SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */

			/* For auto-close */
			cnt_thru++;
			if (*now_filled == 0) {
				SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
				*now = asoc->time_last_sent;
				*now_filled = 1;
			} else {
				asoc->time_last_sent = *now;
			}
			*cnt_out += bundle_at;
#ifdef SCTP_AUDITING_ENABLED
			sctp_audit_log(0xC4, bundle_at);
#endif
			if (bundle_at) {
				tsns_sent = data_list[0]->rec.data.TSN_seq;
			}
			/* per-chunk bookkeeping for everything just sent */
			for (i = 0; i < bundle_at; i++) {
				SCTP_STAT_INCR(sctps_sendretransdata);
				data_list[i]->sent = SCTP_DATAGRAM_SENT;
				/*
				 * When we have a revoked data, and we
				 * retransmit it, then we clear the revoked
				 * flag since this flag dictates if we
				 * subtracted from the fs
				 */
				data_list[i]->rec.data.chunk_was_revoked = 0;
				data_list[i]->snd_count++;
				sctp_ucount_decr(asoc->sent_queue_retran_cnt);
				/* record the time */
				data_list[i]->sent_rcv_time = asoc->time_last_sent;
				if (asoc->sent_queue_retran_cnt < 0) {
					asoc->sent_queue_retran_cnt = 0;
				}
				if (data_list[i]->book_size_scale) {
					/*
					 * need to double the book size on
					 * this one
					 */
					data_list[i]->book_size_scale = 0;
					/*
					 * Since we double the booksize, we
					 * must also double the output queue
					 * size, since this get shrunk when
					 * we free by this amount.
					 */
					atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size);
					data_list[i]->book_size *= 2;


				} else {
					sctp_ucount_incr(asoc->total_flight_count);
#ifdef SCTP_LOG_RWND
					sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
					    asoc->peers_rwnd, data_list[i]->send_size, sctp_peer_chunk_oh);
#endif
					asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
					    (uint32_t) (data_list[i]->send_size +
					    sctp_peer_chunk_oh));
				}
#ifdef SCTP_FLIGHT_LOGGING
				sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
				    data_list[i]->whoTo->flight_size,
				    data_list[i]->book_size,
				    (uintptr_t) stcb,
				    data_list[i]->rec.data.TSN_seq);
#endif
				net->flight_size += data_list[i]->book_size;
				asoc->total_flight += data_list[i]->book_size;
				if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
					/* SWS sender side engages */
					asoc->peers_rwnd = 0;
				}
				if ((i == 0) &&
				    (data_list[i]->rec.data.doing_fast_retransmit)) {
					SCTP_STAT_INCR(sctps_sendfastretrans);
					if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
					    (tmr_started == 0)) {
						/*
						 * ok we just fast-retrans'd
						 * the lowest TSN, i.e the
						 * first on the list. In
						 * this case we want to give
						 * some more time to get a
						 * SACK back without a
						 * t3-expiring.
						 */
						sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
						    SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
						sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
					}
				}
			}
#ifdef SCTP_CWND_LOGGING
			sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
#endif
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(21, inp, stcb, NULL);
#endif
		} else {
			/* None will fit */
			return (1);
		}
		if (asoc->sent_queue_retran_cnt <= 0) {
			/* all done we have no more to retran */
			asoc->sent_queue_retran_cnt = 0;
			break;
		}
		if (one_chunk) {
			/* No more room in rwnd */
			return (1);
		}
		/* stop the for loop here. we sent out a packet */
		break;
	}
	return (0);
}


/*
 * Safety net after a retransmission pass: if no destination has a T3-rxt
 * timer pending, start one on the primary destination so the association
 * cannot deadlock waiting for a timer that was never armed.  Passes 'ret'
 * through unchanged.
 */
static int
sctp_timer_validation(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int ret)
{
	struct sctp_nets *net;

	/* Validate that a timer is running somewhere */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
			/* Here is a timer */
			return (ret);
		}
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	/* Gak, we did not have a timer somewhere */
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
		printf("Deadlock avoided starting timer on a dest at retran\n");
	}
#endif
	sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
	return (ret);
}

int
sctp_chunk_output(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int from_where)
{
	/*
	 * Ok this is the generic chunk service queue. we must do the
	 * following: - See if there are retransmits pending, if so we must
	 * do these first and return. - Service the stream queue that is
	 * next, moving any message (note I must get a complete message i.e.
	 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
	 * TSN's - Check to see if the cwnd/rwnd allows any output, if so we
	 * go ahead and fomulate and send the low level chunks. Making sure
	 * to combine any control in the control chunk queue also.
	 */
	struct sctp_association *asoc;
	struct sctp_nets *net;
	int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0,
	    burst_cnt = 0, burst_limit = 0;
	struct timeval now;
	int now_filled = 0;
	int cwnd_full = 0;
	int nagle_on = 0;
	int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
	int un_sent = 0;
	int fr_done, tot_frs = 0;

	asoc = &stcb->asoc;
	/* Nagle applies only to user-initiated sends, unless NODELAY is set */
	if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
			nagle_on = 0;
		} else {
			nagle_on = 1;
		}
	}
	SCTP_TCB_LOCK_ASSERT(stcb);

	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);

	if ((un_sent <= 0) &&
	    (TAILQ_EMPTY(&asoc->control_send_queue)) &&
	    (asoc->sent_queue_retran_cnt == 0)) {
		/* Nothing to do unless there is something to be sent left */
		return (error);
	}
	/*
	 * Do we have something to send, data or control AND a sack timer
	 * running, if so piggy-back the sack.
	 */
	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
		sctp_send_sack(stcb);
		SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
	}
	while (asoc->sent_queue_retran_cnt) {
		/*
		 * Ok, it is retransmission time only, we send out only ONE
		 * packet with a single call off to the retran code.
		 */
		if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
			/*
			 * Special hook for handling cookiess discarded by
			 * peer that carried data. Send cookie-ack only and
			 * then the next call with get the retran's.
			 */
			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
			    &cwnd_full, from_where,
			    &now, &now_filled, frag_point);
			return (0);
		} else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
			/* if its not from a HB then do it */
			fr_done = 0;
			ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done);
			if (fr_done) {
				tot_frs++;
			}
		} else {
			/*
			 * its from any other place, we don't allow retran
			 * output (only control)
			 */
			ret = 1;
		}
		if (ret > 0) {
			/* Can't send anymore */
			/*
			 * now lets push out control by calling med-level
			 * output once. this assures that we WILL send HB's
			 * if queued too.
			 */
			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
			    &cwnd_full, from_where,
			    &now, &now_filled, frag_point);
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(8, inp, stcb, NULL);
#endif
			/* make sure a T3 timer is armed before returning */
			return (sctp_timer_validation(inp, stcb, asoc, ret));
		}
		if (ret < 0) {
			/*
			 * The count was off.. retran is not happening so do
			 * the normal retransmission.
			 */
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(9, inp, stcb, NULL);
#endif
			break;
		}
		if (from_where == SCTP_OUTPUT_FROM_T3) {
			/* Only one transmission allowed out of a timeout */
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(10, inp, stcb, NULL);
#endif
			/* Push out any control */
			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, &cwnd_full, from_where,
			    &now, &now_filled, frag_point);
			return (ret);
		}
		if (tot_frs > asoc->max_burst) {
			/* Hit FR burst limit */
			return (0);
		}
		if ((num_out == 0) && (ret == 0)) {

			/* No more retrans to send */
			break;
		}
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_auditing(12, inp, stcb, NULL);
#endif
	/* Check for bad destinations, if they exist move chunks around. */
	burst_limit = asoc->max_burst;
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
		    SCTP_ADDR_NOT_REACHABLE) {
			/*
			 * if possible move things off of this address we
			 * still may send below due to the dormant state but
			 * we try to find an alternate address to send to
			 * and if we have one we move all queued data on the
			 * out wheel to this alternate address.
			 */
			if (net->ref_count > 1)
				sctp_move_to_an_alt(stcb, asoc, net);
		} else {
			/*
			 * if ((asoc->sat_network) || (net->addr_is_local))
			 * { burst_limit = asoc->max_burst *
			 * SCTP_SAT_NETWORK_BURST_INCR; }
			 */
			if (sctp_use_cwnd_based_maxburst) {
				/*
				 * cwnd-based max-burst: clamp cwnd so at
				 * most burst_limit MTUs beyond the current
				 * flight can be emitted in this pass.
				 */
				if ((net->flight_size + (burst_limit * net->mtu)) < net->cwnd) {
					int old_cwnd;

					if (net->ssthresh < net->cwnd)
						net->ssthresh = net->cwnd;
					old_cwnd = net->cwnd;
					net->cwnd = (net->flight_size + (burst_limit * net->mtu));

#ifdef SCTP_CWND_MONITOR
					sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST);
#endif

#ifdef SCTP_LOG_MAXBURST
					sctp_log_maxburst(stcb, net, 0, burst_limit, SCTP_MAX_BURST_APPLIED);
#endif
					SCTP_STAT_INCR(sctps_maxburstqueued);
				}
				net->fast_retran_ip = 0;
			} else {
				if (net->flight_size == 0) {
					/* Should be decaying the cwnd here */
					;
				}
			}
		}

	}
	/* main output loop: keep calling the mid-level until blocked */
	burst_cnt = 0;
	cwnd_full = 0;
	do {
		error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
		    &reason_code, 0, &cwnd_full, from_where,
		    &now, &now_filled, frag_point);
		if (error) {
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
				printf("Error %d was returned from med-c-op\n", error);
			}
#endif
#ifdef SCTP_LOG_MAXBURST
			sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
#endif
#ifdef SCTP_CWND_LOGGING
			sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
			sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
#endif

			break;
		}
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
			printf("m-c-o put out %d\n", num_out);
		}
#endif
		tot_out += num_out;
		burst_cnt++;
#ifdef SCTP_CWND_LOGGING
		sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
		if (num_out == 0) {
			sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
		}
#endif
		if (nagle_on) {
			/*
			 * When nagle is on, we look at how much is un_sent,
			 * then if its smaller than an MTU and we have data
			 * in flight we stop.
			 */
			un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
			    ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count)
			    * sizeof(struct sctp_data_chunk)));
			if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
			    (stcb->asoc.total_flight > 0)) {
				break;
			}
		}
		if (TAILQ_EMPTY(&asoc->control_send_queue) &&
		    TAILQ_EMPTY(&asoc->send_queue) &&
		    TAILQ_EMPTY(&asoc->out_wheel)) {
			/* Nothing left to send */
			break;
		}
		if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
			/* Nothing left to send */
			break;
		}
	} while (num_out && (sctp_use_cwnd_based_maxburst ||
	    (burst_cnt < burst_limit)));

	if (sctp_use_cwnd_based_maxburst == 0) {
		if (burst_cnt >= burst_limit) {
			SCTP_STAT_INCR(sctps_maxburstqueued);
			asoc->burst_limit_applied = 1;
#ifdef SCTP_LOG_MAXBURST
			sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
#endif
		} else {
			asoc->burst_limit_applied = 0;
		}
	}
#ifdef SCTP_CWND_LOGGING
	sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
#endif
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
		printf("Ok, we have put out %d chunks\n", tot_out);
	}
#endif
	/*
	 * Now we need to clean up the control chunk chain if a ECNE is on
	 * it. It must be marked as UNSENT again so next call will continue
	 * to send it until such time that we get a CWR, to remove it.
7275 */ 7276 if (stcb->asoc.ecn_echo_cnt_onq) 7277 sctp_fix_ecn_echo(asoc); 7278 return (error); 7279 } 7280 7281 7282 int 7283 sctp_output(inp, m, addr, control, p, flags) 7284 struct sctp_inpcb *inp; 7285 struct mbuf *m; 7286 struct sockaddr *addr; 7287 struct mbuf *control; 7288 7289 struct thread *p; 7290 int flags; 7291 { 7292 if (inp == NULL) { 7293 return (EINVAL); 7294 } 7295 if (inp->sctp_socket == NULL) { 7296 return (EINVAL); 7297 } 7298 return (sctp_sosend(inp->sctp_socket, 7299 addr, 7300 (struct uio *)NULL, 7301 m, 7302 control, 7303 flags, 7304 p)); 7305 } 7306 7307 void 7308 send_forward_tsn(struct sctp_tcb *stcb, 7309 struct sctp_association *asoc) 7310 { 7311 struct sctp_tmit_chunk *chk; 7312 struct sctp_forward_tsn_chunk *fwdtsn; 7313 7314 SCTP_TCB_LOCK_ASSERT(stcb); 7315 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 7316 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) { 7317 /* mark it to unsent */ 7318 chk->sent = SCTP_DATAGRAM_UNSENT; 7319 chk->snd_count = 0; 7320 /* Do we correct its output location? 
*/ 7321 if (chk->whoTo != asoc->primary_destination) { 7322 sctp_free_remote_addr(chk->whoTo); 7323 chk->whoTo = asoc->primary_destination; 7324 atomic_add_int(&chk->whoTo->ref_count, 1); 7325 } 7326 goto sctp_fill_in_rest; 7327 } 7328 } 7329 /* Ok if we reach here we must build one */ 7330 sctp_alloc_a_chunk(stcb, chk); 7331 if (chk == NULL) { 7332 return; 7333 } 7334 chk->copy_by_ref = 0; 7335 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN; 7336 chk->rec.chunk_id.can_take_data = 0; 7337 chk->asoc = asoc; 7338 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 7339 if (chk->data == NULL) { 7340 atomic_subtract_int(&chk->whoTo->ref_count, 1); 7341 sctp_free_a_chunk(stcb, chk); 7342 return; 7343 } 7344 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 7345 chk->sent = SCTP_DATAGRAM_UNSENT; 7346 chk->snd_count = 0; 7347 chk->whoTo = asoc->primary_destination; 7348 atomic_add_int(&chk->whoTo->ref_count, 1); 7349 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next); 7350 asoc->ctrl_queue_cnt++; 7351 sctp_fill_in_rest: 7352 /* 7353 * Here we go through and fill out the part that deals with 7354 * stream/seq of the ones we skip. 
7355 */ 7356 SCTP_BUF_LEN(chk->data) = 0; 7357 { 7358 struct sctp_tmit_chunk *at, *tp1, *last; 7359 struct sctp_strseq *strseq; 7360 unsigned int cnt_of_space, i, ovh; 7361 unsigned int space_needed; 7362 unsigned int cnt_of_skipped = 0; 7363 7364 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) { 7365 if (at->sent != SCTP_FORWARD_TSN_SKIP) { 7366 /* no more to look at */ 7367 break; 7368 } 7369 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) { 7370 /* We don't report these */ 7371 continue; 7372 } 7373 cnt_of_skipped++; 7374 } 7375 space_needed = (sizeof(struct sctp_forward_tsn_chunk) + 7376 (cnt_of_skipped * sizeof(struct sctp_strseq))); 7377 7378 cnt_of_space = M_TRAILINGSPACE(chk->data); 7379 7380 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 7381 ovh = SCTP_MIN_OVERHEAD; 7382 } else { 7383 ovh = SCTP_MIN_V4_OVERHEAD; 7384 } 7385 if (cnt_of_space > (asoc->smallest_mtu - ovh)) { 7386 /* trim to a mtu size */ 7387 cnt_of_space = asoc->smallest_mtu - ovh; 7388 } 7389 if (cnt_of_space < space_needed) { 7390 /* 7391 * ok we must trim down the chunk by lowering the 7392 * advance peer ack point. 7393 */ 7394 cnt_of_skipped = (cnt_of_space - 7395 ((sizeof(struct sctp_forward_tsn_chunk)) / 7396 sizeof(struct sctp_strseq))); 7397 /* 7398 * Go through and find the TSN that will be the one 7399 * we report. 
7400 */ 7401 at = TAILQ_FIRST(&asoc->sent_queue); 7402 for (i = 0; i < cnt_of_skipped; i++) { 7403 tp1 = TAILQ_NEXT(at, sctp_next); 7404 at = tp1; 7405 } 7406 last = at; 7407 /* 7408 * last now points to last one I can report, update 7409 * peer ack point 7410 */ 7411 asoc->advanced_peer_ack_point = last->rec.data.TSN_seq; 7412 space_needed -= (cnt_of_skipped * sizeof(struct sctp_strseq)); 7413 } 7414 chk->send_size = space_needed; 7415 /* Setup the chunk */ 7416 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *); 7417 fwdtsn->ch.chunk_length = htons(chk->send_size); 7418 fwdtsn->ch.chunk_flags = 0; 7419 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN; 7420 fwdtsn->new_cumulative_tsn = htonl(asoc->advanced_peer_ack_point); 7421 chk->send_size = (sizeof(struct sctp_forward_tsn_chunk) + 7422 (cnt_of_skipped * sizeof(struct sctp_strseq))); 7423 SCTP_BUF_LEN(chk->data) = chk->send_size; 7424 fwdtsn++; 7425 /* 7426 * Move pointer to after the fwdtsn and transfer to the 7427 * strseq pointer. 7428 */ 7429 strseq = (struct sctp_strseq *)fwdtsn; 7430 /* 7431 * Now populate the strseq list. This is done blindly 7432 * without pulling out duplicate stream info. This is 7433 * inefficent but won't harm the process since the peer will 7434 * look at these in sequence and will thus release anything. 7435 * It could mean we exceed the PMTU and chop off some that 7436 * we could have included.. but this is unlikely (aka 1432/4 7437 * would mean 300+ stream seq's would have to be reported in 7438 * one FWD-TSN. With a bit of work we can later FIX this to 7439 * optimize and pull out duplcates.. but it does add more 7440 * overhead. So for now... not! 
7441 */ 7442 at = TAILQ_FIRST(&asoc->sent_queue); 7443 for (i = 0; i < cnt_of_skipped; i++) { 7444 tp1 = TAILQ_NEXT(at, sctp_next); 7445 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) { 7446 /* We don't report these */ 7447 i--; 7448 at = tp1; 7449 continue; 7450 } 7451 strseq->stream = ntohs(at->rec.data.stream_number); 7452 strseq->sequence = ntohs(at->rec.data.stream_seq); 7453 strseq++; 7454 at = tp1; 7455 } 7456 } 7457 return; 7458 7459 } 7460 7461 void 7462 sctp_send_sack(struct sctp_tcb *stcb) 7463 { 7464 /* 7465 * Queue up a SACK in the control queue. We must first check to see 7466 * if a SACK is somehow on the control queue. If so, we will take 7467 * and and remove the old one. 7468 */ 7469 struct sctp_association *asoc; 7470 struct sctp_tmit_chunk *chk, *a_chk; 7471 struct sctp_sack_chunk *sack; 7472 struct sctp_gap_ack_block *gap_descriptor; 7473 struct sack_track *selector; 7474 int mergeable = 0; 7475 int offset; 7476 caddr_t limit; 7477 uint32_t *dup; 7478 int limit_reached = 0; 7479 unsigned int i, jstart, siz, j; 7480 unsigned int num_gap_blocks = 0, space; 7481 int num_dups = 0; 7482 int space_req; 7483 7484 7485 a_chk = NULL; 7486 asoc = &stcb->asoc; 7487 SCTP_TCB_LOCK_ASSERT(stcb); 7488 if (asoc->last_data_chunk_from == NULL) { 7489 /* Hmm we never received anything */ 7490 return; 7491 } 7492 sctp_set_rwnd(stcb, asoc); 7493 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 7494 if (chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) { 7495 /* Hmm, found a sack already on queue, remove it */ 7496 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); 7497 asoc->ctrl_queue_cnt++; 7498 a_chk = chk; 7499 if (a_chk->data) { 7500 sctp_m_freem(a_chk->data); 7501 a_chk->data = NULL; 7502 } 7503 sctp_free_remote_addr(a_chk->whoTo); 7504 a_chk->whoTo = NULL; 7505 break; 7506 } 7507 } 7508 if (a_chk == NULL) { 7509 sctp_alloc_a_chunk(stcb, a_chk); 7510 if (a_chk == NULL) { 7511 /* No memory so we drop the idea, and set a timer */ 7512 
sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 7513 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5); 7514 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 7515 stcb->sctp_ep, stcb, NULL); 7516 return; 7517 } 7518 a_chk->copy_by_ref = 0; 7519 /* a_chk->rec.chunk_id.id = SCTP_SELECTIVE_ACK; */ 7520 a_chk->rec.chunk_id.id = SCTP_SELECTIVE_ACK; 7521 a_chk->rec.chunk_id.can_take_data = 1; 7522 } 7523 a_chk->asoc = asoc; 7524 a_chk->snd_count = 0; 7525 a_chk->send_size = 0; /* fill in later */ 7526 a_chk->sent = SCTP_DATAGRAM_UNSENT; 7527 7528 if ((asoc->numduptsns) || 7529 (asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE) 7530 ) { 7531 /* 7532 * Ok, we have some duplicates or the destination for the 7533 * sack is unreachable, lets see if we can select an 7534 * alternate than asoc->last_data_chunk_from 7535 */ 7536 if ((!(asoc->last_data_chunk_from->dest_state & 7537 SCTP_ADDR_NOT_REACHABLE)) && 7538 (asoc->used_alt_onsack > asoc->numnets)) { 7539 /* We used an alt last time, don't this time */ 7540 a_chk->whoTo = NULL; 7541 } else { 7542 asoc->used_alt_onsack++; 7543 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0); 7544 } 7545 if (a_chk->whoTo == NULL) { 7546 /* Nope, no alternate */ 7547 a_chk->whoTo = asoc->last_data_chunk_from; 7548 asoc->used_alt_onsack = 0; 7549 } 7550 } else { 7551 /* 7552 * No duplicates so we use the last place we received data 7553 * from. 
7554 */ 7555 asoc->used_alt_onsack = 0; 7556 a_chk->whoTo = asoc->last_data_chunk_from; 7557 } 7558 if (a_chk->whoTo) { 7559 atomic_add_int(&a_chk->whoTo->ref_count, 1); 7560 } 7561 if (asoc->highest_tsn_inside_map == asoc->cumulative_tsn) { 7562 /* no gaps */ 7563 space_req = sizeof(struct sctp_sack_chunk); 7564 } else { 7565 /* gaps get a cluster */ 7566 space_req = MCLBYTES; 7567 } 7568 /* Ok now lets formulate a MBUF with our sack */ 7569 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_DONTWAIT, 1, MT_DATA); 7570 if ((a_chk->data == NULL) || 7571 (a_chk->whoTo == NULL)) { 7572 /* rats, no mbuf memory */ 7573 if (a_chk->data) { 7574 /* was a problem with the destination */ 7575 sctp_m_freem(a_chk->data); 7576 a_chk->data = NULL; 7577 } 7578 if (a_chk->whoTo) 7579 atomic_subtract_int(&a_chk->whoTo->ref_count, 1); 7580 sctp_free_a_chunk(stcb, a_chk); 7581 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 7582 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6); 7583 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 7584 stcb->sctp_ep, stcb, NULL); 7585 return; 7586 } 7587 /* ok, lets go through and fill it in */ 7588 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD); 7589 space = M_TRAILINGSPACE(a_chk->data); 7590 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) { 7591 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD); 7592 } 7593 limit = mtod(a_chk->data, caddr_t); 7594 limit += space; 7595 7596 sack = mtod(a_chk->data, struct sctp_sack_chunk *); 7597 sack->ch.chunk_type = SCTP_SELECTIVE_ACK; 7598 /* 0x01 is used by nonce for ecn */ 7599 sack->ch.chunk_flags = (asoc->receiver_nonce_sum & SCTP_SACK_NONCE_SUM); 7600 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 7601 /* 7602 * CMT DAC algorithm: If 2 (i.e., 0x10) packets have been 7603 * received, then set high bit to 1, else 0. Reset 7604 * pkts_rcvd. 
7605 */ 7606 sack->ch.chunk_flags |= (asoc->cmt_dac_pkts_rcvd << 6); 7607 asoc->cmt_dac_pkts_rcvd = 0; 7608 } 7609 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn); 7610 sack->sack.a_rwnd = htonl(asoc->my_rwnd); 7611 asoc->my_last_reported_rwnd = asoc->my_rwnd; 7612 7613 /* reset the readers interpretation */ 7614 stcb->freed_by_sorcv_sincelast = 0; 7615 7616 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk)); 7617 7618 7619 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8; 7620 if (asoc->cumulative_tsn < asoc->mapping_array_base_tsn) { 7621 offset = 1; 7622 /* 7623 * cum-ack behind the mapping array, so we start and use all 7624 * entries. 7625 */ 7626 jstart = 0; 7627 } else { 7628 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn; 7629 /* 7630 * we skip the first one when the cum-ack is at or above the 7631 * mapping array base. 7632 */ 7633 jstart = 1; 7634 } 7635 if (compare_with_wrap(asoc->highest_tsn_inside_map, asoc->cumulative_tsn, MAX_TSN)) { 7636 /* we have a gap .. maybe */ 7637 for (i = 0; i < siz; i++) { 7638 selector = &sack_array[asoc->mapping_array[i]]; 7639 if (mergeable && selector->right_edge) { 7640 /* 7641 * Backup, left and right edges were ok to 7642 * merge. 
7643 */ 7644 num_gap_blocks--; 7645 gap_descriptor--; 7646 } 7647 if (selector->num_entries == 0) 7648 mergeable = 0; 7649 else { 7650 for (j = jstart; j < selector->num_entries; j++) { 7651 if (mergeable && selector->right_edge) { 7652 /* 7653 * do a merge by NOT setting 7654 * the left side 7655 */ 7656 mergeable = 0; 7657 } else { 7658 /* 7659 * no merge, set the left 7660 * side 7661 */ 7662 mergeable = 0; 7663 gap_descriptor->start = htons((selector->gaps[j].start + offset)); 7664 } 7665 gap_descriptor->end = htons((selector->gaps[j].end + offset)); 7666 num_gap_blocks++; 7667 gap_descriptor++; 7668 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) { 7669 /* no more room */ 7670 limit_reached = 1; 7671 break; 7672 } 7673 } 7674 if (selector->left_edge) { 7675 mergeable = 1; 7676 } 7677 } 7678 if (limit_reached) { 7679 /* Reached the limit stop */ 7680 break; 7681 } 7682 jstart = 0; 7683 offset += 8; 7684 } 7685 if (num_gap_blocks == 0) { 7686 /* reneged all chunks */ 7687 asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 7688 } 7689 } 7690 /* now we must add any dups we are going to report. */ 7691 if ((limit_reached == 0) && (asoc->numduptsns)) { 7692 dup = (uint32_t *) gap_descriptor; 7693 for (i = 0; i < asoc->numduptsns; i++) { 7694 *dup = htonl(asoc->dup_tsns[i]); 7695 dup++; 7696 num_dups++; 7697 if (((caddr_t)dup + sizeof(uint32_t)) > limit) { 7698 /* no more room */ 7699 break; 7700 } 7701 } 7702 asoc->numduptsns = 0; 7703 } 7704 /* 7705 * now that the chunk is prepared queue it to the control chunk 7706 * queue. 
7707 */ 7708 a_chk->send_size = (sizeof(struct sctp_sack_chunk) + 7709 (num_gap_blocks * sizeof(struct sctp_gap_ack_block)) + 7710 (num_dups * sizeof(int32_t))); 7711 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size; 7712 sack->sack.num_gap_ack_blks = htons(num_gap_blocks); 7713 sack->sack.num_dup_tsns = htons(num_dups); 7714 sack->ch.chunk_length = htons(a_chk->send_size); 7715 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next); 7716 asoc->ctrl_queue_cnt++; 7717 SCTP_STAT_INCR(sctps_sendsacks); 7718 return; 7719 } 7720 7721 7722 void 7723 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr) 7724 { 7725 struct mbuf *m_abort; 7726 struct mbuf *m_out = NULL, *m_end = NULL; 7727 struct sctp_abort_chunk *abort = NULL; 7728 int sz; 7729 uint32_t auth_offset = 0; 7730 struct sctp_auth_chunk *auth = NULL; 7731 struct sctphdr *shdr; 7732 7733 /* 7734 * Add an AUTH chunk, if chunk requires it and save the offset into 7735 * the chain for AUTH 7736 */ 7737 if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION, 7738 stcb->asoc.peer_auth_chunks)) { 7739 m_out = sctp_add_auth_chunk(m_out, &m_end, &auth, &auth_offset, 7740 stcb, SCTP_ABORT_ASSOCIATION); 7741 } 7742 SCTP_TCB_LOCK_ASSERT(stcb); 7743 m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_DONTWAIT, 1, MT_HEADER); 7744 if (m_abort == NULL) { 7745 /* no mbuf's */ 7746 if (m_out) 7747 sctp_m_freem(m_out); 7748 return; 7749 } 7750 /* link in any error */ 7751 SCTP_BUF_NEXT(m_abort) = operr; 7752 sz = 0; 7753 if (operr) { 7754 struct mbuf *n; 7755 7756 n = operr; 7757 while (n) { 7758 sz += SCTP_BUF_LEN(n); 7759 n = SCTP_BUF_NEXT(n); 7760 } 7761 } 7762 SCTP_BUF_LEN(m_abort) = sizeof(*abort); 7763 if (m_out == NULL) { 7764 /* NO Auth chunk prepended, so reserve space in front */ 7765 SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD); 7766 m_out = m_abort; 7767 } else { 7768 /* Put AUTH chunk at the front of the chain */ 7769 SCTP_BUF_NEXT(m_end) = m_abort; 7770 } 7771 7772 /* fill in the ABORT 
chunk */ 7773 abort = mtod(m_abort, struct sctp_abort_chunk *); 7774 abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION; 7775 abort->ch.chunk_flags = 0; 7776 abort->ch.chunk_length = htons(sizeof(*abort) + sz); 7777 7778 /* prepend and fill in the SCTP header */ 7779 SCTP_BUF_PREPEND(m_out, sizeof(struct sctphdr), M_DONTWAIT); 7780 if (m_out == NULL) { 7781 /* TSNH: no memory */ 7782 return; 7783 } 7784 shdr = mtod(m_out, struct sctphdr *); 7785 shdr->src_port = stcb->sctp_ep->sctp_lport; 7786 shdr->dest_port = stcb->rport; 7787 shdr->v_tag = htonl(stcb->asoc.peer_vtag); 7788 shdr->checksum = 0; 7789 auth_offset += sizeof(struct sctphdr); 7790 7791 sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, 7792 stcb->asoc.primary_destination, 7793 (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr, 7794 m_out, auth_offset, auth, 1, 0, NULL, 0); 7795 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 7796 } 7797 7798 int 7799 sctp_send_shutdown_complete(struct sctp_tcb *stcb, 7800 struct sctp_nets *net) 7801 { 7802 /* formulate and SEND a SHUTDOWN-COMPLETE */ 7803 struct mbuf *m_shutdown_comp; 7804 struct sctp_shutdown_complete_msg *comp_cp; 7805 7806 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_complete_msg), 0, M_DONTWAIT, 1, MT_HEADER); 7807 if (m_shutdown_comp == NULL) { 7808 /* no mbuf's */ 7809 return (-1); 7810 } 7811 comp_cp = mtod(m_shutdown_comp, struct sctp_shutdown_complete_msg *); 7812 comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE; 7813 comp_cp->shut_cmp.ch.chunk_flags = 0; 7814 comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk)); 7815 comp_cp->sh.src_port = stcb->sctp_ep->sctp_lport; 7816 comp_cp->sh.dest_port = stcb->rport; 7817 comp_cp->sh.v_tag = htonl(stcb->asoc.peer_vtag); 7818 comp_cp->sh.checksum = 0; 7819 7820 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_msg); 7821 sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net, 7822 (struct sockaddr *)&net->ro._l_addr, 
7823 m_shutdown_comp, 0, NULL, 1, 0, NULL, 0); 7824 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 7825 return (0); 7826 } 7827 7828 int 7829 sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh) 7830 { 7831 /* formulate and SEND a SHUTDOWN-COMPLETE */ 7832 struct mbuf *o_pak; 7833 struct mbuf *mout; 7834 struct ip *iph, *iph_out; 7835 struct ip6_hdr *ip6, *ip6_out; 7836 int offset_out, len; 7837 struct sctp_shutdown_complete_msg *comp_cp; 7838 7839 /* Get room for the largest message */ 7840 len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_shutdown_complete_msg)); 7841 7842 o_pak = SCTP_GET_HEADER_FOR_OUTPUT(len); 7843 if (o_pak == NULL) { 7844 /* no mbuf's */ 7845 return (-1); 7846 } 7847 mout = SCTP_HEADER_TO_CHAIN(o_pak); 7848 iph = mtod(m, struct ip *); 7849 iph_out = NULL; 7850 ip6_out = NULL; 7851 offset_out = 0; 7852 if (iph->ip_v == IPVERSION) { 7853 SCTP_BUF_LEN(mout) = sizeof(struct ip) + 7854 sizeof(struct sctp_shutdown_complete_msg); 7855 SCTP_BUF_NEXT(mout) = NULL; 7856 iph_out = mtod(mout, struct ip *); 7857 7858 /* Fill in the IP header for the ABORT */ 7859 iph_out->ip_v = IPVERSION; 7860 iph_out->ip_hl = (sizeof(struct ip) / 4); 7861 iph_out->ip_tos = (u_char)0; 7862 iph_out->ip_id = 0; 7863 iph_out->ip_off = 0; 7864 iph_out->ip_ttl = MAXTTL; 7865 iph_out->ip_p = IPPROTO_SCTP; 7866 iph_out->ip_src.s_addr = iph->ip_dst.s_addr; 7867 iph_out->ip_dst.s_addr = iph->ip_src.s_addr; 7868 7869 /* let IP layer calculate this */ 7870 iph_out->ip_sum = 0; 7871 offset_out += sizeof(*iph_out); 7872 comp_cp = (struct sctp_shutdown_complete_msg *)( 7873 (caddr_t)iph_out + offset_out); 7874 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 7875 ip6 = (struct ip6_hdr *)iph; 7876 SCTP_BUF_LEN(mout) = sizeof(struct ip6_hdr) + 7877 sizeof(struct sctp_shutdown_complete_msg); 7878 SCTP_BUF_NEXT(mout) = NULL; 7879 ip6_out = mtod(mout, struct ip6_hdr *); 7880 7881 /* Fill in the IPv6 header for the ABORT */ 7882 ip6_out->ip6_flow = ip6->ip6_flow; 
7883 ip6_out->ip6_hlim = ip6_defhlim; 7884 ip6_out->ip6_nxt = IPPROTO_SCTP; 7885 ip6_out->ip6_src = ip6->ip6_dst; 7886 ip6_out->ip6_dst = ip6->ip6_src; 7887 /* 7888 * ?? The old code had both the iph len + payload, I think 7889 * this is wrong and would never have worked 7890 */ 7891 ip6_out->ip6_plen = sizeof(struct sctp_shutdown_complete_msg); 7892 offset_out += sizeof(*ip6_out); 7893 comp_cp = (struct sctp_shutdown_complete_msg *)( 7894 (caddr_t)ip6_out + offset_out); 7895 } else { 7896 /* Currently not supported. */ 7897 return (-1); 7898 } 7899 7900 SCTP_HEADER_LEN(o_pak) = SCTP_BUF_LEN(mout); 7901 /* Now copy in and fill in the ABORT tags etc. */ 7902 comp_cp->sh.src_port = sh->dest_port; 7903 comp_cp->sh.dest_port = sh->src_port; 7904 comp_cp->sh.checksum = 0; 7905 comp_cp->sh.v_tag = sh->v_tag; 7906 comp_cp->shut_cmp.ch.chunk_flags = SCTP_HAD_NO_TCB; 7907 comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE; 7908 comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk)); 7909 7910 /* add checksum */ 7911 if ((sctp_no_csum_on_loopback) && SCTP_IS_IT_LOOPBACK(o_pak)) { 7912 comp_cp->sh.checksum = 0; 7913 } else { 7914 comp_cp->sh.checksum = sctp_calculate_sum(mout, NULL, offset_out); 7915 } 7916 if (iph_out != NULL) { 7917 struct route ro; 7918 7919 bzero(&ro, sizeof ro); 7920 /* set IPv4 length */ 7921 iph_out->ip_len = SCTP_HEADER_LEN(o_pak); 7922 /* out it goes */ 7923 ip_output(o_pak, 0, &ro, IP_RAWOUTPUT, NULL 7924 ,NULL 7925 ); 7926 /* Free the route if we got one back */ 7927 if (ro.ro_rt) 7928 RTFREE(ro.ro_rt); 7929 } else if (ip6_out != NULL) { 7930 struct route_in6 ro; 7931 7932 7933 bzero(&ro, sizeof(ro)); 7934 ip6_output(o_pak, NULL, &ro, 0, NULL, NULL 7935 ,NULL 7936 ); 7937 /* Free the route if we got one back */ 7938 if (ro.ro_rt) 7939 RTFREE(ro.ro_rt); 7940 } 7941 SCTP_STAT_INCR(sctps_sendpackets); 7942 SCTP_STAT_INCR_COUNTER64(sctps_outpackets); 7943 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 7944 
return (0); 7945 } 7946 7947 static struct sctp_nets * 7948 sctp_select_hb_destination(struct sctp_tcb *stcb, struct timeval *now) 7949 { 7950 struct sctp_nets *net, *hnet; 7951 int ms_goneby, highest_ms, state_overide = 0; 7952 7953 SCTP_GETTIME_TIMEVAL(now); 7954 highest_ms = 0; 7955 hnet = NULL; 7956 SCTP_TCB_LOCK_ASSERT(stcb); 7957 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 7958 if ( 7959 ((net->dest_state & SCTP_ADDR_NOHB) && ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) || 7960 (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE) 7961 ) { 7962 /* 7963 * Skip this guy from consideration if HB is off AND 7964 * its confirmed 7965 */ 7966 continue; 7967 } 7968 if (sctp_destination_is_reachable(stcb, (struct sockaddr *)&net->ro._l_addr) == 0) { 7969 /* skip this dest net from consideration */ 7970 continue; 7971 } 7972 if (net->last_sent_time.tv_sec) { 7973 /* Sent to so we subtract */ 7974 ms_goneby = (now->tv_sec - net->last_sent_time.tv_sec) * 1000; 7975 } else 7976 /* Never been sent to */ 7977 ms_goneby = 0x7fffffff; 7978 /* 7979 * When the address state is unconfirmed but still 7980 * considered reachable, we HB at a higher rate. Once it 7981 * goes confirmed OR reaches the "unreachable" state, thenw 7982 * we cut it back to HB at a more normal pace. 
		 */
		if ((net->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED) {
			/* unconfirmed-but-reachable: override the RTO gate below */
			state_overide = 1;
		} else {
			state_overide = 0;
		}

		/* candidate must be past its RTO (or overridden) and the stalest so far */
		if ((((unsigned int)ms_goneby >= net->RTO) || (state_overide)) &&
		    (ms_goneby > highest_ms)) {
			highest_ms = ms_goneby;
			hnet = net;
		}
	}
	if (hnet &&
	    ((hnet->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED)) {
		state_overide = 1;
	} else {
		state_overide = 0;
	}

	/* highest_ms != 0 implies hnet was set in the loop above */
	if (highest_ms && (((unsigned int)highest_ms >= hnet->RTO) || state_overide)) {
		/*
		 * Found the one with longest delay bounds OR it is
		 * unconfirmed and still not marked unreachable.
		 */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
			printf("net:%p is the hb winner -",
			    hnet);
			if (hnet)
				sctp_print_address((struct sockaddr *)&hnet->ro._l_addr);
			else
				printf(" none\n");
		}
#endif
		/* update the timer now */
		hnet->last_sent_time = *now;
		return (hnet);
	}
	/* Nothing to HB */
	return (NULL);
}

/*
 * Build a HEARTBEAT chunk and queue it on the control-send queue.
 * user_req != 0 means the user explicitly asked for a HB to u_net;
 * otherwise sctp_select_hb_destination() picks the target.
 * Returns 1 when a HB was queued, 0 when nothing was sent, and -1 when
 * the threshold check tore down the association.
 */
int
sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net)
{
	struct sctp_tmit_chunk *chk;
	struct sctp_nets *net;
	struct sctp_heartbeat_chunk *hb;
	struct timeval now;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;

	SCTP_TCB_LOCK_ASSERT(stcb);
	if (user_req == 0) {
		/* also fills in 'now' with the current time */
		net = sctp_select_hb_destination(stcb, &now);
		if (net == NULL) {
			/*
			 * All our busy none to send to, just start the
			 * timer again.
			 */
			if (stcb->asoc.state == 0) {
				return (0);
			}
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
			    stcb->sctp_ep,
			    stcb,
			    net);
			return (0);
		}
	} else {
		net = u_net;
		if (net == NULL) {
			return (0);
		}
		SCTP_GETTIME_TIMEVAL(&now);
	}
	sin = (struct sockaddr_in *)&net->ro._l_addr;
	if (sin->sin_family != AF_INET) {
		if (sin->sin_family != AF_INET6) {
			/* huh */
			return (0);
		}
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
			printf("Gak, can't get a chunk for hb\n");
		}
#endif
		return (0);
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
	chk->rec.chunk_id.can_take_data = 1;
	chk->asoc = &stcb->asoc;
	chk->send_size = sizeof(struct sctp_heartbeat_chunk);

	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk);
		return (0);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = net;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	/* Now we have a mbuf that we can fill in with the details */
	hb = mtod(chk->data, struct sctp_heartbeat_chunk *);

	/* fill out chunk header */
	hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
	hb->ch.chunk_flags = 0;
	hb->ch.chunk_length = htons(chk->send_size);
	/* Fill out hb parameter */
	hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
	hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
	/* timestamp echoed back by the peer in HEARTBEAT-ACK for RTT measurement */
	hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
	hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
	/* Did our user request this one, put it in */
	hb->heartbeat.hb_info.user_req = user_req;
	hb->heartbeat.hb_info.addr_family = sin->sin_family;
	hb->heartbeat.hb_info.addr_len = sin->sin_len;
	if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
		/*
		 * we only take from the entropy pool if the address is not
		 * confirmed.
		 */
		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
	} else {
		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
	}
	if (sin->sin_family == AF_INET) {
		memcpy(hb->heartbeat.hb_info.address, &sin->sin_addr, sizeof(sin->sin_addr));
	} else if (sin->sin_family == AF_INET6) {
		/* We leave the scope the way it is in our lookup table. */
		sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
		memcpy(hb->heartbeat.hb_info.address, &sin6->sin6_addr, sizeof(sin6->sin6_addr));
	} else {
		/*
		 * huh compiler bug
		 *
		 * NOTE(review): unreachable given the family check above,
		 * but if it were reached the chunk and its whoTo reference
		 * would leak here -- worth confirming upstream.
		 */
		return (0);
	}
	/* ok we have a destination that needs a beat */
	/* lets do the theshold management Qiaobing style */

	if (sctp_threshold_management(stcb->sctp_ep, stcb, net,
	    stcb->asoc.max_send_times)) {
		/*
		 * we have lost the association, in a way this is quite bad
		 * since we really are one less time since we really did not
		 * send yet. This is the down side to the Q's style as
		 * defined in the RFC and not my alternate style defined in
		 * the RFC.
		 */
		atomic_subtract_int(&chk->whoTo->ref_count, 1);
		if (chk->data != NULL) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk);
		return (-1);
	}
	net->hb_responded = 0;
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
	stcb->asoc.ctrl_queue_cnt++;
	SCTP_STAT_INCR(sctps_sendheartbeat);
	/*
	 * Call directly med level routine to put out the chunk. It will
	 * always tumble out control chunks aka HB but it may even tumble
	 * out data too.
	 */
	return (1);
}

/*
 * Queue (or update) an ECN-ECHO chunk reporting congestion experienced up
 * to 'high_tsn'.  If an ECN-ECHO is already on the control queue only its
 * TSN is refreshed; otherwise a new chunk is built and queued.
 */
void
sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
    uint32_t high_tsn)
{
	struct sctp_association *asoc;
	struct sctp_ecne_chunk *ecne;
	struct sctp_tmit_chunk *chk;

	asoc = &stcb->asoc;
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
			/* found a previous ECN_ECHO update it if needed */
			ecne = mtod(chk->data, struct sctp_ecne_chunk *);
			ecne->tsn = htonl(high_tsn);
			return;
		}
	}
	/* nope could not find one to update so we must build one */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return;
	}
	chk->copy_by_ref = 0;
	SCTP_STAT_INCR(sctps_sendecne);
	chk->rec.chunk_id.id = SCTP_ECN_ECHO;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = &stcb->asoc;
	chk->send_size = sizeof(struct sctp_ecne_chunk);
	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk);
		return;
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = net;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	stcb->asoc.ecn_echo_cnt_onq++;
	ecne =
mtod(chk->data, struct sctp_ecne_chunk *); 8204 ecne->ch.chunk_type = SCTP_ECN_ECHO; 8205 ecne->ch.chunk_flags = 0; 8206 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk)); 8207 ecne->tsn = htonl(high_tsn); 8208 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next); 8209 asoc->ctrl_queue_cnt++; 8210 } 8211 8212 void 8213 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net, 8214 struct mbuf *m, int iphlen, int bad_crc) 8215 { 8216 struct sctp_association *asoc; 8217 struct sctp_pktdrop_chunk *drp; 8218 struct sctp_tmit_chunk *chk; 8219 uint8_t *datap; 8220 int len; 8221 unsigned int small_one; 8222 struct ip *iph; 8223 8224 long spc; 8225 8226 asoc = &stcb->asoc; 8227 SCTP_TCB_LOCK_ASSERT(stcb); 8228 if (asoc->peer_supports_pktdrop == 0) { 8229 /* 8230 * peer must declare support before I send one. 8231 */ 8232 return; 8233 } 8234 if (stcb->sctp_socket == NULL) { 8235 return; 8236 } 8237 sctp_alloc_a_chunk(stcb, chk); 8238 if (chk == NULL) { 8239 return; 8240 } 8241 chk->copy_by_ref = 0; 8242 iph = mtod(m, struct ip *); 8243 if (iph == NULL) { 8244 return; 8245 } 8246 if (iph->ip_v == IPVERSION) { 8247 /* IPv4 */ 8248 len = chk->send_size = iph->ip_len; 8249 } else { 8250 struct ip6_hdr *ip6h; 8251 8252 /* IPv6 */ 8253 ip6h = mtod(m, struct ip6_hdr *); 8254 len = chk->send_size = htons(ip6h->ip6_plen); 8255 } 8256 chk->asoc = &stcb->asoc; 8257 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 8258 if (chk->data == NULL) { 8259 jump_out: 8260 sctp_free_a_chunk(stcb, chk); 8261 return; 8262 } 8263 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 8264 drp = mtod(chk->data, struct sctp_pktdrop_chunk *); 8265 if (drp == NULL) { 8266 sctp_m_freem(chk->data); 8267 chk->data = NULL; 8268 goto jump_out; 8269 } 8270 small_one = asoc->smallest_mtu; 8271 if (small_one > MCLBYTES) { 8272 /* Only one cluster worth of data MAX */ 8273 small_one = MCLBYTES; 8274 } 8275 chk->book_size = SCTP_SIZE32((chk->send_size + 
sizeof(struct sctp_pktdrop_chunk) + 8276 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD)); 8277 chk->book_size_scale = 0; 8278 if (chk->book_size > small_one) { 8279 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED; 8280 drp->trunc_len = htons(chk->send_size); 8281 chk->send_size = small_one - (SCTP_MED_OVERHEAD + 8282 sizeof(struct sctp_pktdrop_chunk) + 8283 sizeof(struct sctphdr)); 8284 len = chk->send_size; 8285 } else { 8286 /* no truncation needed */ 8287 drp->ch.chunk_flags = 0; 8288 drp->trunc_len = htons(0); 8289 } 8290 if (bad_crc) { 8291 drp->ch.chunk_flags |= SCTP_BADCRC; 8292 } 8293 chk->send_size += sizeof(struct sctp_pktdrop_chunk); 8294 SCTP_BUF_LEN(chk->data) = chk->send_size; 8295 chk->sent = SCTP_DATAGRAM_UNSENT; 8296 chk->snd_count = 0; 8297 if (net) { 8298 /* we should hit here */ 8299 chk->whoTo = net; 8300 } else { 8301 chk->whoTo = asoc->primary_destination; 8302 } 8303 atomic_add_int(&chk->whoTo->ref_count, 1); 8304 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED; 8305 chk->rec.chunk_id.can_take_data = 1; 8306 drp->ch.chunk_type = SCTP_PACKET_DROPPED; 8307 drp->ch.chunk_length = htons(chk->send_size); 8308 spc = stcb->sctp_socket->so_rcv.sb_hiwat; 8309 if (spc < 0) { 8310 spc = 0; 8311 } 8312 drp->bottle_bw = htonl(spc); 8313 if (asoc->my_rwnd) { 8314 drp->current_onq = htonl(asoc->size_on_reasm_queue + 8315 asoc->size_on_all_streams + 8316 asoc->my_rwnd_control_len + 8317 stcb->sctp_socket->so_rcv.sb_cc); 8318 } else { 8319 /* 8320 * If my rwnd is 0, possibly from mbuf depletion as well as 8321 * space used, tell the peer there is NO space aka onq == bw 8322 */ 8323 drp->current_onq = htonl(spc); 8324 } 8325 drp->reserved = 0; 8326 datap = drp->data; 8327 m_copydata(m, iphlen, len, (caddr_t)datap); 8328 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next); 8329 asoc->ctrl_queue_cnt++; 8330 } 8331 8332 void 8333 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn) 8334 { 8335 struct sctp_association *asoc; 8336 
struct sctp_cwr_chunk *cwr;
	struct sctp_tmit_chunk *chk;

	asoc = &stcb->asoc;
	SCTP_TCB_LOCK_ASSERT(stcb);
	/* reuse a CWR already sitting on the control queue if possible */
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id == SCTP_ECN_CWR) {
			/* found a previous ECN_CWR update it if needed */
			cwr = mtod(chk->data, struct sctp_cwr_chunk *);
			/* only move the TSN forward (serial-number compare) */
			if (compare_with_wrap(high_tsn, ntohl(cwr->tsn),
			    MAX_TSN)) {
				cwr->tsn = htonl(high_tsn);
			}
			return;
		}
	}
	/* nope could not find one to update so we must build one */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return;
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_ECN_CWR;
	chk->rec.chunk_id.can_take_data = 1;
	chk->asoc = &stcb->asoc;
	chk->send_size = sizeof(struct sctp_cwr_chunk);
	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk);
		return;
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	/* NOTE(review): unlike other senders here, a NULL 'net' is not
	 * defaulted to the primary destination — presumably callers always
	 * pass a valid net; confirm against call sites. */
	chk->whoTo = net;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	cwr = mtod(chk->data, struct sctp_cwr_chunk *);
	cwr->ch.chunk_type = SCTP_ECN_CWR;
	cwr->ch.chunk_flags = 0;
	cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
	cwr->tsn = htonl(high_tsn);
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
	asoc->ctrl_queue_cnt++;
}

/*
 * Append an Outgoing-SSN-Reset request parameter to the stream-reset
 * chunk being built in 'chk', listing 'number_entries' stream ids from
 * 'list' (all entries if zero means "all streams" per the chunk format).
 * Updates the chunk header length and the chunk's book/send sizes.
 */
void
sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
    int number_entries, uint16_t * list,
    uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
{
	int len, old_len, i;
	struct sctp_stream_reset_out_request *req_out;
	struct sctp_chunkhdr *ch;

	ch = mtod(chk->data, struct sctp_chunkhdr *);


	/* current (padded) chunk length = where the new param starts */
	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
	/* now how long will this param be? */
	len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
	req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
	req_out->ph.param_length = htons(len);
	req_out->request_seq = htonl(seq);
	req_out->response_seq = htonl(resp_seq);
	req_out->send_reset_at_tsn = htonl(last_sent);
	if (number_entries) {
		for (i = 0; i < number_entries; i++) {
			req_out->list_of_streams[i] = htons(list[i]);
		}
	}
	if (SCTP_SIZE32(len) > len) {
		/*
		 * Need to worry about the pad we may end up adding to the
		 * end. This is easy since the struct is either aligned to 4
		 * bytes or 2 bytes off.
		 */
		req_out->list_of_streams[number_entries] = 0;
	}
	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	chk->book_size = len + old_len;
	chk->book_size_scale = 0;
	chk->send_size = SCTP_SIZE32(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	return;
}


/*
 * Append an Incoming-SSN-Reset request parameter (asks the peer to reset
 * its outgoing streams toward us) to the stream-reset chunk in 'chk'.
 */
void
sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
    int number_entries, uint16_t * list,
    uint32_t seq)
{
	int len, old_len, i;
	struct sctp_stream_reset_in_request *req_in;
	struct sctp_chunkhdr *ch;

	ch = mtod(chk->data, struct sctp_chunkhdr *);


	/* current (padded) chunk length = where the new param starts */
	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
	/* now how long will this param be?
 */
	len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
	req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
	req_in->ph.param_length = htons(len);
	req_in->request_seq = htonl(seq);
	if (number_entries) {
		for (i = 0; i < number_entries; i++) {
			req_in->list_of_streams[i] = htons(list[i]);
		}
	}
	if (SCTP_SIZE32(len) > len) {
		/*
		 * Need to worry about the pad we may end up adding to the
		 * end. This is easy since the struct is either aligned to 4
		 * bytes or 2 bytes off.
		 */
		req_in->list_of_streams[number_entries] = 0;
	}
	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	chk->book_size = len + old_len;
	chk->book_size_scale = 0;
	chk->send_size = SCTP_SIZE32(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	return;
}


/*
 * Append a TSN-Reset request parameter to the stream-reset chunk in
 * 'chk'.  Fixed-size parameter; adjusts chunk length and book keeping.
 */
void
sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
    uint32_t seq)
{
	int len, old_len;
	struct sctp_stream_reset_tsn_request *req_tsn;
	struct sctp_chunkhdr *ch;

	ch = mtod(chk->data, struct sctp_chunkhdr *);


	/* current (padded) chunk length = where the new param starts */
	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
	/* now how long will this param be?
	 */
	len = sizeof(struct sctp_stream_reset_tsn_request);
	req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
	req_tsn->ph.param_length = htons(len);
	req_tsn->request_seq = htonl(seq);

	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	chk->send_size = len + old_len;
	chk->book_size = SCTP_SIZE32(chk->send_size);
	chk->book_size_scale = 0;
	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
	return;
}

/*
 * Append a Reset-Response parameter (response_seq + result code) to the
 * stream-reset chunk in 'chk'.
 */
void
sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
    uint32_t resp_seq, uint32_t result)
{
	int len, old_len;
	struct sctp_stream_reset_response *resp;
	struct sctp_chunkhdr *ch;

	ch = mtod(chk->data, struct sctp_chunkhdr *);


	/* current (padded) chunk length = where the new param starts */
	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
	/* now how long will this param be? */
	len = sizeof(struct sctp_stream_reset_response);
	resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
	resp->ph.param_length = htons(len);
	resp->response_seq = htonl(resp_seq);
	/* NOTE(review): ntohl() where the TSN variant below uses htonl().
	 * Numerically identical byte-swap, but inconsistent — confirm and
	 * normalize to htonl() for host-to-wire conversion. */
	resp->result = ntohl(result);

	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	chk->book_size = len + old_len;
	chk->book_size_scale = 0;
	chk->send_size = SCTP_SIZE32(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	return;

}


/*
 * Append a Reset-Response parameter that also carries the sender's and
 * receiver's next TSNs (used when answering a TSN-reset request).
 */
void
sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
    uint32_t resp_seq, uint32_t result,
    uint32_t send_una, uint32_t recv_next)
{
	int len, old_len;
	struct sctp_stream_reset_response_tsn *resp;
	struct sctp_chunkhdr *ch;

	ch = mtod(chk->data, struct sctp_chunkhdr *);


	/* current (padded) chunk length = where the new param starts */
	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param.
 */
	resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
	/* now how long will this param be? */
	len = sizeof(struct sctp_stream_reset_response_tsn);
	resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
	resp->ph.param_length = htons(len);
	resp->response_seq = htonl(resp_seq);
	resp->result = htonl(result);
	resp->senders_next_tsn = htonl(send_una);
	resp->receivers_next_tsn = htonl(recv_next);

	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	chk->book_size = len + old_len;
	chk->send_size = SCTP_SIZE32(chk->book_size);
	chk->book_size_scale = 0;
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	return;
}


/*
 * Build and queue a STREAM-RESET request chunk containing any
 * combination of out/in requests (TSN request is exclusive of the other
 * two).  Returns 0 on success, or EBUSY (a request is already
 * outstanding), EINVAL (no-op or invalid combination), ENOMEM.
 * Starts the stream-reset retransmission timer on success.
 */
int
sctp_send_str_reset_req(struct sctp_tcb *stcb,
    int number_entries, uint16_t * list,
    uint8_t send_out_req, uint32_t resp_seq,
    uint8_t send_in_req,
    uint8_t send_tsn_req)
{

	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_chunkhdr *ch;
	uint32_t seq;

	asoc = &stcb->asoc;
	if (asoc->stream_reset_outstanding) {
		/*
		 * Already one pending, must get ACK back to clear the flag.
		 */
		return (EBUSY);
	}
	if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0)) {
		/* nothing to do */
		return (EINVAL);
	}
	if (send_tsn_req && (send_out_req || send_in_req)) {
		/* error, can't do that */
		return (EINVAL);
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return (ENOMEM);
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = &stcb->asoc;
	/* start with just the bare chunk header; params appended below */
	chk->book_size = sizeof(struct sctp_chunkhdr);
	chk->send_size = SCTP_SIZE32(chk->book_size);
	chk->book_size_scale = 0;

	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk);
		return (ENOMEM);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);

	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = asoc->primary_destination;
	atomic_add_int(&chk->whoTo->ref_count, 1);

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	ch->chunk_type = SCTP_STREAM_RESET;
	ch->chunk_flags = 0;
	ch->chunk_length = htons(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;

	seq = stcb->asoc.str_reset_seq_out;
	if (send_out_req) {
		sctp_add_stream_reset_out(chk, number_entries, list,
		    seq, resp_seq, (stcb->asoc.sending_seq - 1));
		asoc->stream_reset_out_is_outstanding = 1;
		seq++;
		asoc->stream_reset_outstanding++;
	}
	if (send_in_req) {
		sctp_add_stream_reset_in(chk, number_entries, list, seq);
		asoc->stream_reset_outstanding++;
	}
	if (send_tsn_req) {
		sctp_add_stream_reset_tsn(chk, seq);
		asoc->stream_reset_outstanding++;
	}
	/* remember the pending request chunk for the ACK handler */
	asoc->str_reset = chk;

	/* insert the chunk for sending */
	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
	    chk,
	    sctp_next);
asoc->ctrl_queue_cnt++;
	/* arm the stream-reset retransmission timer for this request */
	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
	return (0);
}

/*
 * Build an out-of-the-blue ABORT in response to packet 'm' (which has no
 * association) and transmit it directly via ip_output/ip6_output,
 * optionally appending the error-cause chain 'err_cause'.  Consumes
 * err_cause on all handled paths.
 */
void
sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
    struct mbuf *err_cause)
{
	/*
	 * Formulate the abort message, and send it back down.
	 */
	struct mbuf *o_pak;
	struct mbuf *mout;
	struct sctp_abort_msg *abm;
	struct ip *iph, *iph_out;
	struct ip6_hdr *ip6, *ip6_out;
	int iphlen_out;

	/* don't respond to ABORT with ABORT */
	if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
		if (err_cause)
			sctp_m_freem(err_cause);
		return;
	}
	o_pak = SCTP_GET_HEADER_FOR_OUTPUT((sizeof(struct ip6_hdr) + sizeof(struct sctp_abort_msg)));
	if (o_pak == NULL) {
		if (err_cause)
			sctp_m_freem(err_cause);
		return;
	}
	mout = SCTP_HEADER_TO_CHAIN(o_pak);
	iph = mtod(m, struct ip *);
	iph_out = NULL;
	ip6_out = NULL;
	if (iph->ip_v == IPVERSION) {
		iph_out = mtod(mout, struct ip *);
		SCTP_BUF_LEN(mout) = sizeof(*iph_out) + sizeof(*abm);
		SCTP_BUF_NEXT(mout) = err_cause;

		/* Fill in the IP header for the ABORT */
		iph_out->ip_v = IPVERSION;
		iph_out->ip_hl = (sizeof(struct ip) / 4);
		iph_out->ip_tos = (u_char)0;
		iph_out->ip_id = 0;
		iph_out->ip_off = 0;
		iph_out->ip_ttl = MAXTTL;
		iph_out->ip_p = IPPROTO_SCTP;
		/* reflect: swap src/dst of the offending packet */
		iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
		iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
		/* let IP layer calculate this */
		iph_out->ip_sum = 0;

		iphlen_out = sizeof(*iph_out);
		abm = (struct sctp_abort_msg *)((caddr_t)iph_out + iphlen_out);
	} else if (iph->ip_v == (IPV6_VERSION >> 4)) {
		ip6 = (struct ip6_hdr *)iph;
		ip6_out = mtod(mout, struct ip6_hdr *);
		SCTP_BUF_LEN(mout) = sizeof(*ip6_out) + sizeof(*abm);
		SCTP_BUF_NEXT(mout) = err_cause;

		/* Fill in the IP6 header for the ABORT */
		ip6_out->ip6_flow = ip6->ip6_flow;
		ip6_out->ip6_hlim = ip6_defhlim;
		ip6_out->ip6_nxt = IPPROTO_SCTP;
		ip6_out->ip6_src = ip6->ip6_dst;
		ip6_out->ip6_dst = ip6->ip6_src;

		iphlen_out = sizeof(*ip6_out);
		abm = (struct sctp_abort_msg *)((caddr_t)ip6_out + iphlen_out);
	} else {
		/* Currently not supported */
		/* NOTE(review): returns without freeing o_pak (and the
		 * err_cause chain is not yet attached here either) —
		 * leaks both mbuf chains on this path. */
		return;
	}

	abm->sh.src_port = sh->dest_port;
	abm->sh.dest_port = sh->src_port;
	abm->sh.checksum = 0;
	if (vtag == 0) {
		/* no known tag: echo peer's tag, mark T-bit via flags */
		abm->sh.v_tag = sh->v_tag;
		abm->msg.ch.chunk_flags = SCTP_HAD_NO_TCB;
	} else {
		abm->sh.v_tag = htonl(vtag);
		abm->msg.ch.chunk_flags = 0;
	}
	abm->msg.ch.chunk_type = SCTP_ABORT_ASSOCIATION;

	if (err_cause) {
		struct mbuf *m_tmp = err_cause;
		int err_len = 0;

		/* get length of the err_cause chain */
		while (m_tmp != NULL) {
			err_len += SCTP_BUF_LEN(m_tmp);
			m_tmp = SCTP_BUF_NEXT(m_tmp);
		}
		SCTP_HEADER_LEN(o_pak) = SCTP_BUF_LEN(mout) + err_len;
		if (err_len % 4) {
			/* need pad at end of chunk */
			uint32_t cpthis = 0;
			int padlen;

			padlen = 4 - (SCTP_HEADER_LEN(o_pak) % 4);
			m_copyback(mout, SCTP_HEADER_LEN(o_pak), padlen, (caddr_t)&cpthis);
			SCTP_HEADER_LEN(o_pak) += padlen;
		}
		abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch) + err_len);
	} else {
		SCTP_HEADER_LEN(mout) = SCTP_BUF_LEN(mout);
		abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch));
	}

	/* add checksum */
	if ((sctp_no_csum_on_loopback) && SCTP_IS_IT_LOOPBACK(m)) {
		abm->sh.checksum = 0;
	} else {
		abm->sh.checksum = sctp_calculate_sum(mout, NULL, iphlen_out);
	}
	if (iph_out != NULL) {
		struct route ro;

		/* zap the stack pointer to the route */
		bzero(&ro, sizeof ro);
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
			printf("sctp_send_abort calling ip_output:\n");
sctp_print_address_pkt(iph_out, &abm->sh);
		}
#endif
		/* set IPv4 length */
		iph_out->ip_len = SCTP_HEADER_LEN(o_pak);
		/* out it goes */
		(void)ip_output(o_pak, 0, &ro, IP_RAWOUTPUT, NULL
		    ,NULL
		    );
		/* Free the route if we got one back */
		if (ro.ro_rt)
			RTFREE(ro.ro_rt);
	} else if (ip6_out != NULL) {
		struct route_in6 ro;


		/* zap the stack pointer to the route */
		bzero(&ro, sizeof(ro));
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
			printf("sctp_send_abort calling ip6_output:\n");
			sctp_print_address_pkt((struct ip *)ip6_out, &abm->sh);
		}
#endif
		ip6_out->ip6_plen = SCTP_HEADER_LEN(o_pak) - sizeof(*ip6_out);
		ip6_output(o_pak, NULL, &ro, 0, NULL, NULL
		    ,NULL
		    );
		/* Free the route if we got one back */
		if (ro.ro_rt)
			RTFREE(ro.ro_rt);
	}
	SCTP_STAT_INCR(sctps_sendpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
}

/*
 * Wrap the error-cause chain 'scm' in an OPERATION-ERROR chunk + SCTP
 * common header and send it straight back to the source of packet 'm'
 * (no association context).  Consumes 'scm' on all paths.
 */
void
sctp_send_operr_to(struct mbuf *m, int iphlen,
    struct mbuf *scm,
    uint32_t vtag)
{
	struct mbuf *o_pak;
	struct sctphdr *ihdr;
	int retcode;
	struct sctphdr *ohdr;
	struct sctp_chunkhdr *ophdr;

	struct ip *iph;

#ifdef SCTP_DEBUG
	struct sockaddr_in6 lsa6, fsa6;

#endif
	uint32_t val;
	struct mbuf *at;
	int len;

	iph = mtod(m, struct ip *);
	ihdr = (struct sctphdr *)((caddr_t)iph + iphlen);

	/* prepend room for the SCTP common header + chunk header */
	SCTP_BUF_PREPEND(scm, (sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr)), M_DONTWAIT);
	if (scm == NULL) {
		/* can't send because we can't add a mbuf */
		return;
	}
	ohdr = mtod(scm, struct sctphdr *);
	/* reflect ports back at the sender; caller supplies the vtag */
	ohdr->src_port = ihdr->dest_port;
	ohdr->dest_port = ihdr->src_port;
	ohdr->v_tag = vtag;
	ohdr->checksum = 0;
	ophdr = (struct sctp_chunkhdr *)(ohdr + 1);
	ophdr->chunk_type = SCTP_OPERATION_ERROR;
	ophdr->chunk_flags = 0;
	/* total length of the chain (header + causes) */
	len =
	    0;
	at = scm;
	while (at) {
		len += SCTP_BUF_LEN(at);
		at = SCTP_BUF_NEXT(at);
	}

	ophdr->chunk_length = htons(len - sizeof(struct sctphdr));
	if (len % 4) {
		/* need padding */
		uint32_t cpthis = 0;
		int padlen;

		padlen = 4 - (len % 4);
		m_copyback(scm, len, padlen, (caddr_t)&cpthis);
		len += padlen;
	}
	if ((sctp_no_csum_on_loopback) && SCTP_IS_IT_LOOPBACK(m)) {
		val = 0;
	} else {
		val = sctp_calculate_sum(scm, NULL, 0);
	}
	ohdr->checksum = val;
	if (iph->ip_v == IPVERSION) {
		/* V4 */
		struct ip *out;
		struct route ro;

		o_pak = SCTP_GET_HEADER_FOR_OUTPUT(sizeof(struct ip));
		if (o_pak == NULL) {
			sctp_m_freem(scm);
			return;
		}
		SCTP_BUF_LEN(SCTP_HEADER_TO_CHAIN(o_pak)) = sizeof(struct ip);
		len += sizeof(struct ip);
		SCTP_ATTACH_CHAIN(o_pak, scm, len);
		bzero(&ro, sizeof ro);
		out = mtod(SCTP_HEADER_TO_CHAIN(o_pak), struct ip *);
		/* mirror the incoming header, swapping addresses */
		out->ip_v = iph->ip_v;
		out->ip_hl = (sizeof(struct ip) / 4);
		out->ip_tos = iph->ip_tos;
		out->ip_id = iph->ip_id;
		out->ip_off = 0;
		out->ip_ttl = MAXTTL;
		out->ip_p = IPPROTO_SCTP;
		out->ip_sum = 0;
		out->ip_src = iph->ip_dst;
		out->ip_dst = iph->ip_src;
		out->ip_len = SCTP_HEADER_LEN(o_pak);
		/* NOTE(review): retcode is assigned but never examined */
		retcode = ip_output(o_pak, 0, &ro, IP_RAWOUTPUT, NULL
		    ,NULL
		    );
		SCTP_STAT_INCR(sctps_sendpackets);
		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
		/* Free the route if we got one back */
		if (ro.ro_rt)
			RTFREE(ro.ro_rt);
	} else {
		/* V6 */
		struct route_in6 ro;

		struct ip6_hdr *out6, *in6;

		o_pak = SCTP_GET_HEADER_FOR_OUTPUT(sizeof(struct ip6_hdr));
		if (o_pak == NULL) {
			sctp_m_freem(scm);
			return;
		}
		SCTP_BUF_LEN(SCTP_HEADER_TO_CHAIN(o_pak)) = sizeof(struct ip6_hdr);
		len += sizeof(struct ip6_hdr);
		SCTP_ATTACH_CHAIN(o_pak, scm, len);

		bzero(&ro,
sizeof ro);
		in6 = mtod(m, struct ip6_hdr *);
		out6 = mtod(SCTP_HEADER_TO_CHAIN(o_pak), struct ip6_hdr *);
		/* mirror the incoming IPv6 header, swapping addresses */
		out6->ip6_flow = in6->ip6_flow;
		out6->ip6_hlim = ip6_defhlim;
		out6->ip6_nxt = IPPROTO_SCTP;
		out6->ip6_src = in6->ip6_dst;
		out6->ip6_dst = in6->ip6_src;
		out6->ip6_plen = len - sizeof(struct ip6_hdr);
#ifdef SCTP_DEBUG
		bzero(&lsa6, sizeof(lsa6));
		lsa6.sin6_len = sizeof(lsa6);
		lsa6.sin6_family = AF_INET6;
		lsa6.sin6_addr = out6->ip6_src;
		bzero(&fsa6, sizeof(fsa6));
		fsa6.sin6_len = sizeof(fsa6);
		fsa6.sin6_family = AF_INET6;
		fsa6.sin6_addr = out6->ip6_dst;
		if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
			printf("sctp_operr_to calling ipv6 output:\n");
			printf("src: ");
			sctp_print_address((struct sockaddr *)&lsa6);
			printf("dst ");
			sctp_print_address((struct sockaddr *)&fsa6);
		}
#endif				/* SCTP_DEBUG */
		ip6_output(o_pak, NULL, &ro, 0, NULL, NULL
		    ,NULL
		    );
		SCTP_STAT_INCR(sctps_sendpackets);
		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
		/* Free the route if we got one back */
		if (ro.ro_rt)
			RTFREE(ro.ro_rt);
	}
}



/*
 * Copy up to max_send_len bytes of user data from 'uio' into a fresh
 * mbuf chain (continuing a partially-delivered message).  On success
 * returns the head of the chain, adds the copied byte count to *sndout
 * and points *new_tail at the last mbuf.  On failure returns NULL with
 * *error set; the partial chain is freed.
 */
static struct mbuf *
sctp_copy_resume(struct sctp_stream_queue_pending *sp,
    struct uio *uio,
    struct sctp_sndrcvinfo *srcv,
    int max_send_len,
    int user_marks_eor,
    int *error,
    uint32_t * sndout,
    struct mbuf **new_tail)
{
	int left, cancpy, willcpy;
	struct mbuf *m, *prev, *head;

	left = min(uio->uio_resid, max_send_len);
	/* Always get a header just in case */
	head = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 0, MT_DATA);
	/* NOTE(review): 'head' is not checked for NULL before use below;
	 * presumably M_WAIT cannot fail here — confirm the allocator's
	 * contract. */
	cancpy = M_TRAILINGSPACE(head);
	willcpy = min(cancpy, left);
	*error = uiomove(mtod(head, caddr_t), willcpy, uio);
	if (*error) {
		sctp_m_freem(head);
		return (NULL);
	}
	*sndout += willcpy;
	left -= willcpy;
	SCTP_BUF_LEN(head) = willcpy;
	m = head;
	*new_tail = head;
	while (left > 0) {
		/* move in user data */
		SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 0, MT_DATA);
		if (SCTP_BUF_NEXT(m) == NULL) {
			sctp_m_freem(head);
			*new_tail = NULL;
			*error = ENOMEM;
			return (NULL);
		}
		/* NOTE(review): 'prev' is written but never read */
		prev = m;
		m = SCTP_BUF_NEXT(m);
		cancpy = M_TRAILINGSPACE(m);
		willcpy = min(cancpy, left);
		*error = uiomove(mtod(m, caddr_t), willcpy, uio);
		if (*error) {
			sctp_m_freem(head);
			*new_tail = NULL;
			/* NOTE(review): overwrites uiomove()'s real error
			 * code with EFAULT, unlike the first copy above */
			*error = EFAULT;
			return (NULL);
		}
		SCTP_BUF_LEN(m) = willcpy;
		left -= willcpy;
		*sndout += willcpy;
		*new_tail = m;
		if (left == 0) {
			SCTP_BUF_NEXT(m) = NULL;
		}
	}
	return (head);
}

/*
 * Copy sp->length bytes from 'uio' into a new mbuf chain, reserving
 * resv_upfront bytes of leading space in the first mbuf (for the DATA
 * chunk header).  Fills sp->data/sp->tail_mbuf and resets sp->length to
 * the byte count actually copied.  Returns 0 or an errno.
 */
static int
sctp_copy_one(struct sctp_stream_queue_pending *sp,
    struct uio *uio,
    int resv_upfront)
{
	int left, cancpy, willcpy, error;
	struct mbuf *m, *head;
	int cpsz = 0;

	/* First one gets a header */
	left = sp->length;
	head = m = sctp_get_mbuf_for_msg((left + resv_upfront), 0, M_WAIT, 0, MT_DATA);
	if (m == NULL) {
		return (ENOMEM);
	}
	/*
	 * Add this one for m in now, that way if the alloc fails we won't
	 * have a bad cnt.
 */
	SCTP_BUF_RESV_UF(m, resv_upfront);
	cancpy = M_TRAILINGSPACE(m);
	willcpy = min(cancpy, left);
	while (left > 0) {
		/* move in user data */
		error = uiomove(mtod(m, caddr_t), willcpy, uio);
		if (error) {
			sctp_m_freem(head);
			return (error);
		}
		SCTP_BUF_LEN(m) = willcpy;
		left -= willcpy;
		cpsz += willcpy;
		if (left > 0) {
			SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 0, MT_DATA);
			if (SCTP_BUF_NEXT(m) == NULL) {
				/*
				 * the head goes back to caller, he can free
				 * the rest
				 */
				sctp_m_freem(head);
				return (ENOMEM);
			}
			m = SCTP_BUF_NEXT(m);
			cancpy = M_TRAILINGSPACE(m);
			willcpy = min(cancpy, left);
		} else {
			sp->tail_mbuf = m;
			SCTP_BUF_NEXT(m) = NULL;
		}
	}
	sp->data = head;
	sp->length = cpsz;
	return (0);
}



/*
 * Allocate a stream-queue-pending entry, fill it from the send info in
 * 'srcv', and copy up to max_send_len bytes of user data into it via
 * sctp_copy_one().  Returns the new entry, or NULL with *errno set.
 */
static struct sctp_stream_queue_pending *
sctp_copy_it_in(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_sndrcvinfo *srcv,
    struct uio *uio,
    struct sctp_nets *net,
    int max_send_len,
    int user_marks_eor,
    int *errno,
    int non_blocking)
{
	/*
	 * This routine must be very careful in its work. Protocol
	 * processing is up and running so care must be taken to spl...()
	 * when you need to do something that may effect the stcb/asoc. The
	 * sb is locked however. When data is copied the protocol processing
	 * should be enabled since this is a slower operation...
	 */
	struct sctp_stream_queue_pending *sp = NULL;
	int resv_in_first;

	*errno = 0;
	/*
	 * Unless E_EOR mode is on, we must make a send FIT in one call.
	 */
	if (((user_marks_eor == 0) && non_blocking) &&
	    (uio->uio_resid > stcb->sctp_socket->so_snd.sb_hiwat)) {
		/* It will NEVER fit */
		*errno = EMSGSIZE;
		goto out_now;
	}
	/* Now can we send this?
	 */
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
		/* got data while shutting down */
		*errno = ECONNRESET;
		goto out_now;
	}
	sp = (struct sctp_stream_queue_pending *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_strmoq);
	if (sp == NULL) {
		*errno = ENOMEM;
		goto out_now;
	}
	SCTP_INCR_STRMOQ_COUNT();
	sp->act_flags = 0;
	sp->sinfo_flags = srcv->sinfo_flags;
	sp->timetolive = srcv->sinfo_timetolive;
	sp->ppid = srcv->sinfo_ppid;
	sp->context = srcv->sinfo_context;
	sp->strseq = 0;
	SCTP_GETTIME_TIMEVAL(&sp->ts);

	sp->stream = srcv->sinfo_stream;
	sp->length = min(uio->uio_resid, max_send_len);
	/* message is "complete" if the whole remaining uio fits and the
	 * caller is not holding it open via explicit-EOR semantics */
	if ((sp->length == uio->uio_resid) &&
	    ((user_marks_eor == 0) ||
	    (srcv->sinfo_flags & SCTP_EOF) ||
	    (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))
	    ) {
		sp->msg_is_complete = 1;
	} else {
		sp->msg_is_complete = 0;
	}
	sp->some_taken = 0;
	resv_in_first = sizeof(struct sctp_data_chunk);
	sp->data = sp->tail_mbuf = NULL;
	*errno = sctp_copy_one(sp, uio, resv_in_first);
	if (*errno) {
		sctp_free_a_strmoq(stcb, sp);
		/* NOTE(review): 'sp' is written after being handed to
		 * sctp_free_a_strmoq() — safe only if that routine merely
		 * caches/zone-frees without poisoning; confirm. */
		sp->data = NULL;
		sp->net = NULL;
		sp = NULL;
	} else {
		if (sp->sinfo_flags & SCTP_ADDR_OVER) {
			sp->net = net;
			sp->addr_over = 1;
		} else {
			sp->net = asoc->primary_destination;
			sp->addr_over = 0;
		}
		atomic_add_int(&sp->net->ref_count, 1);
		sctp_set_prsctp_policy(stcb, sp);
	}
out_now:
	return (sp);
}


/*
 * Socket-layer send entry point: extract any SCTP_SNDRCV cmsg from
 * 'control' and hand off to sctp_lower_sosend().
 */
int
sctp_sosend(struct socket *so,
    struct sockaddr *addr,
    struct uio *uio,
    struct mbuf *top,
    struct mbuf *control,
    int flags
    ,
    struct thread *p
)
{
	struct sctp_inpcb *inp;
	int error, use_rcvinfo
= 0;
	struct sctp_sndrcvinfo srcv;

	inp = (struct sctp_inpcb *)so->so_pcb;
	if (control) {
		/* process cmsg snd/rcv info (maybe a assoc-id) */
		if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&srcv, control,
		    sizeof(srcv))) {
			/* got one */
			use_rcvinfo = 1;
		}
	}
	error = sctp_lower_sosend(so, addr, uio, top, control, flags,
	    use_rcvinfo, &srcv, p);
	return (error);
}


extern unsigned int sctp_add_more_threshold;

/*
 * The real send engine: locate (or implicitly create) the association,
 * validate state/flags, then copy and queue the user data.
 * (Definition continues beyond this extract.)
 */
int
sctp_lower_sosend(struct socket *so,
    struct sockaddr *addr,
    struct uio *uio,
    struct mbuf *i_pak,
    struct mbuf *control,
    int flags,
    int use_rcvinfo,
    struct sctp_sndrcvinfo *srcv,
    struct thread *p
)
{
	unsigned int sndlen, max_len;
	int error, len;
	struct mbuf *top = NULL;

#if defined(__NetBSD__) || defined(__OpenBSD_)
	int s;

#endif
	int queue_only = 0, queue_only_for_init = 0;
	int free_cnt_applied = 0;
	int un_sent = 0;
	int now_filled = 0;
	struct sctp_block_entry be;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb = NULL;
	struct timeval now;
	struct sctp_nets *net;
	struct sctp_association *asoc;
	struct sctp_inpcb *t_inp;
	int create_lock_applied = 0;
	int nagle_applies = 0;
	int some_on_control = 0;
	int got_all_of_the_send = 0;
	int hold_tcblock = 0;
	int non_blocking = 0;

	error = 0;
	net = NULL;
	stcb = NULL;
	asoc = NULL;
	t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == NULL) {
		error = EFAULT;
		goto out_unlocked;
	}
	atomic_add_int(&inp->total_sends, 1);
	if (uio)
		sndlen = uio->uio_resid;
	else {
		/* record-style send: the data arrives as an mbuf chain */
		sndlen = SCTP_HEADER_LEN(i_pak);
		top = SCTP_HEADER_TO_CHAIN(i_pak);
	}

	hold_tcblock = 0;

	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (inp->sctp_socket->so_qlimit)) {
		/* The listener can NOT send */
		error = EFAULT;
		goto out_unlocked;
	}
	if ((use_rcvinfo) && srcv) {
		if (INVALID_SINFO_FLAG(srcv->sinfo_flags) || PR_SCTP_INVALID_POLICY(srcv->sinfo_flags)) {
			error = EINVAL;
			goto out_unlocked;
		}
		if (srcv->sinfo_flags)
			SCTP_STAT_INCR(sctps_sends_with_flags);

		if (srcv->sinfo_flags & SCTP_SENDALL) {
			/* its a sendall */
			error = sctp_sendall(inp, uio, top, srcv);
			top = NULL;
			goto out_unlocked;
		}
	}
	/* now we must find the assoc */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
		/* one-to-one style socket: the single assoc on the list */
		SCTP_INP_RLOCK(inp);
		stcb = LIST_FIRST(&inp->sctp_asoc_list);
		if (stcb == NULL) {
			SCTP_INP_RUNLOCK(inp);
			error = ENOTCONN;
			goto out_unlocked;
		}
		hold_tcblock = 0;
		SCTP_INP_RUNLOCK(inp);
		if (addr)
			/* Must locate the net structure if addr given */
			net = sctp_findnet(stcb, addr);
		else
			net = stcb->asoc.primary_destination;

	} else if (use_rcvinfo && srcv && srcv->sinfo_assoc_id) {
		stcb = sctp_findassociation_ep_asocid(inp, srcv->sinfo_assoc_id, 0);
		if (stcb) {
			if (addr)
				/*
				 * Must locate the net structure if addr
				 * given
				 */
				net = sctp_findnet(stcb, addr);
			else
				net = stcb->asoc.primary_destination;
		}
		hold_tcblock = 0;
	} else if (addr) {
		/*
		 * Since we did not use findep we must increment it, and if
		 * we don't find a tcb decrement it.
		 */
		SCTP_INP_WLOCK(inp);
		SCTP_INP_INCR_REF(inp);
		SCTP_INP_WUNLOCK(inp);
		stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
		if (stcb == NULL) {
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		} else {
			hold_tcblock = 1;
		}
	}
	if ((stcb == NULL) && (addr)) {
		/* Possible implicit send?
		 */
		SCTP_ASOC_CREATE_LOCK(inp);
		create_lock_applied = 1;
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
		    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
			/* Should I really unlock ? */
			error = EFAULT;
			goto out_unlocked;

		}
		if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
		    (addr->sa_family == AF_INET6)) {
			error = EINVAL;
			goto out_unlocked;
		}
		SCTP_INP_WLOCK(inp);
		SCTP_INP_INCR_REF(inp);
		SCTP_INP_WUNLOCK(inp);
		/* With the lock applied look again */
		stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
		if (stcb == NULL) {
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		} else {
			hold_tcblock = 1;
		}
	}
	if (stcb == NULL) {
		if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
			error = ENOTCONN;
			goto out_unlocked;
		} else if (addr == NULL) {
			error = ENOENT;
			goto out_unlocked;
		} else {
			/*
			 * UDP style, we must go ahead and start the INIT
			 * process
			 */
			if ((use_rcvinfo) && (srcv) &&
			    ((srcv->sinfo_flags & SCTP_ABORT) ||
			    ((srcv->sinfo_flags & SCTP_EOF) &&
			    (uio->uio_resid == 0)))) {
				/*
				 * User asks to abort a non-existant assoc,
				 * or EOF a non-existant assoc with no data
				 */
				error = ENOENT;
				goto out_unlocked;
			}
			/* get an asoc/stcb struct */
			stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0);
			if (stcb == NULL) {
				/* Error is setup for us in the call */
				goto out_unlocked;
			}
			if (create_lock_applied) {
				SCTP_ASOC_CREATE_UNLOCK(inp);
				create_lock_applied = 0;
			} else {
				printf("Huh-3? create lock should have been on??\n");
			}
			/*
			 * Turn on queue only flag to prevent data from
			 * being sent
			 */
			queue_only = 1;
			asoc = &stcb->asoc;
			asoc->state = SCTP_STATE_COOKIE_WAIT;
			SCTP_GETTIME_TIMEVAL(&asoc->time_entered);

			/* initialize authentication params for the assoc */
			sctp_initialize_auth_params(inp, stcb);

			if (control) {
				/*
				 * see if a init structure exists in cmsg
				 * headers
				 */
				struct sctp_initmsg initm;
				int i;

				if (sctp_find_cmsg(SCTP_INIT, (void *)&initm, control,
				    sizeof(initm))) {
					/*
					 * we have an INIT override of the
					 * default
					 */
					if (initm.sinit_max_attempts)
						asoc->max_init_times = initm.sinit_max_attempts;
					if (initm.sinit_num_ostreams)
						asoc->pre_open_streams = initm.sinit_num_ostreams;
					if (initm.sinit_max_instreams)
						asoc->max_inbound_streams = initm.sinit_max_instreams;
					if (initm.sinit_max_init_timeo)
						asoc->initial_init_rto_max = initm.sinit_max_init_timeo;
					if (asoc->streamoutcnt < asoc->pre_open_streams) {
						/* Default is NOT correct */
#ifdef SCTP_DEBUG
						if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
							printf("Ok, defout:%d pre_open:%d\n",
							    asoc->streamoutcnt, asoc->pre_open_streams);
						}
#endif
						/* reallocate the out-stream array at the
						 * requested (larger) size */
						SCTP_FREE(asoc->strmout);
						asoc->strmout = NULL;
						asoc->streamoutcnt = asoc->pre_open_streams;
						/*
						 * What happens if this
						 * fails? .. we panic ...
						 */
						{
							struct sctp_stream_out *tmp_str;
							int had_lock = 0;

							/* drop the TCB lock across the
							 * (possibly sleeping) allocation */
							if (hold_tcblock) {
								had_lock = 1;
								SCTP_TCB_UNLOCK(stcb);
							}
							SCTP_MALLOC(tmp_str,
							    struct sctp_stream_out *,
							    asoc->streamoutcnt *
							    sizeof(struct sctp_stream_out),
							    "StreamsOut");
							if (had_lock) {
								SCTP_TCB_LOCK(stcb);
							}
							if (asoc->strmout == NULL) {
								asoc->strmout = tmp_str;
							} else {
								SCTP_FREE(asoc->strmout);
								asoc->strmout = tmp_str;
							}
						}
						for (i = 0; i < asoc->streamoutcnt; i++) {
							/*
							 * inbound side must
							 * be set to 0xffff,
							 * also NOTE when we
							 * get the INIT-ACK
							 * back (for INIT
							 * sender) we MUST
							 * reduce the count
							 * (streamoutcnt)
							 * but first check
							 * if we sent to any
							 * of the upper
							 * streams that were
							 * dropped (if some
							 * were). Those that
							 * were dropped must
							 * be notified to
							 * the upper layer
							 * as failed to
							 * send.
							 */
							asoc->strmout[i].next_sequence_sent = 0x0;
							TAILQ_INIT(&asoc->strmout[i].outqueue);
							asoc->strmout[i].stream_no = i;
							asoc->strmout[i].last_msg_incomplete = 0;
							asoc->strmout[i].next_spoke.tqe_next = 0;
							asoc->strmout[i].next_spoke.tqe_prev = 0;
						}
					}
				}
			}
			hold_tcblock = 1;
			/* out with the INIT */
			queue_only_for_init = 1;
			/*
			 * we may want to dig in after this call and adjust
			 * the MTU value. It defaulted to 1500 (constant)
			 * but the ro structure may now have an update and
			 * thus we may need to change it BEFORE we append
			 * the message.
			 */
			net = stcb->asoc.primary_destination;
			asoc = &stcb->asoc;
		}
	}
	if (((so->so_state & SS_NBIO)
	    || (flags & MSG_NBIO)
	    )) {
		non_blocking = 1;
	}
	asoc = &stcb->asoc;
	/* would we block?
	 */
	if (non_blocking) {
		if ((so->so_snd.sb_hiwat <
		    (sndlen + stcb->asoc.total_output_queue_size)) ||
		    (stcb->asoc.chunks_on_out_queue >
		    sctp_max_chunks_on_queue)) {
			error = EWOULDBLOCK;
			atomic_add_int(&stcb->sctp_ep->total_nospaces, 1);
			goto out_unlocked;
		}
	}
	/* Keep the stcb from being freed under our feet */
	atomic_add_int(&stcb->asoc.refcnt, 1);
	free_cnt_applied = 1;

	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		error = ECONNRESET;
		goto out_unlocked;
	}
	if (create_lock_applied) {
		SCTP_ASOC_CREATE_UNLOCK(inp);
		create_lock_applied = 0;
	}
	if (asoc->stream_reset_outstanding) {
		/*
		 * Can't queue any data while stream reset is underway.
		 */
		error = EAGAIN;
		goto out_unlocked;
	}
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
		queue_only = 1;
	}
	if ((use_rcvinfo == 0) || (srcv == NULL)) {
		/* Grab the default stuff from the asoc */
		srcv = &stcb->asoc.def_send;
	}
	/* we are now done with all control */
	if (control) {
		sctp_m_freem(control);
		control = NULL;
	}
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
		if ((use_rcvinfo) &&
		    (srcv->sinfo_flags & SCTP_ABORT)) {
			;
		} else {
			error = ECONNRESET;
			goto out_unlocked;
		}
	}
	/* Ok, we will attempt a msgsnd :> */
	if (p) {
		p->td_proc->p_stats->p_ru.ru_msgsnd++;
	}
	if (stcb) {
		if (net && ((srcv->sinfo_flags & SCTP_ADDR_OVER))) {
			/* we take the override or the unconfirmed */
			;
		} else {
			net = stcb->asoc.primary_destination;
		}
	}
	if ((net->flight_size > net->cwnd) &&
(sctp_cmt_on_off == 0)) { 9579 /* 9580 * CMT: Added check for CMT above. net above is the primary 9581 * dest. If CMT is ON, sender should always attempt to send 9582 * with the output routine sctp_fill_outqueue() that loops 9583 * through all destination addresses. Therefore, if CMT is 9584 * ON, queue_only is NOT set to 1 here, so that 9585 * sctp_chunk_output() can be called below. 9586 */ 9587 queue_only = 1; 9588 9589 } else if (asoc->ifp_had_enobuf) { 9590 SCTP_STAT_INCR(sctps_ifnomemqueued); 9591 if (net->flight_size > (net->mtu * 2)) 9592 queue_only = 1; 9593 asoc->ifp_had_enobuf = 0; 9594 } else { 9595 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 9596 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * sizeof(struct sctp_data_chunk))); 9597 } 9598 /* Are we aborting? */ 9599 if (srcv->sinfo_flags & SCTP_ABORT) { 9600 struct mbuf *mm; 9601 int tot_demand, tot_out, max; 9602 9603 SCTP_STAT_INCR(sctps_sends_with_abort); 9604 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 9605 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) { 9606 /* It has to be up before we abort */ 9607 /* how big is the user initiated abort? 
*/ 9608 error = EINVAL; 9609 goto out; 9610 } 9611 if (hold_tcblock) { 9612 SCTP_TCB_UNLOCK(stcb); 9613 hold_tcblock = 0; 9614 } 9615 if (top) { 9616 struct mbuf *cntm; 9617 9618 mm = sctp_get_mbuf_for_msg(1, 0, M_WAIT, 1, MT_DATA); 9619 9620 tot_out = 0; 9621 cntm = top; 9622 while (cntm) { 9623 tot_out += SCTP_BUF_LEN(cntm); 9624 cntm = SCTP_BUF_NEXT(cntm); 9625 } 9626 tot_demand = (tot_out + sizeof(struct sctp_paramhdr)); 9627 } else { 9628 /* Must fit in a MTU */ 9629 tot_out = uio->uio_resid; 9630 tot_demand = (tot_out + sizeof(struct sctp_paramhdr)); 9631 mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAIT, 1, MT_DATA); 9632 } 9633 if (mm == NULL) { 9634 error = ENOMEM; 9635 goto out; 9636 } 9637 max = asoc->smallest_mtu - sizeof(struct sctp_paramhdr); 9638 max -= sizeof(struct sctp_abort_msg); 9639 if (tot_out > max) { 9640 tot_out = max; 9641 } 9642 if (mm) { 9643 struct sctp_paramhdr *ph; 9644 9645 /* now move forward the data pointer */ 9646 ph = mtod(mm, struct sctp_paramhdr *); 9647 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 9648 ph->param_length = htons((sizeof(struct sctp_paramhdr) + tot_out)); 9649 ph++; 9650 SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr); 9651 if (top == NULL) { 9652 error = uiomove((caddr_t)ph, (int)tot_out, uio); 9653 if (error) { 9654 /* 9655 * Here if we can't get his data we 9656 * still abort we just don't get to 9657 * send the users note :-0 9658 */ 9659 sctp_m_freem(mm); 9660 mm = NULL; 9661 } 9662 } else { 9663 SCTP_BUF_NEXT(mm) = top; 9664 } 9665 } 9666 if (hold_tcblock == 0) { 9667 SCTP_TCB_LOCK(stcb); 9668 hold_tcblock = 1; 9669 } 9670 atomic_add_int(&stcb->asoc.refcnt, -1); 9671 free_cnt_applied = 0; 9672 /* release this lock, otherwise we hang on ourselves */ 9673 sctp_abort_an_association(stcb->sctp_ep, stcb, 9674 SCTP_RESPONSE_TO_USER_REQ, 9675 mm); 9676 /* now relock the stcb so everything is sane */ 9677 hold_tcblock = 0; 9678 stcb = NULL; 9679 goto out_unlocked; 9680 } 9681 /* Calculate the 
maximum we can send */ 9682 if (so->so_snd.sb_hiwat > stcb->asoc.total_output_queue_size) { 9683 max_len = so->so_snd.sb_hiwat - stcb->asoc.total_output_queue_size; 9684 } else { 9685 max_len = 0; 9686 } 9687 if (hold_tcblock) { 9688 SCTP_TCB_UNLOCK(stcb); 9689 hold_tcblock = 0; 9690 } 9691 /* Is the stream no. valid? */ 9692 if (srcv->sinfo_stream >= asoc->streamoutcnt) { 9693 /* Invalid stream number */ 9694 error = EINVAL; 9695 goto out_unlocked; 9696 } 9697 if (asoc->strmout == NULL) { 9698 /* huh? software error */ 9699 error = EFAULT; 9700 goto out_unlocked; 9701 } 9702 len = 0; 9703 if (max_len < sctp_add_more_threshold) { 9704 /* No room right no ! */ 9705 SOCKBUF_LOCK(&so->so_snd); 9706 while (so->so_snd.sb_hiwat < (stcb->asoc.total_output_queue_size + sctp_add_more_threshold)) { 9707 #ifdef SCTP_BLK_LOGGING 9708 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, 9709 so, asoc, uio->uio_resid); 9710 #endif 9711 be.error = 0; 9712 stcb->block_entry = &be; 9713 error = sbwait(&so->so_snd); 9714 stcb->block_entry = NULL; 9715 if (error || so->so_error || be.error) { 9716 if (error == 0) { 9717 if (so->so_error) 9718 error = so->so_error; 9719 if (be.error) { 9720 error = be.error; 9721 } 9722 } 9723 SOCKBUF_UNLOCK(&so->so_snd); 9724 goto out_unlocked; 9725 } 9726 #ifdef SCTP_BLK_LOGGING 9727 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK, 9728 so, asoc, stcb->asoc.total_output_queue_size); 9729 #endif 9730 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 9731 goto out_unlocked; 9732 } 9733 } 9734 if (so->so_snd.sb_hiwat > stcb->asoc.total_output_queue_size) { 9735 max_len = so->so_snd.sb_hiwat - stcb->asoc.total_output_queue_size; 9736 } else { 9737 max_len = 0; 9738 } 9739 SOCKBUF_UNLOCK(&so->so_snd); 9740 } 9741 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 9742 goto out_unlocked; 9743 } 9744 atomic_add_int(&stcb->total_sends, 1); 9745 if (top == NULL) { 9746 struct sctp_stream_queue_pending *sp; 9747 struct sctp_stream_out *strm; 9748 uint32_t sndout, 
initial_out; 9749 int user_marks_eor; 9750 9751 if (uio->uio_resid == 0) { 9752 if (srcv->sinfo_flags & SCTP_EOF) { 9753 got_all_of_the_send = 1; 9754 goto dataless_eof; 9755 } else { 9756 error = EINVAL; 9757 goto out; 9758 } 9759 } 9760 initial_out = uio->uio_resid; 9761 9762 if ((asoc->stream_locked) && 9763 (asoc->stream_locked_on != srcv->sinfo_stream)) { 9764 error = EAGAIN; 9765 goto out; 9766 } 9767 strm = &stcb->asoc.strmout[srcv->sinfo_stream]; 9768 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 9769 if (strm->last_msg_incomplete == 0) { 9770 do_a_copy_in: 9771 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error, non_blocking); 9772 if ((sp == NULL) || (error)) { 9773 goto out; 9774 } 9775 SCTP_TCB_SEND_LOCK(stcb); 9776 if (sp->msg_is_complete) { 9777 strm->last_msg_incomplete = 0; 9778 asoc->stream_locked = 0; 9779 } else { 9780 /* 9781 * Just got locked to this guy in case of an 9782 * interupt. 9783 */ 9784 strm->last_msg_incomplete = 1; 9785 asoc->stream_locked = 1; 9786 asoc->stream_locked_on = srcv->sinfo_stream; 9787 } 9788 sctp_snd_sb_alloc(stcb, sp->length); 9789 9790 asoc->stream_queue_cnt++; 9791 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next); 9792 if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) { 9793 sp->strseq = strm->next_sequence_sent; 9794 #ifdef SCTP_LOG_SENDING_STR 9795 sctp_misc_ints(SCTP_STRMOUT_LOG_ASSIGN, 9796 (uintptr_t) stcb, (uintptr_t) sp, 9797 (uint32_t) ((srcv->sinfo_stream << 16) | sp->strseq), 0); 9798 #endif 9799 strm->next_sequence_sent++; 9800 } else { 9801 SCTP_STAT_INCR(sctps_sends_with_unord); 9802 } 9803 9804 if ((strm->next_spoke.tqe_next == NULL) && 9805 (strm->next_spoke.tqe_prev == NULL)) { 9806 /* Not on wheel, insert */ 9807 sctp_insert_on_wheel(stcb, asoc, strm, 1); 9808 } 9809 SCTP_TCB_SEND_UNLOCK(stcb); 9810 } else { 9811 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead); 9812 if (sp == NULL) { 9813 /* ???? Huh ??? 
last msg is gone */ 9814 #ifdef INVARIANTS 9815 panic("Warning: Last msg marked incomplete, yet nothing left?"); 9816 #else 9817 printf("Warning: Last msg marked incomplete, yet nothing left?\n"); 9818 strm->last_msg_incomplete = 0; 9819 #endif 9820 goto do_a_copy_in; 9821 9822 } 9823 } 9824 while (uio->uio_resid > 0) { 9825 /* How much room do we have? */ 9826 struct mbuf *new_tail, *mm; 9827 9828 if (so->so_snd.sb_hiwat > stcb->asoc.total_output_queue_size) 9829 max_len = so->so_snd.sb_hiwat - stcb->asoc.total_output_queue_size; 9830 else 9831 max_len = 0; 9832 9833 if ((max_len > sctp_add_more_threshold) || 9834 (uio->uio_resid && (uio->uio_resid < max_len))) { 9835 sndout = 0; 9836 new_tail = NULL; 9837 if (hold_tcblock) { 9838 SCTP_TCB_UNLOCK(stcb); 9839 hold_tcblock = 0; 9840 } 9841 mm = sctp_copy_resume(sp, uio, srcv, max_len, user_marks_eor, &error, &sndout, &new_tail); 9842 if ((mm == NULL) || error) { 9843 if (mm) { 9844 sctp_m_freem(mm); 9845 } 9846 goto out; 9847 } 9848 /* Update the mbuf and count */ 9849 SCTP_TCB_SEND_LOCK(stcb); 9850 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 9851 /* 9852 * we need to get out. Peer probably 9853 * aborted. 9854 */ 9855 sctp_m_freem(mm); 9856 if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) 9857 error = ECONNRESET; 9858 goto out; 9859 } 9860 if (sp->tail_mbuf) { 9861 /* tack it to the end */ 9862 SCTP_BUF_NEXT(sp->tail_mbuf) = mm; 9863 sp->tail_mbuf = new_tail; 9864 } else { 9865 /* A stolen mbuf */ 9866 sp->data = mm; 9867 sp->tail_mbuf = new_tail; 9868 } 9869 sctp_snd_sb_alloc(stcb, sndout); 9870 sp->length += sndout; 9871 len += sndout; 9872 /* Did we reach EOR? */ 9873 if ((uio->uio_resid == 0) && 9874 ((user_marks_eor == 0) || 9875 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR))) 9876 ) { 9877 sp->msg_is_complete = 1; 9878 } else { 9879 sp->msg_is_complete = 0; 9880 } 9881 SCTP_TCB_SEND_UNLOCK(stcb); 9882 } 9883 if (uio->uio_resid == 0) { 9884 /* got it all? 
*/ 9885 continue; 9886 } 9887 /* PR-SCTP? */ 9888 if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) { 9889 /* 9890 * This is ugly but we must assure locking 9891 * order 9892 */ 9893 if (hold_tcblock == 0) { 9894 SCTP_TCB_LOCK(stcb); 9895 hold_tcblock = 1; 9896 } 9897 sctp_prune_prsctp(stcb, asoc, srcv, sndlen); 9898 if (so->so_snd.sb_hiwat > stcb->asoc.total_output_queue_size) 9899 max_len = so->so_snd.sb_hiwat - stcb->asoc.total_output_queue_size; 9900 else 9901 max_len = 0; 9902 if (max_len > 0) { 9903 continue; 9904 } 9905 SCTP_TCB_UNLOCK(stcb); 9906 hold_tcblock = 0; 9907 } 9908 /* wait for space now */ 9909 if (non_blocking) { 9910 /* Non-blocking io in place out */ 9911 goto skip_out_eof; 9912 } 9913 if ((net->flight_size > net->cwnd) && 9914 (sctp_cmt_on_off == 0)) { 9915 queue_only = 1; 9916 9917 } else if (asoc->ifp_had_enobuf) { 9918 SCTP_STAT_INCR(sctps_ifnomemqueued); 9919 if (net->flight_size > (net->mtu * 2)) { 9920 queue_only = 1; 9921 } else { 9922 queue_only = 0; 9923 } 9924 asoc->ifp_had_enobuf = 0; 9925 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 9926 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * 9927 sizeof(struct sctp_data_chunk))); 9928 } else { 9929 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 9930 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * 9931 sizeof(struct sctp_data_chunk))); 9932 queue_only = 0; 9933 } 9934 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && 9935 (stcb->asoc.total_flight > 0) && 9936 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) 9937 ) { 9938 9939 /* 9940 * Ok, Nagle is set on and we have data 9941 * outstanding. Don't send anything and let 9942 * SACKs drive out the data unless wen have 9943 * a "full" segment to send. 
9944 */ 9945 #ifdef SCTP_NAGLE_LOGGING 9946 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED); 9947 #endif 9948 SCTP_STAT_INCR(sctps_naglequeued); 9949 nagle_applies = 1; 9950 } else { 9951 #ifdef SCTP_NAGLE_LOGGING 9952 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) 9953 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED); 9954 #endif 9955 SCTP_STAT_INCR(sctps_naglesent); 9956 nagle_applies = 0; 9957 } 9958 /* What about the INIT, send it maybe */ 9959 #ifdef SCTP_BLK_LOGGING 9960 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only, nagle_applies, un_sent); 9961 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size, stcb->asoc.total_flight, 9962 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count); 9963 #endif 9964 if (queue_only_for_init) { 9965 if (hold_tcblock == 0) { 9966 SCTP_TCB_LOCK(stcb); 9967 hold_tcblock = 1; 9968 } 9969 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) { 9970 /* a collision took us forward? */ 9971 queue_only_for_init = 0; 9972 queue_only = 0; 9973 } else { 9974 sctp_send_initiate(inp, stcb); 9975 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT; 9976 queue_only_for_init = 0; 9977 queue_only = 1; 9978 } 9979 } 9980 if ((queue_only == 0) && (nagle_applies == 0) 9981 ) { 9982 /* 9983 * need to start chunk output before 9984 * blocking.. note that if a lock is already 9985 * applied, then the input via the net is 9986 * happening and I don't need to start 9987 * output :-D 9988 */ 9989 if (hold_tcblock == 0) { 9990 if (SCTP_TCB_TRYLOCK(stcb)) { 9991 hold_tcblock = 1; 9992 sctp_chunk_output(inp, 9993 stcb, 9994 SCTP_OUTPUT_FROM_USR_SEND); 9995 9996 } 9997 } else { 9998 sctp_chunk_output(inp, 9999 stcb, 10000 SCTP_OUTPUT_FROM_USR_SEND); 10001 } 10002 if (hold_tcblock == 1) { 10003 SCTP_TCB_UNLOCK(stcb); 10004 hold_tcblock = 0; 10005 } 10006 } 10007 SOCKBUF_LOCK(&so->so_snd); 10008 /* 10009 * This is a bit strange, but I think it will work. 
10010 * The total_output_queue_size is locked and 10011 * protected by the TCB_LOCK, which we just 10012 * released. There is a race that can occur between 10013 * releasing it above, and me getting the socket 10014 * lock, where sacks come in but we have not put the 10015 * SB_WAIT on the so_snd buffer to get the wakeup. 10016 * After the LOCK is applied the sack_processing 10017 * will also need to LOCK the so->so_snd to do the 10018 * actual sowwakeup(). So once we have the socket 10019 * buffer lock if we recheck the size we KNOW we 10020 * will get to sleep safely with the wakeup flag in 10021 * place. 10022 */ 10023 if (so->so_snd.sb_hiwat < (stcb->asoc.total_output_queue_size + sctp_add_more_threshold)) { 10024 #ifdef SCTP_BLK_LOGGING 10025 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK, 10026 so, asoc, uio->uio_resid); 10027 #endif 10028 be.error = 0; 10029 stcb->block_entry = &be; 10030 error = sbwait(&so->so_snd); 10031 stcb->block_entry = NULL; 10032 10033 if (error || so->so_error || be.error) { 10034 if (error == 0) { 10035 if (so->so_error) 10036 error = so->so_error; 10037 if (be.error) { 10038 error = be.error; 10039 } 10040 } 10041 SOCKBUF_UNLOCK(&so->so_snd); 10042 goto out_unlocked; 10043 } 10044 #ifdef SCTP_BLK_LOGGING 10045 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK, 10046 so, asoc, stcb->asoc.total_output_queue_size); 10047 #endif 10048 } 10049 SOCKBUF_UNLOCK(&so->so_snd); 10050 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 10051 goto out_unlocked; 10052 } 10053 } 10054 SCTP_TCB_SEND_LOCK(stcb); 10055 if (sp->msg_is_complete == 0) { 10056 strm->last_msg_incomplete = 1; 10057 asoc->stream_locked = 1; 10058 asoc->stream_locked_on = srcv->sinfo_stream; 10059 } else { 10060 strm->last_msg_incomplete = 0; 10061 asoc->stream_locked = 0; 10062 } 10063 SCTP_TCB_SEND_UNLOCK(stcb); 10064 if (uio->uio_resid == 0) { 10065 got_all_of_the_send = 1; 10066 } 10067 } else if (top) { 10068 /* We send in a 0, since we do NOT have any locks */ 10069 error = 
sctp_msg_append(stcb, net, top, srcv, 0); 10070 top = NULL; 10071 } 10072 if (error) { 10073 goto out; 10074 } 10075 dataless_eof: 10076 /* EOF thing ? */ 10077 if ((srcv->sinfo_flags & SCTP_EOF) && 10078 (got_all_of_the_send == 1) && 10079 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) 10080 ) { 10081 SCTP_STAT_INCR(sctps_sends_with_eof); 10082 error = 0; 10083 if (hold_tcblock == 0) { 10084 SCTP_TCB_LOCK(stcb); 10085 hold_tcblock = 1; 10086 } 10087 if (TAILQ_EMPTY(&asoc->send_queue) && 10088 TAILQ_EMPTY(&asoc->sent_queue) && 10089 (asoc->stream_queue_cnt == 0)) { 10090 if (asoc->locked_on_sending) { 10091 goto abort_anyway; 10092 } 10093 /* there is nothing queued to send, so I'm done... */ 10094 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 10095 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 10096 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 10097 /* only send SHUTDOWN the first time through */ 10098 sctp_send_shutdown(stcb, stcb->asoc.primary_destination); 10099 asoc->state = SCTP_STATE_SHUTDOWN_SENT; 10100 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 10101 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, 10102 asoc->primary_destination); 10103 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 10104 asoc->primary_destination); 10105 } 10106 } else { 10107 /* 10108 * we still got (or just got) data to send, so set 10109 * SHUTDOWN_PENDING 10110 */ 10111 /* 10112 * XXX sockets draft says that SCTP_EOF should be 10113 * sent with no data. 
currently, we will allow user 10114 * data to be sent first and move to 10115 * SHUTDOWN-PENDING 10116 */ 10117 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 10118 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 10119 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 10120 if (hold_tcblock == 0) { 10121 SCTP_TCB_LOCK(stcb); 10122 hold_tcblock = 1; 10123 } 10124 if (asoc->locked_on_sending) { 10125 /* Locked to send out the data */ 10126 struct sctp_stream_queue_pending *sp; 10127 10128 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); 10129 if (sp) { 10130 if ((sp->length == 0) && (sp->msg_is_complete == 0)) 10131 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 10132 } 10133 } 10134 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; 10135 if (TAILQ_EMPTY(&asoc->send_queue) && 10136 TAILQ_EMPTY(&asoc->sent_queue) && 10137 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 10138 abort_anyway: 10139 if (free_cnt_applied) { 10140 atomic_add_int(&stcb->asoc.refcnt, -1); 10141 free_cnt_applied = 0; 10142 } 10143 sctp_abort_an_association(stcb->sctp_ep, stcb, 10144 SCTP_RESPONSE_TO_USER_REQ, 10145 NULL); 10146 /* 10147 * now relock the stcb so everything 10148 * is sane 10149 */ 10150 hold_tcblock = 0; 10151 stcb = NULL; 10152 goto out; 10153 } 10154 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 10155 asoc->primary_destination); 10156 } 10157 } 10158 } 10159 skip_out_eof: 10160 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) { 10161 some_on_control = 1; 10162 } 10163 if ((net->flight_size > net->cwnd) && 10164 (sctp_cmt_on_off == 0)) { 10165 queue_only = 1; 10166 } else if (asoc->ifp_had_enobuf) { 10167 SCTP_STAT_INCR(sctps_ifnomemqueued); 10168 if (net->flight_size > (net->mtu * 2)) { 10169 queue_only = 1; 10170 } else { 10171 queue_only = 0; 10172 } 10173 asoc->ifp_had_enobuf = 0; 10174 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 10175 ((stcb->asoc.chunks_on_out_queue - 
stcb->asoc.total_flight_count) * 10176 sizeof(struct sctp_data_chunk))); 10177 } else { 10178 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 10179 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * 10180 sizeof(struct sctp_data_chunk))); 10181 queue_only = 0; 10182 } 10183 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && 10184 (stcb->asoc.total_flight > 0) && 10185 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) 10186 ) { 10187 10188 /* 10189 * Ok, Nagle is set on and we have data outstanding. Don't 10190 * send anything and let SACKs drive out the data unless wen 10191 * have a "full" segment to send. 10192 */ 10193 #ifdef SCTP_NAGLE_LOGGING 10194 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED); 10195 #endif 10196 SCTP_STAT_INCR(sctps_naglequeued); 10197 nagle_applies = 1; 10198 } else { 10199 #ifdef SCTP_NAGLE_LOGGING 10200 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) 10201 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED); 10202 #endif 10203 SCTP_STAT_INCR(sctps_naglesent); 10204 nagle_applies = 0; 10205 } 10206 if (queue_only_for_init) { 10207 if (hold_tcblock == 0) { 10208 SCTP_TCB_LOCK(stcb); 10209 hold_tcblock = 1; 10210 } 10211 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) { 10212 /* a collision took us forward? */ 10213 queue_only_for_init = 0; 10214 queue_only = 0; 10215 } else { 10216 sctp_send_initiate(inp, stcb); 10217 if (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING) 10218 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT | 10219 SCTP_STATE_SHUTDOWN_PENDING; 10220 else 10221 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT; 10222 queue_only_for_init = 0; 10223 queue_only = 1; 10224 } 10225 } 10226 if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) { 10227 /* we can attempt to send too. 
*/ 10228 if (hold_tcblock == 0) { 10229 /* 10230 * If there is activity recv'ing sacks no need to 10231 * send 10232 */ 10233 if (SCTP_TCB_TRYLOCK(stcb)) { 10234 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND); 10235 hold_tcblock = 1; 10236 } 10237 } else { 10238 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND); 10239 } 10240 } else if ((queue_only == 0) && 10241 (stcb->asoc.peers_rwnd == 0) && 10242 (stcb->asoc.total_flight == 0)) { 10243 /* We get to have a probe outstanding */ 10244 if (hold_tcblock == 0) { 10245 hold_tcblock = 1; 10246 SCTP_TCB_LOCK(stcb); 10247 } 10248 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND); 10249 } else if (some_on_control) { 10250 int num_out, reason, cwnd_full, frag_point; 10251 10252 /* Here we do control only */ 10253 if (hold_tcblock == 0) { 10254 hold_tcblock = 1; 10255 SCTP_TCB_LOCK(stcb); 10256 } 10257 frag_point = sctp_get_frag_point(stcb, &stcb->asoc); 10258 sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out, 10259 &reason, 1, &cwnd_full, 1, &now, &now_filled, frag_point); 10260 } 10261 #ifdef SCTP_DEBUG 10262 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) { 10263 printf("USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d \n", 10264 queue_only, stcb->asoc.peers_rwnd, un_sent, 10265 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue, 10266 stcb->asoc.total_output_queue_size); 10267 } 10268 #endif 10269 out: 10270 out_unlocked: 10271 10272 if (create_lock_applied) { 10273 SCTP_ASOC_CREATE_UNLOCK(inp); 10274 create_lock_applied = 0; 10275 } 10276 if ((stcb) && hold_tcblock) { 10277 SCTP_TCB_UNLOCK(stcb); 10278 } 10279 if (stcb && free_cnt_applied) { 10280 atomic_add_int(&stcb->asoc.refcnt, -1); 10281 } 10282 #ifdef INVARIANTS 10283 if (stcb) { 10284 if (mtx_owned(&stcb->tcb_mtx)) { 10285 panic("Leaving with tcb mtx owned?"); 10286 } 10287 if (mtx_owned(&stcb->tcb_send_mtx)) { 10288 panic("Leaving with tcb send mtx owned?"); 10289 } 10290 } 10291 #endif 10292 if (top) { 10293 
		sctp_m_freem(top);
	}
	if (control) {
		sctp_m_freem(control);
	}
	return (error);
}


/*
 * Generate an AUTHentication chunk, if required.
 *
 * Prepends/appends an AUTH chunk for 'chunk' to the outgoing mbuf chain
 * 'm' when the peer supports AUTH and has requested that this chunk type
 * be authenticated.
 *
 * m        - head of the outgoing mbuf chain (may be NULL; then the AUTH
 *            chunk becomes the first mbuf and header space is reserved)
 * m_end    - in/out tail pointer of the chain, updated by
 *            sctp_copy_mbufchain()
 * auth_ret - out: pointer to the AUTH chunk header within the chain, so
 *            the caller can fill in the HMAC at send time
 * offset   - out: byte offset of the AUTH chunk within the chain (sum of
 *            the lengths of the mbufs that precede it; 0 if it is first)
 * stcb     - association control block (supplies peer AUTH parameters)
 * chunk    - chunk type that is about to be sent
 *
 * Returns the (possibly new) head of the mbuf chain.  On any failure or
 * when no AUTH chunk is needed, the chain is returned unchanged.
 */
struct mbuf *
sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
    struct sctp_auth_chunk **auth_ret, uint32_t * offset,
    struct sctp_tcb *stcb, uint8_t chunk)
{
	struct mbuf *m_auth;
	struct sctp_auth_chunk *auth;
	int chunk_len;

	/* Guard against missing out-parameters or association state. */
	if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
	    (stcb == NULL))
		return (m);

	/* sysctl disabled auth? */
	if (sctp_auth_disable)
		return (m);

	/* peer doesn't do auth... */
	if (!stcb->asoc.peer_supports_auth) {
		return (m);
	}
	/* does the requested chunk require auth? */
	if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
		return (m);
	}
	/* Allocation failure is non-fatal: chunk simply goes out unauthenticated
	 * to the extent the caller allows; chain is returned unchanged. */
	m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_DONTWAIT, 1, MT_HEADER);
	if (m_auth == NULL) {
		/* no mbuf's */
		return (m);
	}
	/* reserve some space if this will be the first mbuf in the chain */
	if (m == NULL)
		SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
	/* fill in the AUTH chunk details */
	auth = mtod(m_auth, struct sctp_auth_chunk *);
	bzero(auth, sizeof(*auth));
	auth->ch.chunk_type = SCTP_AUTHENTICATION;
	auth->ch.chunk_flags = 0;
	/* chunk length covers the fixed header plus the HMAC digest that
	 * will be computed later, sized per the peer's chosen HMAC id */
	chunk_len = sizeof(*auth) +
	    sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
	auth->ch.chunk_length = htons(chunk_len);
	auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
	/* key id and hmac digest will be computed and filled in upon send */

	/* save the offset where the auth chunk was inserted into the chain */
	if (m != NULL) {
		struct mbuf *cn;

		*offset = 0;
		cn = m;
		while (cn) {
			*offset += SCTP_BUF_LEN(cn);
			cn = SCTP_BUF_NEXT(cn);
		}
	} else
		/* AUTH chunk is the first mbuf, so it sits at offset 0 */
		*offset = 0;

	/* update length and return pointer to the auth chunk */
	SCTP_BUF_LEN(m_auth) = chunk_len;
	m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
	if (auth_ret != NULL)
		*auth_ret = auth;

	return (m);
}