/*-
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
/* NOTE(review): sctputil.h is included twice (also above); the second
 * include is redundant and could be dropped. */
#include <netinet/sctputil.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/udp.h>
#include <machine/in_cksum.h>



/* Maximum number of gap-ack blocks a single byte of the mapping array
 * can produce (alternating set/clear bits: 0x55/0xaa). */
#define SCTP_MAX_GAPS_INARRAY 4
/*
 * Decoded gap-ack information for one possible value of a mapping-array
 * byte (see sack_array below).
 */
struct sack_track {
	uint8_t right_edge;	/* mergable on the right edge */
	uint8_t left_edge;	/* mergable on the left edge */
	uint8_t num_entries;	/* number of valid entries in gaps[] */
	uint8_t spare;		/* padding, unused */
	struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
};

/*
 * Precomputed lookup table, indexed by one byte of the SACK mapping array.
 * Entry i lists the runs of set bits in i as {start, end} bit offsets
 * (e.g. 0x05 -> {0,0} and {2,2}).  right_edge is set when bit 0 of the
 * index is set, left_edge when bit 7 is set — i.e. the first/last run can
 * be merged with a run in the neighboring byte.  The table is generated
 * data; do not edit entries by hand.
 * NOTE(review): this table is never written at runtime as far as this file
 * shows — consider declaring it `static const`; verify no external users.
 */
struct sack_track sack_array[256] = {
	{0, 0, 0, 0,		/* 0x00 */
		{{0, 0},
		{0, 0},
		{0, 0},
		{0, 0}
		}
	},
	{1, 0, 1, 0,		/* 0x01 */
		{{0, 0},
		{0, 0},
		{0, 0},
		{0, 0}
		}
	},
	{0, 0, 1, 0,		/* 0x02 */
		{{1, 1},
		{0, 0},
		{0, 0},
		{0, 0}
		}
	},
	{1, 0, 1, 0,		/* 0x03 */
		{{0, 1},
		{0, 0},
		{0, 0},
		{0, 0}
		}
	},
	{0, 0, 1, 0,		/* 0x04 */
		{{2, 2},
		{0, 0},
		{0, 0},
		{0, 0}
		}
	},
	{1, 0, 2, 0,		/* 0x05 */
		{{0, 0},
		{2, 2},
		{0, 0},
		{0, 0}
		}
	},
	{0, 0, 1, 0,		/* 0x06 */
		{{1, 2},
		{0, 0},
		{0, 0},
		{0, 0}
		}
	},
	{1, 0, 1, 0,		/* 0x07 */
		{{0, 2},
		{0, 0},
		{0, 0},
		{0, 0}
		}
	},
	{0, 0, 1, 0,		/* 0x08 */
		{{3, 3},
		{0, 0},
		{0, 0},
		{0, 0}
		}
	},
	{1, 0, 2, 0,		/* 0x09 */
		{{0, 0},
		{3, 3},
		{0, 0},
		{0, 0}
		}
	},
	{0, 0, 2, 0,		/* 0x0a */
{{1, 1}, 142 {3, 3}, 143 {0, 0}, 144 {0, 0} 145 } 146 }, 147 {1, 0, 2, 0, /* 0x0b */ 148 {{0, 1}, 149 {3, 3}, 150 {0, 0}, 151 {0, 0} 152 } 153 }, 154 {0, 0, 1, 0, /* 0x0c */ 155 {{2, 3}, 156 {0, 0}, 157 {0, 0}, 158 {0, 0} 159 } 160 }, 161 {1, 0, 2, 0, /* 0x0d */ 162 {{0, 0}, 163 {2, 3}, 164 {0, 0}, 165 {0, 0} 166 } 167 }, 168 {0, 0, 1, 0, /* 0x0e */ 169 {{1, 3}, 170 {0, 0}, 171 {0, 0}, 172 {0, 0} 173 } 174 }, 175 {1, 0, 1, 0, /* 0x0f */ 176 {{0, 3}, 177 {0, 0}, 178 {0, 0}, 179 {0, 0} 180 } 181 }, 182 {0, 0, 1, 0, /* 0x10 */ 183 {{4, 4}, 184 {0, 0}, 185 {0, 0}, 186 {0, 0} 187 } 188 }, 189 {1, 0, 2, 0, /* 0x11 */ 190 {{0, 0}, 191 {4, 4}, 192 {0, 0}, 193 {0, 0} 194 } 195 }, 196 {0, 0, 2, 0, /* 0x12 */ 197 {{1, 1}, 198 {4, 4}, 199 {0, 0}, 200 {0, 0} 201 } 202 }, 203 {1, 0, 2, 0, /* 0x13 */ 204 {{0, 1}, 205 {4, 4}, 206 {0, 0}, 207 {0, 0} 208 } 209 }, 210 {0, 0, 2, 0, /* 0x14 */ 211 {{2, 2}, 212 {4, 4}, 213 {0, 0}, 214 {0, 0} 215 } 216 }, 217 {1, 0, 3, 0, /* 0x15 */ 218 {{0, 0}, 219 {2, 2}, 220 {4, 4}, 221 {0, 0} 222 } 223 }, 224 {0, 0, 2, 0, /* 0x16 */ 225 {{1, 2}, 226 {4, 4}, 227 {0, 0}, 228 {0, 0} 229 } 230 }, 231 {1, 0, 2, 0, /* 0x17 */ 232 {{0, 2}, 233 {4, 4}, 234 {0, 0}, 235 {0, 0} 236 } 237 }, 238 {0, 0, 1, 0, /* 0x18 */ 239 {{3, 4}, 240 {0, 0}, 241 {0, 0}, 242 {0, 0} 243 } 244 }, 245 {1, 0, 2, 0, /* 0x19 */ 246 {{0, 0}, 247 {3, 4}, 248 {0, 0}, 249 {0, 0} 250 } 251 }, 252 {0, 0, 2, 0, /* 0x1a */ 253 {{1, 1}, 254 {3, 4}, 255 {0, 0}, 256 {0, 0} 257 } 258 }, 259 {1, 0, 2, 0, /* 0x1b */ 260 {{0, 1}, 261 {3, 4}, 262 {0, 0}, 263 {0, 0} 264 } 265 }, 266 {0, 0, 1, 0, /* 0x1c */ 267 {{2, 4}, 268 {0, 0}, 269 {0, 0}, 270 {0, 0} 271 } 272 }, 273 {1, 0, 2, 0, /* 0x1d */ 274 {{0, 0}, 275 {2, 4}, 276 {0, 0}, 277 {0, 0} 278 } 279 }, 280 {0, 0, 1, 0, /* 0x1e */ 281 {{1, 4}, 282 {0, 0}, 283 {0, 0}, 284 {0, 0} 285 } 286 }, 287 {1, 0, 1, 0, /* 0x1f */ 288 {{0, 4}, 289 {0, 0}, 290 {0, 0}, 291 {0, 0} 292 } 293 }, 294 {0, 0, 1, 0, /* 0x20 */ 295 {{5, 5}, 296 {0, 0}, 297 {0, 0}, 298 {0, 
0} 299 } 300 }, 301 {1, 0, 2, 0, /* 0x21 */ 302 {{0, 0}, 303 {5, 5}, 304 {0, 0}, 305 {0, 0} 306 } 307 }, 308 {0, 0, 2, 0, /* 0x22 */ 309 {{1, 1}, 310 {5, 5}, 311 {0, 0}, 312 {0, 0} 313 } 314 }, 315 {1, 0, 2, 0, /* 0x23 */ 316 {{0, 1}, 317 {5, 5}, 318 {0, 0}, 319 {0, 0} 320 } 321 }, 322 {0, 0, 2, 0, /* 0x24 */ 323 {{2, 2}, 324 {5, 5}, 325 {0, 0}, 326 {0, 0} 327 } 328 }, 329 {1, 0, 3, 0, /* 0x25 */ 330 {{0, 0}, 331 {2, 2}, 332 {5, 5}, 333 {0, 0} 334 } 335 }, 336 {0, 0, 2, 0, /* 0x26 */ 337 {{1, 2}, 338 {5, 5}, 339 {0, 0}, 340 {0, 0} 341 } 342 }, 343 {1, 0, 2, 0, /* 0x27 */ 344 {{0, 2}, 345 {5, 5}, 346 {0, 0}, 347 {0, 0} 348 } 349 }, 350 {0, 0, 2, 0, /* 0x28 */ 351 {{3, 3}, 352 {5, 5}, 353 {0, 0}, 354 {0, 0} 355 } 356 }, 357 {1, 0, 3, 0, /* 0x29 */ 358 {{0, 0}, 359 {3, 3}, 360 {5, 5}, 361 {0, 0} 362 } 363 }, 364 {0, 0, 3, 0, /* 0x2a */ 365 {{1, 1}, 366 {3, 3}, 367 {5, 5}, 368 {0, 0} 369 } 370 }, 371 {1, 0, 3, 0, /* 0x2b */ 372 {{0, 1}, 373 {3, 3}, 374 {5, 5}, 375 {0, 0} 376 } 377 }, 378 {0, 0, 2, 0, /* 0x2c */ 379 {{2, 3}, 380 {5, 5}, 381 {0, 0}, 382 {0, 0} 383 } 384 }, 385 {1, 0, 3, 0, /* 0x2d */ 386 {{0, 0}, 387 {2, 3}, 388 {5, 5}, 389 {0, 0} 390 } 391 }, 392 {0, 0, 2, 0, /* 0x2e */ 393 {{1, 3}, 394 {5, 5}, 395 {0, 0}, 396 {0, 0} 397 } 398 }, 399 {1, 0, 2, 0, /* 0x2f */ 400 {{0, 3}, 401 {5, 5}, 402 {0, 0}, 403 {0, 0} 404 } 405 }, 406 {0, 0, 1, 0, /* 0x30 */ 407 {{4, 5}, 408 {0, 0}, 409 {0, 0}, 410 {0, 0} 411 } 412 }, 413 {1, 0, 2, 0, /* 0x31 */ 414 {{0, 0}, 415 {4, 5}, 416 {0, 0}, 417 {0, 0} 418 } 419 }, 420 {0, 0, 2, 0, /* 0x32 */ 421 {{1, 1}, 422 {4, 5}, 423 {0, 0}, 424 {0, 0} 425 } 426 }, 427 {1, 0, 2, 0, /* 0x33 */ 428 {{0, 1}, 429 {4, 5}, 430 {0, 0}, 431 {0, 0} 432 } 433 }, 434 {0, 0, 2, 0, /* 0x34 */ 435 {{2, 2}, 436 {4, 5}, 437 {0, 0}, 438 {0, 0} 439 } 440 }, 441 {1, 0, 3, 0, /* 0x35 */ 442 {{0, 0}, 443 {2, 2}, 444 {4, 5}, 445 {0, 0} 446 } 447 }, 448 {0, 0, 2, 0, /* 0x36 */ 449 {{1, 2}, 450 {4, 5}, 451 {0, 0}, 452 {0, 0} 453 } 454 }, 455 {1, 0, 2, 0, /* 0x37 
*/ 456 {{0, 2}, 457 {4, 5}, 458 {0, 0}, 459 {0, 0} 460 } 461 }, 462 {0, 0, 1, 0, /* 0x38 */ 463 {{3, 5}, 464 {0, 0}, 465 {0, 0}, 466 {0, 0} 467 } 468 }, 469 {1, 0, 2, 0, /* 0x39 */ 470 {{0, 0}, 471 {3, 5}, 472 {0, 0}, 473 {0, 0} 474 } 475 }, 476 {0, 0, 2, 0, /* 0x3a */ 477 {{1, 1}, 478 {3, 5}, 479 {0, 0}, 480 {0, 0} 481 } 482 }, 483 {1, 0, 2, 0, /* 0x3b */ 484 {{0, 1}, 485 {3, 5}, 486 {0, 0}, 487 {0, 0} 488 } 489 }, 490 {0, 0, 1, 0, /* 0x3c */ 491 {{2, 5}, 492 {0, 0}, 493 {0, 0}, 494 {0, 0} 495 } 496 }, 497 {1, 0, 2, 0, /* 0x3d */ 498 {{0, 0}, 499 {2, 5}, 500 {0, 0}, 501 {0, 0} 502 } 503 }, 504 {0, 0, 1, 0, /* 0x3e */ 505 {{1, 5}, 506 {0, 0}, 507 {0, 0}, 508 {0, 0} 509 } 510 }, 511 {1, 0, 1, 0, /* 0x3f */ 512 {{0, 5}, 513 {0, 0}, 514 {0, 0}, 515 {0, 0} 516 } 517 }, 518 {0, 0, 1, 0, /* 0x40 */ 519 {{6, 6}, 520 {0, 0}, 521 {0, 0}, 522 {0, 0} 523 } 524 }, 525 {1, 0, 2, 0, /* 0x41 */ 526 {{0, 0}, 527 {6, 6}, 528 {0, 0}, 529 {0, 0} 530 } 531 }, 532 {0, 0, 2, 0, /* 0x42 */ 533 {{1, 1}, 534 {6, 6}, 535 {0, 0}, 536 {0, 0} 537 } 538 }, 539 {1, 0, 2, 0, /* 0x43 */ 540 {{0, 1}, 541 {6, 6}, 542 {0, 0}, 543 {0, 0} 544 } 545 }, 546 {0, 0, 2, 0, /* 0x44 */ 547 {{2, 2}, 548 {6, 6}, 549 {0, 0}, 550 {0, 0} 551 } 552 }, 553 {1, 0, 3, 0, /* 0x45 */ 554 {{0, 0}, 555 {2, 2}, 556 {6, 6}, 557 {0, 0} 558 } 559 }, 560 {0, 0, 2, 0, /* 0x46 */ 561 {{1, 2}, 562 {6, 6}, 563 {0, 0}, 564 {0, 0} 565 } 566 }, 567 {1, 0, 2, 0, /* 0x47 */ 568 {{0, 2}, 569 {6, 6}, 570 {0, 0}, 571 {0, 0} 572 } 573 }, 574 {0, 0, 2, 0, /* 0x48 */ 575 {{3, 3}, 576 {6, 6}, 577 {0, 0}, 578 {0, 0} 579 } 580 }, 581 {1, 0, 3, 0, /* 0x49 */ 582 {{0, 0}, 583 {3, 3}, 584 {6, 6}, 585 {0, 0} 586 } 587 }, 588 {0, 0, 3, 0, /* 0x4a */ 589 {{1, 1}, 590 {3, 3}, 591 {6, 6}, 592 {0, 0} 593 } 594 }, 595 {1, 0, 3, 0, /* 0x4b */ 596 {{0, 1}, 597 {3, 3}, 598 {6, 6}, 599 {0, 0} 600 } 601 }, 602 {0, 0, 2, 0, /* 0x4c */ 603 {{2, 3}, 604 {6, 6}, 605 {0, 0}, 606 {0, 0} 607 } 608 }, 609 {1, 0, 3, 0, /* 0x4d */ 610 {{0, 0}, 611 {2, 3}, 612 {6, 6}, 
613 {0, 0} 614 } 615 }, 616 {0, 0, 2, 0, /* 0x4e */ 617 {{1, 3}, 618 {6, 6}, 619 {0, 0}, 620 {0, 0} 621 } 622 }, 623 {1, 0, 2, 0, /* 0x4f */ 624 {{0, 3}, 625 {6, 6}, 626 {0, 0}, 627 {0, 0} 628 } 629 }, 630 {0, 0, 2, 0, /* 0x50 */ 631 {{4, 4}, 632 {6, 6}, 633 {0, 0}, 634 {0, 0} 635 } 636 }, 637 {1, 0, 3, 0, /* 0x51 */ 638 {{0, 0}, 639 {4, 4}, 640 {6, 6}, 641 {0, 0} 642 } 643 }, 644 {0, 0, 3, 0, /* 0x52 */ 645 {{1, 1}, 646 {4, 4}, 647 {6, 6}, 648 {0, 0} 649 } 650 }, 651 {1, 0, 3, 0, /* 0x53 */ 652 {{0, 1}, 653 {4, 4}, 654 {6, 6}, 655 {0, 0} 656 } 657 }, 658 {0, 0, 3, 0, /* 0x54 */ 659 {{2, 2}, 660 {4, 4}, 661 {6, 6}, 662 {0, 0} 663 } 664 }, 665 {1, 0, 4, 0, /* 0x55 */ 666 {{0, 0}, 667 {2, 2}, 668 {4, 4}, 669 {6, 6} 670 } 671 }, 672 {0, 0, 3, 0, /* 0x56 */ 673 {{1, 2}, 674 {4, 4}, 675 {6, 6}, 676 {0, 0} 677 } 678 }, 679 {1, 0, 3, 0, /* 0x57 */ 680 {{0, 2}, 681 {4, 4}, 682 {6, 6}, 683 {0, 0} 684 } 685 }, 686 {0, 0, 2, 0, /* 0x58 */ 687 {{3, 4}, 688 {6, 6}, 689 {0, 0}, 690 {0, 0} 691 } 692 }, 693 {1, 0, 3, 0, /* 0x59 */ 694 {{0, 0}, 695 {3, 4}, 696 {6, 6}, 697 {0, 0} 698 } 699 }, 700 {0, 0, 3, 0, /* 0x5a */ 701 {{1, 1}, 702 {3, 4}, 703 {6, 6}, 704 {0, 0} 705 } 706 }, 707 {1, 0, 3, 0, /* 0x5b */ 708 {{0, 1}, 709 {3, 4}, 710 {6, 6}, 711 {0, 0} 712 } 713 }, 714 {0, 0, 2, 0, /* 0x5c */ 715 {{2, 4}, 716 {6, 6}, 717 {0, 0}, 718 {0, 0} 719 } 720 }, 721 {1, 0, 3, 0, /* 0x5d */ 722 {{0, 0}, 723 {2, 4}, 724 {6, 6}, 725 {0, 0} 726 } 727 }, 728 {0, 0, 2, 0, /* 0x5e */ 729 {{1, 4}, 730 {6, 6}, 731 {0, 0}, 732 {0, 0} 733 } 734 }, 735 {1, 0, 2, 0, /* 0x5f */ 736 {{0, 4}, 737 {6, 6}, 738 {0, 0}, 739 {0, 0} 740 } 741 }, 742 {0, 0, 1, 0, /* 0x60 */ 743 {{5, 6}, 744 {0, 0}, 745 {0, 0}, 746 {0, 0} 747 } 748 }, 749 {1, 0, 2, 0, /* 0x61 */ 750 {{0, 0}, 751 {5, 6}, 752 {0, 0}, 753 {0, 0} 754 } 755 }, 756 {0, 0, 2, 0, /* 0x62 */ 757 {{1, 1}, 758 {5, 6}, 759 {0, 0}, 760 {0, 0} 761 } 762 }, 763 {1, 0, 2, 0, /* 0x63 */ 764 {{0, 1}, 765 {5, 6}, 766 {0, 0}, 767 {0, 0} 768 } 769 }, 770 {0, 0, 2, 0, 
/* 0x64 */ 771 {{2, 2}, 772 {5, 6}, 773 {0, 0}, 774 {0, 0} 775 } 776 }, 777 {1, 0, 3, 0, /* 0x65 */ 778 {{0, 0}, 779 {2, 2}, 780 {5, 6}, 781 {0, 0} 782 } 783 }, 784 {0, 0, 2, 0, /* 0x66 */ 785 {{1, 2}, 786 {5, 6}, 787 {0, 0}, 788 {0, 0} 789 } 790 }, 791 {1, 0, 2, 0, /* 0x67 */ 792 {{0, 2}, 793 {5, 6}, 794 {0, 0}, 795 {0, 0} 796 } 797 }, 798 {0, 0, 2, 0, /* 0x68 */ 799 {{3, 3}, 800 {5, 6}, 801 {0, 0}, 802 {0, 0} 803 } 804 }, 805 {1, 0, 3, 0, /* 0x69 */ 806 {{0, 0}, 807 {3, 3}, 808 {5, 6}, 809 {0, 0} 810 } 811 }, 812 {0, 0, 3, 0, /* 0x6a */ 813 {{1, 1}, 814 {3, 3}, 815 {5, 6}, 816 {0, 0} 817 } 818 }, 819 {1, 0, 3, 0, /* 0x6b */ 820 {{0, 1}, 821 {3, 3}, 822 {5, 6}, 823 {0, 0} 824 } 825 }, 826 {0, 0, 2, 0, /* 0x6c */ 827 {{2, 3}, 828 {5, 6}, 829 {0, 0}, 830 {0, 0} 831 } 832 }, 833 {1, 0, 3, 0, /* 0x6d */ 834 {{0, 0}, 835 {2, 3}, 836 {5, 6}, 837 {0, 0} 838 } 839 }, 840 {0, 0, 2, 0, /* 0x6e */ 841 {{1, 3}, 842 {5, 6}, 843 {0, 0}, 844 {0, 0} 845 } 846 }, 847 {1, 0, 2, 0, /* 0x6f */ 848 {{0, 3}, 849 {5, 6}, 850 {0, 0}, 851 {0, 0} 852 } 853 }, 854 {0, 0, 1, 0, /* 0x70 */ 855 {{4, 6}, 856 {0, 0}, 857 {0, 0}, 858 {0, 0} 859 } 860 }, 861 {1, 0, 2, 0, /* 0x71 */ 862 {{0, 0}, 863 {4, 6}, 864 {0, 0}, 865 {0, 0} 866 } 867 }, 868 {0, 0, 2, 0, /* 0x72 */ 869 {{1, 1}, 870 {4, 6}, 871 {0, 0}, 872 {0, 0} 873 } 874 }, 875 {1, 0, 2, 0, /* 0x73 */ 876 {{0, 1}, 877 {4, 6}, 878 {0, 0}, 879 {0, 0} 880 } 881 }, 882 {0, 0, 2, 0, /* 0x74 */ 883 {{2, 2}, 884 {4, 6}, 885 {0, 0}, 886 {0, 0} 887 } 888 }, 889 {1, 0, 3, 0, /* 0x75 */ 890 {{0, 0}, 891 {2, 2}, 892 {4, 6}, 893 {0, 0} 894 } 895 }, 896 {0, 0, 2, 0, /* 0x76 */ 897 {{1, 2}, 898 {4, 6}, 899 {0, 0}, 900 {0, 0} 901 } 902 }, 903 {1, 0, 2, 0, /* 0x77 */ 904 {{0, 2}, 905 {4, 6}, 906 {0, 0}, 907 {0, 0} 908 } 909 }, 910 {0, 0, 1, 0, /* 0x78 */ 911 {{3, 6}, 912 {0, 0}, 913 {0, 0}, 914 {0, 0} 915 } 916 }, 917 {1, 0, 2, 0, /* 0x79 */ 918 {{0, 0}, 919 {3, 6}, 920 {0, 0}, 921 {0, 0} 922 } 923 }, 924 {0, 0, 2, 0, /* 0x7a */ 925 {{1, 1}, 926 {3, 6}, 927 
{0, 0}, 928 {0, 0} 929 } 930 }, 931 {1, 0, 2, 0, /* 0x7b */ 932 {{0, 1}, 933 {3, 6}, 934 {0, 0}, 935 {0, 0} 936 } 937 }, 938 {0, 0, 1, 0, /* 0x7c */ 939 {{2, 6}, 940 {0, 0}, 941 {0, 0}, 942 {0, 0} 943 } 944 }, 945 {1, 0, 2, 0, /* 0x7d */ 946 {{0, 0}, 947 {2, 6}, 948 {0, 0}, 949 {0, 0} 950 } 951 }, 952 {0, 0, 1, 0, /* 0x7e */ 953 {{1, 6}, 954 {0, 0}, 955 {0, 0}, 956 {0, 0} 957 } 958 }, 959 {1, 0, 1, 0, /* 0x7f */ 960 {{0, 6}, 961 {0, 0}, 962 {0, 0}, 963 {0, 0} 964 } 965 }, 966 {0, 1, 1, 0, /* 0x80 */ 967 {{7, 7}, 968 {0, 0}, 969 {0, 0}, 970 {0, 0} 971 } 972 }, 973 {1, 1, 2, 0, /* 0x81 */ 974 {{0, 0}, 975 {7, 7}, 976 {0, 0}, 977 {0, 0} 978 } 979 }, 980 {0, 1, 2, 0, /* 0x82 */ 981 {{1, 1}, 982 {7, 7}, 983 {0, 0}, 984 {0, 0} 985 } 986 }, 987 {1, 1, 2, 0, /* 0x83 */ 988 {{0, 1}, 989 {7, 7}, 990 {0, 0}, 991 {0, 0} 992 } 993 }, 994 {0, 1, 2, 0, /* 0x84 */ 995 {{2, 2}, 996 {7, 7}, 997 {0, 0}, 998 {0, 0} 999 } 1000 }, 1001 {1, 1, 3, 0, /* 0x85 */ 1002 {{0, 0}, 1003 {2, 2}, 1004 {7, 7}, 1005 {0, 0} 1006 } 1007 }, 1008 {0, 1, 2, 0, /* 0x86 */ 1009 {{1, 2}, 1010 {7, 7}, 1011 {0, 0}, 1012 {0, 0} 1013 } 1014 }, 1015 {1, 1, 2, 0, /* 0x87 */ 1016 {{0, 2}, 1017 {7, 7}, 1018 {0, 0}, 1019 {0, 0} 1020 } 1021 }, 1022 {0, 1, 2, 0, /* 0x88 */ 1023 {{3, 3}, 1024 {7, 7}, 1025 {0, 0}, 1026 {0, 0} 1027 } 1028 }, 1029 {1, 1, 3, 0, /* 0x89 */ 1030 {{0, 0}, 1031 {3, 3}, 1032 {7, 7}, 1033 {0, 0} 1034 } 1035 }, 1036 {0, 1, 3, 0, /* 0x8a */ 1037 {{1, 1}, 1038 {3, 3}, 1039 {7, 7}, 1040 {0, 0} 1041 } 1042 }, 1043 {1, 1, 3, 0, /* 0x8b */ 1044 {{0, 1}, 1045 {3, 3}, 1046 {7, 7}, 1047 {0, 0} 1048 } 1049 }, 1050 {0, 1, 2, 0, /* 0x8c */ 1051 {{2, 3}, 1052 {7, 7}, 1053 {0, 0}, 1054 {0, 0} 1055 } 1056 }, 1057 {1, 1, 3, 0, /* 0x8d */ 1058 {{0, 0}, 1059 {2, 3}, 1060 {7, 7}, 1061 {0, 0} 1062 } 1063 }, 1064 {0, 1, 2, 0, /* 0x8e */ 1065 {{1, 3}, 1066 {7, 7}, 1067 {0, 0}, 1068 {0, 0} 1069 } 1070 }, 1071 {1, 1, 2, 0, /* 0x8f */ 1072 {{0, 3}, 1073 {7, 7}, 1074 {0, 0}, 1075 {0, 0} 1076 } 1077 }, 1078 {0, 1, 2, 0, /* 
0x90 */ 1079 {{4, 4}, 1080 {7, 7}, 1081 {0, 0}, 1082 {0, 0} 1083 } 1084 }, 1085 {1, 1, 3, 0, /* 0x91 */ 1086 {{0, 0}, 1087 {4, 4}, 1088 {7, 7}, 1089 {0, 0} 1090 } 1091 }, 1092 {0, 1, 3, 0, /* 0x92 */ 1093 {{1, 1}, 1094 {4, 4}, 1095 {7, 7}, 1096 {0, 0} 1097 } 1098 }, 1099 {1, 1, 3, 0, /* 0x93 */ 1100 {{0, 1}, 1101 {4, 4}, 1102 {7, 7}, 1103 {0, 0} 1104 } 1105 }, 1106 {0, 1, 3, 0, /* 0x94 */ 1107 {{2, 2}, 1108 {4, 4}, 1109 {7, 7}, 1110 {0, 0} 1111 } 1112 }, 1113 {1, 1, 4, 0, /* 0x95 */ 1114 {{0, 0}, 1115 {2, 2}, 1116 {4, 4}, 1117 {7, 7} 1118 } 1119 }, 1120 {0, 1, 3, 0, /* 0x96 */ 1121 {{1, 2}, 1122 {4, 4}, 1123 {7, 7}, 1124 {0, 0} 1125 } 1126 }, 1127 {1, 1, 3, 0, /* 0x97 */ 1128 {{0, 2}, 1129 {4, 4}, 1130 {7, 7}, 1131 {0, 0} 1132 } 1133 }, 1134 {0, 1, 2, 0, /* 0x98 */ 1135 {{3, 4}, 1136 {7, 7}, 1137 {0, 0}, 1138 {0, 0} 1139 } 1140 }, 1141 {1, 1, 3, 0, /* 0x99 */ 1142 {{0, 0}, 1143 {3, 4}, 1144 {7, 7}, 1145 {0, 0} 1146 } 1147 }, 1148 {0, 1, 3, 0, /* 0x9a */ 1149 {{1, 1}, 1150 {3, 4}, 1151 {7, 7}, 1152 {0, 0} 1153 } 1154 }, 1155 {1, 1, 3, 0, /* 0x9b */ 1156 {{0, 1}, 1157 {3, 4}, 1158 {7, 7}, 1159 {0, 0} 1160 } 1161 }, 1162 {0, 1, 2, 0, /* 0x9c */ 1163 {{2, 4}, 1164 {7, 7}, 1165 {0, 0}, 1166 {0, 0} 1167 } 1168 }, 1169 {1, 1, 3, 0, /* 0x9d */ 1170 {{0, 0}, 1171 {2, 4}, 1172 {7, 7}, 1173 {0, 0} 1174 } 1175 }, 1176 {0, 1, 2, 0, /* 0x9e */ 1177 {{1, 4}, 1178 {7, 7}, 1179 {0, 0}, 1180 {0, 0} 1181 } 1182 }, 1183 {1, 1, 2, 0, /* 0x9f */ 1184 {{0, 4}, 1185 {7, 7}, 1186 {0, 0}, 1187 {0, 0} 1188 } 1189 }, 1190 {0, 1, 2, 0, /* 0xa0 */ 1191 {{5, 5}, 1192 {7, 7}, 1193 {0, 0}, 1194 {0, 0} 1195 } 1196 }, 1197 {1, 1, 3, 0, /* 0xa1 */ 1198 {{0, 0}, 1199 {5, 5}, 1200 {7, 7}, 1201 {0, 0} 1202 } 1203 }, 1204 {0, 1, 3, 0, /* 0xa2 */ 1205 {{1, 1}, 1206 {5, 5}, 1207 {7, 7}, 1208 {0, 0} 1209 } 1210 }, 1211 {1, 1, 3, 0, /* 0xa3 */ 1212 {{0, 1}, 1213 {5, 5}, 1214 {7, 7}, 1215 {0, 0} 1216 } 1217 }, 1218 {0, 1, 3, 0, /* 0xa4 */ 1219 {{2, 2}, 1220 {5, 5}, 1221 {7, 7}, 1222 {0, 0} 1223 } 1224 }, 1225 
{1, 1, 4, 0, /* 0xa5 */ 1226 {{0, 0}, 1227 {2, 2}, 1228 {5, 5}, 1229 {7, 7} 1230 } 1231 }, 1232 {0, 1, 3, 0, /* 0xa6 */ 1233 {{1, 2}, 1234 {5, 5}, 1235 {7, 7}, 1236 {0, 0} 1237 } 1238 }, 1239 {1, 1, 3, 0, /* 0xa7 */ 1240 {{0, 2}, 1241 {5, 5}, 1242 {7, 7}, 1243 {0, 0} 1244 } 1245 }, 1246 {0, 1, 3, 0, /* 0xa8 */ 1247 {{3, 3}, 1248 {5, 5}, 1249 {7, 7}, 1250 {0, 0} 1251 } 1252 }, 1253 {1, 1, 4, 0, /* 0xa9 */ 1254 {{0, 0}, 1255 {3, 3}, 1256 {5, 5}, 1257 {7, 7} 1258 } 1259 }, 1260 {0, 1, 4, 0, /* 0xaa */ 1261 {{1, 1}, 1262 {3, 3}, 1263 {5, 5}, 1264 {7, 7} 1265 } 1266 }, 1267 {1, 1, 4, 0, /* 0xab */ 1268 {{0, 1}, 1269 {3, 3}, 1270 {5, 5}, 1271 {7, 7} 1272 } 1273 }, 1274 {0, 1, 3, 0, /* 0xac */ 1275 {{2, 3}, 1276 {5, 5}, 1277 {7, 7}, 1278 {0, 0} 1279 } 1280 }, 1281 {1, 1, 4, 0, /* 0xad */ 1282 {{0, 0}, 1283 {2, 3}, 1284 {5, 5}, 1285 {7, 7} 1286 } 1287 }, 1288 {0, 1, 3, 0, /* 0xae */ 1289 {{1, 3}, 1290 {5, 5}, 1291 {7, 7}, 1292 {0, 0} 1293 } 1294 }, 1295 {1, 1, 3, 0, /* 0xaf */ 1296 {{0, 3}, 1297 {5, 5}, 1298 {7, 7}, 1299 {0, 0} 1300 } 1301 }, 1302 {0, 1, 2, 0, /* 0xb0 */ 1303 {{4, 5}, 1304 {7, 7}, 1305 {0, 0}, 1306 {0, 0} 1307 } 1308 }, 1309 {1, 1, 3, 0, /* 0xb1 */ 1310 {{0, 0}, 1311 {4, 5}, 1312 {7, 7}, 1313 {0, 0} 1314 } 1315 }, 1316 {0, 1, 3, 0, /* 0xb2 */ 1317 {{1, 1}, 1318 {4, 5}, 1319 {7, 7}, 1320 {0, 0} 1321 } 1322 }, 1323 {1, 1, 3, 0, /* 0xb3 */ 1324 {{0, 1}, 1325 {4, 5}, 1326 {7, 7}, 1327 {0, 0} 1328 } 1329 }, 1330 {0, 1, 3, 0, /* 0xb4 */ 1331 {{2, 2}, 1332 {4, 5}, 1333 {7, 7}, 1334 {0, 0} 1335 } 1336 }, 1337 {1, 1, 4, 0, /* 0xb5 */ 1338 {{0, 0}, 1339 {2, 2}, 1340 {4, 5}, 1341 {7, 7} 1342 } 1343 }, 1344 {0, 1, 3, 0, /* 0xb6 */ 1345 {{1, 2}, 1346 {4, 5}, 1347 {7, 7}, 1348 {0, 0} 1349 } 1350 }, 1351 {1, 1, 3, 0, /* 0xb7 */ 1352 {{0, 2}, 1353 {4, 5}, 1354 {7, 7}, 1355 {0, 0} 1356 } 1357 }, 1358 {0, 1, 2, 0, /* 0xb8 */ 1359 {{3, 5}, 1360 {7, 7}, 1361 {0, 0}, 1362 {0, 0} 1363 } 1364 }, 1365 {1, 1, 3, 0, /* 0xb9 */ 1366 {{0, 0}, 1367 {3, 5}, 1368 {7, 7}, 1369 {0, 0} 
1370 } 1371 }, 1372 {0, 1, 3, 0, /* 0xba */ 1373 {{1, 1}, 1374 {3, 5}, 1375 {7, 7}, 1376 {0, 0} 1377 } 1378 }, 1379 {1, 1, 3, 0, /* 0xbb */ 1380 {{0, 1}, 1381 {3, 5}, 1382 {7, 7}, 1383 {0, 0} 1384 } 1385 }, 1386 {0, 1, 2, 0, /* 0xbc */ 1387 {{2, 5}, 1388 {7, 7}, 1389 {0, 0}, 1390 {0, 0} 1391 } 1392 }, 1393 {1, 1, 3, 0, /* 0xbd */ 1394 {{0, 0}, 1395 {2, 5}, 1396 {7, 7}, 1397 {0, 0} 1398 } 1399 }, 1400 {0, 1, 2, 0, /* 0xbe */ 1401 {{1, 5}, 1402 {7, 7}, 1403 {0, 0}, 1404 {0, 0} 1405 } 1406 }, 1407 {1, 1, 2, 0, /* 0xbf */ 1408 {{0, 5}, 1409 {7, 7}, 1410 {0, 0}, 1411 {0, 0} 1412 } 1413 }, 1414 {0, 1, 1, 0, /* 0xc0 */ 1415 {{6, 7}, 1416 {0, 0}, 1417 {0, 0}, 1418 {0, 0} 1419 } 1420 }, 1421 {1, 1, 2, 0, /* 0xc1 */ 1422 {{0, 0}, 1423 {6, 7}, 1424 {0, 0}, 1425 {0, 0} 1426 } 1427 }, 1428 {0, 1, 2, 0, /* 0xc2 */ 1429 {{1, 1}, 1430 {6, 7}, 1431 {0, 0}, 1432 {0, 0} 1433 } 1434 }, 1435 {1, 1, 2, 0, /* 0xc3 */ 1436 {{0, 1}, 1437 {6, 7}, 1438 {0, 0}, 1439 {0, 0} 1440 } 1441 }, 1442 {0, 1, 2, 0, /* 0xc4 */ 1443 {{2, 2}, 1444 {6, 7}, 1445 {0, 0}, 1446 {0, 0} 1447 } 1448 }, 1449 {1, 1, 3, 0, /* 0xc5 */ 1450 {{0, 0}, 1451 {2, 2}, 1452 {6, 7}, 1453 {0, 0} 1454 } 1455 }, 1456 {0, 1, 2, 0, /* 0xc6 */ 1457 {{1, 2}, 1458 {6, 7}, 1459 {0, 0}, 1460 {0, 0} 1461 } 1462 }, 1463 {1, 1, 2, 0, /* 0xc7 */ 1464 {{0, 2}, 1465 {6, 7}, 1466 {0, 0}, 1467 {0, 0} 1468 } 1469 }, 1470 {0, 1, 2, 0, /* 0xc8 */ 1471 {{3, 3}, 1472 {6, 7}, 1473 {0, 0}, 1474 {0, 0} 1475 } 1476 }, 1477 {1, 1, 3, 0, /* 0xc9 */ 1478 {{0, 0}, 1479 {3, 3}, 1480 {6, 7}, 1481 {0, 0} 1482 } 1483 }, 1484 {0, 1, 3, 0, /* 0xca */ 1485 {{1, 1}, 1486 {3, 3}, 1487 {6, 7}, 1488 {0, 0} 1489 } 1490 }, 1491 {1, 1, 3, 0, /* 0xcb */ 1492 {{0, 1}, 1493 {3, 3}, 1494 {6, 7}, 1495 {0, 0} 1496 } 1497 }, 1498 {0, 1, 2, 0, /* 0xcc */ 1499 {{2, 3}, 1500 {6, 7}, 1501 {0, 0}, 1502 {0, 0} 1503 } 1504 }, 1505 {1, 1, 3, 0, /* 0xcd */ 1506 {{0, 0}, 1507 {2, 3}, 1508 {6, 7}, 1509 {0, 0} 1510 } 1511 }, 1512 {0, 1, 2, 0, /* 0xce */ 1513 {{1, 3}, 1514 {6, 7}, 1515 {0, 
0}, 1516 {0, 0} 1517 } 1518 }, 1519 {1, 1, 2, 0, /* 0xcf */ 1520 {{0, 3}, 1521 {6, 7}, 1522 {0, 0}, 1523 {0, 0} 1524 } 1525 }, 1526 {0, 1, 2, 0, /* 0xd0 */ 1527 {{4, 4}, 1528 {6, 7}, 1529 {0, 0}, 1530 {0, 0} 1531 } 1532 }, 1533 {1, 1, 3, 0, /* 0xd1 */ 1534 {{0, 0}, 1535 {4, 4}, 1536 {6, 7}, 1537 {0, 0} 1538 } 1539 }, 1540 {0, 1, 3, 0, /* 0xd2 */ 1541 {{1, 1}, 1542 {4, 4}, 1543 {6, 7}, 1544 {0, 0} 1545 } 1546 }, 1547 {1, 1, 3, 0, /* 0xd3 */ 1548 {{0, 1}, 1549 {4, 4}, 1550 {6, 7}, 1551 {0, 0} 1552 } 1553 }, 1554 {0, 1, 3, 0, /* 0xd4 */ 1555 {{2, 2}, 1556 {4, 4}, 1557 {6, 7}, 1558 {0, 0} 1559 } 1560 }, 1561 {1, 1, 4, 0, /* 0xd5 */ 1562 {{0, 0}, 1563 {2, 2}, 1564 {4, 4}, 1565 {6, 7} 1566 } 1567 }, 1568 {0, 1, 3, 0, /* 0xd6 */ 1569 {{1, 2}, 1570 {4, 4}, 1571 {6, 7}, 1572 {0, 0} 1573 } 1574 }, 1575 {1, 1, 3, 0, /* 0xd7 */ 1576 {{0, 2}, 1577 {4, 4}, 1578 {6, 7}, 1579 {0, 0} 1580 } 1581 }, 1582 {0, 1, 2, 0, /* 0xd8 */ 1583 {{3, 4}, 1584 {6, 7}, 1585 {0, 0}, 1586 {0, 0} 1587 } 1588 }, 1589 {1, 1, 3, 0, /* 0xd9 */ 1590 {{0, 0}, 1591 {3, 4}, 1592 {6, 7}, 1593 {0, 0} 1594 } 1595 }, 1596 {0, 1, 3, 0, /* 0xda */ 1597 {{1, 1}, 1598 {3, 4}, 1599 {6, 7}, 1600 {0, 0} 1601 } 1602 }, 1603 {1, 1, 3, 0, /* 0xdb */ 1604 {{0, 1}, 1605 {3, 4}, 1606 {6, 7}, 1607 {0, 0} 1608 } 1609 }, 1610 {0, 1, 2, 0, /* 0xdc */ 1611 {{2, 4}, 1612 {6, 7}, 1613 {0, 0}, 1614 {0, 0} 1615 } 1616 }, 1617 {1, 1, 3, 0, /* 0xdd */ 1618 {{0, 0}, 1619 {2, 4}, 1620 {6, 7}, 1621 {0, 0} 1622 } 1623 }, 1624 {0, 1, 2, 0, /* 0xde */ 1625 {{1, 4}, 1626 {6, 7}, 1627 {0, 0}, 1628 {0, 0} 1629 } 1630 }, 1631 {1, 1, 2, 0, /* 0xdf */ 1632 {{0, 4}, 1633 {6, 7}, 1634 {0, 0}, 1635 {0, 0} 1636 } 1637 }, 1638 {0, 1, 1, 0, /* 0xe0 */ 1639 {{5, 7}, 1640 {0, 0}, 1641 {0, 0}, 1642 {0, 0} 1643 } 1644 }, 1645 {1, 1, 2, 0, /* 0xe1 */ 1646 {{0, 0}, 1647 {5, 7}, 1648 {0, 0}, 1649 {0, 0} 1650 } 1651 }, 1652 {0, 1, 2, 0, /* 0xe2 */ 1653 {{1, 1}, 1654 {5, 7}, 1655 {0, 0}, 1656 {0, 0} 1657 } 1658 }, 1659 {1, 1, 2, 0, /* 0xe3 */ 1660 {{0, 1}, 1661 
{5, 7}, 1662 {0, 0}, 1663 {0, 0} 1664 } 1665 }, 1666 {0, 1, 2, 0, /* 0xe4 */ 1667 {{2, 2}, 1668 {5, 7}, 1669 {0, 0}, 1670 {0, 0} 1671 } 1672 }, 1673 {1, 1, 3, 0, /* 0xe5 */ 1674 {{0, 0}, 1675 {2, 2}, 1676 {5, 7}, 1677 {0, 0} 1678 } 1679 }, 1680 {0, 1, 2, 0, /* 0xe6 */ 1681 {{1, 2}, 1682 {5, 7}, 1683 {0, 0}, 1684 {0, 0} 1685 } 1686 }, 1687 {1, 1, 2, 0, /* 0xe7 */ 1688 {{0, 2}, 1689 {5, 7}, 1690 {0, 0}, 1691 {0, 0} 1692 } 1693 }, 1694 {0, 1, 2, 0, /* 0xe8 */ 1695 {{3, 3}, 1696 {5, 7}, 1697 {0, 0}, 1698 {0, 0} 1699 } 1700 }, 1701 {1, 1, 3, 0, /* 0xe9 */ 1702 {{0, 0}, 1703 {3, 3}, 1704 {5, 7}, 1705 {0, 0} 1706 } 1707 }, 1708 {0, 1, 3, 0, /* 0xea */ 1709 {{1, 1}, 1710 {3, 3}, 1711 {5, 7}, 1712 {0, 0} 1713 } 1714 }, 1715 {1, 1, 3, 0, /* 0xeb */ 1716 {{0, 1}, 1717 {3, 3}, 1718 {5, 7}, 1719 {0, 0} 1720 } 1721 }, 1722 {0, 1, 2, 0, /* 0xec */ 1723 {{2, 3}, 1724 {5, 7}, 1725 {0, 0}, 1726 {0, 0} 1727 } 1728 }, 1729 {1, 1, 3, 0, /* 0xed */ 1730 {{0, 0}, 1731 {2, 3}, 1732 {5, 7}, 1733 {0, 0} 1734 } 1735 }, 1736 {0, 1, 2, 0, /* 0xee */ 1737 {{1, 3}, 1738 {5, 7}, 1739 {0, 0}, 1740 {0, 0} 1741 } 1742 }, 1743 {1, 1, 2, 0, /* 0xef */ 1744 {{0, 3}, 1745 {5, 7}, 1746 {0, 0}, 1747 {0, 0} 1748 } 1749 }, 1750 {0, 1, 1, 0, /* 0xf0 */ 1751 {{4, 7}, 1752 {0, 0}, 1753 {0, 0}, 1754 {0, 0} 1755 } 1756 }, 1757 {1, 1, 2, 0, /* 0xf1 */ 1758 {{0, 0}, 1759 {4, 7}, 1760 {0, 0}, 1761 {0, 0} 1762 } 1763 }, 1764 {0, 1, 2, 0, /* 0xf2 */ 1765 {{1, 1}, 1766 {4, 7}, 1767 {0, 0}, 1768 {0, 0} 1769 } 1770 }, 1771 {1, 1, 2, 0, /* 0xf3 */ 1772 {{0, 1}, 1773 {4, 7}, 1774 {0, 0}, 1775 {0, 0} 1776 } 1777 }, 1778 {0, 1, 2, 0, /* 0xf4 */ 1779 {{2, 2}, 1780 {4, 7}, 1781 {0, 0}, 1782 {0, 0} 1783 } 1784 }, 1785 {1, 1, 3, 0, /* 0xf5 */ 1786 {{0, 0}, 1787 {2, 2}, 1788 {4, 7}, 1789 {0, 0} 1790 } 1791 }, 1792 {0, 1, 2, 0, /* 0xf6 */ 1793 {{1, 2}, 1794 {4, 7}, 1795 {0, 0}, 1796 {0, 0} 1797 } 1798 }, 1799 {1, 1, 2, 0, /* 0xf7 */ 1800 {{0, 2}, 1801 {4, 7}, 1802 {0, 0}, 1803 {0, 0} 1804 } 1805 }, 1806 {0, 1, 1, 0, /* 0xf8 */ 
		{{3, 7},
		{0, 0},
		{0, 0},
		{0, 0}
		}
	},
	{1, 1, 2, 0,		/* 0xf9 */
		{{0, 0},
		{3, 7},
		{0, 0},
		{0, 0}
		}
	},
	{0, 1, 2, 0,		/* 0xfa */
		{{1, 1},
		{3, 7},
		{0, 0},
		{0, 0}
		}
	},
	{1, 1, 2, 0,		/* 0xfb */
		{{0, 1},
		{3, 7},
		{0, 0},
		{0, 0}
		}
	},
	{0, 1, 1, 0,		/* 0xfc */
		{{2, 7},
		{0, 0},
		{0, 0},
		{0, 0}
		}
	},
	{1, 1, 2, 0,		/* 0xfd */
		{{0, 0},
		{2, 7},
		{0, 0},
		{0, 0}
		}
	},
	{0, 1, 1, 0,		/* 0xfe */
		{{1, 7},
		{0, 0},
		{0, 0},
		{0, 0}
		}
	},
	{1, 1, 1, 0,		/* 0xff */
		{{0, 7},
		{0, 0},
		{0, 0},
		{0, 0}
		}
	}
};


/*
 * Decide whether the interface address ifa may be used, given the caller's
 * scoping rules.  Returns 1 if the address passes every applicable test,
 * 0 otherwise.
 *
 * ipv4_addr_legal / ipv6_addr_legal gate the address family outright.
 * loopback_scope == 0 rejects addresses on loopback interfaces;
 * ipv4_local_scope == 0 rejects RFC 1918 private IPv4 addresses;
 * site_scope == 0 rejects IPv6 site-local addresses.
 * do_update (IPv6 path only) refreshes the cached interface-address flags
 * before SCTP_ADDR_IFA_UNUSEABLE is consulted.
 *
 * NOTE(review): local_scope is currently unused — the IPv6 link-local
 * check below is unconditional (the "(local_scope == 0) &&" guard is
 * commented out).  Confirm this is intentional before relying on it.
 */
int
sctp_is_address_in_scope(struct sctp_ifa *ifa,
    int ipv4_addr_legal,
    int ipv6_addr_legal,
    int loopback_scope,
    int ipv4_local_scope,
    int local_scope,
    int site_scope,
    int do_update)
{
	if ((loopback_scope == 0) &&
	    (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
		/* skip loopback if not in scope */
		return (0);
	}
	switch (ifa->address.sa.sa_family) {
	case AF_INET:
		if (ipv4_addr_legal) {
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)&ifa->address.sin;
			if (sin->sin_addr.s_addr == 0) {
				/* not in scope, unspecified */
				return (0);
			}
			if ((ipv4_local_scope == 0) &&
			    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
				/* private address not in scope */
				return (0);
			}
		} else {
			return (0);
		}
		break;
#ifdef INET6
	case AF_INET6:
		if (ipv6_addr_legal) {
			struct sockaddr_in6 *sin6;

			/*
			 * Must update the flags, bummer, which means any
			 * IFA locks must now be applied HERE <->
			 */
			if (do_update) {
				sctp_gather_internal_ifa_flags(ifa);
			}
			if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
				return (0);
			}
			/* ok to use deprecated addresses? */
			sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
				/* skip unspecified addresses */
				return (0);
			}
			if ( /* (local_scope == 0) && */
			    (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
				return (0);
			}
			if ((site_scope == 0) &&
			    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
				return (0);
			}
		} else {
			return (0);
		}
		break;
#endif
	default:
		return (0);
	}
	return (1);
}

/*
 * Append an address parameter (SCTP_IPV4_ADDRESS or SCTP_IPV6_ADDRESS)
 * describing ifa to the end of the mbuf chain starting at m, allocating a
 * new mbuf when the last one lacks trailing space.  Returns the mbuf that
 * holds the new parameter, or m unchanged if the address family is unknown
 * or the allocation fails (best-effort: failure is silent).
 */
static struct mbuf *
sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa)
{
	struct sctp_paramhdr *parmh;
	struct mbuf *mret;
	int len;

	if (ifa->address.sa.sa_family == AF_INET) {
		len = sizeof(struct sctp_ipv4addr_param);
	} else if (ifa->address.sa.sa_family == AF_INET6) {
		len = sizeof(struct sctp_ipv6addr_param);
	} else {
		/* unknown type */
		return (m);
	}
	if (M_TRAILINGSPACE(m) >= len) {
		/* easy side we just drop it on the end */
		parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
		mret = m;
	} else {
		/* Need more space: walk to the tail and chain a fresh mbuf */
		mret = m;
		while (SCTP_BUF_NEXT(mret) != NULL) {
			mret = SCTP_BUF_NEXT(mret);
		}
		SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
		if (SCTP_BUF_NEXT(mret) == NULL) {
			/* We are hosed, can't add more addresses */
			return (m);
		}
		mret = SCTP_BUF_NEXT(mret);
		parmh = mtod(mret, struct sctp_paramhdr *);
	}
	/* now add the parameter */
	switch (ifa->address.sa.sa_family) {
	case AF_INET:
		{
			struct sctp_ipv4addr_param *ipv4p;
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)&ifa->address.sin;
			ipv4p = (struct sctp_ipv4addr_param *)parmh;
			parmh->param_type = htons(SCTP_IPV4_ADDRESS);
			parmh->param_length = htons(len);
			ipv4p->addr =
			    sin->sin_addr.s_addr;
			SCTP_BUF_LEN(mret) += len;
			break;
		}
#ifdef INET6
	case AF_INET6:
		{
			struct sctp_ipv6addr_param *ipv6p;
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
			ipv6p = (struct sctp_ipv6addr_param *)parmh;
			parmh->param_type = htons(SCTP_IPV6_ADDRESS);
			parmh->param_length = htons(len);
			memcpy(ipv6p->addr, &sin6->sin6_addr,
			    sizeof(ipv6p->addr));
			/* clear embedded scope in the address */
			in6_clearscope((struct in6_addr *)ipv6p->addr);
			SCTP_BUF_LEN(mret) += len;
			break;
		}
#endif
	default:
		return (m);
	}
	return (mret);
}


/*
 * Append this endpoint's eligible local addresses (as address parameters)
 * to the INIT/INIT-ACK mbuf chain m_at, honoring the scope restrictions in
 * *scope.  cnt_inits_to seeds the address count with addresses already
 * placed in the chunk by the caller.  Takes and releases the global address
 * read lock itself.  Listing is capped at SCTP_ADDRESS_LIMIT addresses;
 * returns the (possibly extended) mbuf chain.
 */
struct mbuf *
sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_scoping *scope,
    struct mbuf *m_at, int cnt_inits_to)
{
	struct sctp_vrf *vrf = NULL;
	int cnt, limit_out = 0, total_count;
	uint32_t vrf_id;

	vrf_id = inp->def_vrf_id;
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
		SCTP_IPI_ADDR_RUNLOCK();
		return (m_at);
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/* Bound-all endpoint: list addresses from every interface. */
		struct sctp_ifa *sctp_ifap;
		struct sctp_ifn *sctp_ifnp;

		cnt = cnt_inits_to;
		if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
			/* Too many to even count — go straight to
			 * limited listing mode. */
			limit_out = 1;
			cnt = SCTP_ADDRESS_LIMIT;
			goto skip_count;
		}
		/* First pass: count in-scope addresses (up to the limit). */
		LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
			if ((scope->loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
				/*
				 * Skip loopback devices if loopback_scope
				 * not set
				 */
				continue;
			}
			LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
				if (sctp_is_address_in_scope(sctp_ifap,
				    scope->ipv4_addr_legal,
				    scope->ipv6_addr_legal,
				    scope->loopback_scope,
				    scope->ipv4_local_scope,
				    scope->local_scope,
				    scope->site_scope, 1) == 0) {
					continue;
				}
				cnt++;
				if (cnt > SCTP_ADDRESS_LIMIT) {
					break;
				}
			}
			if (cnt > SCTP_ADDRESS_LIMIT) {
				break;
			}
		}
skip_count:
		if (cnt > 1) {
			/* Second pass: actually emit the parameters. */
			total_count = 0;
			LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
				cnt = 0;
				if ((scope->loopback_scope == 0) &&
				    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
					/*
					 * Skip loopback devices if
					 * loopback_scope not set
					 */
					continue;
				}
				LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
					if (sctp_is_address_in_scope(sctp_ifap,
					    scope->ipv4_addr_legal,
					    scope->ipv6_addr_legal,
					    scope->loopback_scope,
					    scope->ipv4_local_scope,
					    scope->local_scope,
					    scope->site_scope, 0) == 0) {
						continue;
					}
					m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap);
					if (limit_out) {
						cnt++;
						total_count++;
						if (cnt >= 2) {
							/*
							 * two from each
							 * interface at most
							 */
							break;
						}
						if (total_count > SCTP_ADDRESS_LIMIT) {
							/* No more addresses */
							break;
						}
					}
				}
			}
		}
	} else {
		/* Subset-bound endpoint: list from the endpoint's own
		 * address list instead of the interfaces. */
		struct sctp_laddr *laddr;

		cnt = cnt_inits_to;
		/* First, how many ? */
		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
			if (laddr->ifa == NULL) {
				continue;
			}
			if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
				/*
				 * Address being deleted by the system, dont
				 * list.
				 */
				continue;
			if (laddr->action == SCTP_DEL_IP_ADDRESS) {
				/*
				 * Address being deleted on this ep don't
				 * list.
				 */
				continue;
			}
			if (sctp_is_address_in_scope(laddr->ifa,
			    scope->ipv4_addr_legal,
			    scope->ipv6_addr_legal,
			    scope->loopback_scope,
			    scope->ipv4_local_scope,
			    scope->local_scope,
			    scope->site_scope, 1) == 0) {
				continue;
			}
			cnt++;
		}
		if (cnt > SCTP_ADDRESS_LIMIT) {
			limit_out = 1;
		}
		/*
		 * To get through a NAT we only list addresses if we have
		 * more than one.
That way if you just bind a single address 2148 * we let the source of the init dictate our address. 2149 */ 2150 if (cnt > 1) { 2151 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 2152 cnt = 0; 2153 if (laddr->ifa == NULL) { 2154 continue; 2155 } 2156 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) 2157 continue; 2158 2159 if (sctp_is_address_in_scope(laddr->ifa, 2160 scope->ipv4_addr_legal, 2161 scope->ipv6_addr_legal, 2162 scope->loopback_scope, 2163 scope->ipv4_local_scope, 2164 scope->local_scope, 2165 scope->site_scope, 0) == 0) { 2166 continue; 2167 } 2168 m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa); 2169 cnt++; 2170 if (cnt >= SCTP_ADDRESS_LIMIT) { 2171 break; 2172 } 2173 } 2174 } 2175 } 2176 SCTP_IPI_ADDR_RUNLOCK(); 2177 return (m_at); 2178 } 2179 2180 static struct sctp_ifa * 2181 sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa, 2182 uint8_t dest_is_loop, 2183 uint8_t dest_is_priv, 2184 sa_family_t fam) 2185 { 2186 uint8_t dest_is_global = 0; 2187 2188 /* dest_is_priv is true if destination is a private address */ 2189 /* dest_is_loop is true if destination is a loopback addresses */ 2190 2191 /** 2192 * Here we determine if its a preferred address. A preferred address 2193 * means it is the same scope or higher scope then the destination. 
2194 * L = loopback, P = private, G = global 2195 * ----------------------------------------- 2196 * src | dest | result 2197 * ---------------------------------------- 2198 * L | L | yes 2199 * ----------------------------------------- 2200 * P | L | yes-v4 no-v6 2201 * ----------------------------------------- 2202 * G | L | yes-v4 no-v6 2203 * ----------------------------------------- 2204 * L | P | no 2205 * ----------------------------------------- 2206 * P | P | yes 2207 * ----------------------------------------- 2208 * G | P | no 2209 * ----------------------------------------- 2210 * L | G | no 2211 * ----------------------------------------- 2212 * P | G | no 2213 * ----------------------------------------- 2214 * G | G | yes 2215 * ----------------------------------------- 2216 */ 2217 2218 if (ifa->address.sa.sa_family != fam) { 2219 /* forget mis-matched family */ 2220 return (NULL); 2221 } 2222 if ((dest_is_priv == 0) && (dest_is_loop == 0)) { 2223 dest_is_global = 1; 2224 } 2225 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:"); 2226 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa); 2227 /* Ok the address may be ok */ 2228 if (fam == AF_INET6) { 2229 /* ok to use deprecated addresses? no lets not! 
*/ 2230 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) { 2231 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n"); 2232 return (NULL); 2233 } 2234 if (ifa->src_is_priv && !ifa->src_is_loop) { 2235 if (dest_is_loop) { 2236 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n"); 2237 return (NULL); 2238 } 2239 } 2240 if (ifa->src_is_glob) { 2241 if (dest_is_loop) { 2242 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n"); 2243 return (NULL); 2244 } 2245 } 2246 } 2247 /* 2248 * Now that we know what is what, implement or table this could in 2249 * theory be done slicker (it used to be), but this is 2250 * straightforward and easier to validate :-) 2251 */ 2252 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n", 2253 ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob); 2254 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n", 2255 dest_is_loop, dest_is_priv, dest_is_global); 2256 2257 if ((ifa->src_is_loop) && (dest_is_priv)) { 2258 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n"); 2259 return (NULL); 2260 } 2261 if ((ifa->src_is_glob) && (dest_is_priv)) { 2262 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n"); 2263 return (NULL); 2264 } 2265 if ((ifa->src_is_loop) && (dest_is_global)) { 2266 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n"); 2267 return (NULL); 2268 } 2269 if ((ifa->src_is_priv) && (dest_is_global)) { 2270 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n"); 2271 return (NULL); 2272 } 2273 SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n"); 2274 /* its a preferred address */ 2275 return (ifa); 2276 } 2277 2278 static struct sctp_ifa * 2279 sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa, 2280 uint8_t dest_is_loop, 2281 uint8_t dest_is_priv, 2282 sa_family_t fam) 2283 { 2284 uint8_t dest_is_global = 0; 2285 2286 /* 2287 * Here we determine if its a acceptable address. A acceptable 2288 * address means it is the same scope or higher scope but we can 2289 * allow for NAT which means its ok to have a global dest and a 2290 * private src. 
2291 * 2292 * L = loopback, P = private, G = global 2293 * ----------------------------------------- src | dest | result 2294 * ----------------------------------------- L | L | yes 2295 * ----------------------------------------- P | L | 2296 * yes-v4 no-v6 ----------------------------------------- G | 2297 * L | yes ----------------------------------------- L | 2298 * P | no ----------------------------------------- P | P 2299 * | yes ----------------------------------------- G | P 2300 * | yes - May not work ----------------------------------------- 2301 * L | G | no ----------------------------------------- P 2302 * | G | yes - May not work 2303 * ----------------------------------------- G | G | yes 2304 * ----------------------------------------- 2305 */ 2306 2307 if (ifa->address.sa.sa_family != fam) { 2308 /* forget non matching family */ 2309 return (NULL); 2310 } 2311 /* Ok the address may be ok */ 2312 if ((dest_is_loop == 0) && (dest_is_priv == 0)) { 2313 dest_is_global = 1; 2314 } 2315 if (fam == AF_INET6) { 2316 /* ok to use deprecated addresses? */ 2317 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) { 2318 return (NULL); 2319 } 2320 if (ifa->src_is_priv) { 2321 /* Special case, linklocal to loop */ 2322 if (dest_is_loop) 2323 return (NULL); 2324 } 2325 } 2326 /* 2327 * Now that we know what is what, implement our table. 
This could in 2328 * theory be done slicker (it used to be), but this is 2329 * straightforward and easier to validate :-) 2330 */ 2331 if ((ifa->src_is_loop == 1) && (dest_is_priv)) { 2332 return (NULL); 2333 } 2334 if ((ifa->src_is_loop == 1) && (dest_is_global)) { 2335 return (NULL); 2336 } 2337 /* its an acceptable address */ 2338 return (ifa); 2339 } 2340 2341 int 2342 sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa) 2343 { 2344 struct sctp_laddr *laddr; 2345 2346 if (stcb == NULL) { 2347 /* There are no restrictions, no TCB :-) */ 2348 return (0); 2349 } 2350 LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) { 2351 if (laddr->ifa == NULL) { 2352 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n", 2353 __FUNCTION__); 2354 continue; 2355 } 2356 if (laddr->ifa == ifa) { 2357 /* Yes it is on the list */ 2358 return (1); 2359 } 2360 } 2361 return (0); 2362 } 2363 2364 2365 int 2366 sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa) 2367 { 2368 struct sctp_laddr *laddr; 2369 2370 if (ifa == NULL) 2371 return (0); 2372 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 2373 if (laddr->ifa == NULL) { 2374 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n", 2375 __FUNCTION__); 2376 continue; 2377 } 2378 if ((laddr->ifa == ifa) && laddr->action == 0) 2379 /* same pointer */ 2380 return (1); 2381 } 2382 return (0); 2383 } 2384 2385 2386 2387 static struct sctp_ifa * 2388 sctp_choose_boundspecific_inp(struct sctp_inpcb *inp, 2389 sctp_route_t * ro, 2390 uint32_t vrf_id, 2391 int non_asoc_addr_ok, 2392 uint8_t dest_is_priv, 2393 uint8_t dest_is_loop, 2394 sa_family_t fam) 2395 { 2396 struct sctp_laddr *laddr, *starting_point; 2397 void *ifn; 2398 int resettotop = 0; 2399 struct sctp_ifn *sctp_ifn; 2400 struct sctp_ifa *sctp_ifa, *sifa; 2401 struct sctp_vrf *vrf; 2402 uint32_t ifn_index; 2403 2404 vrf = sctp_find_vrf(vrf_id); 2405 if (vrf == NULL) 2406 return (NULL); 2407 2408 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro); 2409 
ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro); 2410 sctp_ifn = sctp_find_ifn(ifn, ifn_index); 2411 /* 2412 * first question, is the ifn we will emit on in our list, if so, we 2413 * want such an address. Note that we first looked for a preferred 2414 * address. 2415 */ 2416 if (sctp_ifn) { 2417 /* is a preferred one on the interface we route out? */ 2418 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 2419 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && 2420 (non_asoc_addr_ok == 0)) 2421 continue; 2422 sifa = sctp_is_ifa_addr_preferred(sctp_ifa, 2423 dest_is_loop, 2424 dest_is_priv, fam); 2425 if (sifa == NULL) 2426 continue; 2427 if (sctp_is_addr_in_ep(inp, sifa)) { 2428 atomic_add_int(&sifa->refcount, 1); 2429 return (sifa); 2430 } 2431 } 2432 } 2433 /* 2434 * ok, now we now need to find one on the list of the addresses. We 2435 * can't get one on the emitting interface so let's find first a 2436 * preferred one. If not that an acceptable one otherwise... we 2437 * return NULL. 
2438 */ 2439 starting_point = inp->next_addr_touse; 2440 once_again: 2441 if (inp->next_addr_touse == NULL) { 2442 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list); 2443 resettotop = 1; 2444 } 2445 for (laddr = inp->next_addr_touse; laddr; 2446 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) { 2447 if (laddr->ifa == NULL) { 2448 /* address has been removed */ 2449 continue; 2450 } 2451 if (laddr->action == SCTP_DEL_IP_ADDRESS) { 2452 /* address is being deleted */ 2453 continue; 2454 } 2455 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, 2456 dest_is_priv, fam); 2457 if (sifa == NULL) 2458 continue; 2459 atomic_add_int(&sifa->refcount, 1); 2460 return (sifa); 2461 } 2462 if (resettotop == 0) { 2463 inp->next_addr_touse = NULL; 2464 goto once_again; 2465 } 2466 inp->next_addr_touse = starting_point; 2467 resettotop = 0; 2468 once_again_too: 2469 if (inp->next_addr_touse == NULL) { 2470 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list); 2471 resettotop = 1; 2472 } 2473 /* ok, what about an acceptable address in the inp */ 2474 for (laddr = inp->next_addr_touse; laddr; 2475 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) { 2476 if (laddr->ifa == NULL) { 2477 /* address has been removed */ 2478 continue; 2479 } 2480 if (laddr->action == SCTP_DEL_IP_ADDRESS) { 2481 /* address is being deleted */ 2482 continue; 2483 } 2484 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop, 2485 dest_is_priv, fam); 2486 if (sifa == NULL) 2487 continue; 2488 atomic_add_int(&sifa->refcount, 1); 2489 return (sifa); 2490 } 2491 if (resettotop == 0) { 2492 inp->next_addr_touse = NULL; 2493 goto once_again_too; 2494 } 2495 /* 2496 * no address bound can be a source for the destination we are in 2497 * trouble 2498 */ 2499 return (NULL); 2500 } 2501 2502 2503 2504 static struct sctp_ifa * 2505 sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp, 2506 struct sctp_tcb *stcb, 2507 struct sctp_nets *net, 2508 sctp_route_t * ro, 2509 uint32_t vrf_id, 2510 uint8_t dest_is_priv, 
2511 uint8_t dest_is_loop, 2512 int non_asoc_addr_ok, 2513 sa_family_t fam) 2514 { 2515 struct sctp_laddr *laddr, *starting_point; 2516 void *ifn; 2517 struct sctp_ifn *sctp_ifn; 2518 struct sctp_ifa *sctp_ifa, *sifa; 2519 uint8_t start_at_beginning = 0; 2520 struct sctp_vrf *vrf; 2521 uint32_t ifn_index; 2522 2523 /* 2524 * first question, is the ifn we will emit on in our list, if so, we 2525 * want that one. 2526 */ 2527 vrf = sctp_find_vrf(vrf_id); 2528 if (vrf == NULL) 2529 return (NULL); 2530 2531 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro); 2532 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro); 2533 sctp_ifn = sctp_find_ifn(ifn, ifn_index); 2534 2535 /* 2536 * first question, is the ifn we will emit on in our list? If so, 2537 * we want that one. First we look for a preferred. Second, we go 2538 * for an acceptable. 2539 */ 2540 if (sctp_ifn) { 2541 /* first try for a preferred address on the ep */ 2542 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 2543 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0)) 2544 continue; 2545 if (sctp_is_addr_in_ep(inp, sctp_ifa)) { 2546 sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam); 2547 if (sifa == NULL) 2548 continue; 2549 if (((non_asoc_addr_ok == 0) && 2550 (sctp_is_addr_restricted(stcb, sifa))) || 2551 (non_asoc_addr_ok && 2552 (sctp_is_addr_restricted(stcb, sifa)) && 2553 (!sctp_is_addr_pending(stcb, sifa)))) { 2554 /* on the no-no list */ 2555 continue; 2556 } 2557 atomic_add_int(&sifa->refcount, 1); 2558 return (sifa); 2559 } 2560 } 2561 /* next try for an acceptable address on the ep */ 2562 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 2563 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0)) 2564 continue; 2565 if (sctp_is_addr_in_ep(inp, sctp_ifa)) { 2566 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam); 2567 if (sifa == NULL) 2568 continue; 2569 if (((non_asoc_addr_ok == 0) && 2570 
(sctp_is_addr_restricted(stcb, sifa))) || 2571 (non_asoc_addr_ok && 2572 (sctp_is_addr_restricted(stcb, sifa)) && 2573 (!sctp_is_addr_pending(stcb, sifa)))) { 2574 /* on the no-no list */ 2575 continue; 2576 } 2577 atomic_add_int(&sifa->refcount, 1); 2578 return (sifa); 2579 } 2580 } 2581 2582 } 2583 /* 2584 * if we can't find one like that then we must look at all addresses 2585 * bound to pick one at first preferable then secondly acceptable. 2586 */ 2587 starting_point = stcb->asoc.last_used_address; 2588 sctp_from_the_top: 2589 if (stcb->asoc.last_used_address == NULL) { 2590 start_at_beginning = 1; 2591 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list); 2592 } 2593 /* search beginning with the last used address */ 2594 for (laddr = stcb->asoc.last_used_address; laddr; 2595 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) { 2596 if (laddr->ifa == NULL) { 2597 /* address has been removed */ 2598 continue; 2599 } 2600 if (laddr->action == SCTP_DEL_IP_ADDRESS) { 2601 /* address is being deleted */ 2602 continue; 2603 } 2604 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam); 2605 if (sifa == NULL) 2606 continue; 2607 if (((non_asoc_addr_ok == 0) && 2608 (sctp_is_addr_restricted(stcb, sifa))) || 2609 (non_asoc_addr_ok && 2610 (sctp_is_addr_restricted(stcb, sifa)) && 2611 (!sctp_is_addr_pending(stcb, sifa)))) { 2612 /* on the no-no list */ 2613 continue; 2614 } 2615 stcb->asoc.last_used_address = laddr; 2616 atomic_add_int(&sifa->refcount, 1); 2617 return (sifa); 2618 } 2619 if (start_at_beginning == 0) { 2620 stcb->asoc.last_used_address = NULL; 2621 goto sctp_from_the_top; 2622 } 2623 /* now try for any higher scope than the destination */ 2624 stcb->asoc.last_used_address = starting_point; 2625 start_at_beginning = 0; 2626 sctp_from_the_top2: 2627 if (stcb->asoc.last_used_address == NULL) { 2628 start_at_beginning = 1; 2629 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list); 2630 } 2631 /* search beginning with the 
last used address */ 2632 for (laddr = stcb->asoc.last_used_address; laddr; 2633 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) { 2634 if (laddr->ifa == NULL) { 2635 /* address has been removed */ 2636 continue; 2637 } 2638 if (laddr->action == SCTP_DEL_IP_ADDRESS) { 2639 /* address is being deleted */ 2640 continue; 2641 } 2642 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop, 2643 dest_is_priv, fam); 2644 if (sifa == NULL) 2645 continue; 2646 if (((non_asoc_addr_ok == 0) && 2647 (sctp_is_addr_restricted(stcb, sifa))) || 2648 (non_asoc_addr_ok && 2649 (sctp_is_addr_restricted(stcb, sifa)) && 2650 (!sctp_is_addr_pending(stcb, sifa)))) { 2651 /* on the no-no list */ 2652 continue; 2653 } 2654 stcb->asoc.last_used_address = laddr; 2655 atomic_add_int(&sifa->refcount, 1); 2656 return (sifa); 2657 } 2658 if (start_at_beginning == 0) { 2659 stcb->asoc.last_used_address = NULL; 2660 goto sctp_from_the_top2; 2661 } 2662 return (NULL); 2663 } 2664 2665 static struct sctp_ifa * 2666 sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn, 2667 struct sctp_tcb *stcb, 2668 int non_asoc_addr_ok, 2669 uint8_t dest_is_loop, 2670 uint8_t dest_is_priv, 2671 int addr_wanted, 2672 sa_family_t fam, 2673 sctp_route_t * ro 2674 ) 2675 { 2676 struct sctp_ifa *ifa, *sifa; 2677 int num_eligible_addr = 0; 2678 2679 #ifdef INET6 2680 struct sockaddr_in6 sin6, lsa6; 2681 2682 if (fam == AF_INET6) { 2683 memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6)); 2684 (void)sa6_recoverscope(&sin6); 2685 } 2686 #endif /* INET6 */ 2687 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) { 2688 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && 2689 (non_asoc_addr_ok == 0)) 2690 continue; 2691 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop, 2692 dest_is_priv, fam); 2693 if (sifa == NULL) 2694 continue; 2695 #ifdef INET6 2696 if (fam == AF_INET6 && 2697 dest_is_loop && 2698 sifa->src_is_loop && sifa->src_is_priv) { 2699 /* 2700 * don't allow fe80::1 to be a src on loop ::1, we 2701 * 
don't list it to the peer so we will get an 2702 * abort. 2703 */ 2704 continue; 2705 } 2706 if (fam == AF_INET6 && 2707 IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) && 2708 IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) { 2709 /* 2710 * link-local <-> link-local must belong to the same 2711 * scope. 2712 */ 2713 memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6)); 2714 (void)sa6_recoverscope(&lsa6); 2715 if (sin6.sin6_scope_id != lsa6.sin6_scope_id) { 2716 continue; 2717 } 2718 } 2719 #endif /* INET6 */ 2720 2721 /* 2722 * Check if the IPv6 address matches to next-hop. In the 2723 * mobile case, old IPv6 address may be not deleted from the 2724 * interface. Then, the interface has previous and new 2725 * addresses. We should use one corresponding to the 2726 * next-hop. (by micchie) 2727 */ 2728 #ifdef INET6 2729 if (stcb && fam == AF_INET6 && 2730 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) { 2731 if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro) 2732 == 0) { 2733 continue; 2734 } 2735 } 2736 #endif 2737 /* Avoid topologically incorrect IPv4 address */ 2738 if (stcb && fam == AF_INET && 2739 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) { 2740 if (sctp_v4src_match_nexthop(sifa, ro) == 0) { 2741 continue; 2742 } 2743 } 2744 if (stcb) { 2745 if (sctp_is_address_in_scope(ifa, 2746 stcb->asoc.ipv4_addr_legal, 2747 stcb->asoc.ipv6_addr_legal, 2748 stcb->asoc.loopback_scope, 2749 stcb->asoc.ipv4_local_scope, 2750 stcb->asoc.local_scope, 2751 stcb->asoc.site_scope, 0) == 0) { 2752 continue; 2753 } 2754 if (((non_asoc_addr_ok == 0) && 2755 (sctp_is_addr_restricted(stcb, sifa))) || 2756 (non_asoc_addr_ok && 2757 (sctp_is_addr_restricted(stcb, sifa)) && 2758 (!sctp_is_addr_pending(stcb, sifa)))) { 2759 /* 2760 * It is restricted for some reason.. 2761 * probably not yet added. 
2762 */ 2763 continue; 2764 } 2765 } 2766 if (num_eligible_addr >= addr_wanted) { 2767 return (sifa); 2768 } 2769 num_eligible_addr++; 2770 } 2771 return (NULL); 2772 } 2773 2774 2775 static int 2776 sctp_count_num_preferred_boundall(struct sctp_ifn *ifn, 2777 struct sctp_tcb *stcb, 2778 int non_asoc_addr_ok, 2779 uint8_t dest_is_loop, 2780 uint8_t dest_is_priv, 2781 sa_family_t fam) 2782 { 2783 struct sctp_ifa *ifa, *sifa; 2784 int num_eligible_addr = 0; 2785 2786 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) { 2787 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && 2788 (non_asoc_addr_ok == 0)) { 2789 continue; 2790 } 2791 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop, 2792 dest_is_priv, fam); 2793 if (sifa == NULL) { 2794 continue; 2795 } 2796 if (stcb) { 2797 if (sctp_is_address_in_scope(ifa, 2798 stcb->asoc.ipv4_addr_legal, 2799 stcb->asoc.ipv6_addr_legal, 2800 stcb->asoc.loopback_scope, 2801 stcb->asoc.ipv4_local_scope, 2802 stcb->asoc.local_scope, 2803 stcb->asoc.site_scope, 0) == 0) { 2804 continue; 2805 } 2806 if (((non_asoc_addr_ok == 0) && 2807 (sctp_is_addr_restricted(stcb, sifa))) || 2808 (non_asoc_addr_ok && 2809 (sctp_is_addr_restricted(stcb, sifa)) && 2810 (!sctp_is_addr_pending(stcb, sifa)))) { 2811 /* 2812 * It is restricted for some reason.. 2813 * probably not yet added. 
2814 */ 2815 continue; 2816 } 2817 } 2818 num_eligible_addr++; 2819 } 2820 return (num_eligible_addr); 2821 } 2822 2823 static struct sctp_ifa * 2824 sctp_choose_boundall(struct sctp_inpcb *inp, 2825 struct sctp_tcb *stcb, 2826 struct sctp_nets *net, 2827 sctp_route_t * ro, 2828 uint32_t vrf_id, 2829 uint8_t dest_is_priv, 2830 uint8_t dest_is_loop, 2831 int non_asoc_addr_ok, 2832 sa_family_t fam) 2833 { 2834 int cur_addr_num = 0, num_preferred = 0; 2835 void *ifn; 2836 struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn; 2837 struct sctp_ifa *sctp_ifa, *sifa; 2838 uint32_t ifn_index; 2839 struct sctp_vrf *vrf; 2840 2841 /*- 2842 * For boundall we can use any address in the association. 2843 * If non_asoc_addr_ok is set we can use any address (at least in 2844 * theory). So we look for preferred addresses first. If we find one, 2845 * we use it. Otherwise we next try to get an address on the 2846 * interface, which we should be able to do (unless non_asoc_addr_ok 2847 * is false and we are routed out that way). In these cases where we 2848 * can't use the address of the interface we go through all the 2849 * ifn's looking for an address we can use and fill that in. Punting 2850 * means we send back address 0, which will probably cause problems 2851 * actually since then IP will fill in the address of the route ifn, 2852 * which means we probably already rejected it.. i.e. here comes an 2853 * abort :-<. 2854 */ 2855 vrf = sctp_find_vrf(vrf_id); 2856 if (vrf == NULL) 2857 return (NULL); 2858 2859 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro); 2860 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro); 2861 emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index); 2862 if (sctp_ifn == NULL) { 2863 /* ?? We don't have this guy ?? 
*/ 2864 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No ifn emit interface?\n"); 2865 goto bound_all_plan_b; 2866 } 2867 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn_index:%d name:%s is emit interface\n", 2868 ifn_index, sctp_ifn->ifn_name); 2869 2870 if (net) { 2871 cur_addr_num = net->indx_of_eligible_next_to_use; 2872 } 2873 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, 2874 stcb, 2875 non_asoc_addr_ok, 2876 dest_is_loop, 2877 dest_is_priv, fam); 2878 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n", 2879 num_preferred, sctp_ifn->ifn_name); 2880 if (num_preferred == 0) { 2881 /* 2882 * no eligible addresses, we must use some other interface 2883 * address if we can find one. 2884 */ 2885 goto bound_all_plan_b; 2886 } 2887 /* 2888 * Ok we have num_eligible_addr set with how many we can use, this 2889 * may vary from call to call due to addresses being deprecated 2890 * etc.. 2891 */ 2892 if (cur_addr_num >= num_preferred) { 2893 cur_addr_num = 0; 2894 } 2895 /* 2896 * select the nth address from the list (where cur_addr_num is the 2897 * nth) and 0 is the first one, 1 is the second one etc... 2898 */ 2899 SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num); 2900 2901 sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop, 2902 dest_is_priv, cur_addr_num, fam, ro); 2903 2904 /* if sctp_ifa is NULL something changed??, fall to plan b. */ 2905 if (sctp_ifa) { 2906 atomic_add_int(&sctp_ifa->refcount, 1); 2907 if (net) { 2908 /* save off where the next one we will want */ 2909 net->indx_of_eligible_next_to_use = cur_addr_num + 1; 2910 } 2911 return (sctp_ifa); 2912 } 2913 /* 2914 * plan_b: Look at all interfaces and find a preferred address. If 2915 * no preferred fall through to plan_c. 
2916 */ 2917 bound_all_plan_b: 2918 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n"); 2919 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 2920 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n", 2921 sctp_ifn->ifn_name); 2922 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 2923 /* wrong base scope */ 2924 SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n"); 2925 continue; 2926 } 2927 if ((sctp_ifn == looked_at) && looked_at) { 2928 /* already looked at this guy */ 2929 SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n"); 2930 continue; 2931 } 2932 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, stcb, non_asoc_addr_ok, 2933 dest_is_loop, dest_is_priv, fam); 2934 SCTPDBG(SCTP_DEBUG_OUTPUT2, 2935 "Found ifn:%p %d preferred source addresses\n", 2936 ifn, num_preferred); 2937 if (num_preferred == 0) { 2938 /* None on this interface. */ 2939 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefered -- skipping to next\n"); 2940 continue; 2941 } 2942 SCTPDBG(SCTP_DEBUG_OUTPUT2, 2943 "num preferred:%d on interface:%p cur_addr_num:%d\n", 2944 num_preferred, sctp_ifn, cur_addr_num); 2945 2946 /* 2947 * Ok we have num_eligible_addr set with how many we can 2948 * use, this may vary from call to call due to addresses 2949 * being deprecated etc.. 
2950 */ 2951 if (cur_addr_num >= num_preferred) { 2952 cur_addr_num = 0; 2953 } 2954 sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop, 2955 dest_is_priv, cur_addr_num, fam, ro); 2956 if (sifa == NULL) 2957 continue; 2958 if (net) { 2959 net->indx_of_eligible_next_to_use = cur_addr_num + 1; 2960 SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n", 2961 cur_addr_num); 2962 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:"); 2963 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa); 2964 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:"); 2965 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa); 2966 } 2967 atomic_add_int(&sifa->refcount, 1); 2968 return (sifa); 2969 2970 } 2971 2972 /* plan_c: do we have an acceptable address on the emit interface */ 2973 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan C: find acceptable on interface\n"); 2974 if (emit_ifn == NULL) { 2975 goto plan_d; 2976 } 2977 LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) { 2978 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && 2979 (non_asoc_addr_ok == 0)) 2980 continue; 2981 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, 2982 dest_is_priv, fam); 2983 if (sifa == NULL) 2984 continue; 2985 if (stcb) { 2986 if (sctp_is_address_in_scope(sifa, 2987 stcb->asoc.ipv4_addr_legal, 2988 stcb->asoc.ipv6_addr_legal, 2989 stcb->asoc.loopback_scope, 2990 stcb->asoc.ipv4_local_scope, 2991 stcb->asoc.local_scope, 2992 stcb->asoc.site_scope, 0) == 0) { 2993 continue; 2994 } 2995 if (((non_asoc_addr_ok == 0) && 2996 (sctp_is_addr_restricted(stcb, sifa))) || 2997 (non_asoc_addr_ok && 2998 (sctp_is_addr_restricted(stcb, sifa)) && 2999 (!sctp_is_addr_pending(stcb, sifa)))) { 3000 /* 3001 * It is restricted for some reason.. 3002 * probably not yet added. 3003 */ 3004 continue; 3005 } 3006 } 3007 atomic_add_int(&sifa->refcount, 1); 3008 return (sifa); 3009 } 3010 plan_d: 3011 /* 3012 * plan_d: We are in trouble. No preferred address on the emit 3013 * interface. 
And not even a preferred address on all interfaces. Go 3014 * out and see if we can find an acceptable address somewhere 3015 * amongst all interfaces. 3016 */ 3017 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D\n"); 3018 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 3019 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 3020 /* wrong base scope */ 3021 continue; 3022 } 3023 if ((sctp_ifn == looked_at) && looked_at) 3024 /* already looked at this guy */ 3025 continue; 3026 3027 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 3028 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && 3029 (non_asoc_addr_ok == 0)) 3030 continue; 3031 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, 3032 dest_is_loop, 3033 dest_is_priv, fam); 3034 if (sifa == NULL) 3035 continue; 3036 if (stcb) { 3037 if (sctp_is_address_in_scope(sifa, 3038 stcb->asoc.ipv4_addr_legal, 3039 stcb->asoc.ipv6_addr_legal, 3040 stcb->asoc.loopback_scope, 3041 stcb->asoc.ipv4_local_scope, 3042 stcb->asoc.local_scope, 3043 stcb->asoc.site_scope, 0) == 0) { 3044 continue; 3045 } 3046 if (((non_asoc_addr_ok == 0) && 3047 (sctp_is_addr_restricted(stcb, sifa))) || 3048 (non_asoc_addr_ok && 3049 (sctp_is_addr_restricted(stcb, sifa)) && 3050 (!sctp_is_addr_pending(stcb, sifa)))) { 3051 /* 3052 * It is restricted for some 3053 * reason.. probably not yet added. 3054 */ 3055 continue; 3056 } 3057 } 3058 atomic_add_int(&sifa->refcount, 1); 3059 return (sifa); 3060 } 3061 } 3062 /* 3063 * Ok we can find NO address to source from that is not on our 3064 * restricted list and non_asoc_address is NOT ok, or it is on our 3065 * restricted list. 
We can't source to it :-( 3066 */ 3067 return (NULL); 3068 } 3069 3070 3071 3072 /* tcb may be NULL */ 3073 struct sctp_ifa * 3074 sctp_source_address_selection(struct sctp_inpcb *inp, 3075 struct sctp_tcb *stcb, 3076 sctp_route_t * ro, 3077 struct sctp_nets *net, 3078 int non_asoc_addr_ok, uint32_t vrf_id) 3079 { 3080 struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst; 3081 3082 #ifdef INET6 3083 struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst; 3084 3085 #endif 3086 struct sctp_ifa *answer; 3087 uint8_t dest_is_priv, dest_is_loop; 3088 sa_family_t fam; 3089 3090 /*- 3091 * Rules: - Find the route if needed, cache if I can. - Look at 3092 * interface address in route, Is it in the bound list. If so we 3093 * have the best source. - If not we must rotate amongst the 3094 * addresses. 3095 * 3096 * Cavets and issues 3097 * 3098 * Do we need to pay attention to scope. We can have a private address 3099 * or a global address we are sourcing or sending to. So if we draw 3100 * it out 3101 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz 3102 * For V4 3103 * ------------------------------------------ 3104 * source * dest * result 3105 * ----------------------------------------- 3106 * <a> Private * Global * NAT 3107 * ----------------------------------------- 3108 * <b> Private * Private * No problem 3109 * ----------------------------------------- 3110 * <c> Global * Private * Huh, How will this work? 
3111 * ----------------------------------------- 3112 * <d> Global * Global * No Problem 3113 *------------------------------------------ 3114 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz 3115 * For V6 3116 *------------------------------------------ 3117 * source * dest * result 3118 * ----------------------------------------- 3119 * <a> Linklocal * Global * 3120 * ----------------------------------------- 3121 * <b> Linklocal * Linklocal * No problem 3122 * ----------------------------------------- 3123 * <c> Global * Linklocal * Huh, How will this work? 3124 * ----------------------------------------- 3125 * <d> Global * Global * No Problem 3126 *------------------------------------------ 3127 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz 3128 * 3129 * And then we add to that what happens if there are multiple addresses 3130 * assigned to an interface. Remember the ifa on a ifn is a linked 3131 * list of addresses. So one interface can have more than one IP 3132 * address. What happens if we have both a private and a global 3133 * address? Do we then use context of destination to sort out which 3134 * one is best? And what about NAT's sending P->G may get you a NAT 3135 * translation, or should you select the G thats on the interface in 3136 * preference. 3137 * 3138 * Decisions: 3139 * 3140 * - count the number of addresses on the interface. 3141 * - if it is one, no problem except case <c>. 3142 * For <a> we will assume a NAT out there. 3143 * - if there are more than one, then we need to worry about scope P 3144 * or G. We should prefer G -> G and P -> P if possible. 3145 * Then as a secondary fall back to mixed types G->P being a last 3146 * ditch one. 3147 * - The above all works for bound all, but bound specific we need to 3148 * use the same concept but instead only consider the bound 3149 * addresses. If the bound set is NOT assigned to the interface then 3150 * we must use rotation amongst the bound addresses.. 
3151 */ 3152 if (ro->ro_rt == NULL) { 3153 /* 3154 * Need a route to cache. 3155 */ 3156 SCTP_RTALLOC(ro, vrf_id); 3157 } 3158 if (ro->ro_rt == NULL) { 3159 return (NULL); 3160 } 3161 fam = to->sin_family; 3162 dest_is_priv = dest_is_loop = 0; 3163 /* Setup our scopes for the destination */ 3164 switch (fam) { 3165 case AF_INET: 3166 /* Scope based on outbound address */ 3167 if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) { 3168 dest_is_loop = 1; 3169 if (net != NULL) { 3170 /* mark it as local */ 3171 net->addr_is_local = 1; 3172 } 3173 } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) { 3174 dest_is_priv = 1; 3175 } 3176 break; 3177 #ifdef INET6 3178 case AF_INET6: 3179 /* Scope based on outbound address */ 3180 if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) || 3181 SCTP_ROUTE_IS_REAL_LOOP(ro)) { 3182 /* 3183 * If the address is a loopback address, which 3184 * consists of "::1" OR "fe80::1%lo0", we are 3185 * loopback scope. But we don't use dest_is_priv 3186 * (link local addresses). 3187 */ 3188 dest_is_loop = 1; 3189 if (net != NULL) { 3190 /* mark it as local */ 3191 net->addr_is_local = 1; 3192 } 3193 } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) { 3194 dest_is_priv = 1; 3195 } 3196 break; 3197 #endif 3198 } 3199 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:"); 3200 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)to); 3201 SCTP_IPI_ADDR_RLOCK(); 3202 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 3203 /* 3204 * Bound all case 3205 */ 3206 answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id, 3207 dest_is_priv, dest_is_loop, 3208 non_asoc_addr_ok, fam); 3209 SCTP_IPI_ADDR_RUNLOCK(); 3210 return (answer); 3211 } 3212 /* 3213 * Subset bound case 3214 */ 3215 if (stcb) { 3216 answer = sctp_choose_boundspecific_stcb(inp, stcb, net, ro, 3217 vrf_id, dest_is_priv, 3218 dest_is_loop, 3219 non_asoc_addr_ok, fam); 3220 } else { 3221 answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id, 3222 non_asoc_addr_ok, 3223 dest_is_priv, 3224 dest_is_loop, fam); 
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (answer);
}

/*
 * Scan the control mbuf chain for a cmsg with level IPPROTO_SCTP and type
 * c_type.  On a match, copy cpsize bytes of the cmsg payload into data and
 * return 1.  Return 0 when no matching cmsg is found or when the chain is
 * malformed/truncated.
 */
static int
sctp_find_cmsg(int c_type, void *data, struct mbuf *control, int cpsize)
{
	struct cmsghdr cmh;
	int tlen, at;

	tlen = SCTP_BUF_LEN(control);
	at = 0;
	/*
	 * Independent of how many mbufs, find the c_type inside the control
	 * structure and copy out the data.
	 */
	while (at < tlen) {
		if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
			/* not enough room for one more we are done. */
			return (0);
		}
		/* m_copydata handles a header that straddles mbufs */
		m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
		if (((int)cmh.cmsg_len + at) > tlen) {
			/*
			 * this is real messed up since there is not enough
			 * data here to cover the cmsg header. We are done.
			 */
			return (0);
		}
		if ((cmh.cmsg_level == IPPROTO_SCTP) &&
		    (c_type == cmh.cmsg_type)) {
			/* found the one we want, copy it out */
			at += CMSG_ALIGN(sizeof(struct cmsghdr));
			if ((int)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < cpsize) {
				/*
				 * space of cmsg_len after header not big
				 * enough
				 */
				return (0);
			}
			m_copydata(control, at, cpsize, data);
			return (1);
		} else {
			at += CMSG_ALIGN(cmh.cmsg_len);
			/* a zero-length cmsg would never advance; bail out */
			if (cmh.cmsg_len == 0) {
				break;
			}
		}
	}
	/* not found */
	return (0);
}

/*
 * Build the STATE-COOKIE parameter for an INIT-ACK: a parameter header and
 * the state cookie (copied from stc_in), followed by a copy of the peer's
 * INIT, a copy of our INIT-ACK, and a zeroed signature area.  On success
 * returns the head of the new mbuf chain and points *signature at the
 * (still all-zero) signature bytes inside the chain so the caller can fill
 * in the HMAC after the cookie is complete; returns NULL if any allocation
 * or copy fails (freeing whatever was built so far).
 */
static struct mbuf *
sctp_add_cookie(struct sctp_inpcb *inp, struct mbuf *init, int init_offset,
    struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t ** signature)
{
	struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
	struct sctp_state_cookie *stc;
	struct sctp_paramhdr *ph;
	uint8_t *foo;
	int sig_offset;
	uint16_t cookie_sz;

	mret = NULL;
	mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
	    sizeof(struct sctp_paramhdr)), 0,
	    M_DONTWAIT, 1, MT_DATA);
	if (mret == NULL) {
		return (NULL);
	}
	copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_DONTWAIT);
	if (copy_init == NULL) {
		sctp_m_freem(mret);
		return (NULL);
	}
#ifdef SCTP_MBUF_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		struct mbuf *mat;

		mat = copy_init;
		while (mat) {
			if (SCTP_BUF_IS_EXTENDED(mat)) {
				sctp_log_mb(mat, SCTP_MBUF_ICOPY);
			}
			mat = SCTP_BUF_NEXT(mat);
		}
	}
#endif
	copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
	    M_DONTWAIT);
	if (copy_initack == NULL) {
		sctp_m_freem(mret);
		sctp_m_freem(copy_init);
		return (NULL);
	}
#ifdef SCTP_MBUF_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		struct mbuf *mat;

		mat = copy_initack;
		while (mat) {
			if (SCTP_BUF_IS_EXTENDED(mat)) {
				sctp_log_mb(mat, SCTP_MBUF_ICOPY);
			}
			mat = SCTP_BUF_NEXT(mat);
		}
	}
#endif
	/* easy side we just drop it on the end */
	ph = mtod(mret, struct sctp_paramhdr *);
	SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
	    sizeof(struct sctp_paramhdr);
	stc = (struct sctp_state_cookie *)((caddr_t)ph +
	    sizeof(struct sctp_paramhdr));
	ph->param_type = htons(SCTP_STATE_COOKIE);
	ph->param_length = 0;	/* fill in at the end */
	/* Fill in the stc cookie data */
	memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));

	/* tack the INIT and then the INIT-ACK onto the chain */
	cookie_sz = 0;
	m_at = mret;
	/* walk to the tail of mret, totalling length, and append copy_init */
	for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			SCTP_BUF_NEXT(m_at) = copy_init;
			break;
		}
	}

	/* same walk over copy_init, appending copy_initack at its tail */
	for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			SCTP_BUF_NEXT(m_at) = copy_initack;
			break;
		}
	}

	/*
	 * Walk the INIT-ACK copy to finish totalling the cookie length;
	 * m_at is left pointing at the tail of the whole chain.
	 */
	for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			break;
		}
	}
	sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_DONTWAIT, 1, MT_DATA);
	if (sig == NULL) {
		/* no space, so free the entire chain */
		sctp_m_freem(mret);
		return (NULL);
	}
	SCTP_BUF_LEN(sig) = 0;
	SCTP_BUF_NEXT(m_at) = sig;
	sig_offset = 0;
	/* zero the signature area; caller computes the HMAC via *signature */
	foo = (uint8_t *) (mtod(sig, caddr_t)+sig_offset);
	memset(foo, 0, SCTP_SIGNATURE_SIZE);
	*signature = foo;
	SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
	cookie_sz += SCTP_SIGNATURE_SIZE;
	ph->param_length = htons(cookie_sz);
	return (mret);
}


/*
 * Return the ECN codepoint to OR into the TOS/traffic-class byte:
 * ECT(0) when the association has ECN enabled, 0 otherwise.
 */
static uint8_t
sctp_get_ect(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	if ((stcb != NULL) && (stcb->asoc.ecn_allowed == 1)) {
		return (SCTP_ECT0_BIT);
	} else {
		return (0);
	}
}

static int
sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,	/* may be NULL */
    struct sctp_nets *net,
    struct sockaddr *to,
    struct mbuf *m,
    uint32_t auth_offset,
    struct sctp_auth_chunk *auth,
    uint16_t auth_keyid,
    int nofragment_flag,
    int ecn_ok,
    struct sctp_tmit_chunk *chk,
    int out_of_asoc_ok,
    uint16_t src_port,
    uint16_t dest_port,
    uint32_t v_tag,
    uint16_t port,		/* non-zero selects UDP encapsulation */
    int so_locked,
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
    union sctp_sockstore *over_addr,
    struct mbuf *init
)
/* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
{
	/*
	 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet
	 * header WITH an SCTPHDR but no IP header, endpoint inp and sa
	 * structure: - fill in the HMAC digest of any AUTH chunk in the
	 * packet. - calculate and fill in the SCTP checksum.
	 * - prepend an
	 * IP address header. - if boundall use INADDR_ANY. - if
	 * boundspecific do source address selection. - set fragmentation
	 * option for ipV4. - On return from IP output, check/adjust mtu
	 * size of output interface and smallest_mtu size as well.
	 */
	/* Will need ifdefs around this */
	struct mbuf *o_pak;
	struct mbuf *newm;
	struct sctphdr *sctphdr;
	int packet_length;
	int ret;
	uint32_t vrf_id;
	sctp_route_t *ro = NULL;
	struct udphdr *udp = NULL;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so = NULL;

#endif

	if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
		SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
		sctp_m_freem(m);
		return (EFAULT);
	}
	if (stcb) {
		vrf_id = stcb->asoc.vrf_id;
	} else {
		vrf_id = inp->def_vrf_id;
	}

	/* fill in the HMAC digest for any AUTH chunk in the packet */
	if ((auth != NULL) && (stcb != NULL)) {
		sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
	}
	if (to->sa_family == AF_INET) {
		struct ip *ip = NULL;
		sctp_route_t iproute;
		uint8_t tos_value;
		int len;

		/* prepend room for the IP (and optional UDP) header */
		len = sizeof(struct ip) + sizeof(struct sctphdr);
		if (port) {
			len += sizeof(struct udphdr);
		}
		newm = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
		if (newm == NULL) {
			sctp_m_freem(m);
			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
			return (ENOMEM);
		}
		SCTP_ALIGN_TO_END(newm, len);
		SCTP_BUF_LEN(newm) = len;
		SCTP_BUF_NEXT(newm) = m;
		m = newm;
		if (net != NULL) {
#ifdef INVARIANTS
			if (net->flowidset == 0) {
				panic("Flow ID not set");
			}
#endif
			m->m_pkthdr.flowid = net->flowid;
			m->m_flags |= M_FLOWID;
		} else {
			/* no net: inherit the flowid from the INIT, if any */
			if ((init != NULL) && (init->m_flags & M_FLOWID)) {
				m->m_pkthdr.flowid = init->m_pkthdr.flowid;
				m->m_flags |= M_FLOWID;
			}
		}
		packet_length = sctp_calculate_len(m);
		ip = mtod(m, struct ip *);
		ip->ip_v = IPVERSION;
		ip->ip_hl = (sizeof(struct ip) >> 2);
		if (net) {
			tos_value = net->tos_flowlabel & 0x000000ff;
		} else {
			tos_value = inp->ip_inp.inp.inp_ip_tos;
		}
		/* never set DF on UDP-encapsulated packets */
		if ((nofragment_flag) && (port == 0)) {
			ip->ip_off = IP_DF;
		} else
			ip->ip_off = 0;

		/* FreeBSD has a function for ip_id's */
		ip->ip_id = ip_newid();

		ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
		ip->ip_len = packet_length;
		/* low two TOS bits are reserved for the ECN codepoint */
		ip->ip_tos = tos_value & 0xfc;
		if (ecn_ok) {
			ip->ip_tos |= sctp_get_ect(stcb, chk);
		}
		if (port) {
			ip->ip_p = IPPROTO_UDP;
		} else {
			ip->ip_p = IPPROTO_SCTP;
		}
		ip->ip_sum = 0;
		if (net == NULL) {
			/* no cached route available; use a stack-local one */
			ro = &iproute;
			memset(&iproute, 0, sizeof(iproute));
			memcpy(&ro->ro_dst, to, to->sa_len);
		} else {
			ro = (sctp_route_t *) & net->ro;
		}
		/* Now the address selection part */
		ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;

		/* call the routine to select the src address */
		if (net && out_of_asoc_ok == 0) {
			/* drop a cached source address that went stale */
			if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
				sctp_free_ifa(net->ro._s_addr);
				net->ro._s_addr = NULL;
				net->src_addr_selected = 0;
				if (ro->ro_rt) {
					RTFREE(ro->ro_rt);
					ro->ro_rt = NULL;
				}
			}
			if (net->src_addr_selected == 0) {
				/* Cache the source address */
				net->ro._s_addr = sctp_source_address_selection(inp, stcb,
				    ro, net, 0,
				    vrf_id);
				net->src_addr_selected = 1;
			}
			if (net->ro._s_addr == NULL) {
				/* No route to host */
				net->src_addr_selected = 0;
				goto no_route;
			}
			ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
		} else {
			if (over_addr == NULL) {
				struct sctp_ifa *_lsrc;

				_lsrc = sctp_source_address_selection(inp, stcb, ro,
				    net,
				    out_of_asoc_ok,
				    vrf_id);
				if (_lsrc == NULL) {
					goto no_route;
				}
				ip->ip_src = _lsrc->address.sin.sin_addr;
				sctp_free_ifa(_lsrc);
			} else {
				/* caller dictated the source address */
				ip->ip_src = over_addr->sin.sin_addr;
				SCTP_RTALLOC(ro, vrf_id);
			}
		}
		if (port) {
			udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
			udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
			udp->uh_dport = port;
			udp->uh_ulen = htons(packet_length - sizeof(struct ip));
			udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
			sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
		} else {
			sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
		}

		sctphdr->src_port = src_port;
		sctphdr->dest_port = dest_port;
		sctphdr->v_tag = v_tag;
		sctphdr->checksum = 0;

		/*
		 * If source address selection fails and we find no route
		 * then the ip_output should fail as well with a
		 * NO_ROUTE_TO_HOST type error. We probably should catch
		 * that somewhere and abort the association right away
		 * (assuming this is an INIT being sent).
		 */
		if ((ro->ro_rt == NULL)) {
			/*
			 * src addr selection failed to find a route (or
			 * valid source addr), so we can't get there from
			 * here (yet)!
			 */
	no_route:
			SCTPDBG(SCTP_DEBUG_OUTPUT1,
			    "%s: dropped packet - no valid source addr\n",
			    __FUNCTION__);
			if (net) {
				SCTPDBG(SCTP_DEBUG_OUTPUT1,
				    "Destination was ");
				SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1,
				    &net->ro._l_addr.sa);
				if (net->dest_state & SCTP_ADDR_CONFIRMED) {
					if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
						SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", net);
						sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
						    stcb,
						    SCTP_FAILED_THRESHOLD,
						    (void *)net,
						    so_locked);
						net->dest_state &= ~SCTP_ADDR_REACHABLE;
						net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
						/*
						 * JRS 5/14/07 - If a
						 * destination is
						 * unreachable, the PF bit
						 * is turned off. This
						 * allows an unambiguous use
						 * of the PF bit for
						 * destinations that are
						 * reachable but potentially
						 * failed. If the
						 * destination is set to the
						 * unreachable state, also
						 * set the destination to
						 * the PF state.
						 */
						/*
						 * Add debug message here if
						 * destination is not in PF
						 * state.
						 */
						/*
						 * Stop any running T3
						 * timers here?
						 */
						if ((stcb->asoc.sctp_cmt_on_off > 0) &&
						    (stcb->asoc.sctp_cmt_pf > 0)) {
							net->dest_state &= ~SCTP_ADDR_PF;
							SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination %p moved from PF to unreachable.\n",
							    net);
						}
					}
				}
				if (stcb) {
					if (net == stcb->asoc.primary_destination) {
						/* need a new primary */
						struct sctp_nets *alt;

						alt = sctp_find_alternate_net(stcb, net, 0);
						if (alt != net) {
							if (sctp_set_primary_addr(stcb,
							    (struct sockaddr *)NULL,
							    alt) == 0) {
								net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
								if (net->ro._s_addr) {
									sctp_free_ifa(net->ro._s_addr);
									net->ro._s_addr = NULL;
								}
								net->src_addr_selected = 0;
							}
						}
					}
				}
			}
			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
			sctp_m_freem(m);
			return (EHOSTUNREACH);
		}
		if (ro != &iproute) {
			memcpy(&iproute, ro, sizeof(*ro));
		}
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
		    (uint32_t) (ntohl(ip->ip_src.s_addr)));
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
		    (uint32_t) (ntohl(ip->ip_dst.s_addr)));
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
		    ro->ro_rt);

		if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
			/* failed to prepend data, give up */
			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
			sctp_m_freem(m);
			return (ENOMEM);
		}
#ifdef SCTP_PACKET_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
			sctp_packet_log(m, packet_length);
#endif
		SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
		if (port) {
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
			/* software CRC32c unless looped back locally */
			if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
			    (stcb) &&
			    (stcb->asoc.loopback_scope))) {
				sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) +
				    sizeof(struct udphdr));
				SCTP_STAT_INCR(sctps_sendswcrc);
			} else {
				SCTP_STAT_INCR(sctps_sendnocrc);
			}
#endif
			SCTP_ENABLE_UDP_CSUM(o_pak);
		} else {
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
			/* let the NIC/stack compute the SCTP CRC */
			m->m_pkthdr.csum_flags = CSUM_SCTP;
			m->m_pkthdr.csum_data = 0;
			SCTP_STAT_INCR(sctps_sendhwcrc);
#endif
		}
		/* send it out. table id is taken from stcb */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
			so = SCTP_INP_SO(inp);
			SCTP_SOCKET_UNLOCK(so, 0);
		}
#endif
		SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
			/* hold a ref across the lock dance so stcb survives */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 0);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
		}
#endif
		SCTP_STAT_INCR(sctps_sendpackets);
		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
		if (ret)
			SCTP_STAT_INCR(sctps_senderrors);

		SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
		if (net == NULL) {
			/* free tempy routes */
			if (ro->ro_rt) {
				RTFREE(ro->ro_rt);
				ro->ro_rt = NULL;
			}
		} else {
			/* PMTU check versus smallest asoc MTU goes here */
			if ((ro->ro_rt != NULL) &&
			    (net->ro._s_addr)) {
				uint32_t mtu;

				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
				if (net->port) {
					/* account for UDP encapsulation overhead */
					mtu -= sizeof(struct udphdr);
				}
				if (mtu && (stcb->asoc.smallest_mtu > mtu)) {
					sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
					net->mtu = mtu;
				}
			} else if (ro->ro_rt == NULL) {
				/* route was freed */
				if (net->ro._s_addr &&
				    net->src_addr_selected) {
					sctp_free_ifa(net->ro._s_addr);
					net->ro._s_addr = NULL;
				}
				net->src_addr_selected = 0;
			}
		}
		return (ret);
	}
#ifdef INET6
	else if (to->sa_family == AF_INET6) {
		uint32_t flowlabel;
		struct ip6_hdr *ip6h;
		struct route_in6 ip6route;
		struct ifnet *ifp;
		u_char flowTop;
		uint16_t flowBottom;
		u_char tosBottom, tosTop;
		struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
		int prev_scope = 0;
		struct sockaddr_in6 lsa6_storage;
		int error;
		u_short prev_port = 0;
		int len;

		if (net != NULL) {
			flowlabel = net->tos_flowlabel;
		} else {
			flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo;
		}

		/* prepend room for the IPv6 (and optional UDP) header */
		len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr);
		if (port) {
			len += sizeof(struct udphdr);
		}
		newm = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
		if (newm == NULL) {
			sctp_m_freem(m);
			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
			return (ENOMEM);
		}
		SCTP_ALIGN_TO_END(newm, len);
		SCTP_BUF_LEN(newm) = len;
		SCTP_BUF_NEXT(newm) = m;
		m = newm;
		if (net != NULL) {
#ifdef INVARIANTS
			if (net->flowidset == 0) {
				panic("Flow ID not set");
			}
#endif
			m->m_pkthdr.flowid = net->flowid;
			m->m_flags |= M_FLOWID;
		} else {
			/* no net: inherit the flowid from the INIT, if any */
			if ((init != NULL) && (init->m_flags & M_FLOWID)) {
				m->m_pkthdr.flowid = init->m_pkthdr.flowid;
				m->m_flags |= M_FLOWID;
			}
		}
		packet_length = sctp_calculate_len(m);

		ip6h = mtod(m, struct ip6_hdr *);
		/*
		 * We assume here that inp_flow is in host byte order within
		 * the TCB!
		 */
		flowBottom = flowlabel & 0x0000ffff;
		flowTop = ((flowlabel & 0x000f0000) >> 16);
		tosTop = (((flowlabel & 0xf0) >> 4) | IPV6_VERSION);
		/* protect *sin6 from overwrite */
		sin6 = (struct sockaddr_in6 *)to;
		tmp = *sin6;
		sin6 = &tmp;

		/* KAME hack: embed scopeid */
		if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
			return (EINVAL);
		}
		if (net == NULL) {
			/* no cached route available; use a stack-local one */
			memset(&ip6route, 0, sizeof(ip6route));
			ro = (sctp_route_t *) & ip6route;
			memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
		} else {
			ro = (sctp_route_t *) & net->ro;
		}
		tosBottom = (((struct in6pcb *)inp)->in6p_flowinfo & 0x0c);
		if (ecn_ok) {
			tosBottom |= sctp_get_ect(stcb, chk);
		}
		tosBottom <<= 4;
		ip6h->ip6_flow = htonl(((tosTop << 24) | ((tosBottom | flowTop) << 16) | flowBottom));
		if (port) {
			ip6h->ip6_nxt = IPPROTO_UDP;
		} else {
			ip6h->ip6_nxt = IPPROTO_SCTP;
		}
		ip6h->ip6_plen = (packet_length - sizeof(struct ip6_hdr));
		ip6h->ip6_dst = sin6->sin6_addr;

		/*
		 * Add SRC address selection here: we can only reuse to a
		 * limited degree the kame src-addr-sel, since we can try
		 * their selection but it may not be bound.
		 */
		bzero(&lsa6_tmp, sizeof(lsa6_tmp));
		lsa6_tmp.sin6_family = AF_INET6;
		lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
		lsa6 = &lsa6_tmp;
		if (net && out_of_asoc_ok == 0) {
			/* drop a cached source address that went stale */
			if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
				sctp_free_ifa(net->ro._s_addr);
				net->ro._s_addr = NULL;
				net->src_addr_selected = 0;
				if (ro->ro_rt) {
					RTFREE(ro->ro_rt);
					ro->ro_rt = NULL;
				}
			}
			if (net->src_addr_selected == 0) {
				sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
				/* KAME hack: embed scopeid */
				if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
					SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
					return (EINVAL);
				}
				/* Cache the source address */
				net->ro._s_addr = sctp_source_address_selection(inp,
				    stcb,
				    ro,
				    net,
				    0,
				    vrf_id);
				(void)sa6_recoverscope(sin6);
				net->src_addr_selected = 1;
			}
			if (net->ro._s_addr == NULL) {
				SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
				net->src_addr_selected = 0;
				goto no_route;
			}
			lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
		} else {
			sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
			/* KAME hack: embed scopeid */
			if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
				return (EINVAL);
			}
			if (over_addr == NULL) {
				struct sctp_ifa *_lsrc;

				_lsrc = sctp_source_address_selection(inp, stcb, ro,
				    net,
				    out_of_asoc_ok,
				    vrf_id);
				if (_lsrc == NULL) {
					goto no_route;
				}
				lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
				sctp_free_ifa(_lsrc);
			} else {
				/* caller dictated the source address */
				lsa6->sin6_addr = over_addr->sin6.sin6_addr;
				SCTP_RTALLOC(ro, vrf_id);
			}
			(void)sa6_recoverscope(sin6);
		}
		lsa6->sin6_port = inp->sctp_lport;

		if (ro->ro_rt == NULL) {
			/*
			 * src addr selection failed to find a route (or
			 * valid source addr), so we can't get there from
			 * here!
			 */
			goto no_route;
		}
		/*
		 * XXX: sa6 may not have a valid sin6_scope_id in the
		 * non-SCOPEDROUTING case.
		 */
		bzero(&lsa6_storage, sizeof(lsa6_storage));
		lsa6_storage.sin6_family = AF_INET6;
		lsa6_storage.sin6_len = sizeof(lsa6_storage);
		lsa6_storage.sin6_addr = lsa6->sin6_addr;
		if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
			SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
			sctp_m_freem(m);
			return (error);
		}
		/* XXX */
		lsa6_storage.sin6_addr = lsa6->sin6_addr;
		lsa6_storage.sin6_port = inp->sctp_lport;
		lsa6 = &lsa6_storage;
		ip6h->ip6_src = lsa6->sin6_addr;

		if (port) {
			udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
			udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
			udp->uh_dport = port;
			udp->uh_ulen = htons(packet_length - sizeof(struct ip6_hdr));
			udp->uh_sum = 0;
			sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
		} else {
			sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
		}

		sctphdr->src_port = src_port;
		sctphdr->dest_port = dest_port;
		sctphdr->v_tag = v_tag;
		sctphdr->checksum = 0;

		/*
		 * We set the hop limit now since there is a good chance
		 * that our ro pointer is now filled
		 */
		ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
		ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);

#ifdef SCTP_DEBUG
		/* Copy to be sure something bad is not happening */
		sin6->sin6_addr = ip6h->ip6_dst;
		lsa6->sin6_addr = ip6h->ip6_src;
#endif

		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
		if (net) {
			sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
			/* preserve the port and scope for link local send */
			prev_scope = sin6->sin6_scope_id;
			prev_port = sin6->sin6_port;
		}
		if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
			/* failed to prepend data, give up */
			sctp_m_freem(m);
			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
			return (ENOMEM);
		}
#ifdef SCTP_PACKET_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
			sctp_packet_log(m, packet_length);
#endif
		SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
		if (port) {
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
			/* software CRC32c unless looped back locally */
			if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
			    (stcb) &&
			    (stcb->asoc.loopback_scope))) {
				sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
				SCTP_STAT_INCR(sctps_sendswcrc);
			} else {
				SCTP_STAT_INCR(sctps_sendnocrc);
			}
#endif
			if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
				udp->uh_sum = 0xffff;
			}
		} else {
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
			/* let the NIC/stack compute the SCTP CRC */
			m->m_pkthdr.csum_flags = CSUM_SCTP;
			m->m_pkthdr.csum_data = 0;
			SCTP_STAT_INCR(sctps_sendhwcrc);
#endif
		}
		/* send it out. table id is taken from stcb */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
			so = SCTP_INP_SO(inp);
			SCTP_SOCKET_UNLOCK(so, 0);
		}
#endif
		SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
			/* hold a ref across the lock dance so stcb survives */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 0);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
		}
#endif
		if (net) {
			/* for link local this must be done */
			sin6->sin6_scope_id = prev_scope;
			sin6->sin6_port = prev_port;
		}
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
		SCTP_STAT_INCR(sctps_sendpackets);
		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
		if (ret) {
			SCTP_STAT_INCR(sctps_senderrors);
		}
		if (net == NULL) {
			/* Now if we had a temp route free it */
			if (ro->ro_rt) {
				RTFREE(ro->ro_rt);
			}
		} else {
			/* PMTU check versus smallest asoc MTU goes here */
			if (ro->ro_rt == NULL) {
				/* Route was freed */
				if (net->ro._s_addr &&
				    net->src_addr_selected) {
					sctp_free_ifa(net->ro._s_addr);
					net->ro._s_addr = NULL;
				}
				net->src_addr_selected = 0;
			}
			if ((ro->ro_rt != NULL) &&
			    (net->ro._s_addr)) {
				uint32_t mtu;

				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
				if (mtu &&
				    (stcb->asoc.smallest_mtu > mtu)) {
					sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
					net->mtu = mtu;
					if (net->port) {
						/* account for UDP encapsulation overhead */
						net->mtu -= sizeof(struct udphdr);
					}
				}
			} else if (ifp) {
				if (ND_IFINFO(ifp)->linkmtu &&
				    (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
					sctp_mtu_size_reset(inp,
					    &stcb->asoc,
					    ND_IFINFO(ifp)->linkmtu);
				}
			}
		}
		return (ret);
	}
#endif
	else {
		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
		    ((struct sockaddr *)to)->sa_family);
		sctp_m_freem(m);
		SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
		return (EFAULT);
	}
}


/*
 * Build and transmit an INIT chunk for stcb to its primary destination
 * (choosing and confirming one if none is set), starting the INIT timer.
 * NOTE(review): the remainder of this function continues past this view.
 */
void
sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m, *m_at, *mp_last;
	struct sctp_nets *net;
	struct sctp_init_chunk *init;
	struct sctp_supported_addr_param *sup_addr;
	struct sctp_adaptation_layer_indication *ali;
	struct sctp_ecn_supported_param *ecn;
	struct sctp_prsctp_supported_param *prsctp;
	struct sctp_supported_chunk_types_param *pr_supported;
	int cnt_inits_to = 0;
	int padval, ret;
	int num_ext;
	int p_len;

	/* INIT's always go to the primary (and usually ONLY address) */
	mp_last = NULL;
	net = stcb->asoc.primary_destination;
	if (net == NULL) {
		net = TAILQ_FIRST(&stcb->asoc.nets);
		if (net == NULL) {
			/* TSNH */
			return;
		}
		/* we confirm any address we send an INIT to */
		net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
		(void)sctp_set_primary_addr(stcb, NULL, net);
	} else {
		/* we confirm any address we send an INIT to */
		net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
#ifdef INET6
	if (((struct sockaddr *)&(net->ro._l_addr))->sa_family == AF_INET6) {
		/*
		 * special hook, if we are sending to link local it will not
		 * show up in our private address count.
4170 */ 4171 struct sockaddr_in6 *sin6l; 4172 4173 sin6l = &net->ro._l_addr.sin6; 4174 if (IN6_IS_ADDR_LINKLOCAL(&sin6l->sin6_addr)) 4175 cnt_inits_to = 1; 4176 } 4177 #endif 4178 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4179 /* This case should not happen */ 4180 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n"); 4181 return; 4182 } 4183 /* start the INIT timer */ 4184 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net); 4185 4186 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA); 4187 if (m == NULL) { 4188 /* No memory, INIT timer will re-attempt. */ 4189 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n"); 4190 return; 4191 } 4192 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk); 4193 /* 4194 * assume peer supports asconf in order to be able to queue local 4195 * address changes while an INIT is in flight and before the assoc 4196 * is established. 4197 */ 4198 stcb->asoc.peer_supports_asconf = 1; 4199 /* Now lets put the SCTP header in place */ 4200 init = mtod(m, struct sctp_init_chunk *); 4201 /* now the chunk header */ 4202 init->ch.chunk_type = SCTP_INITIATION; 4203 init->ch.chunk_flags = 0; 4204 /* fill in later from mbuf we build */ 4205 init->ch.chunk_length = 0; 4206 /* place in my tag */ 4207 init->init.initiate_tag = htonl(stcb->asoc.my_vtag); 4208 /* set up some of the credits. */ 4209 init->init.a_rwnd = htonl(max(inp->sctp_socket ? 
SCTP_SB_LIMIT_RCV(inp->sctp_socket) : 0, 4210 SCTP_MINIMAL_RWND)); 4211 4212 init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams); 4213 init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams); 4214 init->init.initial_tsn = htonl(stcb->asoc.init_seq_number); 4215 /* now the address restriction */ 4216 sup_addr = (struct sctp_supported_addr_param *)((caddr_t)init + 4217 sizeof(*init)); 4218 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE); 4219 #ifdef INET6 4220 /* we support 2 types: IPv6/IPv4 */ 4221 sup_addr->ph.param_length = htons(sizeof(*sup_addr) + sizeof(uint16_t)); 4222 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS); 4223 sup_addr->addr_type[1] = htons(SCTP_IPV6_ADDRESS); 4224 #else 4225 /* we support 1 type: IPv4 */ 4226 sup_addr->ph.param_length = htons(sizeof(*sup_addr) + sizeof(uint8_t)); 4227 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS); 4228 sup_addr->addr_type[1] = htons(0); /* this is the padding */ 4229 #endif 4230 SCTP_BUF_LEN(m) += sizeof(*sup_addr) + sizeof(uint16_t); 4231 /* adaptation layer indication parameter */ 4232 ali = (struct sctp_adaptation_layer_indication *)((caddr_t)sup_addr + sizeof(*sup_addr) + sizeof(uint16_t)); 4233 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION); 4234 ali->ph.param_length = htons(sizeof(*ali)); 4235 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator); 4236 SCTP_BUF_LEN(m) += sizeof(*ali); 4237 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali)); 4238 4239 if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) { 4240 /* Add NAT friendly parameter */ 4241 struct sctp_paramhdr *ph; 4242 4243 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 4244 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT); 4245 ph->param_length = htons(sizeof(struct sctp_paramhdr)); 4246 SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr); 4247 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ph + sizeof(*ph)); 4248 } 4249 /* now any cookie time extensions 
*/ 4250 if (stcb->asoc.cookie_preserve_req) { 4251 struct sctp_cookie_perserve_param *cookie_preserve; 4252 4253 cookie_preserve = (struct sctp_cookie_perserve_param *)(ecn); 4254 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE); 4255 cookie_preserve->ph.param_length = htons( 4256 sizeof(*cookie_preserve)); 4257 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req); 4258 SCTP_BUF_LEN(m) += sizeof(*cookie_preserve); 4259 ecn = (struct sctp_ecn_supported_param *)( 4260 (caddr_t)cookie_preserve + sizeof(*cookie_preserve)); 4261 stcb->asoc.cookie_preserve_req = 0; 4262 } 4263 /* ECN parameter */ 4264 if (stcb->asoc.ecn_allowed == 1) { 4265 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE); 4266 ecn->ph.param_length = htons(sizeof(*ecn)); 4267 SCTP_BUF_LEN(m) += sizeof(*ecn); 4268 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn + 4269 sizeof(*ecn)); 4270 } else { 4271 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn); 4272 } 4273 /* And now tell the peer we do pr-sctp */ 4274 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED); 4275 prsctp->ph.param_length = htons(sizeof(*prsctp)); 4276 SCTP_BUF_LEN(m) += sizeof(*prsctp); 4277 4278 /* And now tell the peer we do all the extensions */ 4279 pr_supported = (struct sctp_supported_chunk_types_param *) 4280 ((caddr_t)prsctp + sizeof(*prsctp)); 4281 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT); 4282 num_ext = 0; 4283 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF; 4284 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK; 4285 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN; 4286 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED; 4287 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET; 4288 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) { 4289 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION; 4290 } 4291 if (stcb->asoc.sctp_nr_sack_on_off == 1) { 4292 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK; 4293 } 4294 p_len = 
sizeof(*pr_supported) + num_ext; 4295 pr_supported->ph.param_length = htons(p_len); 4296 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len); 4297 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 4298 4299 4300 /* add authentication parameters */ 4301 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) { 4302 struct sctp_auth_random *randp; 4303 struct sctp_auth_hmac_algo *hmacs; 4304 struct sctp_auth_chunk_list *chunks; 4305 4306 /* attach RANDOM parameter, if available */ 4307 if (stcb->asoc.authinfo.random != NULL) { 4308 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 4309 p_len = sizeof(*randp) + stcb->asoc.authinfo.random_len; 4310 /* random key already contains the header */ 4311 bcopy(stcb->asoc.authinfo.random->key, randp, p_len); 4312 /* zero out any padding required */ 4313 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len); 4314 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 4315 } 4316 /* add HMAC_ALGO parameter */ 4317 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 4318 p_len = sctp_serialize_hmaclist(stcb->asoc.local_hmacs, 4319 (uint8_t *) hmacs->hmac_ids); 4320 if (p_len > 0) { 4321 p_len += sizeof(*hmacs); 4322 hmacs->ph.param_type = htons(SCTP_HMAC_LIST); 4323 hmacs->ph.param_length = htons(p_len); 4324 /* zero out any padding required */ 4325 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len); 4326 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 4327 } 4328 /* add CHUNKS parameter */ 4329 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 4330 p_len = sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, 4331 chunks->chunk_types); 4332 if (p_len > 0) { 4333 p_len += sizeof(*chunks); 4334 chunks->ph.param_type = htons(SCTP_CHUNK_LIST); 4335 chunks->ph.param_length = htons(p_len); 4336 /* zero out any padding required */ 4337 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len); 4338 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 4339 } 4340 } 4341 m_at = m; 4342 /* now the 
addresses */ 4343 { 4344 struct sctp_scoping scp; 4345 4346 /* 4347 * To optimize this we could put the scoping stuff into a 4348 * structure and remove the individual uint8's from the 4349 * assoc structure. Then we could just sifa in the address 4350 * within the stcb.. but for now this is a quick hack to get 4351 * the address stuff teased apart. 4352 */ 4353 scp.ipv4_addr_legal = stcb->asoc.ipv4_addr_legal; 4354 scp.ipv6_addr_legal = stcb->asoc.ipv6_addr_legal; 4355 scp.loopback_scope = stcb->asoc.loopback_scope; 4356 scp.ipv4_local_scope = stcb->asoc.ipv4_local_scope; 4357 scp.local_scope = stcb->asoc.local_scope; 4358 scp.site_scope = stcb->asoc.site_scope; 4359 4360 m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to); 4361 } 4362 4363 /* calulate the size and update pkt header and chunk header */ 4364 p_len = 0; 4365 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 4366 if (SCTP_BUF_NEXT(m_at) == NULL) 4367 mp_last = m_at; 4368 p_len += SCTP_BUF_LEN(m_at); 4369 } 4370 init->ch.chunk_length = htons(p_len); 4371 /* 4372 * We sifa 0 here to NOT set IP_DF if its IPv4, we ignore the return 4373 * here since the timer will drive a retranmission. 4374 */ 4375 4376 /* I don't expect this to execute but we will be safe here */ 4377 padval = p_len % 4; 4378 if ((padval) && (mp_last)) { 4379 /* 4380 * The compiler worries that mp_last may not be set even 4381 * though I think it is impossible :-> however we add 4382 * mp_last here just in case. 
4383 */ 4384 ret = sctp_add_pad_tombuf(mp_last, (4 - padval)); 4385 if (ret) { 4386 /* Houston we have a problem, no space */ 4387 sctp_m_freem(m); 4388 return; 4389 } 4390 p_len += padval; 4391 } 4392 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n"); 4393 ret = sctp_lowlevel_chunk_output(inp, stcb, net, 4394 (struct sockaddr *)&net->ro._l_addr, 4395 m, 0, NULL, 0, 0, 0, NULL, 0, 4396 inp->sctp_lport, stcb->rport, htonl(0), 4397 net->port, so_locked, NULL, NULL); 4398 SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret); 4399 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 4400 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 4401 } 4402 4403 struct mbuf * 4404 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt, 4405 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly) 4406 { 4407 /* 4408 * Given a mbuf containing an INIT or INIT-ACK with the param_offset 4409 * being equal to the beginning of the params i.e. (iphlen + 4410 * sizeof(struct sctp_init_msg) parse through the parameters to the 4411 * end of the mbuf verifying that all parameters are known. 4412 * 4413 * For unknown parameters build and return a mbuf with 4414 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop 4415 * processing this chunk stop, and set *abort_processing to 1. 4416 * 4417 * By having param_offset be pre-set to where parameters begin it is 4418 * hoped that this routine may be reused in the future by new 4419 * features. 
4420 */ 4421 struct sctp_paramhdr *phdr, params; 4422 4423 struct mbuf *mat, *op_err; 4424 char tempbuf[SCTP_PARAM_BUFFER_SIZE]; 4425 int at, limit, pad_needed; 4426 uint16_t ptype, plen, padded_size; 4427 int err_at; 4428 4429 *abort_processing = 0; 4430 mat = in_initpkt; 4431 err_at = 0; 4432 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk); 4433 at = param_offset; 4434 op_err = NULL; 4435 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n"); 4436 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params)); 4437 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) { 4438 ptype = ntohs(phdr->param_type); 4439 plen = ntohs(phdr->param_length); 4440 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) { 4441 /* wacked parameter */ 4442 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen); 4443 goto invalid_size; 4444 } 4445 limit -= SCTP_SIZE32(plen); 4446 /*- 4447 * All parameters for all chunks that we know/understand are 4448 * listed here. We process them other places and make 4449 * appropriate stop actions per the upper bits. However this 4450 * is the generic routine processor's can call to get back 4451 * an operr.. to either incorporate (init-ack) or send. 
4452 */ 4453 padded_size = SCTP_SIZE32(plen); 4454 switch (ptype) { 4455 /* Param's with variable size */ 4456 case SCTP_HEARTBEAT_INFO: 4457 case SCTP_STATE_COOKIE: 4458 case SCTP_UNRECOG_PARAM: 4459 case SCTP_ERROR_CAUSE_IND: 4460 /* ok skip fwd */ 4461 at += padded_size; 4462 break; 4463 /* Param's with variable size within a range */ 4464 case SCTP_CHUNK_LIST: 4465 case SCTP_SUPPORTED_CHUNK_EXT: 4466 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) { 4467 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen); 4468 goto invalid_size; 4469 } 4470 at += padded_size; 4471 break; 4472 case SCTP_SUPPORTED_ADDRTYPE: 4473 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) { 4474 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen); 4475 goto invalid_size; 4476 } 4477 at += padded_size; 4478 break; 4479 case SCTP_RANDOM: 4480 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) { 4481 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen); 4482 goto invalid_size; 4483 } 4484 at += padded_size; 4485 break; 4486 case SCTP_SET_PRIM_ADDR: 4487 case SCTP_DEL_IP_ADDRESS: 4488 case SCTP_ADD_IP_ADDRESS: 4489 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) && 4490 (padded_size != sizeof(struct sctp_asconf_addr_param))) { 4491 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen); 4492 goto invalid_size; 4493 } 4494 at += padded_size; 4495 break; 4496 /* Param's with a fixed size */ 4497 case SCTP_IPV4_ADDRESS: 4498 if (padded_size != sizeof(struct sctp_ipv4addr_param)) { 4499 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen); 4500 goto invalid_size; 4501 } 4502 at += padded_size; 4503 break; 4504 case SCTP_IPV6_ADDRESS: 4505 if (padded_size != sizeof(struct sctp_ipv6addr_param)) { 4506 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen); 4507 goto invalid_size; 4508 } 4509 
at += padded_size; 4510 break; 4511 case SCTP_COOKIE_PRESERVE: 4512 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) { 4513 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen); 4514 goto invalid_size; 4515 } 4516 at += padded_size; 4517 break; 4518 case SCTP_HAS_NAT_SUPPORT: 4519 *nat_friendly = 1; 4520 /* fall through */ 4521 case SCTP_PRSCTP_SUPPORTED: 4522 4523 if (padded_size != sizeof(struct sctp_paramhdr)) { 4524 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen); 4525 goto invalid_size; 4526 } 4527 at += padded_size; 4528 break; 4529 case SCTP_ECN_CAPABLE: 4530 if (padded_size != sizeof(struct sctp_ecn_supported_param)) { 4531 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen); 4532 goto invalid_size; 4533 } 4534 at += padded_size; 4535 break; 4536 case SCTP_ULP_ADAPTATION: 4537 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) { 4538 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adapatation %d\n", plen); 4539 goto invalid_size; 4540 } 4541 at += padded_size; 4542 break; 4543 case SCTP_SUCCESS_REPORT: 4544 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) { 4545 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen); 4546 goto invalid_size; 4547 } 4548 at += padded_size; 4549 break; 4550 case SCTP_HOSTNAME_ADDRESS: 4551 { 4552 /* We can NOT handle HOST NAME addresses!! */ 4553 int l_len; 4554 4555 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. 
abort processing\n"); 4556 *abort_processing = 1; 4557 if (op_err == NULL) { 4558 /* Ok need to try to get a mbuf */ 4559 #ifdef INET6 4560 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 4561 #else 4562 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 4563 #endif 4564 l_len += plen; 4565 l_len += sizeof(struct sctp_paramhdr); 4566 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA); 4567 if (op_err) { 4568 SCTP_BUF_LEN(op_err) = 0; 4569 /* 4570 * pre-reserve space for ip 4571 * and sctp header and 4572 * chunk hdr 4573 */ 4574 #ifdef INET6 4575 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 4576 #else 4577 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip)); 4578 #endif 4579 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 4580 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 4581 } 4582 } 4583 if (op_err) { 4584 /* If we have space */ 4585 struct sctp_paramhdr s; 4586 4587 if (err_at % 4) { 4588 uint32_t cpthis = 0; 4589 4590 pad_needed = 4 - (err_at % 4); 4591 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis); 4592 err_at += pad_needed; 4593 } 4594 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR); 4595 s.param_length = htons(sizeof(s) + plen); 4596 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s); 4597 err_at += sizeof(s); 4598 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen)); 4599 if (phdr == NULL) { 4600 sctp_m_freem(op_err); 4601 /* 4602 * we are out of memory but 4603 * we still need to have a 4604 * look at what to do (the 4605 * system is in trouble 4606 * though). 4607 */ 4608 return (NULL); 4609 } 4610 m_copyback(op_err, err_at, plen, (caddr_t)phdr); 4611 err_at += plen; 4612 } 4613 return (op_err); 4614 break; 4615 } 4616 default: 4617 /* 4618 * we do not recognize the parameter figure out what 4619 * we do. 
4620 */ 4621 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype); 4622 if ((ptype & 0x4000) == 0x4000) { 4623 /* Report bit is set?? */ 4624 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n"); 4625 if (op_err == NULL) { 4626 int l_len; 4627 4628 /* Ok need to try to get an mbuf */ 4629 #ifdef INET6 4630 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 4631 #else 4632 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 4633 #endif 4634 l_len += plen; 4635 l_len += sizeof(struct sctp_paramhdr); 4636 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA); 4637 if (op_err) { 4638 SCTP_BUF_LEN(op_err) = 0; 4639 #ifdef INET6 4640 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 4641 #else 4642 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip)); 4643 #endif 4644 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 4645 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 4646 } 4647 } 4648 if (op_err) { 4649 /* If we have space */ 4650 struct sctp_paramhdr s; 4651 4652 if (err_at % 4) { 4653 uint32_t cpthis = 0; 4654 4655 pad_needed = 4 - (err_at % 4); 4656 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis); 4657 err_at += pad_needed; 4658 } 4659 s.param_type = htons(SCTP_UNRECOG_PARAM); 4660 s.param_length = htons(sizeof(s) + plen); 4661 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s); 4662 err_at += sizeof(s); 4663 if (plen > sizeof(tempbuf)) { 4664 plen = sizeof(tempbuf); 4665 } 4666 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen)); 4667 if (phdr == NULL) { 4668 sctp_m_freem(op_err); 4669 /* 4670 * we are out of memory but 4671 * we still need to have a 4672 * look at what to do (the 4673 * system is in trouble 4674 * though). 
4675 */ 4676 op_err = NULL; 4677 goto more_processing; 4678 } 4679 m_copyback(op_err, err_at, plen, (caddr_t)phdr); 4680 err_at += plen; 4681 } 4682 } 4683 more_processing: 4684 if ((ptype & 0x8000) == 0x0000) { 4685 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n"); 4686 return (op_err); 4687 } else { 4688 /* skip this chunk and continue processing */ 4689 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n"); 4690 at += SCTP_SIZE32(plen); 4691 } 4692 break; 4693 4694 } 4695 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params)); 4696 } 4697 return (op_err); 4698 invalid_size: 4699 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n"); 4700 *abort_processing = 1; 4701 if ((op_err == NULL) && phdr) { 4702 int l_len; 4703 4704 #ifdef INET6 4705 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 4706 #else 4707 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 4708 #endif 4709 l_len += (2 * sizeof(struct sctp_paramhdr)); 4710 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA); 4711 if (op_err) { 4712 SCTP_BUF_LEN(op_err) = 0; 4713 #ifdef INET6 4714 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 4715 #else 4716 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip)); 4717 #endif 4718 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 4719 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 4720 } 4721 } 4722 if ((op_err) && phdr) { 4723 struct sctp_paramhdr s; 4724 4725 if (err_at % 4) { 4726 uint32_t cpthis = 0; 4727 4728 pad_needed = 4 - (err_at % 4); 4729 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis); 4730 err_at += pad_needed; 4731 } 4732 s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 4733 s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr)); 4734 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s); 4735 err_at += sizeof(s); 4736 /* Only copy back the p-hdr that caused the issue */ 4737 m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr); 4738 } 4739 return 
(op_err);
}

static int
sctp_are_there_new_addresses(struct sctp_association *asoc,
    struct mbuf *in_initpkt, int iphlen, int offset)
{
	/*
	 * Given an INIT packet, look through the packet to verify that
	 * there are NO addresses that are not already known to this
	 * association. Returns 1 (drop the packet) if the packet's source
	 * address or any address parameter it carries is new, or if an
	 * address parameter is malformed; returns 0 otherwise.
	 *
	 * NOTE(review): the iphlen argument is not referenced in this
	 * function's visible body — confirm whether it is still needed.
	 */
	struct sockaddr_in sin4, *sa4;

#ifdef INET6
	struct sockaddr_in6 sin6, *sa6;

#endif
	struct sockaddr *sa_touse;
	struct sockaddr *sa;
	struct sctp_paramhdr *phdr, params;
	struct ip *iph;

#ifdef INET6
	struct ip6_hdr *ip6h;

#endif
	struct mbuf *mat;
	uint16_t ptype, plen;
	/* NOTE(review): err_at is initialized but appears unused here. */
	int err_at;
	uint8_t fnd;
	struct sctp_nets *net;

	/* Template sockaddrs used for comparisons against existing nets. */
	memset(&sin4, 0, sizeof(sin4));
#ifdef INET6
	memset(&sin6, 0, sizeof(sin6));
#endif
	sin4.sin_family = AF_INET;
	sin4.sin_len = sizeof(sin4);
#ifdef INET6
	sin6.sin6_family = AF_INET6;
	sin6.sin6_len = sizeof(sin6);
#endif
	sa_touse = NULL;
	/* First what about the src address of the pkt ?
	 */
	/* Pull the packet's source address into the matching template. */
	iph = mtod(in_initpkt, struct ip *);
	switch (iph->ip_v) {
	case IPVERSION:
		/* source addr is IPv4 */
		sin4.sin_addr = iph->ip_src;
		sa_touse = (struct sockaddr *)&sin4;
		break;
#ifdef INET6
	case IPV6_VERSION >> 4:
		/* source addr is IPv6 */
		ip6h = mtod(in_initpkt, struct ip6_hdr *);
		sin6.sin6_addr = ip6h->ip6_src;
		sa_touse = (struct sockaddr *)&sin6;
		break;
#endif
	default:
		/* unknown IP version: drop */
		return (1);
	}

	/* Is the source address already one of the association's nets? */
	fnd = 0;
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		sa = (struct sockaddr *)&net->ro._l_addr;
		if (sa->sa_family == sa_touse->sa_family) {
			if (sa->sa_family == AF_INET) {
				sa4 = (struct sockaddr_in *)sa;
				if (sa4->sin_addr.s_addr ==
				    sin4.sin_addr.s_addr) {
					fnd = 1;
					break;
				}
			}
#ifdef INET6
			if (sa->sa_family == AF_INET6) {
				sa6 = (struct sockaddr_in6 *)sa;
				if (SCTP6_ARE_ADDR_EQUAL(sa6,
				    &sin6)) {
					fnd = 1;
					break;
				}
			}
#endif
		}
	}
	if (fnd == 0) {
		/* New address added! no need to look further.
*/ 4831 return (1); 4832 } 4833 /* Ok so far lets munge through the rest of the packet */ 4834 mat = in_initpkt; 4835 err_at = 0; 4836 sa_touse = NULL; 4837 offset += sizeof(struct sctp_init_chunk); 4838 phdr = sctp_get_next_param(mat, offset, ¶ms, sizeof(params)); 4839 while (phdr) { 4840 ptype = ntohs(phdr->param_type); 4841 plen = ntohs(phdr->param_length); 4842 if (ptype == SCTP_IPV4_ADDRESS) { 4843 struct sctp_ipv4addr_param *p4, p4_buf; 4844 4845 phdr = sctp_get_next_param(mat, offset, 4846 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf)); 4847 if (plen != sizeof(struct sctp_ipv4addr_param) || 4848 phdr == NULL) { 4849 return (1); 4850 } 4851 p4 = (struct sctp_ipv4addr_param *)phdr; 4852 sin4.sin_addr.s_addr = p4->addr; 4853 sa_touse = (struct sockaddr *)&sin4; 4854 } else if (ptype == SCTP_IPV6_ADDRESS) { 4855 struct sctp_ipv6addr_param *p6, p6_buf; 4856 4857 phdr = sctp_get_next_param(mat, offset, 4858 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf)); 4859 if (plen != sizeof(struct sctp_ipv6addr_param) || 4860 phdr == NULL) { 4861 return (1); 4862 } 4863 p6 = (struct sctp_ipv6addr_param *)phdr; 4864 #ifdef INET6 4865 memcpy((caddr_t)&sin6.sin6_addr, p6->addr, 4866 sizeof(p6->addr)); 4867 #endif 4868 sa_touse = (struct sockaddr *)&sin4; 4869 } 4870 if (sa_touse) { 4871 /* ok, sa_touse points to one to check */ 4872 fnd = 0; 4873 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4874 sa = (struct sockaddr *)&net->ro._l_addr; 4875 if (sa->sa_family != sa_touse->sa_family) { 4876 continue; 4877 } 4878 if (sa->sa_family == AF_INET) { 4879 sa4 = (struct sockaddr_in *)sa; 4880 if (sa4->sin_addr.s_addr == 4881 sin4.sin_addr.s_addr) { 4882 fnd = 1; 4883 break; 4884 } 4885 } 4886 #ifdef INET6 4887 if (sa->sa_family == AF_INET6) { 4888 sa6 = (struct sockaddr_in6 *)sa; 4889 if (SCTP6_ARE_ADDR_EQUAL( 4890 sa6, &sin6)) { 4891 fnd = 1; 4892 break; 4893 } 4894 } 4895 #endif 4896 } 4897 if (!fnd) { 4898 /* New addr added! 
no need to look further */ 4899 return (1); 4900 } 4901 } 4902 offset += SCTP_SIZE32(plen); 4903 phdr = sctp_get_next_param(mat, offset, ¶ms, sizeof(params)); 4904 } 4905 return (0); 4906 } 4907 4908 /* 4909 * Given a MBUF chain that was sent into us containing an INIT. Build a 4910 * INIT-ACK with COOKIE and send back. We assume that the in_initpkt has done 4911 * a pullup to include IPv6/4header, SCTP header and initial part of INIT 4912 * message (i.e. the struct sctp_init_msg). 4913 */ 4914 void 4915 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 4916 struct mbuf *init_pkt, int iphlen, int offset, struct sctphdr *sh, 4917 struct sctp_init_chunk *init_chk, uint32_t vrf_id, uint16_t port, int hold_inp_lock) 4918 { 4919 struct sctp_association *asoc; 4920 struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *mp_last; 4921 struct sctp_init_ack_chunk *initack; 4922 struct sctp_adaptation_layer_indication *ali; 4923 struct sctp_ecn_supported_param *ecn; 4924 struct sctp_prsctp_supported_param *prsctp; 4925 struct sctp_supported_chunk_types_param *pr_supported; 4926 union sctp_sockstore store, store1, *over_addr; 4927 struct sockaddr_in *sin, *to_sin; 4928 4929 #ifdef INET6 4930 struct sockaddr_in6 *sin6, *to_sin6; 4931 4932 #endif 4933 struct ip *iph; 4934 4935 #ifdef INET6 4936 struct ip6_hdr *ip6; 4937 4938 #endif 4939 struct sockaddr *to; 4940 struct sctp_state_cookie stc; 4941 struct sctp_nets *net = NULL; 4942 uint8_t *signature = NULL; 4943 int cnt_inits_to = 0; 4944 uint16_t his_limit, i_want; 4945 int abort_flag, padval; 4946 int num_ext; 4947 int p_len; 4948 int nat_friendly = 0; 4949 struct socket *so; 4950 4951 if (stcb) 4952 asoc = &stcb->asoc; 4953 else 4954 asoc = NULL; 4955 mp_last = NULL; 4956 if ((asoc != NULL) && 4957 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) && 4958 (sctp_are_there_new_addresses(asoc, init_pkt, iphlen, offset))) { 4959 /* new addresses, out of here in non-cookie-wait states */ 4960 /* 4961 * Send a 
ABORT, we don't add the new address error clause 4962 * though we even set the T bit and copy in the 0 tag.. this 4963 * looks no different than if no listener was present. 4964 */ 4965 sctp_send_abort(init_pkt, iphlen, sh, 0, NULL, vrf_id, port); 4966 return; 4967 } 4968 abort_flag = 0; 4969 op_err = sctp_arethere_unrecognized_parameters(init_pkt, 4970 (offset + sizeof(struct sctp_init_chunk)), 4971 &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly); 4972 if (abort_flag) { 4973 do_a_abort: 4974 sctp_send_abort(init_pkt, iphlen, sh, 4975 init_chk->init.initiate_tag, op_err, vrf_id, port); 4976 return; 4977 } 4978 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 4979 if (m == NULL) { 4980 /* No memory, INIT timer will re-attempt. */ 4981 if (op_err) 4982 sctp_m_freem(op_err); 4983 return; 4984 } 4985 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk); 4986 4987 /* the time I built cookie */ 4988 (void)SCTP_GETTIME_TIMEVAL(&stc.time_entered); 4989 4990 /* populate any tie tags */ 4991 if (asoc != NULL) { 4992 /* unlock before tag selections */ 4993 stc.tie_tag_my_vtag = asoc->my_vtag_nonce; 4994 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce; 4995 stc.cookie_life = asoc->cookie_life; 4996 net = asoc->primary_destination; 4997 } else { 4998 stc.tie_tag_my_vtag = 0; 4999 stc.tie_tag_peer_vtag = 0; 5000 /* life I will award this cookie */ 5001 stc.cookie_life = inp->sctp_ep.def_cookie_life; 5002 } 5003 5004 /* copy in the ports for later check */ 5005 stc.myport = sh->dest_port; 5006 stc.peerport = sh->src_port; 5007 5008 /* 5009 * If we wanted to honor cookie life extentions, we would add to 5010 * stc.cookie_life. 
For now we should NOT honor any extension 5011 */ 5012 stc.site_scope = stc.local_scope = stc.loopback_scope = 0; 5013 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 5014 struct inpcb *in_inp; 5015 5016 /* Its a V6 socket */ 5017 in_inp = (struct inpcb *)inp; 5018 stc.ipv6_addr_legal = 1; 5019 /* Now look at the binding flag to see if V4 will be legal */ 5020 if (SCTP_IPV6_V6ONLY(in_inp) == 0) { 5021 stc.ipv4_addr_legal = 1; 5022 } else { 5023 /* V4 addresses are NOT legal on the association */ 5024 stc.ipv4_addr_legal = 0; 5025 } 5026 } else { 5027 /* Its a V4 socket, no - V6 */ 5028 stc.ipv4_addr_legal = 1; 5029 stc.ipv6_addr_legal = 0; 5030 } 5031 5032 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE 5033 stc.ipv4_scope = 1; 5034 #else 5035 stc.ipv4_scope = 0; 5036 #endif 5037 /* now for scope setup */ 5038 memset((caddr_t)&store, 0, sizeof(store)); 5039 memset((caddr_t)&store1, 0, sizeof(store1)); 5040 sin = &store.sin; 5041 to_sin = &store1.sin; 5042 #ifdef INET6 5043 sin6 = &store.sin6; 5044 to_sin6 = &store1.sin6; 5045 #endif 5046 iph = mtod(init_pkt, struct ip *); 5047 /* establish the to_addr's */ 5048 switch (iph->ip_v) { 5049 case IPVERSION: 5050 to_sin->sin_port = sh->dest_port; 5051 to_sin->sin_family = AF_INET; 5052 to_sin->sin_len = sizeof(struct sockaddr_in); 5053 to_sin->sin_addr = iph->ip_dst; 5054 break; 5055 #ifdef INET6 5056 case IPV6_VERSION >> 4: 5057 ip6 = mtod(init_pkt, struct ip6_hdr *); 5058 to_sin6->sin6_addr = ip6->ip6_dst; 5059 to_sin6->sin6_scope_id = 0; 5060 to_sin6->sin6_port = sh->dest_port; 5061 to_sin6->sin6_family = AF_INET6; 5062 to_sin6->sin6_len = sizeof(struct sockaddr_in6); 5063 break; 5064 #endif 5065 default: 5066 goto do_a_abort; 5067 break; 5068 }; 5069 5070 if (net == NULL) { 5071 to = (struct sockaddr *)&store; 5072 switch (iph->ip_v) { 5073 case IPVERSION: 5074 { 5075 sin->sin_family = AF_INET; 5076 sin->sin_len = sizeof(struct sockaddr_in); 5077 sin->sin_port = sh->src_port; 5078 sin->sin_addr = iph->ip_src; 5079 /* lookup 
address */ 5080 stc.address[0] = sin->sin_addr.s_addr; 5081 stc.address[1] = 0; 5082 stc.address[2] = 0; 5083 stc.address[3] = 0; 5084 stc.addr_type = SCTP_IPV4_ADDRESS; 5085 /* local from address */ 5086 stc.laddress[0] = to_sin->sin_addr.s_addr; 5087 stc.laddress[1] = 0; 5088 stc.laddress[2] = 0; 5089 stc.laddress[3] = 0; 5090 stc.laddr_type = SCTP_IPV4_ADDRESS; 5091 /* scope_id is only for v6 */ 5092 stc.scope_id = 0; 5093 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE 5094 if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) { 5095 stc.ipv4_scope = 1; 5096 } 5097 #else 5098 stc.ipv4_scope = 1; 5099 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */ 5100 /* Must use the address in this case */ 5101 if (sctp_is_address_on_local_host((struct sockaddr *)sin, vrf_id)) { 5102 stc.loopback_scope = 1; 5103 stc.ipv4_scope = 1; 5104 stc.site_scope = 1; 5105 stc.local_scope = 0; 5106 } 5107 break; 5108 } 5109 #ifdef INET6 5110 case IPV6_VERSION >> 4: 5111 { 5112 ip6 = mtod(init_pkt, struct ip6_hdr *); 5113 sin6->sin6_family = AF_INET6; 5114 sin6->sin6_len = sizeof(struct sockaddr_in6); 5115 sin6->sin6_port = sh->src_port; 5116 sin6->sin6_addr = ip6->ip6_src; 5117 /* lookup address */ 5118 memcpy(&stc.address, &sin6->sin6_addr, 5119 sizeof(struct in6_addr)); 5120 sin6->sin6_scope_id = 0; 5121 stc.addr_type = SCTP_IPV6_ADDRESS; 5122 stc.scope_id = 0; 5123 if (sctp_is_address_on_local_host((struct sockaddr *)sin6, vrf_id)) { 5124 /* 5125 * FIX ME: does this have scope from 5126 * rcvif? 5127 */ 5128 (void)sa6_recoverscope(sin6); 5129 stc.scope_id = sin6->sin6_scope_id; 5130 sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)); 5131 stc.loopback_scope = 1; 5132 stc.local_scope = 0; 5133 stc.site_scope = 1; 5134 stc.ipv4_scope = 1; 5135 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 5136 /* 5137 * If the new destination is a 5138 * LINK_LOCAL we must have common 5139 * both site and local scope. 
Don't 5140 * set local scope though since we 5141 * must depend on the source to be 5142 * added implicitly. We cannot 5143 * assure just because we share one 5144 * link that all links are common. 5145 */ 5146 stc.local_scope = 0; 5147 stc.site_scope = 1; 5148 stc.ipv4_scope = 1; 5149 /* 5150 * we start counting for the private 5151 * address stuff at 1. since the 5152 * link local we source from won't 5153 * show up in our scoped count. 5154 */ 5155 cnt_inits_to = 1; 5156 /* 5157 * pull out the scope_id from 5158 * incoming pkt 5159 */ 5160 /* 5161 * FIX ME: does this have scope from 5162 * rcvif? 5163 */ 5164 (void)sa6_recoverscope(sin6); 5165 stc.scope_id = sin6->sin6_scope_id; 5166 sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)); 5167 } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) { 5168 /* 5169 * If the new destination is 5170 * SITE_LOCAL then we must have site 5171 * scope in common. 5172 */ 5173 stc.site_scope = 1; 5174 } 5175 memcpy(&stc.laddress, &to_sin6->sin6_addr, sizeof(struct in6_addr)); 5176 stc.laddr_type = SCTP_IPV6_ADDRESS; 5177 break; 5178 } 5179 #endif 5180 default: 5181 /* TSNH */ 5182 goto do_a_abort; 5183 break; 5184 } 5185 } else { 5186 /* set the scope per the existing tcb */ 5187 5188 #ifdef INET6 5189 struct sctp_nets *lnet; 5190 5191 #endif 5192 5193 stc.loopback_scope = asoc->loopback_scope; 5194 stc.ipv4_scope = asoc->ipv4_local_scope; 5195 stc.site_scope = asoc->site_scope; 5196 stc.local_scope = asoc->local_scope; 5197 #ifdef INET6 5198 /* Why do we not consider IPv4 LL addresses? */ 5199 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) { 5200 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) { 5201 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) { 5202 /* 5203 * if we have a LL address, start 5204 * counting at 1. 
5205 */ 5206 cnt_inits_to = 1; 5207 } 5208 } 5209 } 5210 #endif 5211 /* use the net pointer */ 5212 to = (struct sockaddr *)&net->ro._l_addr; 5213 switch (to->sa_family) { 5214 case AF_INET: 5215 sin = (struct sockaddr_in *)to; 5216 stc.address[0] = sin->sin_addr.s_addr; 5217 stc.address[1] = 0; 5218 stc.address[2] = 0; 5219 stc.address[3] = 0; 5220 stc.addr_type = SCTP_IPV4_ADDRESS; 5221 if (net->src_addr_selected == 0) { 5222 /* 5223 * strange case here, the INIT should have 5224 * did the selection. 5225 */ 5226 net->ro._s_addr = sctp_source_address_selection(inp, 5227 stcb, (sctp_route_t *) & net->ro, 5228 net, 0, vrf_id); 5229 if (net->ro._s_addr == NULL) 5230 return; 5231 5232 net->src_addr_selected = 1; 5233 5234 } 5235 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr; 5236 stc.laddress[1] = 0; 5237 stc.laddress[2] = 0; 5238 stc.laddress[3] = 0; 5239 stc.laddr_type = SCTP_IPV4_ADDRESS; 5240 break; 5241 #ifdef INET6 5242 case AF_INET6: 5243 sin6 = (struct sockaddr_in6 *)to; 5244 memcpy(&stc.address, &sin6->sin6_addr, 5245 sizeof(struct in6_addr)); 5246 stc.addr_type = SCTP_IPV6_ADDRESS; 5247 if (net->src_addr_selected == 0) { 5248 /* 5249 * strange case here, the INIT should have 5250 * did the selection. 
5251 */ 5252 net->ro._s_addr = sctp_source_address_selection(inp, 5253 stcb, (sctp_route_t *) & net->ro, 5254 net, 0, vrf_id); 5255 if (net->ro._s_addr == NULL) 5256 return; 5257 5258 net->src_addr_selected = 1; 5259 } 5260 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr, 5261 sizeof(struct in6_addr)); 5262 stc.laddr_type = SCTP_IPV6_ADDRESS; 5263 break; 5264 #endif 5265 } 5266 } 5267 /* Now lets put the SCTP header in place */ 5268 initack = mtod(m, struct sctp_init_ack_chunk *); 5269 /* Save it off for quick ref */ 5270 stc.peers_vtag = init_chk->init.initiate_tag; 5271 /* who are we */ 5272 memcpy(stc.identification, SCTP_VERSION_STRING, 5273 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification))); 5274 /* now the chunk header */ 5275 initack->ch.chunk_type = SCTP_INITIATION_ACK; 5276 initack->ch.chunk_flags = 0; 5277 /* fill in later from mbuf we build */ 5278 initack->ch.chunk_length = 0; 5279 /* place in my tag */ 5280 if ((asoc != NULL) && 5281 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 5282 (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) || 5283 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) { 5284 /* re-use the v-tags and init-seq here */ 5285 initack->init.initiate_tag = htonl(asoc->my_vtag); 5286 initack->init.initial_tsn = htonl(asoc->init_seq_number); 5287 } else { 5288 uint32_t vtag, itsn; 5289 5290 if (hold_inp_lock) { 5291 SCTP_INP_INCR_REF(inp); 5292 SCTP_INP_RUNLOCK(inp); 5293 } 5294 if (asoc) { 5295 atomic_add_int(&asoc->refcnt, 1); 5296 SCTP_TCB_UNLOCK(stcb); 5297 new_tag: 5298 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1); 5299 if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) { 5300 /* 5301 * Got a duplicate vtag on some guy behind a 5302 * nat make sure we don't use it. 
5303 */ 5304 goto new_tag; 5305 } 5306 initack->init.initiate_tag = htonl(vtag); 5307 /* get a TSN to use too */ 5308 itsn = sctp_select_initial_TSN(&inp->sctp_ep); 5309 initack->init.initial_tsn = htonl(itsn); 5310 SCTP_TCB_LOCK(stcb); 5311 atomic_add_int(&asoc->refcnt, -1); 5312 } else { 5313 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1); 5314 initack->init.initiate_tag = htonl(vtag); 5315 /* get a TSN to use too */ 5316 initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep)); 5317 } 5318 if (hold_inp_lock) { 5319 SCTP_INP_RLOCK(inp); 5320 SCTP_INP_DECR_REF(inp); 5321 } 5322 } 5323 /* save away my tag to */ 5324 stc.my_vtag = initack->init.initiate_tag; 5325 5326 /* set up some of the credits. */ 5327 so = inp->sctp_socket; 5328 if (so == NULL) { 5329 /* memory problem */ 5330 sctp_m_freem(m); 5331 return; 5332 } else { 5333 initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND)); 5334 } 5335 /* set what I want */ 5336 his_limit = ntohs(init_chk->init.num_inbound_streams); 5337 /* choose what I want */ 5338 if (asoc != NULL) { 5339 if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) { 5340 i_want = asoc->streamoutcnt; 5341 } else { 5342 i_want = inp->sctp_ep.pre_open_stream_count; 5343 } 5344 } else { 5345 i_want = inp->sctp_ep.pre_open_stream_count; 5346 } 5347 if (his_limit < i_want) { 5348 /* I Want more :< */ 5349 initack->init.num_outbound_streams = init_chk->init.num_inbound_streams; 5350 } else { 5351 /* I can have what I want :> */ 5352 initack->init.num_outbound_streams = htons(i_want); 5353 } 5354 /* tell him his limt. 
*/ 5355 initack->init.num_inbound_streams = 5356 htons(inp->sctp_ep.max_open_streams_intome); 5357 5358 /* adaptation layer indication parameter */ 5359 ali = (struct sctp_adaptation_layer_indication *)((caddr_t)initack + sizeof(*initack)); 5360 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION); 5361 ali->ph.param_length = htons(sizeof(*ali)); 5362 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator); 5363 SCTP_BUF_LEN(m) += sizeof(*ali); 5364 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali)); 5365 5366 /* ECN parameter */ 5367 if (((asoc != NULL) && (asoc->ecn_allowed == 1)) || 5368 (inp->sctp_ecn_enable == 1)) { 5369 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE); 5370 ecn->ph.param_length = htons(sizeof(*ecn)); 5371 SCTP_BUF_LEN(m) += sizeof(*ecn); 5372 5373 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn + 5374 sizeof(*ecn)); 5375 } else { 5376 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn); 5377 } 5378 /* And now tell the peer we do pr-sctp */ 5379 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED); 5380 prsctp->ph.param_length = htons(sizeof(*prsctp)); 5381 SCTP_BUF_LEN(m) += sizeof(*prsctp); 5382 if (nat_friendly) { 5383 /* Add NAT friendly parameter */ 5384 struct sctp_paramhdr *ph; 5385 5386 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 5387 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT); 5388 ph->param_length = htons(sizeof(struct sctp_paramhdr)); 5389 SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr); 5390 } 5391 /* And now tell the peer we do all the extensions */ 5392 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 5393 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT); 5394 num_ext = 0; 5395 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF; 5396 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK; 5397 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN; 5398 pr_supported->chunk_types[num_ext++] = 
SCTP_PACKET_DROPPED; 5399 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET; 5400 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) 5401 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION; 5402 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off)) 5403 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK; 5404 p_len = sizeof(*pr_supported) + num_ext; 5405 pr_supported->ph.param_length = htons(p_len); 5406 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len); 5407 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 5408 5409 /* add authentication parameters */ 5410 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) { 5411 struct sctp_auth_random *randp; 5412 struct sctp_auth_hmac_algo *hmacs; 5413 struct sctp_auth_chunk_list *chunks; 5414 uint16_t random_len; 5415 5416 /* generate and add RANDOM parameter */ 5417 random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT; 5418 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 5419 randp->ph.param_type = htons(SCTP_RANDOM); 5420 p_len = sizeof(*randp) + random_len; 5421 randp->ph.param_length = htons(p_len); 5422 SCTP_READ_RANDOM(randp->random_data, random_len); 5423 /* zero out any padding required */ 5424 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len); 5425 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 5426 5427 /* add HMAC_ALGO parameter */ 5428 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 5429 p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs, 5430 (uint8_t *) hmacs->hmac_ids); 5431 if (p_len > 0) { 5432 p_len += sizeof(*hmacs); 5433 hmacs->ph.param_type = htons(SCTP_HMAC_LIST); 5434 hmacs->ph.param_length = htons(p_len); 5435 /* zero out any padding required */ 5436 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len); 5437 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 5438 } 5439 /* add CHUNKS parameter */ 5440 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 5441 p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks, 5442 
chunks->chunk_types); 5443 if (p_len > 0) { 5444 p_len += sizeof(*chunks); 5445 chunks->ph.param_type = htons(SCTP_CHUNK_LIST); 5446 chunks->ph.param_length = htons(p_len); 5447 /* zero out any padding required */ 5448 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len); 5449 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 5450 } 5451 } 5452 m_at = m; 5453 /* now the addresses */ 5454 { 5455 struct sctp_scoping scp; 5456 5457 /* 5458 * To optimize this we could put the scoping stuff into a 5459 * structure and remove the individual uint8's from the stc 5460 * structure. Then we could just sifa in the address within 5461 * the stc.. but for now this is a quick hack to get the 5462 * address stuff teased apart. 5463 */ 5464 scp.ipv4_addr_legal = stc.ipv4_addr_legal; 5465 scp.ipv6_addr_legal = stc.ipv6_addr_legal; 5466 scp.loopback_scope = stc.loopback_scope; 5467 scp.ipv4_local_scope = stc.ipv4_scope; 5468 scp.local_scope = stc.local_scope; 5469 scp.site_scope = stc.site_scope; 5470 m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to); 5471 } 5472 5473 /* tack on the operational error if present */ 5474 if (op_err) { 5475 struct mbuf *ol; 5476 int llen; 5477 5478 llen = 0; 5479 ol = op_err; 5480 while (ol) { 5481 llen += SCTP_BUF_LEN(ol); 5482 ol = SCTP_BUF_NEXT(ol); 5483 } 5484 if (llen % 4) { 5485 /* must add a pad to the param */ 5486 uint32_t cpthis = 0; 5487 int padlen; 5488 5489 padlen = 4 - (llen % 4); 5490 m_copyback(op_err, llen, padlen, (caddr_t)&cpthis); 5491 } 5492 while (SCTP_BUF_NEXT(m_at) != NULL) { 5493 m_at = SCTP_BUF_NEXT(m_at); 5494 } 5495 SCTP_BUF_NEXT(m_at) = op_err; 5496 while (SCTP_BUF_NEXT(m_at) != NULL) { 5497 m_at = SCTP_BUF_NEXT(m_at); 5498 } 5499 } 5500 /* pre-calulate the size and update pkt header and chunk header */ 5501 p_len = 0; 5502 for (m_tmp = m; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) { 5503 p_len += SCTP_BUF_LEN(m_tmp); 5504 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5505 /* m_tmp should now point to last one */ 5506 break; 5507 
} 5508 } 5509 5510 /* Now we must build a cookie */ 5511 m_cookie = sctp_add_cookie(inp, init_pkt, offset, m, 0, &stc, &signature); 5512 if (m_cookie == NULL) { 5513 /* memory problem */ 5514 sctp_m_freem(m); 5515 return; 5516 } 5517 /* Now append the cookie to the end and update the space/size */ 5518 SCTP_BUF_NEXT(m_tmp) = m_cookie; 5519 5520 for (m_tmp = m_cookie; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) { 5521 p_len += SCTP_BUF_LEN(m_tmp); 5522 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5523 /* m_tmp should now point to last one */ 5524 mp_last = m_tmp; 5525 break; 5526 } 5527 } 5528 /* 5529 * Place in the size, but we don't include the last pad (if any) in 5530 * the INIT-ACK. 5531 */ 5532 initack->ch.chunk_length = htons(p_len); 5533 5534 /* 5535 * Time to sign the cookie, we don't sign over the cookie signature 5536 * though thus we set trailer. 5537 */ 5538 (void)sctp_hmac_m(SCTP_HMAC, 5539 (uint8_t *) inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)], 5540 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr), 5541 (uint8_t *) signature, SCTP_SIGNATURE_SIZE); 5542 /* 5543 * We sifa 0 here to NOT set IP_DF if its IPv4, we ignore the return 5544 * here since the timer will drive a retranmission. 
5545 */ 5546 padval = p_len % 4; 5547 if ((padval) && (mp_last)) { 5548 /* see my previous comments on mp_last */ 5549 int ret; 5550 5551 ret = sctp_add_pad_tombuf(mp_last, (4 - padval)); 5552 if (ret) { 5553 /* Houston we have a problem, no space */ 5554 sctp_m_freem(m); 5555 return; 5556 } 5557 p_len += padval; 5558 } 5559 if (stc.loopback_scope) { 5560 over_addr = &store1; 5561 } else { 5562 over_addr = NULL; 5563 } 5564 5565 (void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0, 5566 0, NULL, 0, 5567 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag, 5568 port, SCTP_SO_NOT_LOCKED, over_addr, init_pkt); 5569 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 5570 } 5571 5572 5573 static void 5574 sctp_prune_prsctp(struct sctp_tcb *stcb, 5575 struct sctp_association *asoc, 5576 struct sctp_sndrcvinfo *srcv, 5577 int dataout) 5578 { 5579 int freed_spc = 0; 5580 struct sctp_tmit_chunk *chk, *nchk; 5581 5582 SCTP_TCB_LOCK_ASSERT(stcb); 5583 if ((asoc->peer_supports_prsctp) && 5584 (asoc->sent_queue_cnt_removeable > 0)) { 5585 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 5586 /* 5587 * Look for chunks marked with the PR_SCTP flag AND 5588 * the buffer space flag. If the one being sent is 5589 * equal or greater priority then purge the old one 5590 * and free some space. 5591 */ 5592 if (PR_SCTP_BUF_ENABLED(chk->flags)) { 5593 /* 5594 * This one is PR-SCTP AND buffer space 5595 * limited type 5596 */ 5597 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) { 5598 /* 5599 * Lower numbers equates to higher 5600 * priority so if the one we are 5601 * looking at has a larger or equal 5602 * priority we want to drop the data 5603 * and NOT retransmit it. 
5604 */ 5605 if (chk->data) { 5606 /* 5607 * We release the book_size 5608 * if the mbuf is here 5609 */ 5610 int ret_spc; 5611 int cause; 5612 5613 if (chk->sent > SCTP_DATAGRAM_UNSENT) 5614 cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT; 5615 else 5616 cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT; 5617 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk, 5618 cause, 5619 SCTP_SO_LOCKED); 5620 freed_spc += ret_spc; 5621 if (freed_spc >= dataout) { 5622 return; 5623 } 5624 } /* if chunk was present */ 5625 } /* if of sufficent priority */ 5626 } /* if chunk has enabled */ 5627 } /* tailqforeach */ 5628 5629 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 5630 /* Here we must move to the sent queue and mark */ 5631 if (PR_SCTP_BUF_ENABLED(chk->flags)) { 5632 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) { 5633 if (chk->data) { 5634 /* 5635 * We release the book_size 5636 * if the mbuf is here 5637 */ 5638 int ret_spc; 5639 5640 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk, 5641 SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT, 5642 SCTP_SO_LOCKED); 5643 5644 freed_spc += ret_spc; 5645 if (freed_spc >= dataout) { 5646 return; 5647 } 5648 } /* end if chk->data */ 5649 } /* end if right class */ 5650 } /* end if chk pr-sctp */ 5651 } /* tailqforeachsafe (chk) */ 5652 } /* if enabled in asoc */ 5653 } 5654 5655 int 5656 sctp_get_frag_point(struct sctp_tcb *stcb, 5657 struct sctp_association *asoc) 5658 { 5659 int siz, ovh; 5660 5661 /* 5662 * For endpoints that have both v6 and v4 addresses we must reserve 5663 * room for the ipv6 header, for those that are only dealing with V4 5664 * we use a larger frag point. 
5665 */ 5666 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 5667 ovh = SCTP_MED_OVERHEAD; 5668 } else { 5669 ovh = SCTP_MED_V4_OVERHEAD; 5670 } 5671 5672 if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu) 5673 siz = asoc->smallest_mtu - ovh; 5674 else 5675 siz = (stcb->asoc.sctp_frag_point - ovh); 5676 /* 5677 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) { 5678 */ 5679 /* A data chunk MUST fit in a cluster */ 5680 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */ 5681 /* } */ 5682 5683 /* adjust for an AUTH chunk if DATA requires auth */ 5684 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) 5685 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 5686 5687 if (siz % 4) { 5688 /* make it an even word boundary please */ 5689 siz -= (siz % 4); 5690 } 5691 return (siz); 5692 } 5693 5694 static void 5695 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp) 5696 { 5697 sp->pr_sctp_on = 0; 5698 /* 5699 * We assume that the user wants PR_SCTP_TTL if the user provides a 5700 * positive lifetime but does not specify any PR_SCTP policy. This 5701 * is a BAD assumption and causes problems at least with the 5702 * U-Vancovers MPI folks. I will change this to be no policy means 5703 * NO PR-SCTP. 5704 */ 5705 if (PR_SCTP_ENABLED(sp->sinfo_flags)) { 5706 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags); 5707 sp->pr_sctp_on = 1; 5708 } else { 5709 return; 5710 } 5711 switch (PR_SCTP_POLICY(sp->sinfo_flags)) { 5712 case CHUNK_FLAGS_PR_SCTP_BUF: 5713 /* 5714 * Time to live is a priority stored in tv_sec when doing 5715 * the buffer drop thing. 
		 */
		sp->ts.tv_sec = sp->timetolive;
		sp->ts.tv_usec = 0;
		break;
	case CHUNK_FLAGS_PR_SCTP_TTL:
		{
			struct timeval tv;

			/* expiry = now + timetolive (milliseconds) */
			(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
			tv.tv_sec = sp->timetolive / 1000;
			tv.tv_usec = (sp->timetolive * 1000) % 1000000;
			/*
			 * TODO sctp_constants.h needs alternative time
			 * macros when _KERNEL is undefined.
			 */
			timevaladd(&sp->ts, &tv);
		}
		break;
	case CHUNK_FLAGS_PR_SCTP_RTX:
		/*
		 * Time to live is a the number or retransmissions stored in
		 * tv_sec.
		 */
		sp->ts.tv_sec = sp->timetolive;
		sp->ts.tv_usec = 0;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_USRREQ1,
		    "Unknown PR_SCTP policy %u.\n",
		    PR_SCTP_POLICY(sp->sinfo_flags));
		break;
	}
}

/*
 * Wrap an mbuf chain in a stream-queue-pending entry and append it to the
 * outqueue of the stream named by srcv->sinfo_stream, charging the send
 * socket buffer.  On success ownership of 'm' transfers to the queue; on
 * any error the chain is freed here.  Returns 0 or an errno value.
 */
static int
sctp_msg_append(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct mbuf *m,
    struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
{
	int error = 0, holds_lock;
	struct mbuf *at;
	struct sctp_stream_queue_pending *sp = NULL;
	struct sctp_stream_out *strm;

	/*
	 * Given an mbuf chain, put it into the association send queue and
	 * place it on the wheel
	 */
	/*
	 * NOTE(review): holds_lock is assigned but never read in this
	 * function — presumably vestigial; verify before removing.
	 */
	holds_lock = hold_stcb_lock;
	if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
		/* Invalid stream number */
		SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		error = EINVAL;
		goto out_now;
	}
	/* a partially-delivered message locks the stream to other senders */
	if ((stcb->asoc.stream_locked) &&
	    (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
		SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		error = EINVAL;
		goto out_now;
	}
	strm = &stcb->asoc.strmout[srcv->sinfo_stream];
	/* Now can we send this? */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
		/* got data while shutting down */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
		error = ECONNRESET;
		goto out_now;
	}
	sctp_alloc_a_strmoq(stcb, sp);
	if (sp == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		error = ENOMEM;
		goto out_now;
	}
	sp->sinfo_flags = srcv->sinfo_flags;
	sp->timetolive = srcv->sinfo_timetolive;
	sp->ppid = srcv->sinfo_ppid;
	sp->context = srcv->sinfo_context;
	sp->strseq = 0;
	if (sp->sinfo_flags & SCTP_ADDR_OVER) {
		/* pin the message to the caller-specified destination */
		sp->net = net;
		atomic_add_int(&sp->net->ref_count, 1);
	} else {
		sp->net = NULL;
	}
	(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
	sp->stream = srcv->sinfo_stream;
	sp->msg_is_complete = 1;
	sp->sender_all_done = 1;
	sp->some_taken = 0;
	sp->data = m;
	sp->tail_mbuf = NULL;
	sp->length = 0;
	at = m;
	sctp_set_prsctp_policy(sp);
	/*
	 * We could in theory (for sendall) sifa the length in, but we would
	 * still have to hunt through the chain since we need to setup the
	 * tail_mbuf
	 */
	while (at) {
		if (SCTP_BUF_NEXT(at) == NULL)
			sp->tail_mbuf = at;
		sp->length += SCTP_BUF_LEN(at);
		at = SCTP_BUF_NEXT(at);
	}
	SCTP_TCB_SEND_LOCK(stcb);
	sctp_snd_sb_alloc(stcb, sp->length);
	atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
	TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
	if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
		/* ordered delivery: assign the next stream sequence number */
		sp->strseq = strm->next_sequence_sent;
		strm->next_sequence_sent++;
	}
	stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
	/* queue owns the chain now; don't free it below */
	m = NULL;
	SCTP_TCB_SEND_UNLOCK(stcb);
out_now:
	if (m) {
		sctp_m_freem(m);
	}
	return (error);
}


/*
 * Append the data described by 'clonechain' to 'outchain' (or start a new
 * chain), either by taking the mbufs, copying the bytes into trailing space,
 * or duplicating by reference, depending on can_take_mbuf/copy_by_ref and
 * the copy threshold.  *endofchain tracks the last mbuf for fast appends.
 */
static struct mbuf *
sctp_copy_mbufchain(struct mbuf *clonechain,
    struct mbuf *outchain,
    struct mbuf **endofchain,
    int can_take_mbuf,
    int sizeofcpy,
    uint8_t copy_by_ref)
{
	struct mbuf *m;
	struct mbuf *appendchain;
	caddr_t cp;
	int len;

	if (endofchain == NULL) {
		/* error */
error_out:
		if (outchain)
			sctp_m_freem(outchain);
		return (NULL);
	}
	if (can_take_mbuf) {
		appendchain = clonechain;
	} else {
		if (!copy_by_ref &&
		    (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN))
		    ) {
			/* Its not in a cluster */
			if (*endofchain == NULL) {
				/* lets get a mbuf cluster */
				if (outchain == NULL) {
					/* This is the general case */
			new_mbuf:
					outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
					if (outchain == NULL) {
						goto error_out;
					}
					SCTP_BUF_LEN(outchain) = 0;
					*endofchain = outchain;
					/* get the prepend space */
					SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4));
				} else {
					/*
					 * We really should not get a NULL
					 * in endofchain
					 */
					/* find end */
					m = outchain;
					while (m) {
						if (SCTP_BUF_NEXT(m) == NULL) {
							*endofchain = m;
							break;
						}
						m = SCTP_BUF_NEXT(m);
					}
					/* sanity */
					if (*endofchain == NULL) {
						/*
						 * huh, TSNH XXX maybe we
						 * should panic
						 */
						sctp_m_freem(outchain);
						goto new_mbuf;
					}
				}
				/* get the new end of length */
				len = M_TRAILINGSPACE(*endofchain);
			} else {
				/* how much is left at the end?
				 */
				len = M_TRAILINGSPACE(*endofchain);
			}
			/* Find the end of the data, for appending */
			cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain)));

			/* Now lets copy it out */
			if (len >= sizeofcpy) {
				/* It all fits, copy it in */
				m_copydata(clonechain, 0, sizeofcpy, cp);
				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
			} else {
				/* fill up the end of the chain */
				if (len > 0) {
					m_copydata(clonechain, 0, len, cp);
					SCTP_BUF_LEN((*endofchain)) += len;
					/* now we need another one */
					sizeofcpy -= len;
				}
				m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
				if (m == NULL) {
					/* We failed */
					goto error_out;
				}
				SCTP_BUF_NEXT((*endofchain)) = m;
				*endofchain = m;
				cp = mtod((*endofchain), caddr_t);
				m_copydata(clonechain, len, sizeofcpy, cp);
				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
			}
			return (outchain);
		} else {
			/* copy the old fashion way */
			appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_DONTWAIT);
#ifdef SCTP_MBUF_LOGGING
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
				struct mbuf *mat;

				mat = appendchain;
				while (mat) {
					if (SCTP_BUF_IS_EXTENDED(mat)) {
						sctp_log_mb(mat, SCTP_MBUF_ICOPY);
					}
					mat = SCTP_BUF_NEXT(mat);
				}
			}
#endif
		}
	}
	if (appendchain == NULL) {
		/* error */
		if (outchain)
			sctp_m_freem(outchain);
		return (NULL);
	}
	if (outchain) {
		/* tack on to the end */
		if (*endofchain != NULL) {
			SCTP_BUF_NEXT(((*endofchain))) = appendchain;
		} else {
			m = outchain;
			while (m) {
				if (SCTP_BUF_NEXT(m) == NULL) {
					SCTP_BUF_NEXT(m) = appendchain;
					break;
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		/*
		 * save off the end and update the end-chain postion
		 */
		m = appendchain;
		while (m) {
			if (SCTP_BUF_NEXT(m) == NULL) {
				*endofchain = m;
				break;
			}
			m = SCTP_BUF_NEXT(m);
		}
		return (outchain);
	} else {
		/* save off the end and update the end-chain postion */
		m = appendchain;
		while (m) {
			if (SCTP_BUF_NEXT(m) == NULL) {
				*endofchain = m;
				break;
			}
			m = SCTP_BUF_NEXT(m);
		}
		return (appendchain);
	}
}

/* Forward declaration; the definition appears later in this file. */
int
sctp_med_chunk_output(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int *num_out,
    int *reason_code,
    int control_only, int from_where,
    struct timeval *now, int *now_filled, int frag_point, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
);

/*
 * Per-association callback for the sendall iterator: deliver (a copy of)
 * the pending "send to all" message held in the sctp_copy_all cookie to
 * this TCB, honoring SCTP_ABORT and SCTP_EOF flags, then kick output.
 */
static void
sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
    uint32_t val)
{
	struct sctp_copy_all *ca;
	struct mbuf *m;
	int ret = 0;
	int added_control = 0;
	int un_sent, do_chunk_output = 1;
	struct sctp_association *asoc;

	ca = (struct sctp_copy_all *)ptr;
	if (ca->m == NULL) {
		return;
	}
	if (ca->inp != inp) {
		/* TSNH */
		return;
	}
	if ((ca->m) && ca->sndlen) {
		/* each association gets its own copy of the user data */
		m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_DONTWAIT);
		if (m == NULL) {
			/* can't copy so we are done */
			ca->cnt_failed++;
			return;
		}
#ifdef SCTP_MBUF_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
			struct mbuf *mat;

			mat = m;
			while (mat) {
				if (SCTP_BUF_IS_EXTENDED(mat)) {
					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
				}
				mat = SCTP_BUF_NEXT(mat);
			}
		}
#endif
	} else {
		m = NULL;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
		/* Abort this assoc with m as the user defined reason */
		if (m) {
			struct sctp_paramhdr *ph;

			SCTP_BUF_PREPEND(m, sizeof(struct
			    sctp_paramhdr), M_DONTWAIT);
			if (m) {
				/* wrap the user data as a USER_INITIATED_ABT cause */
				ph = mtod(m, struct sctp_paramhdr *);
				ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
				ph->param_length = htons(ca->sndlen);
			}
			/*
			 * We add one here to keep the assoc from
			 * dis-appearing on us.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			sctp_abort_an_association(inp, stcb,
			    SCTP_RESPONSE_TO_USER_REQ,
			    m, SCTP_SO_NOT_LOCKED);
			/*
			 * sctp_abort_an_association calls sctp_free_asoc()
			 * free association will NOT free it since we
			 * incremented the refcnt .. we do this to prevent
			 * it being freed and things getting tricky since we
			 * could end up (from free_asoc) calling inpcb_free
			 * which would get a recursive lock call to the
			 * iterator lock.. But as a consequence of that the
			 * stcb will return to us un-locked.. since
			 * free_asoc returns with either no TCB or the TCB
			 * unlocked, we must relock.. to unlock in the
			 * iterator timer :-0
			 */
			SCTP_TCB_LOCK(stcb);
			atomic_add_int(&stcb->asoc.refcnt, -1);
			goto no_chunk_output;
		}
	} else {
		if (m) {
			ret = sctp_msg_append(stcb, stcb->asoc.primary_destination, m,
			    &ca->sndrcv, 1);
		}
		asoc = &stcb->asoc;
		if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
			/* shutdown this assoc */
			int cnt;

			cnt = sctp_is_there_unsent_data(stcb);

			if (TAILQ_EMPTY(&asoc->send_queue) &&
			    TAILQ_EMPTY(&asoc->sent_queue) &&
			    (cnt == 0)) {
				if (asoc->locked_on_sending) {
					goto abort_anyway;
				}
				/*
				 * there is nothing queued to send, so I'm
				 * done...
				 */
				if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
					/*
					 * only send SHUTDOWN the first time
					 * through
					 */
					sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
					if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
					}
					SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
					SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
					    asoc->primary_destination);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
					    asoc->primary_destination);
					added_control = 1;
					do_chunk_output = 0;
				}
			} else {
				/*
				 * we still got (or just got) data to send,
				 * so set SHUTDOWN_PENDING
				 */
				/*
				 * XXX sockets draft says that SCTP_EOF
				 * should be sent with no data. currently,
				 * we will allow user data to be sent first
				 * and move to SHUTDOWN-PENDING
				 */
				if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
					if (asoc->locked_on_sending) {
						/*
						 * Locked to send out the
						 * data
						 */
						struct sctp_stream_queue_pending *sp;

						sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
						if (sp) {
							if ((sp->length == 0) && (sp->msg_is_complete == 0))
								asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
						}
					}
					asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
					if (TAILQ_EMPTY(&asoc->send_queue) &&
					    TAILQ_EMPTY(&asoc->sent_queue) &&
					    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
				abort_anyway:
						/* hold a ref across the abort; see comment above */
						atomic_add_int(&stcb->asoc.refcnt, 1);
						sctp_abort_an_association(stcb->sctp_ep, stcb,
						    SCTP_RESPONSE_TO_USER_REQ,
						    NULL, SCTP_SO_NOT_LOCKED);
						atomic_add_int(&stcb->asoc.refcnt, -1);
						goto no_chunk_output;
					}
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
					    asoc->primary_destination);
				}
			}

		}
	}
	un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
	    (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));

	/*
	 * Nagle-style check: with NODELAY off, data in flight, and less
	 * than an MTU's worth queued, hold off on sending for now.
	 */
	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
	    (stcb->asoc.total_flight > 0) &&
	    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
	    ) {
		do_chunk_output = 0;
	}
	if (do_chunk_output)
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
	else if (added_control) {
		int num_out = 0, reason = 0, now_filled = 0;
		struct timeval now;
		int frag_point;

		frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
		(void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
		    &reason, 1, 1, &now, &now_filled,
frag_point, SCTP_SO_NOT_LOCKED); 6207 } 6208 no_chunk_output: 6209 if (ret) { 6210 ca->cnt_failed++; 6211 } else { 6212 ca->cnt_sent++; 6213 } 6214 } 6215 6216 static void 6217 sctp_sendall_completes(void *ptr, uint32_t val) 6218 { 6219 struct sctp_copy_all *ca; 6220 6221 ca = (struct sctp_copy_all *)ptr; 6222 /* 6223 * Do a notify here? Kacheong suggests that the notify be done at 6224 * the send time.. so you would push up a notification if any send 6225 * failed. Don't know if this is feasable since the only failures we 6226 * have is "memory" related and if you cannot get an mbuf to send 6227 * the data you surely can't get an mbuf to send up to notify the 6228 * user you can't send the data :-> 6229 */ 6230 6231 /* now free everything */ 6232 sctp_m_freem(ca->m); 6233 SCTP_FREE(ca, SCTP_M_COPYAL); 6234 } 6235 6236 6237 #define MC_ALIGN(m, len) do { \ 6238 SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1)); \ 6239 } while (0) 6240 6241 6242 6243 static struct mbuf * 6244 sctp_copy_out_all(struct uio *uio, int len) 6245 { 6246 struct mbuf *ret, *at; 6247 int left, willcpy, cancpy, error; 6248 6249 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAIT, 1, MT_DATA); 6250 if (ret == NULL) { 6251 /* TSNH */ 6252 return (NULL); 6253 } 6254 left = len; 6255 SCTP_BUF_LEN(ret) = 0; 6256 /* save space for the data chunk header */ 6257 cancpy = M_TRAILINGSPACE(ret); 6258 willcpy = min(cancpy, left); 6259 at = ret; 6260 while (left > 0) { 6261 /* Align data to the end */ 6262 error = uiomove(mtod(at, caddr_t), willcpy, uio); 6263 if (error) { 6264 err_out_now: 6265 sctp_m_freem(at); 6266 return (NULL); 6267 } 6268 SCTP_BUF_LEN(at) = willcpy; 6269 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0; 6270 left -= willcpy; 6271 if (left > 0) { 6272 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 1, MT_DATA); 6273 if (SCTP_BUF_NEXT(at) == NULL) { 6274 goto err_out_now; 6275 } 6276 at = SCTP_BUF_NEXT(at); 6277 SCTP_BUF_LEN(at) = 0; 6278 cancpy = M_TRAILINGSPACE(at); 
6279 willcpy = min(cancpy, left); 6280 } 6281 } 6282 return (ret); 6283 } 6284 6285 static int 6286 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m, 6287 struct sctp_sndrcvinfo *srcv) 6288 { 6289 int ret; 6290 struct sctp_copy_all *ca; 6291 6292 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all), 6293 SCTP_M_COPYAL); 6294 if (ca == NULL) { 6295 sctp_m_freem(m); 6296 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 6297 return (ENOMEM); 6298 } 6299 memset(ca, 0, sizeof(struct sctp_copy_all)); 6300 6301 ca->inp = inp; 6302 memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo)); 6303 /* 6304 * take off the sendall flag, it would be bad if we failed to do 6305 * this :-0 6306 */ 6307 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL; 6308 /* get length and mbuf chain */ 6309 if (uio) { 6310 ca->sndlen = uio->uio_resid; 6311 ca->m = sctp_copy_out_all(uio, ca->sndlen); 6312 if (ca->m == NULL) { 6313 SCTP_FREE(ca, SCTP_M_COPYAL); 6314 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 6315 return (ENOMEM); 6316 } 6317 } else { 6318 /* Gather the length of the send */ 6319 struct mbuf *mat; 6320 6321 mat = m; 6322 ca->sndlen = 0; 6323 while (m) { 6324 ca->sndlen += SCTP_BUF_LEN(m); 6325 m = SCTP_BUF_NEXT(m); 6326 } 6327 ca->m = mat; 6328 } 6329 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL, 6330 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES, 6331 SCTP_ASOC_ANY_STATE, 6332 (void *)ca, 0, 6333 sctp_sendall_completes, inp, 1); 6334 if (ret) { 6335 SCTP_PRINTF("Failed to initiate iterator for sendall\n"); 6336 SCTP_FREE(ca, SCTP_M_COPYAL); 6337 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT); 6338 return (EFAULT); 6339 } 6340 return (0); 6341 } 6342 6343 6344 void 6345 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc) 6346 { 6347 struct sctp_tmit_chunk *chk, *nchk; 6348 6349 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, 
nchk) {
		if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			asoc->ctrl_queue_cnt--;
			sctp_free_a_chunk(stcb, chk);
		}
	}
}

/*
 * Drop ASCONF chunks from the asconf send queue whose serial numbers have
 * already been acknowledged (serial <= asconf_seq_out_acked).  The scan
 * stops at the first un-acked ASCONF since serials are queued in order.
 */
void
sctp_toss_old_asconf(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_asconf_chunk *acp;

	asoc = &stcb->asoc;
	TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
		/* find SCTP_ASCONF chunk in queue */
		if (chk->rec.chunk_id.id == SCTP_ASCONF) {
			if (chk->data) {
				acp = mtod(chk->data, struct sctp_asconf_chunk *);
				if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
					/* Not Acked yet */
					break;
				}
			}
			TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			asoc->ctrl_queue_cnt--;
			sctp_free_a_chunk(stcb, chk);
		}
	}
}


/*
 * Move the 'bundle_at' chunks in data_list[] from the send queue to the
 * sent queue (keeping the sent queue TSN-ordered), mark them as sent to
 * 'net', update flight size / peer rwnd accounting, and finally notify
 * the congestion-control module that a packet was transmitted.
 */
static void
sctp_clean_up_datalist(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk **data_list,
    int bundle_at,
    struct sctp_nets *net)
{
	int i;
	struct sctp_tmit_chunk *tp1;

	for (i = 0; i < bundle_at; i++) {
		/* off of the send queue */
		TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
		asoc->send_queue_cnt--;
		if (i > 0) {
			/*
			 * Any chunk NOT 0 you zap the time chunk 0 gets
			 * zapped or set based on if a RTO measurment is
			 * needed.
			 */
			data_list[i]->do_rtt = 0;
		}
		/* record time */
		data_list[i]->sent_rcv_time = net->last_sent_time;
		data_list[i]->rec.data.cwnd_at_send = net->cwnd;
		data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
		if (data_list[i]->whoTo == NULL) {
			/* no destination yet: bind to this net, take a ref */
			data_list[i]->whoTo = net;
			atomic_add_int(&net->ref_count, 1);
		}
		/* on to the sent queue */
		tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
		if ((tp1) && SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
			struct sctp_tmit_chunk *tpp;

			/* need to move back */
	back_up_more:
			/* walk backwards until we find a TSN <= ours */
			tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
			if (tpp == NULL) {
				TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
				goto all_done;
			}
			tp1 = tpp;
			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
				goto back_up_more;
			}
			TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
		} else {
			/* common case: our TSN is the largest, append */
			TAILQ_INSERT_TAIL(&asoc->sent_queue,
			    data_list[i],
			    sctp_next);
		}
all_done:
		/* This does not lower until the cum-ack passes it */
		asoc->sent_queue_cnt++;
		if ((asoc->peers_rwnd <= 0) &&
		    (asoc->total_flight == 0) &&
		    (bundle_at == 1)) {
			/* Mark the chunk as being a window probe */
			SCTP_STAT_INCR(sctps_windowprobed);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_audit_log(0xC2, 3);
#endif
		data_list[i]->sent = SCTP_DATAGRAM_SENT;
		data_list[i]->snd_count = 1;
		data_list[i]->rec.data.chunk_was_revoked = 0;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
			sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
			    data_list[i]->whoTo->flight_size,
			    data_list[i]->book_size,
			    (uintptr_t) data_list[i]->whoTo,
			    data_list[i]->rec.data.TSN_seq);
		}
		sctp_flight_size_increase(data_list[i]);
		sctp_total_flight_increase(stcb, data_list[i]);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
			sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
			    asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
		}
		/* charge the chunk (plus configured overhead) to the peer rwnd */
		asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
		    (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
			/* SWS sender side engages */
			asoc->peers_rwnd = 0;
		}
	}
	if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
		(*asoc->cc_functions.sctp_cwnd_update_packet_transmitted) (stcb, net);
	}
}

/*
 * Purge one-shot ("stray") control chunks left on the control send queue:
 * SACKs, heartbeats, shutdown-related chunks, errors, cookie-acks, CWRs,
 * ASCONF-ACKs and FORWARD-TSNs, plus any STREAM-RESET chunk that is no
 * longer the association's pending str_reset.
 */
static void
sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;

	TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
		if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) ||	/* EY */
		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
		    (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
		    (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
		    (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
			/* Stray chunks must be cleaned up */
	clean_up_anyway:
			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			asoc->ctrl_queue_cnt--;
			if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)
				asoc->fwd_tsn_cnt--;
			sctp_free_a_chunk(stcb, chk);
		} else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
			/* special handling, we must look into the param */
			if
(chk != asoc->str_reset) {
				/* not the pending reset request: treat as stray */
				goto clean_up_anyway;
			}
		}
	}
}


/*
 * Decide whether (and how many bytes of) an *incomplete* message may be
 * split off for transmission.  Returns the number of bytes to take, or 0
 * to wait for more data.
 *
 * length     - bytes currently queued for the message
 * goal_mtu   - space remaining in the packet being built
 * frag_point - the association's fragmentation threshold
 * eeor_on    - non-zero when explicit-EOR mode is active
 */
static int
sctp_can_we_split_this(struct sctp_tcb *stcb,
    uint32_t length,
    uint32_t goal_mtu, uint32_t frag_point, int eeor_on)
{
	/*
	 * Make a decision on if I should split a msg into multiple parts.
	 * This is only asked of incomplete messages.
	 */
	if (eeor_on) {
		/*
		 * If we are doing EEOR we need to always send it if its the
		 * entire thing, since it might be all the guy is putting in
		 * the hopper.
		 */
		if (goal_mtu >= length) {
			/*-
			 * If we have data outstanding,
			 * we get another chance when the sack
			 * arrives to transmit - wait for more data
			 */
			if (stcb->asoc.total_flight == 0) {
				/*
				 * If nothing is in flight, we zero the
				 * packet counter.
				 */
				return (length);
			}
			return (0);

		} else {
			/* You can fill the rest */
			return (goal_mtu);
		}
	}
	/*-
	 * For those strange folk that make the send buffer
	 * smaller than our fragmentation point, we can't
	 * get a full msg in so we have to allow splitting.
	 */
	if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
		return (length);
	}
	if ((length <= goal_mtu) ||
	    ((length - goal_mtu) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
		/* Sub-optimial residual don't split in non-eeor mode. */
		return (0);
	}
	/*
	 * If we reach here length is larger than the goal_mtu. Do we wish
	 * to split it for the sake of packet putting together?
*/
	if (goal_mtu >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
		/* Its ok to split it */
		return (min(goal_mtu, frag_point));
	}
	/* Nope, can't split */
	return (0);

}

/*
 * Move data from stream queue 'strq' onto the association's send_queue as
 * a single DATA chunk, assigning a TSN and building the chunk header in
 * place.  Handles whole-message stealing versus partial copy (fragmenting),
 * deferred cleanup of completed messages, and discard_rest processing.
 *
 * Output flags for the caller:
 *   *locked - caller must stay on this stream (message incomplete)
 *   *giveup - nothing more can be taken from this stream now
 *   *bail   - an allocation failed; stop filling this packet
 *
 * Returns the number of payload bytes moved (0 if nothing was moved).
 * Called with the TCB lock held; takes/drops the TCB send lock internally
 * as needed (tracked by send_lock_up, always released at out_of).
 */
static uint32_t
sctp_move_to_outqueue(struct sctp_tcb *stcb,
    struct sctp_stream_out *strq,
    uint32_t goal_mtu,
    uint32_t frag_point,
    int *locked,
    int *giveup,
    int eeor_mode,
    int *bail)
{
	/* Move from the stream to the send_queue keeping track of the total */
	struct sctp_association *asoc;
	struct sctp_stream_queue_pending *sp;
	struct sctp_tmit_chunk *chk;
	struct sctp_data_chunk *dchkh;
	uint32_t to_move, length;
	uint8_t rcv_flags = 0;
	uint8_t some_taken;
	uint8_t send_lock_up = 0;

	SCTP_TCB_LOCK_ASSERT(stcb);
	asoc = &stcb->asoc;
one_more_time:
	/* sa_ignore FREED_MEMORY */
	sp = TAILQ_FIRST(&strq->outqueue);
	if (sp == NULL) {
		*locked = 0;
		if (send_lock_up == 0) {
			SCTP_TCB_SEND_LOCK(stcb);
			send_lock_up = 1;
		}
		/* re-check under the send lock; a sender may have raced us */
		sp = TAILQ_FIRST(&strq->outqueue);
		if (sp) {
			goto one_more_time;
		}
		if (strq->last_msg_incomplete) {
			SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
			    strq->stream_no,
			    strq->last_msg_incomplete);
			strq->last_msg_incomplete = 0;
		}
		to_move = 0;
		if (send_lock_up) {
			SCTP_TCB_SEND_UNLOCK(stcb);
			send_lock_up = 0;
		}
		goto out_of;
	}
	if ((sp->msg_is_complete) && (sp->length == 0)) {
		if (sp->sender_all_done) {
			/*
			 * We are doing differed cleanup. Last time through
			 * when we took all the data the sender_all_done was
			 * not set.
			 */
			if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
				SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
				SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
				    sp->sender_all_done,
				    sp->length,
				    sp->msg_is_complete,
				    sp->put_last_out,
				    send_lock_up);
			}
			if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
				SCTP_TCB_SEND_LOCK(stcb);
				send_lock_up = 1;
			}
			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
			TAILQ_REMOVE(&strq->outqueue, sp, next);
			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			if (sp->data) {
				sctp_m_freem(sp->data);
				sp->data = NULL;
			}
			sctp_free_a_strmoq(stcb, sp);
			/* we can't be locked to it */
			*locked = 0;
			stcb->asoc.locked_on_sending = NULL;
			if (send_lock_up) {
				SCTP_TCB_SEND_UNLOCK(stcb);
				send_lock_up = 0;
			}
			/* back to get the next msg */
			goto one_more_time;
		} else {
			/*
			 * sender just finished this but still holds a
			 * reference
			 */
			*locked = 1;
			*giveup = 1;
			to_move = 0;
			goto out_of;
		}
	} else {
		/* is there some to get */
		if (sp->length == 0) {
			/* no */
			*locked = 1;
			*giveup = 1;
			to_move = 0;
			goto out_of;
		} else if (sp->discard_rest) {
			if (send_lock_up == 0) {
				SCTP_TCB_SEND_LOCK(stcb);
				send_lock_up = 1;
			}
			/* Whack down the size */
			atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
			if ((stcb->sctp_socket != NULL) && \
			    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
				atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
			}
			if (sp->data) {
				sctp_m_freem(sp->data);
				sp->data = NULL;
				sp->tail_mbuf = NULL;
			}
			sp->length = 0;
			sp->some_taken = 1;
			*locked = 1;
			*giveup = 1;
			to_move = 0;
			goto out_of;
		}
	}
	some_taken = sp->some_taken;
	if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
		/* socket closed: no more data will arrive, treat as complete */
		sp->msg_is_complete = 1;
	}
re_look:
	length = sp->length;
	if (sp->msg_is_complete) {
		/* The message is complete */
		to_move = min(length, frag_point);
		if (to_move == length) {
			/* All of it fits in the MTU */
			if (sp->some_taken) {
				rcv_flags |= SCTP_DATA_LAST_FRAG;
				sp->put_last_out = 1;
			} else {
				rcv_flags |= SCTP_DATA_NOT_FRAG;
				sp->put_last_out = 1;
			}
		} else {
			/* Not all of it fits, we fragment */
			if (sp->some_taken == 0) {
				rcv_flags |= SCTP_DATA_FIRST_FRAG;
			}
			sp->some_taken = 1;
		}
	} else {
		to_move = sctp_can_we_split_this(stcb, length, goal_mtu, frag_point, eeor_mode);
		if (to_move) {
			/*-
			 * We use a snapshot of length in case it
			 * is expanding during the compare.
			 */
			uint32_t llen;

			llen = length;
			if (to_move >= llen) {
				to_move = llen;
				if (send_lock_up == 0) {
					/*-
					 * We are taking all of an incomplete msg
					 * thus we need a send lock.
					 */
					SCTP_TCB_SEND_LOCK(stcb);
					send_lock_up = 1;
					if (sp->msg_is_complete) {
						/*
						 * the sender finished the
						 * msg
						 */
						goto re_look;
					}
				}
			}
			if (sp->some_taken == 0) {
				rcv_flags |= SCTP_DATA_FIRST_FRAG;
				sp->some_taken = 1;
			}
		} else {
			/* Nothing to take. */
			if (sp->some_taken) {
				*locked = 1;
			}
			*giveup = 1;
			to_move = 0;
			goto out_of;
		}
	}

	/* If we reach here, we can copy out a chunk */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* No chunk memory */
		*giveup = 1;
		to_move = 0;
		goto out_of;
	}
	/*
	 * Setup for unordered if needed by looking at the user sent info
	 * flags.
	 */
	if (sp->sinfo_flags & SCTP_UNORDERED) {
		rcv_flags |= SCTP_DATA_UNORDERED;
	}
	if ((SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && ((sp->sinfo_flags & SCTP_EOF) == SCTP_EOF)) ||
	    ((sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) == SCTP_SACK_IMMEDIATELY)) {
		rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
	}
	/* clear out the chunk before setting up */
	memset(chk, 0, sizeof(*chk));
	chk->rec.data.rcv_flags = rcv_flags;

	if (to_move >= length) {
		/* we think we can steal the whole thing */
		if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
			SCTP_TCB_SEND_LOCK(stcb);
			send_lock_up = 1;
		}
		if (to_move < sp->length) {
			/* bail, it changed */
			goto dont_do_it;
		}
		/* take the whole mbuf chain by reference (no copy) */
		chk->data = sp->data;
		chk->last_mbuf = sp->tail_mbuf;
		/* register the stealing */
		sp->data = sp->tail_mbuf = NULL;
	} else {
		struct mbuf *m;

dont_do_it:
		/* partial take: copy to_move bytes out of the chain */
		chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_DONTWAIT);
		chk->last_mbuf = NULL;
		if (chk->data == NULL) {
			sp->some_taken = some_taken;
			sctp_free_a_chunk(stcb, chk);
			*bail = 1;
			to_move = 0;
			goto out_of;
		}
#ifdef SCTP_MBUF_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
			struct mbuf *mat;

			mat = chk->data;
			while (mat) {
				if (SCTP_BUF_IS_EXTENDED(mat)) {
					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
				}
				mat = SCTP_BUF_NEXT(mat);
			}
		}
#endif
		/* Pull off the data */
		m_adj(sp->data, to_move);
		/* Now lets work our way down and compact it */
		m = sp->data;
		while (m && (SCTP_BUF_LEN(m) == 0)) {
			sp->data = SCTP_BUF_NEXT(m);
			SCTP_BUF_NEXT(m) = NULL;
			if (sp->tail_mbuf == m) {
				/*-
				 * Freeing tail? TSNH since
				 * we supposedly were taking less
				 * than the sp->length.
				 */
#ifdef INVARIANTS
				panic("Huh, freing tail? - TSNH");
#else
				SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
				sp->tail_mbuf = sp->data = NULL;
				sp->length = 0;
#endif

			}
			sctp_m_free(m);
			m = sp->data;
		}
	}
	if (SCTP_BUF_IS_EXTENDED(chk->data)) {
		chk->copy_by_ref = 1;
	} else {
		chk->copy_by_ref = 0;
	}
	/*
	 * get last_mbuf and counts of mb useage This is ugly but hopefully
	 * its only one mbuf.
	 */
	if (chk->last_mbuf == NULL) {
		chk->last_mbuf = chk->data;
		while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
			chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
		}
	}
	if (to_move > length) {
		/*- This should not happen either
		 * since we always lower to_move to the size
		 * of sp->length if its larger.
		 */
#ifdef INVARIANTS
		panic("Huh, how can to_move be larger?");
#else
		SCTP_PRINTF("Huh, how can to_move be larger?\n");
		sp->length = 0;
#endif
	} else {
		atomic_subtract_int(&sp->length, to_move);
	}
	if (M_LEADINGSPACE(chk->data) < (int)sizeof(struct sctp_data_chunk)) {
		/* Not enough room for a chunk header, get some */
		struct mbuf *m;

		m = sctp_get_mbuf_for_msg(1, 0, M_DONTWAIT, 0, MT_DATA);
		if (m == NULL) {
			/*
			 * we're in trouble here. _PREPEND below will free
			 * all the data if there is no leading space, so we
			 * must put the data back and restore.
			 */
			if (send_lock_up == 0) {
				SCTP_TCB_SEND_LOCK(stcb);
				send_lock_up = 1;
			}
			if (chk->data == NULL) {
				/* unsteal the data */
				sp->data = chk->data;
				sp->tail_mbuf = chk->last_mbuf;
			} else {
				struct mbuf *m_tmp;

				/* reassemble the data */
				m_tmp = sp->data;
				sp->data = chk->data;
				SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
			}
			sp->some_taken = some_taken;
			atomic_add_int(&sp->length, to_move);
			chk->data = NULL;
			*bail = 1;
			sctp_free_a_chunk(stcb, chk);
			to_move = 0;
			goto out_of;
		} else {
			/* prepend the fresh mbuf to hold the chunk header */
			SCTP_BUF_LEN(m) = 0;
			SCTP_BUF_NEXT(m) = chk->data;
			chk->data = m;
			M_ALIGN(chk->data, 4);
		}
	}
	SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_DONTWAIT);
	if (chk->data == NULL) {
		/* HELP, TSNH since we assured it would not above? */
#ifdef INVARIANTS
		panic("prepend failes HELP?");
#else
		SCTP_PRINTF("prepend fails HELP?\n");
		sctp_free_a_chunk(stcb, chk);
#endif
		*bail = 1;
		to_move = 0;
		goto out_of;
	}
	sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
	chk->book_size = chk->send_size = (to_move + sizeof(struct sctp_data_chunk));
	chk->book_size_scale = 0;
	chk->sent = SCTP_DATAGRAM_UNSENT;

	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->pad_inplace = 0;
	chk->no_fr_allowed = 0;
	chk->rec.data.stream_seq = sp->strseq;
	chk->rec.data.stream_number = sp->stream;
	chk->rec.data.payloadtype = sp->ppid;
	chk->rec.data.context = sp->context;
	chk->rec.data.doing_fast_retransmit = 0;

	chk->rec.data.timetodrop = sp->ts;
	chk->flags = sp->act_flags;

	if (sp->net) {
		chk->whoTo = sp->net;
		atomic_add_int(&chk->whoTo->ref_count, 1);
	} else
		chk->whoTo = NULL;

	if (sp->holds_key_ref) {
		/* carry the auth key reference over to the chunk */
		chk->auth_keyid = sp->auth_keyid;
		sctp_auth_key_acquire(stcb, chk->auth_keyid);
		chk->holds_key_ref = 1;
	}
	/* atomically claim the next TSN */
	chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
		sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
		    (uintptr_t) stcb, sp->length,
		    (uint32_t) ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
		    chk->rec.data.TSN_seq);
	}
	dchkh = mtod(chk->data, struct sctp_data_chunk *);
	/*
	 * Put the rest of the things in place now. Size was done earlier in
	 * previous loop prior to padding.
	 */

#ifdef SCTP_ASOCLOG_OF_TSNS
	SCTP_TCB_LOCK_ASSERT(stcb);
	if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
		asoc->tsn_out_at = 0;
		asoc->tsn_out_wrapped = 1;
	}
	asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq;
	asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number;
	asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq;
	asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
	asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
	asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
	asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
	asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
	asoc->tsn_out_at++;
#endif

	/* fill in the on-wire DATA chunk header */
	dchkh->ch.chunk_type = SCTP_DATA;
	dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
	dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
	dchkh->dp.stream_id = htons(strq->stream_no);
	dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
	dchkh->dp.protocol_id = chk->rec.data.payloadtype;
	dchkh->ch.chunk_length = htons(chk->send_size);
	/* Now advance the chk->send_size by the actual pad needed. */
	if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
		/* need a pad */
		struct mbuf *lm;
		int pads;

		pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
		if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) {
			chk->pad_inplace = 1;
		}
		if ((lm = SCTP_BUF_NEXT(chk->last_mbuf)) != NULL) {
			/* pad added an mbuf */
			chk->last_mbuf = lm;
		}
		chk->send_size += pads;
	}
	/* We only re-set the policy if it is on */
	if (sp->pr_sctp_on) {
		sctp_set_prsctp_policy(sp);
		asoc->pr_sctp_cnt++;
		chk->pr_sctp_on = 1;
	} else {
		chk->pr_sctp_on = 0;
	}
	if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
		/* All done pull and kill the message */
		atomic_subtract_int(&asoc->stream_queue_cnt, 1);
		if (sp->put_last_out == 0) {
			SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
			SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
			    sp->sender_all_done,
			    sp->length,
			    sp->msg_is_complete,
			    sp->put_last_out,
			    send_lock_up);
		}
		if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
			SCTP_TCB_SEND_LOCK(stcb);
			send_lock_up = 1;
		}
		TAILQ_REMOVE(&strq->outqueue, sp, next);
		stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
		if (sp->net) {
			sctp_free_remote_addr(sp->net);
			sp->net = NULL;
		}
		if (sp->data) {
			sctp_m_freem(sp->data);
			sp->data = NULL;
		}
		sctp_free_a_strmoq(stcb, sp);

		/* we can't be locked to it */
		*locked = 0;
		stcb->asoc.locked_on_sending = NULL;
	} else {
		/* more to go, we are locked */
		*locked = 1;
	}
	asoc->chunks_on_out_queue++;
	TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
	asoc->send_queue_cnt++;
out_of:
	/* single exit point: always release the send lock if we took it */
	if (send_lock_up) {
		SCTP_TCB_SEND_UNLOCK(stcb);
		send_lock_up = 0;
	}
return (to_move);
}


/*
 * Pull up to one MTU's worth of data for destination 'net' out of the
 * stream queues (via the stream scheduler) onto the association's
 * send_queue, honoring locked_on_sending (a partially-sent message pins
 * the stream).  Sets *quit_now when sctp_move_to_outqueue signalled an
 * allocation failure (bail).
 */
static void
sctp_fill_outqueue(struct sctp_tcb *stcb,
    struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *strq, *strqn;
	int goal_mtu, moved_how_much, total_moved = 0, bail = 0;
	int locked, giveup;

	SCTP_TCB_LOCK_ASSERT(stcb);
	asoc = &stcb->asoc;
#ifdef INET6
	if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
		goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
	} else {
		/* ?? not sure what else to do */
		goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
	}
#else
	goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
#endif
	/* Need an allowance for the data chunk header too */
	goal_mtu -= sizeof(struct sctp_data_chunk);

	/* must make even word boundary */
	goal_mtu &= 0xfffffffc;
	if (asoc->locked_on_sending) {
		/* We are stuck on one stream until the message completes. */
		strq = asoc->locked_on_sending;
		locked = 1;
	} else {
		strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
		locked = 0;
	}
	strqn = strq;
	while ((goal_mtu > 0) && strq) {
		giveup = 0;
		bail = 0;
		moved_how_much = sctp_move_to_outqueue(stcb, strq, goal_mtu, frag_point, &locked,
		    &giveup, eeor_mode, &bail);
		if (moved_how_much)
			/* let the scheduler account for what was taken */
			stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved_how_much);

		if (locked) {
			asoc->locked_on_sending = strq;
			if ((moved_how_much == 0) || (giveup) || bail)
				/* no more to move for now */
				break;
		} else {
			asoc->locked_on_sending = NULL;
			if ((giveup) || bail) {
				break;
			}
			strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
			if (strq == NULL) {
				break;
			}
		}
		total_moved += moved_how_much;
		goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk));
		goal_mtu &= 0xfffffffc;
	}
	if (bail)
		*quit_now = 1;

	stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);

	if (total_moved == 0) {
		/* nothing could be moved: record which "ran dry" stat applies */
		if ((stcb->asoc.sctp_cmt_on_off == 0) &&
		    (net == stcb->asoc.primary_destination)) {
			/* ran dry for primary network net */
			SCTP_STAT_INCR(sctps_primary_randry);
		} else if (stcb->asoc.sctp_cmt_on_off > 0) {
			/* ran dry with CMT on */
			SCTP_STAT_INCR(sctps_cmt_randry);
		}
	}
}

/*
 * Re-arm every ECN-ECHO chunk on the control send queue for (re)transmission
 * by marking it unsent.
 */
void
sctp_fix_ecn_echo(struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;

	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
			chk->sent = SCTP_DATAGRAM_UNSENT;
		}
	}
}

/*
 * Detach queued data from destination 'net': clear the net pointer on any
 * pending stream-queue entries and send-queue chunks bound to it, dropping
 * the references so the data can be re-targeted (e.g. when the net goes
 * away).  A NULL net is a no-op.
 */
void
sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_stream_queue_pending *sp;
	unsigned int i;

	if (net == NULL) {
		/* nothing to do */
		return;
	}
	asoc = &stcb->asoc;
	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
		TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
			if (sp->net == net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
		}
	}
	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
		if (chk->whoTo == net) {
			sctp_free_remote_addr(chk->whoTo);
			chk->whoTo = NULL;
		}
	}
}

int
sctp_med_chunk_output(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int *num_out,
    int *reason_code,
    int control_only, int from_where,
    struct timeval *now, int *now_filled, int frag_point, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Ok this is the generic chunk service queue.
we must do the 7227 * following: - Service the stream queue that is next, moving any 7228 * message (note I must get a complete message i.e. FIRST/MIDDLE and 7229 * LAST to the out queue in one pass) and assigning TSN's - Check to 7230 * see if the cwnd/rwnd allows any output, if so we go ahead and 7231 * fomulate and send the low level chunks. Making sure to combine 7232 * any control in the control chunk queue also. 7233 */ 7234 struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL; 7235 struct mbuf *outchain, *endoutchain; 7236 struct sctp_tmit_chunk *chk, *nchk; 7237 7238 /* temp arrays for unlinking */ 7239 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING]; 7240 int no_fragmentflg, error; 7241 unsigned int max_rwnd_per_dest, max_send_per_dest; 7242 int one_chunk, hbflag, skip_data_for_this_net; 7243 int asconf, cookie, no_out_cnt; 7244 int bundle_at, ctl_cnt, no_data_chunks, eeor_mode; 7245 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out; 7246 int tsns_sent = 0; 7247 uint32_t auth_offset = 0; 7248 struct sctp_auth_chunk *auth = NULL; 7249 uint16_t auth_keyid; 7250 int override_ok = 1; 7251 int data_auth_reqd = 0; 7252 7253 /* 7254 * JRS 5/14/07 - Add flag for whether a heartbeat is sent to the 7255 * destination. 7256 */ 7257 int pf_hbflag = 0; 7258 int quit_now = 0; 7259 7260 *num_out = 0; 7261 auth_keyid = stcb->asoc.authinfo.active_keyid; 7262 7263 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 7264 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) || 7265 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) { 7266 eeor_mode = 1; 7267 } else { 7268 eeor_mode = 0; 7269 } 7270 ctl_cnt = no_out_cnt = asconf = cookie = 0; 7271 /* 7272 * First lets prime the pump. 
For each destination, if there is room 7273 * in the flight size, attempt to pull an MTU's worth out of the 7274 * stream queues into the general send_queue 7275 */ 7276 #ifdef SCTP_AUDITING_ENABLED 7277 sctp_audit_log(0xC2, 2); 7278 #endif 7279 SCTP_TCB_LOCK_ASSERT(stcb); 7280 hbflag = 0; 7281 if ((control_only) || (asoc->stream_reset_outstanding)) 7282 no_data_chunks = 1; 7283 else 7284 no_data_chunks = 0; 7285 7286 /* Nothing to possible to send? */ 7287 if ((TAILQ_EMPTY(&asoc->control_send_queue) || 7288 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) && 7289 TAILQ_EMPTY(&asoc->asconf_send_queue) && 7290 TAILQ_EMPTY(&asoc->send_queue) && 7291 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) { 7292 nothing_to_send: 7293 *reason_code = 9; 7294 return (0); 7295 } 7296 if (asoc->peers_rwnd == 0) { 7297 /* No room in peers rwnd */ 7298 *reason_code = 1; 7299 if (asoc->total_flight > 0) { 7300 /* we are allowed one chunk in flight */ 7301 no_data_chunks = 1; 7302 } 7303 } 7304 if (stcb->asoc.ecn_echo_cnt_onq) { 7305 /* Record where a sack goes, if any */ 7306 if (no_data_chunks && 7307 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) { 7308 /* Nothing but ECNe to send - we don't do that */ 7309 goto nothing_to_send; 7310 } 7311 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 7312 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) || 7313 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) { 7314 sack_goes_to = chk->whoTo; 7315 break; 7316 } 7317 } 7318 } 7319 max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets); 7320 if (stcb->sctp_socket) 7321 max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets; 7322 else 7323 max_send_per_dest = 0; 7324 if ((no_data_chunks == 0) && 7325 (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) { 7326 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 7327 /* 7328 * This for loop we are in takes in each net, if 7329 * its's got space in cwnd and has data sent to it 7330 
* (when CMT is off) then it calls 7331 * sctp_fill_outqueue for the net. This gets data on 7332 * the send queue for that network. 7333 * 7334 * In sctp_fill_outqueue TSN's are assigned and data is 7335 * copied out of the stream buffers. Note mostly 7336 * copy by reference (we hope). 7337 */ 7338 net->window_probe = 0; 7339 if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) || 7340 (net->dest_state & SCTP_ADDR_UNCONFIRMED)) { 7341 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 7342 sctp_log_cwnd(stcb, net, 1, 7343 SCTP_CWND_LOG_FILL_OUTQ_CALLED); 7344 } 7345 continue; 7346 } 7347 if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) && 7348 (net->flight_size == 0)) { 7349 (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) (stcb, net); 7350 } 7351 if ((asoc->sctp_cmt_on_off == 0) && 7352 (asoc->primary_destination != net) && 7353 (net->ref_count < 2)) { 7354 /* nothing can be in queue for this guy */ 7355 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 7356 sctp_log_cwnd(stcb, net, 2, 7357 SCTP_CWND_LOG_FILL_OUTQ_CALLED); 7358 } 7359 continue; 7360 } 7361 if (net->flight_size >= net->cwnd) { 7362 /* skip this network, no room - can't fill */ 7363 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 7364 sctp_log_cwnd(stcb, net, 3, 7365 SCTP_CWND_LOG_FILL_OUTQ_CALLED); 7366 } 7367 continue; 7368 } 7369 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 7370 sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED); 7371 } 7372 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now); 7373 if (quit_now) { 7374 /* memory alloc failure */ 7375 no_data_chunks = 1; 7376 break; 7377 } 7378 } 7379 } 7380 /* now service each destination and send out what we can for it */ 7381 /* Nothing to send? 
*/ 7382 if (TAILQ_EMPTY(&asoc->control_send_queue) && 7383 TAILQ_EMPTY(&asoc->asconf_send_queue) && 7384 TAILQ_EMPTY(&asoc->send_queue)) { 7385 *reason_code = 8; 7386 return (0); 7387 } 7388 if (asoc->sctp_cmt_on_off > 0) { 7389 /* get the last start point */ 7390 start_at = asoc->last_net_cmt_send_started; 7391 if (start_at == NULL) { 7392 /* null so to beginning */ 7393 start_at = TAILQ_FIRST(&asoc->nets); 7394 } else { 7395 start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next); 7396 if (start_at == NULL) { 7397 start_at = TAILQ_FIRST(&asoc->nets); 7398 } 7399 } 7400 asoc->last_net_cmt_send_started = start_at; 7401 } else { 7402 start_at = TAILQ_FIRST(&asoc->nets); 7403 } 7404 old_start_at = NULL; 7405 again_one_more_time: 7406 for (net = start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) { 7407 /* how much can we send? */ 7408 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */ 7409 if (old_start_at && (old_start_at == net)) { 7410 /* through list ocmpletely. */ 7411 break; 7412 } 7413 tsns_sent = 0xa; 7414 if ((asoc->sctp_cmt_on_off == 0) && 7415 (asoc->primary_destination != net) && 7416 (net->ref_count < 2)) { 7417 /* 7418 * Ref-count of 1 so we cannot have data or control 7419 * queued to this address. Skip it (non-CMT). 7420 */ 7421 continue; 7422 } 7423 if (TAILQ_EMPTY(&asoc->control_send_queue) && 7424 TAILQ_EMPTY(&asoc->asconf_send_queue) && 7425 (net->flight_size >= net->cwnd)) { 7426 /* 7427 * Nothing on control or asconf and flight is full, 7428 * we can skip even in the CMT case. 
7429 */ 7430 continue; 7431 } 7432 ctl_cnt = bundle_at = 0; 7433 endoutchain = outchain = NULL; 7434 no_fragmentflg = 1; 7435 one_chunk = 0; 7436 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) { 7437 skip_data_for_this_net = 1; 7438 } else { 7439 skip_data_for_this_net = 0; 7440 } 7441 if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) { 7442 /* 7443 * if we have a route and an ifp check to see if we 7444 * have room to send to this guy 7445 */ 7446 struct ifnet *ifp; 7447 7448 ifp = net->ro.ro_rt->rt_ifp; 7449 if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) { 7450 SCTP_STAT_INCR(sctps_ifnomemqueued); 7451 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) { 7452 sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED); 7453 } 7454 continue; 7455 } 7456 } 7457 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) { 7458 case AF_INET: 7459 mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr)); 7460 break; 7461 #ifdef INET6 7462 case AF_INET6: 7463 mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)); 7464 break; 7465 #endif 7466 default: 7467 /* TSNH */ 7468 mtu = net->mtu; 7469 break; 7470 } 7471 mx_mtu = mtu; 7472 to_out = 0; 7473 if (mtu > asoc->peers_rwnd) { 7474 if (asoc->total_flight > 0) { 7475 /* We have a packet in flight somewhere */ 7476 r_mtu = asoc->peers_rwnd; 7477 } else { 7478 /* We are always allowed to send one MTU out */ 7479 one_chunk = 1; 7480 r_mtu = mtu; 7481 } 7482 } else { 7483 r_mtu = mtu; 7484 } 7485 /************************/ 7486 /* ASCONF transmission */ 7487 /************************/ 7488 /* Now first lets go through the asconf queue */ 7489 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) { 7490 if (chk->rec.chunk_id.id != SCTP_ASCONF) { 7491 continue; 7492 } 7493 if (chk->whoTo != net) { 7494 /* 7495 * No, not sent to the network we are 7496 * looking at 7497 */ 7498 break; 7499 } 7500 if (chk->data == NULL) { 7501 break; 7502 } 
7503 if (chk->sent != SCTP_DATAGRAM_UNSENT && 7504 chk->sent != SCTP_DATAGRAM_RESEND) { 7505 break; 7506 } 7507 /* 7508 * if no AUTH is yet included and this chunk 7509 * requires it, make sure to account for it. We 7510 * don't apply the size until the AUTH chunk is 7511 * actually added below in case there is no room for 7512 * this chunk. NOTE: we overload the use of "omtu" 7513 * here 7514 */ 7515 if ((auth == NULL) && 7516 sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 7517 stcb->asoc.peer_auth_chunks)) { 7518 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 7519 } else 7520 omtu = 0; 7521 /* Here we do NOT factor the r_mtu */ 7522 if ((chk->send_size < (int)(mtu - omtu)) || 7523 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) { 7524 /* 7525 * We probably should glom the mbuf chain 7526 * from the chk->data for control but the 7527 * problem is it becomes yet one more level 7528 * of tracking to do if for some reason 7529 * output fails. Then I have got to 7530 * reconstruct the merged control chain.. el 7531 * yucko.. 
for now we take the easy way and 7532 * do the copy 7533 */ 7534 /* 7535 * Add an AUTH chunk, if chunk requires it 7536 * save the offset into the chain for AUTH 7537 */ 7538 if ((auth == NULL) && 7539 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 7540 stcb->asoc.peer_auth_chunks))) { 7541 outchain = sctp_add_auth_chunk(outchain, 7542 &endoutchain, 7543 &auth, 7544 &auth_offset, 7545 stcb, 7546 chk->rec.chunk_id.id); 7547 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 7548 } 7549 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 7550 (int)chk->rec.chunk_id.can_take_data, 7551 chk->send_size, chk->copy_by_ref); 7552 if (outchain == NULL) { 7553 *reason_code = 8; 7554 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 7555 return (ENOMEM); 7556 } 7557 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 7558 /* update our MTU size */ 7559 if (mtu > (chk->send_size + omtu)) 7560 mtu -= (chk->send_size + omtu); 7561 else 7562 mtu = 0; 7563 to_out += (chk->send_size + omtu); 7564 /* Do clear IP_DF ? */ 7565 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { 7566 no_fragmentflg = 0; 7567 } 7568 if (chk->rec.chunk_id.can_take_data) 7569 chk->data = NULL; 7570 /* 7571 * set hb flag since we can use these for 7572 * RTO 7573 */ 7574 hbflag = 1; 7575 asconf = 1; 7576 /* 7577 * should sysctl this: don't bundle data 7578 * with ASCONF since it requires AUTH 7579 */ 7580 no_data_chunks = 1; 7581 chk->sent = SCTP_DATAGRAM_SENT; 7582 chk->snd_count++; 7583 if (mtu == 0) { 7584 /* 7585 * Ok we are out of room but we can 7586 * output without effecting the 7587 * flight size since this little guy 7588 * is a control only packet. 7589 */ 7590 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net); 7591 /* 7592 * do NOT clear the asconf flag as 7593 * it is used to do appropriate 7594 * source address selection. 
7595 */ 7596 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net, 7597 (struct sockaddr *)&net->ro._l_addr, 7598 outchain, auth_offset, auth, 7599 stcb->asoc.authinfo.active_keyid, 7600 no_fragmentflg, 0, NULL, asconf, 7601 inp->sctp_lport, stcb->rport, 7602 htonl(stcb->asoc.peer_vtag), 7603 net->port, so_locked, NULL, NULL))) { 7604 if (error == ENOBUFS) { 7605 asoc->ifp_had_enobuf = 1; 7606 SCTP_STAT_INCR(sctps_lowlevelerr); 7607 } 7608 if (from_where == 0) { 7609 SCTP_STAT_INCR(sctps_lowlevelerrusr); 7610 } 7611 if (*now_filled == 0) { 7612 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 7613 *now_filled = 1; 7614 *now = net->last_sent_time; 7615 } else { 7616 net->last_sent_time = *now; 7617 } 7618 hbflag = 0; 7619 /* error, could not output */ 7620 if (error == EHOSTUNREACH) { 7621 /* 7622 * Destination went 7623 * unreachable 7624 * during this send 7625 */ 7626 sctp_move_chunks_from_net(stcb, net); 7627 } 7628 *reason_code = 7; 7629 continue; 7630 } else 7631 asoc->ifp_had_enobuf = 0; 7632 if (*now_filled == 0) { 7633 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 7634 *now_filled = 1; 7635 *now = net->last_sent_time; 7636 } else { 7637 net->last_sent_time = *now; 7638 } 7639 hbflag = 0; 7640 /* 7641 * increase the number we sent, if a 7642 * cookie is sent we don't tell them 7643 * any was sent out. 
7644 */ 7645 outchain = endoutchain = NULL; 7646 auth = NULL; 7647 auth_offset = 0; 7648 if (!no_out_cnt) 7649 *num_out += ctl_cnt; 7650 /* recalc a clean slate and setup */ 7651 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 7652 mtu = (net->mtu - SCTP_MIN_OVERHEAD); 7653 } else { 7654 mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD); 7655 } 7656 to_out = 0; 7657 no_fragmentflg = 1; 7658 } 7659 } 7660 } 7661 /************************/ 7662 /* Control transmission */ 7663 /************************/ 7664 /* Now first lets go through the control queue */ 7665 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) { 7666 if ((sack_goes_to) && 7667 (chk->rec.chunk_id.id == SCTP_ECN_ECHO) && 7668 (chk->whoTo != sack_goes_to)) { 7669 /* 7670 * if we have a sack in queue, and we are 7671 * looking at an ecn echo that is NOT queued 7672 * to where the sack is going.. 7673 */ 7674 if (chk->whoTo == net) { 7675 /* 7676 * Don't transmit it to where its 7677 * going (current net) 7678 */ 7679 continue; 7680 } else if (sack_goes_to == net) { 7681 /* 7682 * But do transmit it to this 7683 * address 7684 */ 7685 goto skip_net_check; 7686 } 7687 } 7688 if (chk->whoTo != net) { 7689 /* 7690 * No, not sent to the network we are 7691 * looking at 7692 */ 7693 continue; 7694 } 7695 skip_net_check: 7696 if (chk->data == NULL) { 7697 continue; 7698 } 7699 if (chk->sent != SCTP_DATAGRAM_UNSENT) { 7700 /* 7701 * It must be unsent. Cookies and ASCONF's 7702 * hang around but there timers will force 7703 * when marked for resend. 7704 */ 7705 continue; 7706 } 7707 /* 7708 * if no AUTH is yet included and this chunk 7709 * requires it, make sure to account for it. We 7710 * don't apply the size until the AUTH chunk is 7711 * actually added below in case there is no room for 7712 * this chunk. 
NOTE: we overload the use of "omtu" 7713 * here 7714 */ 7715 if ((auth == NULL) && 7716 sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 7717 stcb->asoc.peer_auth_chunks)) { 7718 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 7719 } else 7720 omtu = 0; 7721 /* Here we do NOT factor the r_mtu */ 7722 if ((chk->send_size <= (int)(mtu - omtu)) || 7723 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) { 7724 /* 7725 * We probably should glom the mbuf chain 7726 * from the chk->data for control but the 7727 * problem is it becomes yet one more level 7728 * of tracking to do if for some reason 7729 * output fails. Then I have got to 7730 * reconstruct the merged control chain.. el 7731 * yucko.. for now we take the easy way and 7732 * do the copy 7733 */ 7734 /* 7735 * Add an AUTH chunk, if chunk requires it 7736 * save the offset into the chain for AUTH 7737 */ 7738 if ((auth == NULL) && 7739 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 7740 stcb->asoc.peer_auth_chunks))) { 7741 outchain = sctp_add_auth_chunk(outchain, 7742 &endoutchain, 7743 &auth, 7744 &auth_offset, 7745 stcb, 7746 chk->rec.chunk_id.id); 7747 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 7748 } 7749 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 7750 (int)chk->rec.chunk_id.can_take_data, 7751 chk->send_size, chk->copy_by_ref); 7752 if (outchain == NULL) { 7753 *reason_code = 8; 7754 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 7755 return (ENOMEM); 7756 } 7757 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 7758 /* update our MTU size */ 7759 if (mtu > (chk->send_size + omtu)) 7760 mtu -= (chk->send_size + omtu); 7761 else 7762 mtu = 0; 7763 to_out += (chk->send_size + omtu); 7764 /* Do clear IP_DF ? 
*/ 7765 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { 7766 no_fragmentflg = 0; 7767 } 7768 if (chk->rec.chunk_id.can_take_data) 7769 chk->data = NULL; 7770 /* Mark things to be removed, if needed */ 7771 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) || 7772 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */ 7773 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) || 7774 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) || 7775 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) || 7776 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) || 7777 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) || 7778 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) || 7779 (chk->rec.chunk_id.id == SCTP_ECN_CWR) || 7780 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) || 7781 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) { 7782 7783 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) { 7784 hbflag = 1; 7785 /* 7786 * JRS 5/14/07 - Set the 7787 * flag to say a heartbeat 7788 * is being sent. 7789 */ 7790 pf_hbflag = 1; 7791 } 7792 /* remove these chunks at the end */ 7793 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) || 7794 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) { 7795 /* turn off the timer */ 7796 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 7797 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 7798 inp, stcb, net, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1); 7799 } 7800 } 7801 ctl_cnt++; 7802 } else { 7803 /* 7804 * Other chunks, since they have 7805 * timers running (i.e. COOKIE) we 7806 * just "trust" that it gets sent or 7807 * retransmitted. 
7808 */ 7809 ctl_cnt++; 7810 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 7811 cookie = 1; 7812 no_out_cnt = 1; 7813 } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) { 7814 /* 7815 * Increment ecne send count 7816 * here this means we may be 7817 * over-zealous in our 7818 * counting if the send 7819 * fails, but its the best 7820 * place to do it (we used 7821 * to do it in the queue of 7822 * the chunk, but that did 7823 * not tell how many times 7824 * it was sent. 7825 */ 7826 SCTP_STAT_INCR(sctps_sendecne); 7827 } 7828 chk->sent = SCTP_DATAGRAM_SENT; 7829 chk->snd_count++; 7830 } 7831 if (mtu == 0) { 7832 /* 7833 * Ok we are out of room but we can 7834 * output without effecting the 7835 * flight size since this little guy 7836 * is a control only packet. 7837 */ 7838 if (asconf) { 7839 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net); 7840 /* 7841 * do NOT clear the asconf 7842 * flag as it is used to do 7843 * appropriate source 7844 * address selection. 7845 */ 7846 } 7847 if (cookie) { 7848 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net); 7849 cookie = 0; 7850 } 7851 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net, 7852 (struct sockaddr *)&net->ro._l_addr, 7853 outchain, 7854 auth_offset, auth, 7855 stcb->asoc.authinfo.active_keyid, 7856 no_fragmentflg, 0, NULL, asconf, 7857 inp->sctp_lport, stcb->rport, 7858 htonl(stcb->asoc.peer_vtag), 7859 net->port, so_locked, NULL, NULL))) { 7860 if (error == ENOBUFS) { 7861 asoc->ifp_had_enobuf = 1; 7862 SCTP_STAT_INCR(sctps_lowlevelerr); 7863 } 7864 if (from_where == 0) { 7865 SCTP_STAT_INCR(sctps_lowlevelerrusr); 7866 } 7867 /* error, could not output */ 7868 if (hbflag) { 7869 if (*now_filled == 0) { 7870 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 7871 *now_filled = 1; 7872 *now = net->last_sent_time; 7873 } else { 7874 net->last_sent_time = *now; 7875 } 7876 hbflag = 0; 7877 } 7878 if (error == EHOSTUNREACH) { 7879 /* 7880 * Destination went 7881 * unreachable 7882 * during this 
send 7883 */ 7884 sctp_move_chunks_from_net(stcb, net); 7885 } 7886 *reason_code = 7; 7887 continue; 7888 } else 7889 asoc->ifp_had_enobuf = 0; 7890 /* Only HB or ASCONF advances time */ 7891 if (hbflag) { 7892 if (*now_filled == 0) { 7893 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 7894 *now_filled = 1; 7895 *now = net->last_sent_time; 7896 } else { 7897 net->last_sent_time = *now; 7898 } 7899 hbflag = 0; 7900 } 7901 /* 7902 * increase the number we sent, if a 7903 * cookie is sent we don't tell them 7904 * any was sent out. 7905 */ 7906 outchain = endoutchain = NULL; 7907 auth = NULL; 7908 auth_offset = 0; 7909 if (!no_out_cnt) 7910 *num_out += ctl_cnt; 7911 /* recalc a clean slate and setup */ 7912 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 7913 mtu = (net->mtu - SCTP_MIN_OVERHEAD); 7914 } else { 7915 mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD); 7916 } 7917 to_out = 0; 7918 no_fragmentflg = 1; 7919 } 7920 } 7921 } 7922 /* JRI: if dest is in PF state, do not send data to it */ 7923 if ((asoc->sctp_cmt_on_off > 0) && 7924 (asoc->sctp_cmt_pf > 0) && 7925 (net->dest_state & SCTP_ADDR_PF)) { 7926 goto no_data_fill; 7927 } 7928 if (net->flight_size >= net->cwnd) { 7929 goto no_data_fill; 7930 } 7931 if ((asoc->sctp_cmt_on_off > 0) && 7932 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) && 7933 (net->flight_size > max_rwnd_per_dest)) { 7934 goto no_data_fill; 7935 } 7936 /* 7937 * We need a specific accounting for the usage of the send 7938 * buffer. We also need to check the number of messages per 7939 * net. For now, this is better than nothing and it disabled 7940 * by default... 
7941 */ 7942 if ((asoc->sctp_cmt_on_off > 0) && 7943 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) && 7944 (max_send_per_dest > 0) && 7945 (net->flight_size > max_send_per_dest)) { 7946 goto no_data_fill; 7947 } 7948 /*********************/ 7949 /* Data transmission */ 7950 /*********************/ 7951 /* 7952 * if AUTH for DATA is required and no AUTH has been added 7953 * yet, account for this in the mtu now... if no data can be 7954 * bundled, this adjustment won't matter anyways since the 7955 * packet will be going out... 7956 */ 7957 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, 7958 stcb->asoc.peer_auth_chunks); 7959 if (data_auth_reqd && (auth == NULL)) { 7960 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 7961 } 7962 /* now lets add any data within the MTU constraints */ 7963 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) { 7964 case AF_INET: 7965 if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr))) 7966 omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr)); 7967 else 7968 omtu = 0; 7969 break; 7970 #ifdef INET6 7971 case AF_INET6: 7972 if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr))) 7973 omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)); 7974 else 7975 omtu = 0; 7976 break; 7977 #endif 7978 default: 7979 /* TSNH */ 7980 omtu = 0; 7981 break; 7982 } 7983 if ((((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) && 7984 (skip_data_for_this_net == 0)) || 7985 (cookie)) { 7986 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 7987 if (no_data_chunks) { 7988 /* let only control go out */ 7989 *reason_code = 1; 7990 break; 7991 } 7992 if (net->flight_size >= net->cwnd) { 7993 /* skip this net, no room for data */ 7994 *reason_code = 2; 7995 break; 7996 } 7997 if ((chk->whoTo != NULL) && 7998 (chk->whoTo != net)) { 7999 /* Don't send the chunk on this net */ 8000 continue; 8001 } 8002 if ((chk->send_size > omtu) && ((chk->flags & 
CHUNK_FLAGS_FRAGMENT_OK) == 0)) { 8003 /*- 8004 * strange, we have a chunk that is 8005 * to big for its destination and 8006 * yet no fragment ok flag. 8007 * Something went wrong when the 8008 * PMTU changed...we did not mark 8009 * this chunk for some reason?? I 8010 * will fix it here by letting IP 8011 * fragment it for now and printing 8012 * a warning. This really should not 8013 * happen ... 8014 */ 8015 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n", 8016 chk->send_size, mtu); 8017 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 8018 } 8019 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && 8020 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) == SCTP_STATE_SHUTDOWN_PENDING)) { 8021 struct sctp_data_chunk *dchkh; 8022 8023 dchkh = mtod(chk->data, struct sctp_data_chunk *); 8024 dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY; 8025 } 8026 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) || 8027 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) { 8028 /* ok we will add this one */ 8029 8030 /* 8031 * Add an AUTH chunk, if chunk 8032 * requires it, save the offset into 8033 * the chain for AUTH 8034 */ 8035 if (data_auth_reqd) { 8036 if (auth == NULL) { 8037 outchain = sctp_add_auth_chunk(outchain, 8038 &endoutchain, 8039 &auth, 8040 &auth_offset, 8041 stcb, 8042 SCTP_DATA); 8043 auth_keyid = chk->auth_keyid; 8044 override_ok = 0; 8045 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 8046 } else if (override_ok) { 8047 /* 8048 * use this data's 8049 * keyid 8050 */ 8051 auth_keyid = chk->auth_keyid; 8052 override_ok = 0; 8053 } else if (auth_keyid != chk->auth_keyid) { 8054 /* 8055 * different keyid, 8056 * so done bundling 8057 */ 8058 break; 8059 } 8060 } 8061 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0, 8062 chk->send_size, chk->copy_by_ref); 8063 if (outchain == NULL) { 8064 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n"); 8065 if 
(!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 8066 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 8067 } 8068 *reason_code = 3; 8069 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 8070 return (ENOMEM); 8071 } 8072 /* upate our MTU size */ 8073 /* Do clear IP_DF ? */ 8074 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { 8075 no_fragmentflg = 0; 8076 } 8077 /* unsigned subtraction of mtu */ 8078 if (mtu > chk->send_size) 8079 mtu -= chk->send_size; 8080 else 8081 mtu = 0; 8082 /* unsigned subtraction of r_mtu */ 8083 if (r_mtu > chk->send_size) 8084 r_mtu -= chk->send_size; 8085 else 8086 r_mtu = 0; 8087 8088 to_out += chk->send_size; 8089 if ((to_out > mx_mtu) && no_fragmentflg) { 8090 #ifdef INVARIANTS 8091 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out); 8092 #else 8093 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n", 8094 mx_mtu, to_out); 8095 #endif 8096 } 8097 chk->window_probe = 0; 8098 data_list[bundle_at++] = chk; 8099 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) { 8100 mtu = 0; 8101 break; 8102 } 8103 if (chk->sent == SCTP_DATAGRAM_UNSENT) { 8104 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) { 8105 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks); 8106 } else { 8107 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks); 8108 } 8109 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) && 8110 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0)) 8111 /* 8112 * Count number of 8113 * user msg's that 8114 * were fragmented 8115 * we do this by 8116 * counting when we 8117 * see a LAST 8118 * fragment only. 
8119 */ 8120 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs); 8121 } 8122 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) { 8123 if ((one_chunk) && (stcb->asoc.total_flight == 0)) { 8124 data_list[0]->window_probe = 1; 8125 net->window_probe = 1; 8126 } 8127 break; 8128 } 8129 } else { 8130 /* 8131 * Must be sent in order of the 8132 * TSN's (on a network) 8133 */ 8134 break; 8135 } 8136 } /* for (chunk gather loop for this net) */ 8137 } /* if asoc.state OPEN */ 8138 no_data_fill: 8139 /* Is there something to send for this destination? */ 8140 if (outchain) { 8141 /* We may need to start a control timer or two */ 8142 if (asconf) { 8143 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, 8144 stcb, net); 8145 /* 8146 * do NOT clear the asconf flag as it is 8147 * used to do appropriate source address 8148 * selection. 8149 */ 8150 } 8151 if (cookie) { 8152 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net); 8153 cookie = 0; 8154 } 8155 /* must start a send timer if data is being sent */ 8156 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) { 8157 /* 8158 * no timer running on this destination 8159 * restart it. 8160 */ 8161 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 8162 } else if ((asoc->sctp_cmt_on_off > 0) && 8163 (asoc->sctp_cmt_pf > 0) && 8164 pf_hbflag && 8165 ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF) && 8166 (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) { 8167 /* 8168 * JRS 5/14/07 - If a HB has been sent to a 8169 * PF destination and no T3 timer is 8170 * currently running, start the T3 timer to 8171 * track the HBs that were sent. 
8172 */ 8173 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 8174 } 8175 /* Now send it, if there is anything to send :> */ 8176 if ((error = sctp_lowlevel_chunk_output(inp, 8177 stcb, 8178 net, 8179 (struct sockaddr *)&net->ro._l_addr, 8180 outchain, 8181 auth_offset, 8182 auth, 8183 auth_keyid, 8184 no_fragmentflg, 8185 bundle_at, 8186 data_list[0], 8187 asconf, 8188 inp->sctp_lport, stcb->rport, 8189 htonl(stcb->asoc.peer_vtag), 8190 net->port, so_locked, NULL, NULL))) { 8191 /* error, we could not output */ 8192 if (error == ENOBUFS) { 8193 SCTP_STAT_INCR(sctps_lowlevelerr); 8194 asoc->ifp_had_enobuf = 1; 8195 } 8196 if (from_where == 0) { 8197 SCTP_STAT_INCR(sctps_lowlevelerrusr); 8198 } 8199 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error); 8200 if (hbflag) { 8201 if (*now_filled == 0) { 8202 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 8203 *now_filled = 1; 8204 *now = net->last_sent_time; 8205 } else { 8206 net->last_sent_time = *now; 8207 } 8208 hbflag = 0; 8209 } 8210 if (error == EHOSTUNREACH) { 8211 /* 8212 * Destination went unreachable 8213 * during this send 8214 */ 8215 sctp_move_chunks_from_net(stcb, net); 8216 } 8217 *reason_code = 6; 8218 /*- 8219 * I add this line to be paranoid. As far as 8220 * I can tell the continue, takes us back to 8221 * the top of the for, but just to make sure 8222 * I will reset these again here. 8223 */ 8224 ctl_cnt = bundle_at = 0; 8225 continue; /* This takes us back to the 8226 * for() for the nets. 
*/ 8227 } else { 8228 asoc->ifp_had_enobuf = 0; 8229 } 8230 outchain = endoutchain = NULL; 8231 auth = NULL; 8232 auth_offset = 0; 8233 if (bundle_at || hbflag) { 8234 /* For data/asconf and hb set time */ 8235 if (*now_filled == 0) { 8236 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 8237 *now_filled = 1; 8238 *now = net->last_sent_time; 8239 } else { 8240 net->last_sent_time = *now; 8241 } 8242 } 8243 if (!no_out_cnt) { 8244 *num_out += (ctl_cnt + bundle_at); 8245 } 8246 if (bundle_at) { 8247 /* setup for a RTO measurement */ 8248 tsns_sent = data_list[0]->rec.data.TSN_seq; 8249 /* fill time if not already filled */ 8250 if (*now_filled == 0) { 8251 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent); 8252 *now_filled = 1; 8253 *now = asoc->time_last_sent; 8254 } else { 8255 asoc->time_last_sent = *now; 8256 } 8257 data_list[0]->do_rtt = 1; 8258 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at); 8259 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net); 8260 if (SCTP_BASE_SYSCTL(sctp_early_fr)) { 8261 if (net->flight_size < net->cwnd) { 8262 /* start or restart it */ 8263 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 8264 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net, 8265 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2); 8266 } 8267 SCTP_STAT_INCR(sctps_earlyfrstrout); 8268 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net); 8269 } else { 8270 /* stop it if its running */ 8271 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 8272 SCTP_STAT_INCR(sctps_earlyfrstpout); 8273 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net, 8274 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3); 8275 } 8276 } 8277 } 8278 } 8279 if (one_chunk) { 8280 break; 8281 } 8282 } 8283 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 8284 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND); 8285 } 8286 } 8287 if (old_start_at == NULL) { 8288 old_start_at = start_at; 8289 start_at = TAILQ_FIRST(&asoc->nets); 8290 if (old_start_at) 8291 goto 
again_one_more_time; 8292 } 8293 /* 8294 * At the end there should be no NON timed chunks hanging on this 8295 * queue. 8296 */ 8297 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 8298 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND); 8299 } 8300 if ((*num_out == 0) && (*reason_code == 0)) { 8301 *reason_code = 4; 8302 } else { 8303 *reason_code = 5; 8304 } 8305 sctp_clean_up_ctl(stcb, asoc); 8306 return (0); 8307 } 8308 8309 void 8310 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err) 8311 { 8312 /*- 8313 * Prepend a OPERATIONAL_ERROR chunk header and put on the end of 8314 * the control chunk queue. 8315 */ 8316 struct sctp_chunkhdr *hdr; 8317 struct sctp_tmit_chunk *chk; 8318 struct mbuf *mat; 8319 8320 SCTP_TCB_LOCK_ASSERT(stcb); 8321 sctp_alloc_a_chunk(stcb, chk); 8322 if (chk == NULL) { 8323 /* no memory */ 8324 sctp_m_freem(op_err); 8325 return; 8326 } 8327 chk->copy_by_ref = 0; 8328 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_DONTWAIT); 8329 if (op_err == NULL) { 8330 sctp_free_a_chunk(stcb, chk); 8331 return; 8332 } 8333 chk->send_size = 0; 8334 mat = op_err; 8335 while (mat != NULL) { 8336 chk->send_size += SCTP_BUF_LEN(mat); 8337 mat = SCTP_BUF_NEXT(mat); 8338 } 8339 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR; 8340 chk->rec.chunk_id.can_take_data = 1; 8341 chk->sent = SCTP_DATAGRAM_UNSENT; 8342 chk->snd_count = 0; 8343 chk->flags = 0; 8344 chk->asoc = &stcb->asoc; 8345 chk->data = op_err; 8346 chk->whoTo = chk->asoc->primary_destination; 8347 atomic_add_int(&chk->whoTo->ref_count, 1); 8348 hdr = mtod(op_err, struct sctp_chunkhdr *); 8349 hdr->chunk_type = SCTP_OPERATION_ERROR; 8350 hdr->chunk_flags = 0; 8351 hdr->chunk_length = htons(chk->send_size); 8352 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, 8353 chk, 8354 sctp_next); 8355 chk->asoc->ctrl_queue_cnt++; 8356 } 8357 8358 int 8359 sctp_send_cookie_echo(struct mbuf *m, 8360 int offset, 8361 struct sctp_tcb *stcb, 8362 struct sctp_nets 
*net) 8363 { 8364 /*- 8365 * pull out the cookie and put it at the front of the control chunk 8366 * queue. 8367 */ 8368 int at; 8369 struct mbuf *cookie; 8370 struct sctp_paramhdr parm, *phdr; 8371 struct sctp_chunkhdr *hdr; 8372 struct sctp_tmit_chunk *chk; 8373 uint16_t ptype, plen; 8374 8375 /* First find the cookie in the param area */ 8376 cookie = NULL; 8377 at = offset + sizeof(struct sctp_init_chunk); 8378 8379 SCTP_TCB_LOCK_ASSERT(stcb); 8380 do { 8381 phdr = sctp_get_next_param(m, at, &parm, sizeof(parm)); 8382 if (phdr == NULL) { 8383 return (-3); 8384 } 8385 ptype = ntohs(phdr->param_type); 8386 plen = ntohs(phdr->param_length); 8387 if (ptype == SCTP_STATE_COOKIE) { 8388 int pad; 8389 8390 /* found the cookie */ 8391 if ((pad = (plen % 4))) { 8392 plen += 4 - pad; 8393 } 8394 cookie = SCTP_M_COPYM(m, at, plen, M_DONTWAIT); 8395 if (cookie == NULL) { 8396 /* No memory */ 8397 return (-2); 8398 } 8399 #ifdef SCTP_MBUF_LOGGING 8400 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 8401 struct mbuf *mat; 8402 8403 mat = cookie; 8404 while (mat) { 8405 if (SCTP_BUF_IS_EXTENDED(mat)) { 8406 sctp_log_mb(mat, SCTP_MBUF_ICOPY); 8407 } 8408 mat = SCTP_BUF_NEXT(mat); 8409 } 8410 } 8411 #endif 8412 break; 8413 } 8414 at += SCTP_SIZE32(plen); 8415 } while (phdr); 8416 if (cookie == NULL) { 8417 /* Did not find the cookie */ 8418 return (-3); 8419 } 8420 /* ok, we got the cookie lets change it into a cookie echo chunk */ 8421 8422 /* first the change from param to cookie */ 8423 hdr = mtod(cookie, struct sctp_chunkhdr *); 8424 hdr->chunk_type = SCTP_COOKIE_ECHO; 8425 hdr->chunk_flags = 0; 8426 /* get the chunk stuff now and place it in the FRONT of the queue */ 8427 sctp_alloc_a_chunk(stcb, chk); 8428 if (chk == NULL) { 8429 /* no memory */ 8430 sctp_m_freem(cookie); 8431 return (-5); 8432 } 8433 chk->copy_by_ref = 0; 8434 chk->send_size = plen; 8435 chk->rec.chunk_id.id = SCTP_COOKIE_ECHO; 8436 chk->rec.chunk_id.can_take_data = 0; 8437 
chk->sent = SCTP_DATAGRAM_UNSENT; 8438 chk->snd_count = 0; 8439 chk->flags = CHUNK_FLAGS_FRAGMENT_OK; 8440 chk->asoc = &stcb->asoc; 8441 chk->data = cookie; 8442 chk->whoTo = chk->asoc->primary_destination; 8443 atomic_add_int(&chk->whoTo->ref_count, 1); 8444 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next); 8445 chk->asoc->ctrl_queue_cnt++; 8446 return (0); 8447 } 8448 8449 void 8450 sctp_send_heartbeat_ack(struct sctp_tcb *stcb, 8451 struct mbuf *m, 8452 int offset, 8453 int chk_length, 8454 struct sctp_nets *net) 8455 { 8456 /* 8457 * take a HB request and make it into a HB ack and send it. 8458 */ 8459 struct mbuf *outchain; 8460 struct sctp_chunkhdr *chdr; 8461 struct sctp_tmit_chunk *chk; 8462 8463 8464 if (net == NULL) 8465 /* must have a net pointer */ 8466 return; 8467 8468 outchain = SCTP_M_COPYM(m, offset, chk_length, M_DONTWAIT); 8469 if (outchain == NULL) { 8470 /* gak out of memory */ 8471 return; 8472 } 8473 #ifdef SCTP_MBUF_LOGGING 8474 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 8475 struct mbuf *mat; 8476 8477 mat = outchain; 8478 while (mat) { 8479 if (SCTP_BUF_IS_EXTENDED(mat)) { 8480 sctp_log_mb(mat, SCTP_MBUF_ICOPY); 8481 } 8482 mat = SCTP_BUF_NEXT(mat); 8483 } 8484 } 8485 #endif 8486 chdr = mtod(outchain, struct sctp_chunkhdr *); 8487 chdr->chunk_type = SCTP_HEARTBEAT_ACK; 8488 chdr->chunk_flags = 0; 8489 if (chk_length % 4) { 8490 /* need pad */ 8491 uint32_t cpthis = 0; 8492 int padlen; 8493 8494 padlen = 4 - (chk_length % 4); 8495 m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis); 8496 } 8497 sctp_alloc_a_chunk(stcb, chk); 8498 if (chk == NULL) { 8499 /* no memory */ 8500 sctp_m_freem(outchain); 8501 return; 8502 } 8503 chk->copy_by_ref = 0; 8504 chk->send_size = chk_length; 8505 chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK; 8506 chk->rec.chunk_id.can_take_data = 1; 8507 chk->sent = SCTP_DATAGRAM_UNSENT; 8508 chk->snd_count = 0; 8509 chk->flags = 0; 8510 chk->asoc = &stcb->asoc; 8511 
chk->data = outchain; 8512 chk->whoTo = net; 8513 atomic_add_int(&chk->whoTo->ref_count, 1); 8514 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 8515 chk->asoc->ctrl_queue_cnt++; 8516 } 8517 8518 void 8519 sctp_send_cookie_ack(struct sctp_tcb *stcb) 8520 { 8521 /* formulate and queue a cookie-ack back to sender */ 8522 struct mbuf *cookie_ack; 8523 struct sctp_chunkhdr *hdr; 8524 struct sctp_tmit_chunk *chk; 8525 8526 cookie_ack = NULL; 8527 SCTP_TCB_LOCK_ASSERT(stcb); 8528 8529 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER); 8530 if (cookie_ack == NULL) { 8531 /* no mbuf's */ 8532 return; 8533 } 8534 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD); 8535 sctp_alloc_a_chunk(stcb, chk); 8536 if (chk == NULL) { 8537 /* no memory */ 8538 sctp_m_freem(cookie_ack); 8539 return; 8540 } 8541 chk->copy_by_ref = 0; 8542 chk->send_size = sizeof(struct sctp_chunkhdr); 8543 chk->rec.chunk_id.id = SCTP_COOKIE_ACK; 8544 chk->rec.chunk_id.can_take_data = 1; 8545 chk->sent = SCTP_DATAGRAM_UNSENT; 8546 chk->snd_count = 0; 8547 chk->flags = 0; 8548 chk->asoc = &stcb->asoc; 8549 chk->data = cookie_ack; 8550 if (chk->asoc->last_control_chunk_from != NULL) { 8551 chk->whoTo = chk->asoc->last_control_chunk_from; 8552 } else { 8553 chk->whoTo = chk->asoc->primary_destination; 8554 } 8555 atomic_add_int(&chk->whoTo->ref_count, 1); 8556 hdr = mtod(cookie_ack, struct sctp_chunkhdr *); 8557 hdr->chunk_type = SCTP_COOKIE_ACK; 8558 hdr->chunk_flags = 0; 8559 hdr->chunk_length = htons(chk->send_size); 8560 SCTP_BUF_LEN(cookie_ack) = chk->send_size; 8561 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 8562 chk->asoc->ctrl_queue_cnt++; 8563 return; 8564 } 8565 8566 8567 void 8568 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net) 8569 { 8570 /* formulate and queue a SHUTDOWN-ACK back to the sender */ 8571 struct mbuf *m_shutdown_ack; 8572 struct sctp_shutdown_ack_chunk *ack_cp; 8573 struct 
sctp_tmit_chunk *chk; 8574 8575 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_DONTWAIT, 1, MT_HEADER); 8576 if (m_shutdown_ack == NULL) { 8577 /* no mbuf's */ 8578 return; 8579 } 8580 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD); 8581 sctp_alloc_a_chunk(stcb, chk); 8582 if (chk == NULL) { 8583 /* no memory */ 8584 sctp_m_freem(m_shutdown_ack); 8585 return; 8586 } 8587 chk->copy_by_ref = 0; 8588 chk->send_size = sizeof(struct sctp_chunkhdr); 8589 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK; 8590 chk->rec.chunk_id.can_take_data = 1; 8591 chk->sent = SCTP_DATAGRAM_UNSENT; 8592 chk->snd_count = 0; 8593 chk->flags = 0; 8594 chk->asoc = &stcb->asoc; 8595 chk->data = m_shutdown_ack; 8596 chk->whoTo = net; 8597 atomic_add_int(&net->ref_count, 1); 8598 8599 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *); 8600 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK; 8601 ack_cp->ch.chunk_flags = 0; 8602 ack_cp->ch.chunk_length = htons(chk->send_size); 8603 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size; 8604 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 8605 chk->asoc->ctrl_queue_cnt++; 8606 return; 8607 } 8608 8609 void 8610 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net) 8611 { 8612 /* formulate and queue a SHUTDOWN to the sender */ 8613 struct mbuf *m_shutdown; 8614 struct sctp_shutdown_chunk *shutdown_cp; 8615 struct sctp_tmit_chunk *chk; 8616 8617 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_DONTWAIT, 1, MT_HEADER); 8618 if (m_shutdown == NULL) { 8619 /* no mbuf's */ 8620 return; 8621 } 8622 SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD); 8623 sctp_alloc_a_chunk(stcb, chk); 8624 if (chk == NULL) { 8625 /* no memory */ 8626 sctp_m_freem(m_shutdown); 8627 return; 8628 } 8629 chk->copy_by_ref = 0; 8630 chk->send_size = sizeof(struct sctp_shutdown_chunk); 8631 chk->rec.chunk_id.id = SCTP_SHUTDOWN; 8632 chk->rec.chunk_id.can_take_data = 1; 8633 chk->sent = 
SCTP_DATAGRAM_UNSENT; 8634 chk->snd_count = 0; 8635 chk->flags = 0; 8636 chk->asoc = &stcb->asoc; 8637 chk->data = m_shutdown; 8638 chk->whoTo = net; 8639 atomic_add_int(&net->ref_count, 1); 8640 8641 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *); 8642 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN; 8643 shutdown_cp->ch.chunk_flags = 0; 8644 shutdown_cp->ch.chunk_length = htons(chk->send_size); 8645 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn); 8646 SCTP_BUF_LEN(m_shutdown) = chk->send_size; 8647 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 8648 chk->asoc->ctrl_queue_cnt++; 8649 return; 8650 } 8651 8652 void 8653 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked) 8654 { 8655 /* 8656 * formulate and queue an ASCONF to the peer. ASCONF parameters 8657 * should be queued on the assoc queue. 8658 */ 8659 struct sctp_tmit_chunk *chk; 8660 struct mbuf *m_asconf; 8661 int len; 8662 8663 SCTP_TCB_LOCK_ASSERT(stcb); 8664 8665 if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) && 8666 (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) { 8667 /* can't send a new one if there is one in flight already */ 8668 return; 8669 } 8670 /* compose an ASCONF chunk, maximum length is PMTU */ 8671 m_asconf = sctp_compose_asconf(stcb, &len, addr_locked); 8672 if (m_asconf == NULL) { 8673 return; 8674 } 8675 sctp_alloc_a_chunk(stcb, chk); 8676 if (chk == NULL) { 8677 /* no memory */ 8678 sctp_m_freem(m_asconf); 8679 return; 8680 } 8681 chk->copy_by_ref = 0; 8682 chk->data = m_asconf; 8683 chk->send_size = len; 8684 chk->rec.chunk_id.id = SCTP_ASCONF; 8685 chk->rec.chunk_id.can_take_data = 0; 8686 chk->sent = SCTP_DATAGRAM_UNSENT; 8687 chk->snd_count = 0; 8688 chk->flags = CHUNK_FLAGS_FRAGMENT_OK; 8689 chk->asoc = &stcb->asoc; 8690 chk->whoTo = net; 8691 atomic_add_int(&chk->whoTo->ref_count, 1); 8692 TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next); 8693 
chk->asoc->ctrl_queue_cnt++; 8694 return; 8695 } 8696 8697 void 8698 sctp_send_asconf_ack(struct sctp_tcb *stcb) 8699 { 8700 /* 8701 * formulate and queue a asconf-ack back to sender. the asconf-ack 8702 * must be stored in the tcb. 8703 */ 8704 struct sctp_tmit_chunk *chk; 8705 struct sctp_asconf_ack *ack, *latest_ack; 8706 struct mbuf *m_ack, *m; 8707 struct sctp_nets *net = NULL; 8708 8709 SCTP_TCB_LOCK_ASSERT(stcb); 8710 /* Get the latest ASCONF-ACK */ 8711 latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead); 8712 if (latest_ack == NULL) { 8713 return; 8714 } 8715 if (latest_ack->last_sent_to != NULL && 8716 latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) { 8717 /* we're doing a retransmission */ 8718 net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0); 8719 if (net == NULL) { 8720 /* no alternate */ 8721 if (stcb->asoc.last_control_chunk_from == NULL) 8722 net = stcb->asoc.primary_destination; 8723 else 8724 net = stcb->asoc.last_control_chunk_from; 8725 } 8726 } else { 8727 /* normal case */ 8728 if (stcb->asoc.last_control_chunk_from == NULL) 8729 net = stcb->asoc.primary_destination; 8730 else 8731 net = stcb->asoc.last_control_chunk_from; 8732 } 8733 latest_ack->last_sent_to = net; 8734 8735 TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) { 8736 if (ack->data == NULL) { 8737 continue; 8738 } 8739 /* copy the asconf_ack */ 8740 m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_DONTWAIT); 8741 if (m_ack == NULL) { 8742 /* couldn't copy it */ 8743 return; 8744 } 8745 #ifdef SCTP_MBUF_LOGGING 8746 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 8747 struct mbuf *mat; 8748 8749 mat = m_ack; 8750 while (mat) { 8751 if (SCTP_BUF_IS_EXTENDED(mat)) { 8752 sctp_log_mb(mat, SCTP_MBUF_ICOPY); 8753 } 8754 mat = SCTP_BUF_NEXT(mat); 8755 } 8756 } 8757 #endif 8758 8759 sctp_alloc_a_chunk(stcb, chk); 8760 if (chk == NULL) { 8761 /* no memory */ 8762 if (m_ack) 8763 sctp_m_freem(m_ack); 
8764 return; 8765 } 8766 chk->copy_by_ref = 0; 8767 8768 chk->whoTo = net; 8769 chk->data = m_ack; 8770 chk->send_size = 0; 8771 /* Get size */ 8772 m = m_ack; 8773 chk->send_size = ack->len; 8774 chk->rec.chunk_id.id = SCTP_ASCONF_ACK; 8775 chk->rec.chunk_id.can_take_data = 1; 8776 chk->sent = SCTP_DATAGRAM_UNSENT; 8777 chk->snd_count = 0; 8778 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; /* XXX */ 8779 chk->asoc = &stcb->asoc; 8780 atomic_add_int(&chk->whoTo->ref_count, 1); 8781 8782 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 8783 chk->asoc->ctrl_queue_cnt++; 8784 } 8785 return; 8786 } 8787 8788 8789 static int 8790 sctp_chunk_retransmission(struct sctp_inpcb *inp, 8791 struct sctp_tcb *stcb, 8792 struct sctp_association *asoc, 8793 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked 8794 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 8795 SCTP_UNUSED 8796 #endif 8797 ) 8798 { 8799 /*- 8800 * send out one MTU of retransmission. If fast_retransmit is 8801 * happening we ignore the cwnd. Otherwise we obey the cwnd and 8802 * rwnd. For a Cookie or Asconf in the control chunk queue we 8803 * retransmit them by themselves. 8804 * 8805 * For data chunks we will pick out the lowest TSN's in the sent_queue 8806 * marked for resend and bundle them all together (up to a MTU of 8807 * destination). The address to send to should have been 8808 * selected/changed where the retransmission was marked (i.e. in FR 8809 * or t3-timeout routines). 
8810 */ 8811 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING]; 8812 struct sctp_tmit_chunk *chk, *fwd; 8813 struct mbuf *m, *endofchain; 8814 struct sctp_nets *net = NULL; 8815 uint32_t tsns_sent = 0; 8816 int no_fragmentflg, bundle_at, cnt_thru; 8817 unsigned int mtu; 8818 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started; 8819 struct sctp_auth_chunk *auth = NULL; 8820 uint32_t auth_offset = 0; 8821 uint16_t auth_keyid; 8822 int override_ok = 1; 8823 int data_auth_reqd = 0; 8824 uint32_t dmtu = 0; 8825 8826 SCTP_TCB_LOCK_ASSERT(stcb); 8827 tmr_started = ctl_cnt = bundle_at = error = 0; 8828 no_fragmentflg = 1; 8829 fwd_tsn = 0; 8830 *cnt_out = 0; 8831 fwd = NULL; 8832 endofchain = m = NULL; 8833 auth_keyid = stcb->asoc.authinfo.active_keyid; 8834 #ifdef SCTP_AUDITING_ENABLED 8835 sctp_audit_log(0xC3, 1); 8836 #endif 8837 if ((TAILQ_EMPTY(&asoc->sent_queue)) && 8838 (TAILQ_EMPTY(&asoc->control_send_queue))) { 8839 SCTPDBG(SCTP_DEBUG_OUTPUT1, "SCTP hits empty queue with cnt set to %d?\n", 8840 asoc->sent_queue_retran_cnt); 8841 asoc->sent_queue_cnt = 0; 8842 asoc->sent_queue_cnt_removeable = 0; 8843 /* send back 0/0 so we enter normal transmission */ 8844 *cnt_out = 0; 8845 return (0); 8846 } 8847 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 8848 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) || 8849 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) || 8850 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) { 8851 if (chk->sent != SCTP_DATAGRAM_RESEND) { 8852 continue; 8853 } 8854 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) { 8855 if (chk != asoc->str_reset) { 8856 /* 8857 * not eligible for retran if its 8858 * not ours 8859 */ 8860 continue; 8861 } 8862 } 8863 ctl_cnt++; 8864 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) { 8865 fwd_tsn = 1; 8866 fwd = chk; 8867 } 8868 /* 8869 * Add an AUTH chunk, if chunk requires it save the 8870 * offset into the chain for AUTH 8871 */ 8872 if ((auth == NULL) && 8873 
(sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 8874 stcb->asoc.peer_auth_chunks))) { 8875 m = sctp_add_auth_chunk(m, &endofchain, 8876 &auth, &auth_offset, 8877 stcb, 8878 chk->rec.chunk_id.id); 8879 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 8880 } 8881 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref); 8882 break; 8883 } 8884 } 8885 one_chunk = 0; 8886 cnt_thru = 0; 8887 /* do we have control chunks to retransmit? */ 8888 if (m != NULL) { 8889 /* Start a timer no matter if we suceed or fail */ 8890 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 8891 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo); 8892 } else if (chk->rec.chunk_id.id == SCTP_ASCONF) 8893 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo); 8894 chk->snd_count++; /* update our count */ 8895 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo, 8896 (struct sockaddr *)&chk->whoTo->ro._l_addr, m, 8897 auth_offset, auth, stcb->asoc.authinfo.active_keyid, 8898 no_fragmentflg, 0, NULL, 0, 8899 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag), 8900 chk->whoTo->port, so_locked, NULL, NULL))) { 8901 SCTP_STAT_INCR(sctps_lowlevelerr); 8902 return (error); 8903 } 8904 m = endofchain = NULL; 8905 auth = NULL; 8906 auth_offset = 0; 8907 /* 8908 * We don't want to mark the net->sent time here since this 8909 * we use this for HB and retrans cannot measure RTT 8910 */ 8911 /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */ 8912 *cnt_out += 1; 8913 chk->sent = SCTP_DATAGRAM_SENT; 8914 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt); 8915 if (fwd_tsn == 0) { 8916 return (0); 8917 } else { 8918 /* Clean up the fwd-tsn list */ 8919 sctp_clean_up_ctl(stcb, asoc); 8920 return (0); 8921 } 8922 } 8923 /* 8924 * Ok, it is just data retransmission we need to do or that and a 8925 * fwd-tsn with it all. 
8926 */ 8927 if (TAILQ_EMPTY(&asoc->sent_queue)) { 8928 return (SCTP_RETRAN_DONE); 8929 } 8930 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) || 8931 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) { 8932 /* not yet open, resend the cookie and that is it */ 8933 return (1); 8934 } 8935 #ifdef SCTP_AUDITING_ENABLED 8936 sctp_auditing(20, inp, stcb, NULL); 8937 #endif 8938 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks); 8939 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 8940 if (chk->sent != SCTP_DATAGRAM_RESEND) { 8941 /* No, not sent to this net or not ready for rtx */ 8942 continue; 8943 } 8944 if (chk->data == NULL) { 8945 printf("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n", 8946 chk->rec.data.TSN_seq, chk->snd_count, chk->sent); 8947 continue; 8948 } 8949 if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) && 8950 (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) { 8951 /* Gak, we have exceeded max unlucky retran, abort! */ 8952 SCTP_PRINTF("Gak, chk->snd_count:%d >= max:%d - send abort\n", 8953 chk->snd_count, 8954 SCTP_BASE_SYSCTL(sctp_max_retran_chunk)); 8955 atomic_add_int(&stcb->asoc.refcnt, 1); 8956 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, NULL, so_locked); 8957 SCTP_TCB_LOCK(stcb); 8958 atomic_subtract_int(&stcb->asoc.refcnt, 1); 8959 return (SCTP_RETRAN_EXIT); 8960 } 8961 /* pick up the net */ 8962 net = chk->whoTo; 8963 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 8964 mtu = (net->mtu - SCTP_MIN_OVERHEAD); 8965 } else { 8966 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD; 8967 } 8968 8969 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) { 8970 /* No room in peers rwnd */ 8971 uint32_t tsn; 8972 8973 tsn = asoc->last_acked_seq + 1; 8974 if (tsn == chk->rec.data.TSN_seq) { 8975 /* 8976 * we make a special exception for this 8977 * case. The peer has no rwnd but is missing 8978 * the lowest chunk.. which is probably what 8979 * is holding up the rwnd. 
8980 */ 8981 goto one_chunk_around; 8982 } 8983 return (1); 8984 } 8985 one_chunk_around: 8986 if (asoc->peers_rwnd < mtu) { 8987 one_chunk = 1; 8988 if ((asoc->peers_rwnd == 0) && 8989 (asoc->total_flight == 0)) { 8990 chk->window_probe = 1; 8991 chk->whoTo->window_probe = 1; 8992 } 8993 } 8994 #ifdef SCTP_AUDITING_ENABLED 8995 sctp_audit_log(0xC3, 2); 8996 #endif 8997 bundle_at = 0; 8998 m = NULL; 8999 net->fast_retran_ip = 0; 9000 if (chk->rec.data.doing_fast_retransmit == 0) { 9001 /* 9002 * if no FR in progress skip destination that have 9003 * flight_size > cwnd. 9004 */ 9005 if (net->flight_size >= net->cwnd) { 9006 continue; 9007 } 9008 } else { 9009 /* 9010 * Mark the destination net to have FR recovery 9011 * limits put on it. 9012 */ 9013 *fr_done = 1; 9014 net->fast_retran_ip = 1; 9015 } 9016 9017 /* 9018 * if no AUTH is yet included and this chunk requires it, 9019 * make sure to account for it. We don't apply the size 9020 * until the AUTH chunk is actually added below in case 9021 * there is no room for this chunk. 
9022 */ 9023 if (data_auth_reqd && (auth == NULL)) { 9024 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 9025 } else 9026 dmtu = 0; 9027 9028 if ((chk->send_size <= (mtu - dmtu)) || 9029 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) { 9030 /* ok we will add this one */ 9031 if (data_auth_reqd) { 9032 if (auth == NULL) { 9033 m = sctp_add_auth_chunk(m, 9034 &endofchain, 9035 &auth, 9036 &auth_offset, 9037 stcb, 9038 SCTP_DATA); 9039 auth_keyid = chk->auth_keyid; 9040 override_ok = 0; 9041 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 9042 } else if (override_ok) { 9043 auth_keyid = chk->auth_keyid; 9044 override_ok = 0; 9045 } else if (chk->auth_keyid != auth_keyid) { 9046 /* different keyid, so done bundling */ 9047 break; 9048 } 9049 } 9050 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref); 9051 if (m == NULL) { 9052 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 9053 return (ENOMEM); 9054 } 9055 /* Do clear IP_DF ? */ 9056 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { 9057 no_fragmentflg = 0; 9058 } 9059 /* upate our MTU size */ 9060 if (mtu > (chk->send_size + dmtu)) 9061 mtu -= (chk->send_size + dmtu); 9062 else 9063 mtu = 0; 9064 data_list[bundle_at++] = chk; 9065 if (one_chunk && (asoc->total_flight <= 0)) { 9066 SCTP_STAT_INCR(sctps_windowprobed); 9067 } 9068 } 9069 if (one_chunk == 0) { 9070 /* 9071 * now are there anymore forward from chk to pick 9072 * up? 
9073 */ 9074 fwd = TAILQ_NEXT(chk, sctp_next); 9075 while (fwd) { 9076 if (fwd->sent != SCTP_DATAGRAM_RESEND) { 9077 /* Nope, not for retran */ 9078 fwd = TAILQ_NEXT(fwd, sctp_next); 9079 continue; 9080 } 9081 if (fwd->whoTo != net) { 9082 /* Nope, not the net in question */ 9083 fwd = TAILQ_NEXT(fwd, sctp_next); 9084 continue; 9085 } 9086 if (data_auth_reqd && (auth == NULL)) { 9087 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 9088 } else 9089 dmtu = 0; 9090 if (fwd->send_size <= (mtu - dmtu)) { 9091 if (data_auth_reqd) { 9092 if (auth == NULL) { 9093 m = sctp_add_auth_chunk(m, 9094 &endofchain, 9095 &auth, 9096 &auth_offset, 9097 stcb, 9098 SCTP_DATA); 9099 auth_keyid = fwd->auth_keyid; 9100 override_ok = 0; 9101 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 9102 } else if (override_ok) { 9103 auth_keyid = fwd->auth_keyid; 9104 override_ok = 0; 9105 } else if (fwd->auth_keyid != auth_keyid) { 9106 /* 9107 * different keyid, 9108 * so done bundling 9109 */ 9110 break; 9111 } 9112 } 9113 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref); 9114 if (m == NULL) { 9115 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 9116 return (ENOMEM); 9117 } 9118 /* Do clear IP_DF ? */ 9119 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) { 9120 no_fragmentflg = 0; 9121 } 9122 /* upate our MTU size */ 9123 if (mtu > (fwd->send_size + dmtu)) 9124 mtu -= (fwd->send_size + dmtu); 9125 else 9126 mtu = 0; 9127 data_list[bundle_at++] = fwd; 9128 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) { 9129 break; 9130 } 9131 fwd = TAILQ_NEXT(fwd, sctp_next); 9132 } else { 9133 /* can't fit so we are done */ 9134 break; 9135 } 9136 } 9137 } 9138 /* Is there something to send for this destination? */ 9139 if (m) { 9140 /* 9141 * No matter if we fail/or suceed we should start a 9142 * timer. 
A failure is like a lost IP packet :-) 9143 */ 9144 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 9145 /* 9146 * no timer running on this destination 9147 * restart it. 9148 */ 9149 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 9150 tmr_started = 1; 9151 } 9152 /* Now lets send it, if there is anything to send :> */ 9153 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net, 9154 (struct sockaddr *)&net->ro._l_addr, m, 9155 auth_offset, auth, auth_keyid, 9156 no_fragmentflg, 0, NULL, 0, 9157 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag), 9158 net->port, so_locked, NULL, NULL))) { 9159 /* error, we could not output */ 9160 SCTP_STAT_INCR(sctps_lowlevelerr); 9161 return (error); 9162 } 9163 m = endofchain = NULL; 9164 auth = NULL; 9165 auth_offset = 0; 9166 /* For HB's */ 9167 /* 9168 * We don't want to mark the net->sent time here 9169 * since this we use this for HB and retrans cannot 9170 * measure RTT 9171 */ 9172 /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */ 9173 9174 /* For auto-close */ 9175 cnt_thru++; 9176 if (*now_filled == 0) { 9177 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent); 9178 *now = asoc->time_last_sent; 9179 *now_filled = 1; 9180 } else { 9181 asoc->time_last_sent = *now; 9182 } 9183 *cnt_out += bundle_at; 9184 #ifdef SCTP_AUDITING_ENABLED 9185 sctp_audit_log(0xC4, bundle_at); 9186 #endif 9187 if (bundle_at) { 9188 tsns_sent = data_list[0]->rec.data.TSN_seq; 9189 } 9190 for (i = 0; i < bundle_at; i++) { 9191 SCTP_STAT_INCR(sctps_sendretransdata); 9192 data_list[i]->sent = SCTP_DATAGRAM_SENT; 9193 /* 9194 * When we have a revoked data, and we 9195 * retransmit it, then we clear the revoked 9196 * flag since this flag dictates if we 9197 * subtracted from the fs 9198 */ 9199 if (data_list[i]->rec.data.chunk_was_revoked) { 9200 /* Deflate the cwnd */ 9201 data_list[i]->whoTo->cwnd -= data_list[i]->book_size; 9202 data_list[i]->rec.data.chunk_was_revoked = 0; 9203 } 9204 data_list[i]->snd_count++; 9205 
sctp_ucount_decr(asoc->sent_queue_retran_cnt); 9206 /* record the time */ 9207 data_list[i]->sent_rcv_time = asoc->time_last_sent; 9208 if (data_list[i]->book_size_scale) { 9209 /* 9210 * need to double the book size on 9211 * this one 9212 */ 9213 data_list[i]->book_size_scale = 0; 9214 /* 9215 * Since we double the booksize, we 9216 * must also double the output queue 9217 * size, since this get shrunk when 9218 * we free by this amount. 9219 */ 9220 atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size); 9221 data_list[i]->book_size *= 2; 9222 9223 9224 } else { 9225 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 9226 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND, 9227 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 9228 } 9229 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd, 9230 (uint32_t) (data_list[i]->send_size + 9231 SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))); 9232 } 9233 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 9234 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND, 9235 data_list[i]->whoTo->flight_size, 9236 data_list[i]->book_size, 9237 (uintptr_t) data_list[i]->whoTo, 9238 data_list[i]->rec.data.TSN_seq); 9239 } 9240 sctp_flight_size_increase(data_list[i]); 9241 sctp_total_flight_increase(stcb, data_list[i]); 9242 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 9243 /* SWS sender side engages */ 9244 asoc->peers_rwnd = 0; 9245 } 9246 if ((i == 0) && 9247 (data_list[i]->rec.data.doing_fast_retransmit)) { 9248 SCTP_STAT_INCR(sctps_sendfastretrans); 9249 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) && 9250 (tmr_started == 0)) { 9251 /*- 9252 * ok we just fast-retrans'd 9253 * the lowest TSN, i.e the 9254 * first on the list. In 9255 * this case we want to give 9256 * some more time to get a 9257 * SACK back without a 9258 * t3-expiring. 
9259 */ 9260 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net, 9261 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4); 9262 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 9263 } 9264 } 9265 } 9266 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 9267 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND); 9268 } 9269 #ifdef SCTP_AUDITING_ENABLED 9270 sctp_auditing(21, inp, stcb, NULL); 9271 #endif 9272 } else { 9273 /* None will fit */ 9274 return (1); 9275 } 9276 if (asoc->sent_queue_retran_cnt <= 0) { 9277 /* all done we have no more to retran */ 9278 asoc->sent_queue_retran_cnt = 0; 9279 break; 9280 } 9281 if (one_chunk) { 9282 /* No more room in rwnd */ 9283 return (1); 9284 } 9285 /* stop the for loop here. we sent out a packet */ 9286 break; 9287 } 9288 return (0); 9289 } 9290 9291 9292 static int 9293 sctp_timer_validation(struct sctp_inpcb *inp, 9294 struct sctp_tcb *stcb, 9295 struct sctp_association *asoc, 9296 int ret) 9297 { 9298 struct sctp_nets *net; 9299 9300 /* Validate that a timer is running somewhere */ 9301 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 9302 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 9303 /* Here is a timer */ 9304 return (ret); 9305 } 9306 } 9307 SCTP_TCB_LOCK_ASSERT(stcb); 9308 /* Gak, we did not have a timer somewhere */ 9309 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n"); 9310 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination); 9311 return (ret); 9312 } 9313 9314 void 9315 sctp_chunk_output(struct sctp_inpcb *inp, 9316 struct sctp_tcb *stcb, 9317 int from_where, 9318 int so_locked 9319 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 9320 SCTP_UNUSED 9321 #endif 9322 ) 9323 { 9324 /*- 9325 * Ok this is the generic chunk service queue. we must do the 9326 * following: 9327 * - See if there are retransmits pending, if so we must 9328 * do these first. 
9329 * - Service the stream queue that is next, moving any 9330 * message (note I must get a complete message i.e. 9331 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning 9332 * TSN's 9333 * - Check to see if the cwnd/rwnd allows any output, if so we 9334 * go ahead and fomulate and send the low level chunks. Making sure 9335 * to combine any control in the control chunk queue also. 9336 */ 9337 struct sctp_association *asoc; 9338 struct sctp_nets *net; 9339 int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0; 9340 unsigned int burst_cnt = 0; 9341 struct timeval now; 9342 int now_filled = 0; 9343 int nagle_on = 0; 9344 int frag_point = sctp_get_frag_point(stcb, &stcb->asoc); 9345 int un_sent = 0; 9346 int fr_done; 9347 unsigned int tot_frs = 0; 9348 9349 asoc = &stcb->asoc; 9350 if (from_where == SCTP_OUTPUT_FROM_USR_SEND) { 9351 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) { 9352 nagle_on = 0; 9353 } else { 9354 nagle_on = 1; 9355 } 9356 } 9357 SCTP_TCB_LOCK_ASSERT(stcb); 9358 9359 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight); 9360 9361 if ((un_sent <= 0) && 9362 (TAILQ_EMPTY(&asoc->control_send_queue)) && 9363 (TAILQ_EMPTY(&asoc->asconf_send_queue)) && 9364 (asoc->sent_queue_retran_cnt == 0)) { 9365 /* Nothing to do unless there is something to be sent left */ 9366 return; 9367 } 9368 /* 9369 * Do we have something to send, data or control AND a sack timer 9370 * running, if so piggy-back the sack. 9371 */ 9372 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 9373 sctp_send_sack(stcb); 9374 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 9375 } 9376 while (asoc->sent_queue_retran_cnt) { 9377 /*- 9378 * Ok, it is retransmission time only, we send out only ONE 9379 * packet with a single call off to the retran code. 9380 */ 9381 if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) { 9382 /*- 9383 * Special hook for handling cookiess discarded 9384 * by peer that carried data. 
Send cookie-ack only 9385 * and then the next call with get the retran's. 9386 */ 9387 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, 9388 from_where, 9389 &now, &now_filled, frag_point, so_locked); 9390 return; 9391 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) { 9392 /* if its not from a HB then do it */ 9393 fr_done = 0; 9394 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked); 9395 if (fr_done) { 9396 tot_frs++; 9397 } 9398 } else { 9399 /* 9400 * its from any other place, we don't allow retran 9401 * output (only control) 9402 */ 9403 ret = 1; 9404 } 9405 if (ret > 0) { 9406 /* Can't send anymore */ 9407 /*- 9408 * now lets push out control by calling med-level 9409 * output once. this assures that we WILL send HB's 9410 * if queued too. 9411 */ 9412 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, 9413 from_where, 9414 &now, &now_filled, frag_point, so_locked); 9415 #ifdef SCTP_AUDITING_ENABLED 9416 sctp_auditing(8, inp, stcb, NULL); 9417 #endif 9418 (void)sctp_timer_validation(inp, stcb, asoc, ret); 9419 return; 9420 } 9421 if (ret < 0) { 9422 /*- 9423 * The count was off.. retran is not happening so do 9424 * the normal retransmission. 
9425 */ 9426 #ifdef SCTP_AUDITING_ENABLED 9427 sctp_auditing(9, inp, stcb, NULL); 9428 #endif 9429 if (ret == SCTP_RETRAN_EXIT) { 9430 return; 9431 } 9432 break; 9433 } 9434 if (from_where == SCTP_OUTPUT_FROM_T3) { 9435 /* Only one transmission allowed out of a timeout */ 9436 #ifdef SCTP_AUDITING_ENABLED 9437 sctp_auditing(10, inp, stcb, NULL); 9438 #endif 9439 /* Push out any control */ 9440 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where, 9441 &now, &now_filled, frag_point, so_locked); 9442 return; 9443 } 9444 if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) { 9445 /* Hit FR burst limit */ 9446 return; 9447 } 9448 if ((num_out == 0) && (ret == 0)) { 9449 /* No more retrans to send */ 9450 break; 9451 } 9452 } 9453 #ifdef SCTP_AUDITING_ENABLED 9454 sctp_auditing(12, inp, stcb, NULL); 9455 #endif 9456 /* Check for bad destinations, if they exist move chunks around. */ 9457 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 9458 if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) == 9459 SCTP_ADDR_NOT_REACHABLE) { 9460 /*- 9461 * if possible move things off of this address we 9462 * still may send below due to the dormant state but 9463 * we try to find an alternate address to send to 9464 * and if we have one we move all queued data on the 9465 * out wheel to this alternate address. 9466 */ 9467 if (net->ref_count > 1) 9468 sctp_move_chunks_from_net(stcb, net); 9469 } else if ((asoc->sctp_cmt_on_off > 0) && 9470 (asoc->sctp_cmt_pf > 0) && 9471 ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) { 9472 /* 9473 * JRS 5/14/07 - If CMT PF is on and the current 9474 * destination is in PF state, move all queued data 9475 * to an alternate desination. 
9476 */ 9477 if (net->ref_count > 1) 9478 sctp_move_chunks_from_net(stcb, net); 9479 } else { 9480 /*- 9481 * if ((asoc->sat_network) || (net->addr_is_local)) 9482 * { burst_limit = asoc->max_burst * 9483 * SCTP_SAT_NETWORK_BURST_INCR; } 9484 */ 9485 if (asoc->max_burst > 0) { 9486 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) { 9487 if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) { 9488 /* 9489 * JRS - Use the congestion 9490 * control given in the 9491 * congestion control module 9492 */ 9493 asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst); 9494 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) { 9495 sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED); 9496 } 9497 SCTP_STAT_INCR(sctps_maxburstqueued); 9498 } 9499 net->fast_retran_ip = 0; 9500 } else { 9501 if (net->flight_size == 0) { 9502 /* 9503 * Should be decaying the 9504 * cwnd here 9505 */ 9506 ; 9507 } 9508 } 9509 } 9510 } 9511 9512 } 9513 burst_cnt = 0; 9514 do { 9515 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out, 9516 &reason_code, 0, from_where, 9517 &now, &now_filled, frag_point, so_locked); 9518 if (error) { 9519 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error); 9520 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) { 9521 sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP); 9522 } 9523 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 9524 sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES); 9525 sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES); 9526 } 9527 break; 9528 } 9529 SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out); 9530 9531 tot_out += num_out; 9532 burst_cnt++; 9533 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 9534 sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES); 9535 if (num_out == 0) { 9536 
sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES); 9537 } 9538 } 9539 if (nagle_on) { 9540 /*- 9541 * When nagle is on, we look at how much is un_sent, then 9542 * if its smaller than an MTU and we have data in 9543 * flight we stop. 9544 */ 9545 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 9546 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk))); 9547 if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) && 9548 (stcb->asoc.total_flight > 0)) { 9549 break; 9550 } 9551 } 9552 if (TAILQ_EMPTY(&asoc->control_send_queue) && 9553 TAILQ_EMPTY(&asoc->send_queue) && 9554 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) { 9555 /* Nothing left to send */ 9556 break; 9557 } 9558 if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) { 9559 /* Nothing left to send */ 9560 break; 9561 } 9562 } while (num_out && 9563 ((asoc->max_burst == 0) || 9564 SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) || 9565 (burst_cnt < asoc->max_burst))); 9566 9567 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) { 9568 if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) { 9569 SCTP_STAT_INCR(sctps_maxburstqueued); 9570 asoc->burst_limit_applied = 1; 9571 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) { 9572 sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED); 9573 } 9574 } else { 9575 asoc->burst_limit_applied = 0; 9576 } 9577 } 9578 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 9579 sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES); 9580 } 9581 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n", 9582 tot_out); 9583 9584 /*- 9585 * Now we need to clean up the control chunk chain if a ECNE is on 9586 * it. It must be marked as UNSENT again so next call will continue 9587 * to send it until such time that we get a CWR, to remove it. 
9588 */ 9589 if (stcb->asoc.ecn_echo_cnt_onq) 9590 sctp_fix_ecn_echo(asoc); 9591 return; 9592 } 9593 9594 9595 int 9596 sctp_output(inp, m, addr, control, p, flags) 9597 struct sctp_inpcb *inp; 9598 struct mbuf *m; 9599 struct sockaddr *addr; 9600 struct mbuf *control; 9601 struct thread *p; 9602 int flags; 9603 { 9604 if (inp == NULL) { 9605 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); 9606 return (EINVAL); 9607 } 9608 if (inp->sctp_socket == NULL) { 9609 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); 9610 return (EINVAL); 9611 } 9612 return (sctp_sosend(inp->sctp_socket, 9613 addr, 9614 (struct uio *)NULL, 9615 m, 9616 control, 9617 flags, p 9618 )); 9619 } 9620 9621 void 9622 send_forward_tsn(struct sctp_tcb *stcb, 9623 struct sctp_association *asoc) 9624 { 9625 struct sctp_tmit_chunk *chk; 9626 struct sctp_forward_tsn_chunk *fwdtsn; 9627 uint32_t advance_peer_ack_point; 9628 9629 SCTP_TCB_LOCK_ASSERT(stcb); 9630 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 9631 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) { 9632 /* mark it to unsent */ 9633 chk->sent = SCTP_DATAGRAM_UNSENT; 9634 chk->snd_count = 0; 9635 /* Do we correct its output location? 
*/ 9636 if (chk->whoTo != asoc->primary_destination) { 9637 sctp_free_remote_addr(chk->whoTo); 9638 chk->whoTo = asoc->primary_destination; 9639 atomic_add_int(&chk->whoTo->ref_count, 1); 9640 } 9641 goto sctp_fill_in_rest; 9642 } 9643 } 9644 /* Ok if we reach here we must build one */ 9645 sctp_alloc_a_chunk(stcb, chk); 9646 if (chk == NULL) { 9647 return; 9648 } 9649 asoc->fwd_tsn_cnt++; 9650 chk->copy_by_ref = 0; 9651 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN; 9652 chk->rec.chunk_id.can_take_data = 0; 9653 chk->asoc = asoc; 9654 chk->whoTo = NULL; 9655 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 9656 if (chk->data == NULL) { 9657 sctp_free_a_chunk(stcb, chk); 9658 return; 9659 } 9660 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 9661 chk->sent = SCTP_DATAGRAM_UNSENT; 9662 chk->snd_count = 0; 9663 chk->whoTo = asoc->primary_destination; 9664 atomic_add_int(&chk->whoTo->ref_count, 1); 9665 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next); 9666 asoc->ctrl_queue_cnt++; 9667 sctp_fill_in_rest: 9668 /*- 9669 * Here we go through and fill out the part that deals with 9670 * stream/seq of the ones we skip. 
9671 */ 9672 SCTP_BUF_LEN(chk->data) = 0; 9673 { 9674 struct sctp_tmit_chunk *at, *tp1, *last; 9675 struct sctp_strseq *strseq; 9676 unsigned int cnt_of_space, i, ovh; 9677 unsigned int space_needed; 9678 unsigned int cnt_of_skipped = 0; 9679 9680 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) { 9681 if (at->sent != SCTP_FORWARD_TSN_SKIP) { 9682 /* no more to look at */ 9683 break; 9684 } 9685 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) { 9686 /* We don't report these */ 9687 continue; 9688 } 9689 cnt_of_skipped++; 9690 } 9691 space_needed = (sizeof(struct sctp_forward_tsn_chunk) + 9692 (cnt_of_skipped * sizeof(struct sctp_strseq))); 9693 9694 cnt_of_space = M_TRAILINGSPACE(chk->data); 9695 9696 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 9697 ovh = SCTP_MIN_OVERHEAD; 9698 } else { 9699 ovh = SCTP_MIN_V4_OVERHEAD; 9700 } 9701 if (cnt_of_space > (asoc->smallest_mtu - ovh)) { 9702 /* trim to a mtu size */ 9703 cnt_of_space = asoc->smallest_mtu - ovh; 9704 } 9705 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 9706 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 9707 0xff, 0, cnt_of_skipped, 9708 asoc->advanced_peer_ack_point); 9709 9710 } 9711 advance_peer_ack_point = asoc->advanced_peer_ack_point; 9712 if (cnt_of_space < space_needed) { 9713 /*- 9714 * ok we must trim down the chunk by lowering the 9715 * advance peer ack point. 9716 */ 9717 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 9718 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 9719 0xff, 0xff, cnt_of_space, 9720 space_needed); 9721 } 9722 cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk); 9723 cnt_of_skipped /= sizeof(struct sctp_strseq); 9724 /*- 9725 * Go through and find the TSN that will be the one 9726 * we report. 
9727 */ 9728 at = TAILQ_FIRST(&asoc->sent_queue); 9729 for (i = 0; i < cnt_of_skipped; i++) { 9730 tp1 = TAILQ_NEXT(at, sctp_next); 9731 if (tp1 == NULL) { 9732 break; 9733 } 9734 at = tp1; 9735 } 9736 if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 9737 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 9738 0xff, cnt_of_skipped, at->rec.data.TSN_seq, 9739 asoc->advanced_peer_ack_point); 9740 } 9741 last = at; 9742 /*- 9743 * last now points to last one I can report, update 9744 * peer ack point 9745 */ 9746 if (last) 9747 advance_peer_ack_point = last->rec.data.TSN_seq; 9748 space_needed = sizeof(struct sctp_forward_tsn_chunk) + 9749 cnt_of_skipped * sizeof(struct sctp_strseq); 9750 } 9751 chk->send_size = space_needed; 9752 /* Setup the chunk */ 9753 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *); 9754 fwdtsn->ch.chunk_length = htons(chk->send_size); 9755 fwdtsn->ch.chunk_flags = 0; 9756 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN; 9757 fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point); 9758 SCTP_BUF_LEN(chk->data) = chk->send_size; 9759 fwdtsn++; 9760 /*- 9761 * Move pointer to after the fwdtsn and transfer to the 9762 * strseq pointer. 9763 */ 9764 strseq = (struct sctp_strseq *)fwdtsn; 9765 /*- 9766 * Now populate the strseq list. This is done blindly 9767 * without pulling out duplicate stream info. This is 9768 * inefficent but won't harm the process since the peer will 9769 * look at these in sequence and will thus release anything. 9770 * It could mean we exceed the PMTU and chop off some that 9771 * we could have included.. but this is unlikely (aka 1432/4 9772 * would mean 300+ stream seq's would have to be reported in 9773 * one FWD-TSN. With a bit of work we can later FIX this to 9774 * optimize and pull out duplcates.. but it does add more 9775 * overhead. So for now... not! 
9776 */ 9777 at = TAILQ_FIRST(&asoc->sent_queue); 9778 for (i = 0; i < cnt_of_skipped; i++) { 9779 tp1 = TAILQ_NEXT(at, sctp_next); 9780 if (tp1 == NULL) 9781 break; 9782 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) { 9783 /* We don't report these */ 9784 i--; 9785 at = tp1; 9786 continue; 9787 } 9788 if (at->rec.data.TSN_seq == advance_peer_ack_point) { 9789 at->rec.data.fwd_tsn_cnt = 0; 9790 } 9791 strseq->stream = ntohs(at->rec.data.stream_number); 9792 strseq->sequence = ntohs(at->rec.data.stream_seq); 9793 strseq++; 9794 at = tp1; 9795 } 9796 } 9797 return; 9798 9799 } 9800 9801 void 9802 sctp_send_sack(struct sctp_tcb *stcb) 9803 { 9804 /*- 9805 * Queue up a SACK or NR-SACK in the control queue. 9806 * We must first check to see if a SACK or NR-SACK is 9807 * somehow on the control queue. 9808 * If so, we will take and and remove the old one. 9809 */ 9810 struct sctp_association *asoc; 9811 struct sctp_tmit_chunk *chk, *a_chk; 9812 struct sctp_sack_chunk *sack; 9813 struct sctp_nr_sack_chunk *nr_sack; 9814 struct sctp_gap_ack_block *gap_descriptor; 9815 struct sack_track *selector; 9816 int mergeable = 0; 9817 int offset; 9818 caddr_t limit; 9819 uint32_t *dup; 9820 int limit_reached = 0; 9821 unsigned int i, siz, j; 9822 unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space; 9823 int num_dups = 0; 9824 int space_req; 9825 uint32_t highest_tsn; 9826 uint8_t flags; 9827 uint8_t type; 9828 uint8_t tsn_map; 9829 9830 if ((stcb->asoc.sctp_nr_sack_on_off == 1) && 9831 (stcb->asoc.peer_supports_nr_sack == 1)) { 9832 type = SCTP_NR_SELECTIVE_ACK; 9833 } else { 9834 type = SCTP_SELECTIVE_ACK; 9835 } 9836 a_chk = NULL; 9837 asoc = &stcb->asoc; 9838 SCTP_TCB_LOCK_ASSERT(stcb); 9839 if (asoc->last_data_chunk_from == NULL) { 9840 /* Hmm we never received anything */ 9841 return; 9842 } 9843 sctp_slide_mapping_arrays(stcb); 9844 sctp_set_rwnd(stcb, asoc); 9845 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 9846 if (chk->rec.chunk_id.id == type) { 
			/* Hmm, found a sack already on queue, remove it */
			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
			asoc->ctrl_queue_cnt--;
			a_chk = chk;
			if (a_chk->data) {
				sctp_m_freem(a_chk->data);
				a_chk->data = NULL;
			}
			sctp_free_remote_addr(a_chk->whoTo);
			a_chk->whoTo = NULL;
			break;
		}
	}
	if (a_chk == NULL) {
		sctp_alloc_a_chunk(stcb, a_chk);
		if (a_chk == NULL) {
			/* No memory so we drop the idea, and set a timer */
			if (stcb->asoc.delayed_ack) {
				sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
				    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
				    stcb->sctp_ep, stcb, NULL);
			} else {
				stcb->asoc.send_sack = 1;
			}
			return;
		}
		a_chk->copy_by_ref = 0;
		a_chk->rec.chunk_id.id = type;
		a_chk->rec.chunk_id.can_take_data = 1;
	}
	/* Clear our pkt counts */
	asoc->data_pkts_seen = 0;

	a_chk->asoc = asoc;
	a_chk->snd_count = 0;
	a_chk->send_size = 0;	/* fill in later */
	a_chk->sent = SCTP_DATAGRAM_UNSENT;
	a_chk->whoTo = NULL;

	if ((asoc->numduptsns) ||
	    (asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE)) {
		/*-
		 * Ok, we have some duplicates or the destination for the
		 * sack is unreachable, lets see if we can select an
		 * alternate than asoc->last_data_chunk_from
		 */
		if ((!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE)) &&
		    (asoc->used_alt_onsack > asoc->numnets)) {
			/* We used an alt last time, don't this time */
			a_chk->whoTo = NULL;
		} else {
			asoc->used_alt_onsack++;
			a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
		}
		if (a_chk->whoTo == NULL) {
			/* Nope, no alternate */
			a_chk->whoTo = asoc->last_data_chunk_from;
			asoc->used_alt_onsack = 0;
		}
	} else {
		/*
		 * No duplicates so we use the last place
		 * we received data from.
		 */
		asoc->used_alt_onsack = 0;
		a_chk->whoTo = asoc->last_data_chunk_from;
	}
	if (a_chk->whoTo) {
		atomic_add_int(&a_chk->whoTo->ref_count, 1);
	}
	/* Highest TSN seen across the renegable and non-renegable maps. */
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
		highest_tsn = asoc->highest_tsn_inside_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	}
	if (highest_tsn == asoc->cumulative_tsn) {
		/* no gaps */
		if (type == SCTP_SELECTIVE_ACK) {
			space_req = sizeof(struct sctp_sack_chunk);
		} else {
			space_req = sizeof(struct sctp_nr_sack_chunk);
		}
	} else {
		/* gaps get a cluster */
		space_req = MCLBYTES;
	}
	/* Ok now lets formulate a MBUF with our sack */
	a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_DONTWAIT, 1, MT_DATA);
	if ((a_chk->data == NULL) ||
	    (a_chk->whoTo == NULL)) {
		/* rats, no mbuf memory */
		if (a_chk->data) {
			/* was a problem with the destination */
			sctp_m_freem(a_chk->data);
			a_chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, a_chk);
		/* sa_ignore NO_NULL_CHK */
		if (stcb->asoc.delayed_ack) {
			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
			sctp_timer_start(SCTP_TIMER_TYPE_RECV,
			    stcb->sctp_ep, stcb, NULL);
		} else {
			stcb->asoc.send_sack = 1;
		}
		return;
	}
	/* ok, lets go through and fill it in */
	SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
	space = M_TRAILINGSPACE(a_chk->data);
	if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
		space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
	}
	/* limit = one past the last byte we may write into the chunk. */
	limit = mtod(a_chk->data, caddr_t);
	limit += space;

	flags = 0;

	if ((asoc->sctp_cmt_on_off > 0) &&
	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
		/*-
		 * CMT DAC algorithm: If 2 (i.e., binary 10) packets have been
		 * received, then set high bit to 1, else 0. Reset
		 * pkts_rcvd.
		 */
		flags |= (asoc->cmt_dac_pkts_rcvd << 6);
		asoc->cmt_dac_pkts_rcvd = 0;
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
	stcb->asoc.cumack_log_atsnt++;
	if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
		stcb->asoc.cumack_log_atsnt = 0;
	}
#endif
	/* reset the readers interpretation */
	stcb->freed_by_sorcv_sincelast = 0;

	/* Point at the header and first gap block; size the bitmap scan. */
	if (type == SCTP_SELECTIVE_ACK) {
		sack = mtod(a_chk->data, struct sctp_sack_chunk *);
		nr_sack = NULL;
		gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
		if (highest_tsn > asoc->mapping_array_base_tsn) {
			siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
		} else {
			siz = (((MAX_TSN - highest_tsn) + 1) + highest_tsn + 7) / 8;
		}
	} else {
		sack = NULL;
		nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
		gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
		if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
			siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
		} else {
			siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
		}
	}

	if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
		offset = 1;
	} else {
		offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
	}
	if (((type == SCTP_SELECTIVE_ACK) &&
	    SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
	    ((type == SCTP_NR_SELECTIVE_ACK) &&
	    SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
		/* we have a gap ..
		 * maybe */
		for (i = 0; i < siz; i++) {
			tsn_map = asoc->mapping_array[i];
			if (type == SCTP_SELECTIVE_ACK) {
				tsn_map |= asoc->nr_mapping_array[i];
			}
			if (i == 0) {
				/*
				 * Clear all bits corresponding to TSNs
				 * smaller or equal to the cumulative TSN.
				 *
				 * NOTE(review): this assumes offset <= 1
				 * here; a negative shift count would be
				 * undefined behavior — confirm the
				 * invariant holds on this path.
				 */
				tsn_map &= (~0 << (1 - offset));
			}
			/* sack_array maps an 8-bit map byte to gap runs. */
			selector = &sack_array[tsn_map];
			if (mergeable && selector->right_edge) {
				/*
				 * Backup, left and right edges were ok to
				 * merge.
				 */
				num_gap_blocks--;
				gap_descriptor--;
			}
			if (selector->num_entries == 0)
				mergeable = 0;
			else {
				for (j = 0; j < selector->num_entries; j++) {
					if (mergeable && selector->right_edge) {
						/*
						 * do a merge by NOT setting
						 * the left side
						 */
						mergeable = 0;
					} else {
						/*
						 * no merge, set the left
						 * side
						 */
						mergeable = 0;
						gap_descriptor->start = htons((selector->gaps[j].start + offset));
					}
					gap_descriptor->end = htons((selector->gaps[j].end + offset));
					num_gap_blocks++;
					gap_descriptor++;
					if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
						/* no more room */
						limit_reached = 1;
						break;
					}
				}
				if (selector->left_edge) {
					mergeable = 1;
				}
			}
			if (limit_reached) {
				/* Reached the limit stop */
				break;
			}
			offset += 8;
		}
	}
	/* Second pass: non-renegable gap blocks for NR-SACK only. */
	if ((type == SCTP_NR_SELECTIVE_ACK) &&
	    (limit_reached == 0)) {

		mergeable = 0;

		if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
			siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
		} else {
			siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
		}

		if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
			offset = 1;
		} else {
			offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
		}
		if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
			/* we have a gap .. maybe */
			for (i = 0; i < siz; i++) {
				tsn_map = asoc->nr_mapping_array[i];
				if (i == 0) {
					/*
					 * Clear all bits corresponding to
					 * TSNs smaller or equal to the
					 * cumulative TSN.
					 *
					 * NOTE(review): assumes offset <= 1
					 * here; a negative shift count is
					 * undefined behavior — confirm.
					 */
					tsn_map &= (~0 << (1 - offset));
				}
				selector = &sack_array[tsn_map];
				if (mergeable && selector->right_edge) {
					/*
					 * Backup, left and right edges were
					 * ok to merge.
					 */
					num_nr_gap_blocks--;
					gap_descriptor--;
				}
				if (selector->num_entries == 0)
					mergeable = 0;
				else {
					for (j = 0; j < selector->num_entries; j++) {
						if (mergeable && selector->right_edge) {
							/*
							 * do a merge by NOT
							 * setting the left
							 * side
							 */
							mergeable = 0;
						} else {
							/*
							 * no merge, set the
							 * left side
							 */
							mergeable = 0;
							gap_descriptor->start = htons((selector->gaps[j].start + offset));
						}
						gap_descriptor->end = htons((selector->gaps[j].end + offset));
						num_nr_gap_blocks++;
						gap_descriptor++;
						if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
							/* no more room */
							limit_reached = 1;
							break;
						}
					}
					if (selector->left_edge) {
						mergeable = 1;
					}
				}
				if (limit_reached) {
					/* Reached the limit stop */
					break;
				}
				offset += 8;
			}
		}
	}
	/*
	 * Now we must add any duplicate TSNs we are going to report;
	 * they follow directly after the last gap block.
	 */
	if ((limit_reached == 0) && (asoc->numduptsns)) {
		dup = (uint32_t *) gap_descriptor;
		for (i = 0; i < asoc->numduptsns; i++) {
			*dup = htonl(asoc->dup_tsns[i]);
			dup++;
			num_dups++;
			if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
				/* no more room */
				break;
			}
		}
		asoc->numduptsns = 0;
	}
	/*
	 * now that the chunk is prepared queue it to the control chunk
	 * queue.
	 */
	if (type == SCTP_SELECTIVE_ACK) {
		a_chk->send_size = sizeof(struct sctp_sack_chunk) +
		    (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
		    num_dups * sizeof(int32_t);
		SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
		sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
		sack->sack.a_rwnd = htonl(asoc->my_rwnd);
		sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
		sack->sack.num_dup_tsns = htons(num_dups);
		sack->ch.chunk_type = type;
		sack->ch.chunk_flags = flags;
		sack->ch.chunk_length = htons(a_chk->send_size);
	} else {
		a_chk->send_size = sizeof(struct sctp_nr_sack_chunk) +
		    (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
		    num_dups * sizeof(int32_t);
		SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
		nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
		nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
		nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
		nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
		nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
		nr_sack->nr_sack.reserved = 0;
		nr_sack->ch.chunk_type = type;
		nr_sack->ch.chunk_flags = flags;
		nr_sack->ch.chunk_length = htons(a_chk->send_size);
	}
	TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
	asoc->my_last_reported_rwnd = asoc->my_rwnd;
	asoc->ctrl_queue_cnt++;
	asoc->send_sack = 0;
	SCTP_STAT_INCR(sctps_sendsacks);
	return;
}


/*
 * Build an ABORT chunk (with an optional AUTH chunk prepended and an
 * optional operational-error cause chain linked in) and send it to the
 * association's primary destination.  Caller must hold the TCB lock.
 */
void
sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_abort;
	struct mbuf *m_out = NULL, *m_end = NULL;
	struct sctp_abort_chunk *abort = NULL;
	int sz;
	uint32_t auth_offset = 0;
	struct sctp_auth_chunk *auth = NULL;

	/*-
	 * Add an AUTH chunk, if chunk requires it and save the offset into
	 * the chain for AUTH
	 */
	if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
	    stcb->asoc.peer_auth_chunks)) {
		m_out = sctp_add_auth_chunk(m_out, &m_end, &auth, &auth_offset,
		    stcb, SCTP_ABORT_ASSOCIATION);
		SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
	if (m_abort == NULL) {
		/* no mbuf's */
		if (m_out)
			sctp_m_freem(m_out);
		return;
	}
	/* link in any error */
	SCTP_BUF_NEXT(m_abort) = operr;
	/* sz accumulates the total length of the error-cause chain. */
	sz = 0;
	if (operr) {
		struct mbuf *n;

		n = operr;
		while (n) {
			sz += SCTP_BUF_LEN(n);
			n = SCTP_BUF_NEXT(n);
		}
	}
	SCTP_BUF_LEN(m_abort) = sizeof(*abort);
	if (m_out == NULL) {
		/* NO Auth chunk prepended, so reserve space in front */
		SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
		m_out = m_abort;
	} else {
		/* Put AUTH chunk at the front of the chain */
		SCTP_BUF_NEXT(m_end) = m_abort;
	}

	/* fill in the ABORT chunk */
	abort = mtod(m_abort, struct sctp_abort_chunk *);
	abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
	abort->ch.chunk_flags = 0;
	abort->ch.chunk_length = htons(sizeof(*abort) + sz);

	(void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb,
	    stcb->asoc.primary_destination,
	    (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr,
	    m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, NULL, 0,
	    stcb->sctp_ep->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
	    stcb->asoc.primary_destination->port, so_locked, NULL, NULL);
	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
}

/*
 * Build and send a SHUTDOWN-COMPLETE chunk to the given destination.
 * When reflect_vtag is set, the chunk carries the T-bit
 * (SCTP_HAD_NO_TCB) and our own vtag; otherwise the peer's vtag is used.
 */
void
sctp_send_shutdown_complete(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    int reflect_vtag)
{
	/* formulate and SEND a SHUTDOWN-COMPLETE */
	struct mbuf *m_shutdown_comp;
	struct sctp_shutdown_complete_chunk *shutdown_complete;
	uint32_t vtag;
	uint8_t flags;

	m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
	if (m_shutdown_comp == NULL) {
		/* no mbuf's */
		return;
	}
	if (reflect_vtag) {
		flags = SCTP_HAD_NO_TCB;
		vtag = stcb->asoc.my_vtag;
	} else {
		flags = 0;
		vtag = stcb->asoc.peer_vtag;
	}
	shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
	shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
	shutdown_complete->ch.chunk_flags = flags;
	shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
	SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
	(void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
	    (struct sockaddr *)&net->ro._l_addr,
	    m_shutdown_comp, 0, NULL, 0, 1, 0, NULL, 0,
	    stcb->sctp_ep->sctp_lport, stcb->rport,
	    htonl(vtag),
	    net->port, SCTP_SO_NOT_LOCKED, NULL, NULL);
	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
	return;
}

/*
 * TCB-less variant: craft and transmit a SHUTDOWN-COMPLETE directly from
 * an inbound packet (used when no association exists), mirroring the
 * received packet's addresses, ports and vtag.
 */
void
sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh,
    uint32_t vrf_id, uint16_t port)
{
	/* formulate and SEND a
SHUTDOWN-COMPLETE */
	struct mbuf *o_pak;
	struct mbuf *mout;
	struct ip *iph, *iph_out;
	struct udphdr *udp = NULL;

#ifdef INET6
	struct ip6_hdr *ip6, *ip6_out;

#endif
	int offset_out, len, mlen;
	struct sctp_shutdown_complete_msg *comp_cp;

	/* Size the reply from the inbound packet's IP version. */
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
	case IPVERSION:
		len = (sizeof(struct ip) + sizeof(struct sctp_shutdown_complete_msg));
		break;
#ifdef INET6
	case IPV6_VERSION >> 4:
		len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_shutdown_complete_msg));
		break;
#endif
	default:
		return;
	}
	if (port) {
		/* non-zero port means UDP encapsulation is in use */
		len += sizeof(struct udphdr);
	}
	mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
	if (mout == NULL) {
		return;
	}
	SCTP_BUF_RESV_UF(mout, max_linkhdr);
	SCTP_BUF_LEN(mout) = len;
	SCTP_BUF_NEXT(mout) = NULL;
	if (m->m_flags & M_FLOWID) {
		mout->m_pkthdr.flowid = m->m_pkthdr.flowid;
		mout->m_flags |= M_FLOWID;
	}
	iph_out = NULL;
#ifdef INET6
	ip6_out = NULL;
#endif
	offset_out = 0;

	switch (iph->ip_v) {
	case IPVERSION:
		iph_out = mtod(mout, struct ip *);

		/* Fill in the IP header for the SHUTDOWN-COMPLETE */
		iph_out->ip_v = IPVERSION;
		iph_out->ip_hl = (sizeof(struct ip) / 4);
		iph_out->ip_tos = (u_char)0;
		iph_out->ip_id = 0;
		iph_out->ip_off = 0;
		iph_out->ip_ttl = MAXTTL;
		if (port) {
			iph_out->ip_p = IPPROTO_UDP;
		} else {
			iph_out->ip_p = IPPROTO_SCTP;
		}
		/* Reply goes back the way the packet came. */
		iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
		iph_out->ip_dst.s_addr = iph->ip_src.s_addr;

		/* let IP layer calculate this */
		iph_out->ip_sum = 0;
		offset_out += sizeof(*iph_out);
		comp_cp = (struct sctp_shutdown_complete_msg *)(
		    (caddr_t)iph_out + offset_out);
		break;
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = (struct ip6_hdr *)iph;
		ip6_out = mtod(mout, struct ip6_hdr *);

		/* Fill in the IPv6 header for the SHUTDOWN-COMPLETE */
		ip6_out->ip6_flow = ip6->ip6_flow;
		ip6_out->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
		if (port) {
			ip6_out->ip6_nxt = IPPROTO_UDP;
		} else {
			ip6_out->ip6_nxt = IPPROTO_SCTP;
		}
		ip6_out->ip6_src = ip6->ip6_dst;
		ip6_out->ip6_dst = ip6->ip6_src;
		/*
		 * ?? The old code had both the iph len + payload, I think
		 * this is wrong and would never have worked
		 */
		ip6_out->ip6_plen = sizeof(struct sctp_shutdown_complete_msg);
		offset_out += sizeof(*ip6_out);
		comp_cp = (struct sctp_shutdown_complete_msg *)(
		    (caddr_t)ip6_out + offset_out);
		break;
#endif				/* INET6 */
	default:
		/* Currently not supported. */
		sctp_m_freem(mout);
		return;
	}
	if (port) {
		/* Insert the UDP encapsulation header before the SCTP msg. */
		udp = (struct udphdr *)comp_cp;
		udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
		udp->uh_dport = port;
		udp->uh_ulen = htons(sizeof(struct sctp_shutdown_complete_msg) + sizeof(struct udphdr));
		if (iph_out)
			udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
		offset_out += sizeof(struct udphdr);
		comp_cp = (struct sctp_shutdown_complete_msg *)((caddr_t)comp_cp + sizeof(struct udphdr));
	}
	if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
		/* no mbuf's */
		sctp_m_freem(mout);
		return;
	}
	/* Now fill in the SCTP common header and SHUTDOWN-COMPLETE chunk,
	 * echoing the inbound packet's ports and vtag.
	 */
	comp_cp->sh.src_port = sh->dest_port;
	comp_cp->sh.dest_port = sh->src_port;
	comp_cp->sh.checksum = 0;
	comp_cp->sh.v_tag = sh->v_tag;
	comp_cp->shut_cmp.ch.chunk_flags = SCTP_HAD_NO_TCB;
	comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
	comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));

	if (iph_out != NULL) {
		sctp_route_t ro;
		int ret;

		mlen = SCTP_BUF_LEN(mout);
		bzero(&ro, sizeof ro);
		/* set IPv4 length */
		iph_out->ip_len = mlen;
#ifdef SCTP_PACKET_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
			sctp_packet_log(mout, mlen);
#endif
		if (port) {
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
			/* UDP encapsulated: software CRC32c over the SCTP part. */
			comp_cp->sh.checksum = sctp_calculate_cksum(mout, offset_out);
			SCTP_STAT_INCR(sctps_sendswcrc);
#endif
			SCTP_ENABLE_UDP_CSUM(mout);
		} else {
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
			/* Plain SCTP: let the NIC compute the CRC32c. */
			mout->m_pkthdr.csum_flags = CSUM_SCTP;
			mout->m_pkthdr.csum_data = 0;
			SCTP_STAT_INCR(sctps_sendhwcrc);
#endif
		}
		SCTP_ATTACH_CHAIN(o_pak, mout, mlen);
		/* out it goes */
		SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);

		/* Free the route if we got one back */
		if (ro.ro_rt)
			RTFREE(ro.ro_rt);
	}
#ifdef INET6
	if (ip6_out != NULL) {
		struct route_in6 ro;
		int ret;
		struct ifnet *ifp = NULL;

		bzero(&ro, sizeof(ro));
		mlen = SCTP_BUF_LEN(mout);
#ifdef SCTP_PACKET_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
			sctp_packet_log(mout, mlen);
#endif
		SCTP_ATTACH_CHAIN(o_pak, mout, mlen);
		if (port) {
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
			comp_cp->sh.checksum =
			    sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
			SCTP_STAT_INCR(sctps_sendswcrc);
#endif
			if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), mlen - sizeof(struct ip6_hdr))) == 0) {
				/* 0 means "no checksum" for UDP; substitute. */
				udp->uh_sum = 0xffff;
			}
		} else {
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
			/* Plain SCTP: let the NIC compute the CRC32c. */
			mout->m_pkthdr.csum_flags = CSUM_SCTP;
			mout->m_pkthdr.csum_data = 0;
			SCTP_STAT_INCR(sctps_sendhwcrc);
#endif
		}
		SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, NULL, vrf_id);

		/* Free the route if we got one back */
		if (ro.ro_rt)
			RTFREE(ro.ro_rt);
	}
#endif
	SCTP_STAT_INCR(sctps_sendpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
	return;

}

/*
 * Pick the destination most in need of a heartbeat: the reachable,
 * HB-enabled net that has gone longest without a send, provided its idle
 * time exceeds its RTO or it is still unconfirmed.  Also stores the
 * current time in *now and stamps the winner's last_sent_time.  Returns
 * NULL when no destination needs a heartbeat.  Caller must hold the TCB
 * lock.
 */
static struct sctp_nets *
sctp_select_hb_destination(struct sctp_tcb *stcb, struct timeval *now)
{
	struct sctp_nets *net, *hnet;
	int ms_goneby, highest_ms, state_overide = 0;

	(void)SCTP_GETTIME_TIMEVAL(now);
	highest_ms = 0;
	hnet = NULL;
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		if (
		    ((net->dest_state & SCTP_ADDR_NOHB) && ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) ||
		    (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)
		    ) {
			/*
			 * Skip this guy from consideration if HB is off AND
			 * its confirmed
			 */
			continue;
		}
		if (sctp_destination_is_reachable(stcb, (struct sockaddr *)&net->ro._l_addr) == 0) {
			/* skip this dest net from consideration */
			continue;
		}
		if (net->last_sent_time.tv_sec) {
			/* Sent to so we subtract */
			ms_goneby = (now->tv_sec - net->last_sent_time.tv_sec) * 1000;
		} else
			/* Never been sent to */
			ms_goneby = 0x7fffffff;
		/*-
		 * When the address state is unconfirmed but still
		 * considered reachable, we HB at a higher rate. Once it
		 * goes confirmed OR reaches the "unreachable" state, then
		 * we cut it back to HB at a more normal pace.
		 */
		if ((net->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED) {
			state_overide = 1;
		} else {
			state_overide = 0;
		}

		if ((((unsigned int)ms_goneby >= net->RTO) || (state_overide)) &&
		    (ms_goneby > highest_ms)) {
			/* New candidate: idlest eligible destination so far. */
			highest_ms = ms_goneby;
			hnet = net;
		}
	}
	/* Re-derive the override for the winner before the final check. */
	if (hnet &&
	    ((hnet->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED)) {
		state_overide = 1;
	} else {
		state_overide = 0;
	}

	if (hnet && highest_ms && (((unsigned int)highest_ms >= hnet->RTO) || state_overide)) {
		/*-
		 * Found the one with longest delay bounds OR it is
		 * unconfirmed and still not marked unreachable.
		 */
		SCTPDBG(SCTP_DEBUG_OUTPUT4, "net:%p is the hb winner -", hnet);
#ifdef SCTP_DEBUG
		if (hnet) {
			SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT4,
			    (struct sockaddr *)&hnet->ro._l_addr);
		} else {
			SCTPDBG(SCTP_DEBUG_OUTPUT4, " none\n");
		}
#endif
		/* update the timer now */
		hnet->last_sent_time = *now;
		return (hnet);
	}
	/* Nothing to HB */
	return (NULL);
}

/*
 * Queue a HEARTBEAT chunk.  When user_req is 0 the destination is chosen
 * by sctp_select_hb_destination(); otherwise u_net is used directly.
 * Caller must hold the TCB lock.
 */
int
sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net)
{
	struct sctp_tmit_chunk *chk;
	struct sctp_nets *net;
	struct sctp_heartbeat_chunk *hb;
	struct timeval now;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;

	SCTP_TCB_LOCK_ASSERT(stcb);
	if (user_req == 0) {
		net = sctp_select_hb_destination(stcb, &now);
		if (net == NULL) {
			/*-
			 * All our busy none to send to, just start the
10620 */ 10621 if (stcb->asoc.state == 0) { 10622 return (0); 10623 } 10624 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, 10625 stcb->sctp_ep, 10626 stcb, 10627 net); 10628 return (0); 10629 } 10630 } else { 10631 net = u_net; 10632 if (net == NULL) { 10633 return (0); 10634 } 10635 (void)SCTP_GETTIME_TIMEVAL(&now); 10636 } 10637 sin = (struct sockaddr_in *)&net->ro._l_addr; 10638 if (sin->sin_family != AF_INET) { 10639 if (sin->sin_family != AF_INET6) { 10640 /* huh */ 10641 return (0); 10642 } 10643 } 10644 sctp_alloc_a_chunk(stcb, chk); 10645 if (chk == NULL) { 10646 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n"); 10647 return (0); 10648 } 10649 chk->copy_by_ref = 0; 10650 chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST; 10651 chk->rec.chunk_id.can_take_data = 1; 10652 chk->asoc = &stcb->asoc; 10653 chk->send_size = sizeof(struct sctp_heartbeat_chunk); 10654 10655 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER); 10656 if (chk->data == NULL) { 10657 sctp_free_a_chunk(stcb, chk); 10658 return (0); 10659 } 10660 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 10661 SCTP_BUF_LEN(chk->data) = chk->send_size; 10662 chk->sent = SCTP_DATAGRAM_UNSENT; 10663 chk->snd_count = 0; 10664 chk->whoTo = net; 10665 atomic_add_int(&chk->whoTo->ref_count, 1); 10666 /* Now we have a mbuf that we can fill in with the details */ 10667 hb = mtod(chk->data, struct sctp_heartbeat_chunk *); 10668 memset(hb, 0, sizeof(struct sctp_heartbeat_chunk)); 10669 /* fill out chunk header */ 10670 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST; 10671 hb->ch.chunk_flags = 0; 10672 hb->ch.chunk_length = htons(chk->send_size); 10673 /* Fill out hb parameter */ 10674 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO); 10675 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param)); 10676 hb->heartbeat.hb_info.time_value_1 = now.tv_sec; 10677 hb->heartbeat.hb_info.time_value_2 = now.tv_usec; 10678 /* Did our user request 
this one, put it in */ 10679 hb->heartbeat.hb_info.user_req = user_req; 10680 hb->heartbeat.hb_info.addr_family = sin->sin_family; 10681 hb->heartbeat.hb_info.addr_len = sin->sin_len; 10682 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) { 10683 /* 10684 * we only take from the entropy pool if the address is not 10685 * confirmed. 10686 */ 10687 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 10688 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 10689 } else { 10690 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0; 10691 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0; 10692 } 10693 if (sin->sin_family == AF_INET) { 10694 memcpy(hb->heartbeat.hb_info.address, &sin->sin_addr, sizeof(sin->sin_addr)); 10695 } else if (sin->sin_family == AF_INET6) { 10696 /* We leave the scope the way it is in our lookup table. */ 10697 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 10698 memcpy(hb->heartbeat.hb_info.address, &sin6->sin6_addr, sizeof(sin6->sin6_addr)); 10699 } else { 10700 /* huh compiler bug */ 10701 return (0); 10702 } 10703 10704 /* 10705 * JRS 5/14/07 - In CMT PF, the T3 timer is used to track 10706 * PF-heartbeats. Because of this, threshold management is done by 10707 * the t3 timer handler, and does not need to be done upon the send 10708 * of a PF-heartbeat. If CMT PF is on and the destination to which a 10709 * heartbeat is being sent is in PF state, do NOT do threshold 10710 * management. 
10711 */ 10712 if ((stcb->asoc.sctp_cmt_pf == 0) || 10713 ((net->dest_state & SCTP_ADDR_PF) != SCTP_ADDR_PF)) { 10714 /* ok we have a destination that needs a beat */ 10715 /* lets do the theshold management Qiaobing style */ 10716 if (sctp_threshold_management(stcb->sctp_ep, stcb, net, 10717 stcb->asoc.max_send_times)) { 10718 /*- 10719 * we have lost the association, in a way this is 10720 * quite bad since we really are one less time since 10721 * we really did not send yet. This is the down side 10722 * to the Q's style as defined in the RFC and not my 10723 * alternate style defined in the RFC. 10724 */ 10725 if (chk->data != NULL) { 10726 sctp_m_freem(chk->data); 10727 chk->data = NULL; 10728 } 10729 /* 10730 * Here we do NOT use the macro since the 10731 * association is now gone. 10732 */ 10733 if (chk->whoTo) { 10734 sctp_free_remote_addr(chk->whoTo); 10735 chk->whoTo = NULL; 10736 } 10737 sctp_free_a_chunk((struct sctp_tcb *)NULL, chk); 10738 return (-1); 10739 } 10740 } 10741 net->hb_responded = 0; 10742 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next); 10743 stcb->asoc.ctrl_queue_cnt++; 10744 SCTP_STAT_INCR(sctps_sendheartbeat); 10745 /*- 10746 * Call directly med level routine to put out the chunk. It will 10747 * always tumble out control chunks aka HB but it may even tumble 10748 * out data too. 
/*
 * Queue (or coalesce into an already-queued) ECN-Echo chunk toward "net",
 * reporting the highest CE-marked TSN seen.  If an ECNE for the same
 * destination is already on the control queue, its TSN is advanced and
 * its packet counter bumped instead of queueing a duplicate.
 * Caller must hold the TCB lock.
 */
void
sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
    uint32_t high_tsn)
{
	struct sctp_association *asoc;
	struct sctp_ecne_chunk *ecne;
	struct sctp_tmit_chunk *chk;

	asoc = &stcb->asoc;
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
			/* found a previous ECN_ECHO update it if needed */
			uint32_t cnt, ctsn;

			ecne = mtod(chk->data, struct sctp_ecne_chunk *);
			ctsn = ntohl(ecne->tsn);
			if (SCTP_TSN_GT(high_tsn, ctsn)) {
				/* Advance to the newer (serial-greater) TSN. */
				ecne->tsn = htonl(high_tsn);
				SCTP_STAT_INCR(sctps_queue_upd_ecne);
			}
			/* One more marked packet covered by this ECNE. */
			cnt = ntohl(ecne->num_pkts_since_cwr);
			cnt++;
			ecne->num_pkts_since_cwr = htonl(cnt);
			return;
		}
	}
	/* nope could not find one to update so we must build one */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return;
	}
	chk->copy_by_ref = 0;
	SCTP_STAT_INCR(sctps_queue_upd_ecne);
	chk->rec.chunk_id.id = SCTP_ECN_ECHO;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = &stcb->asoc;
	chk->send_size = sizeof(struct sctp_ecne_chunk);
	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk);
		return;
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = net;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	stcb->asoc.ecn_echo_cnt_onq++;
	ecne = mtod(chk->data, struct sctp_ecne_chunk *);
	ecne->ch.chunk_type = SCTP_ECN_ECHO;
	ecne->ch.chunk_flags = 0;
	ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
	ecne->tsn = htonl(high_tsn);
	ecne->num_pkts_since_cwr = htonl(1);
	/* ECNE goes to the FRONT of the control queue for prompt delivery. */
	TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
	asoc->ctrl_queue_cnt++;
}
ecne->num_pkts_since_cwr = htonl(1); 10809 TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next); 10810 asoc->ctrl_queue_cnt++; 10811 } 10812 10813 void 10814 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net, 10815 struct mbuf *m, int iphlen, int bad_crc) 10816 { 10817 struct sctp_association *asoc; 10818 struct sctp_pktdrop_chunk *drp; 10819 struct sctp_tmit_chunk *chk; 10820 uint8_t *datap; 10821 int len; 10822 int was_trunc = 0; 10823 struct ip *iph; 10824 10825 #ifdef INET6 10826 struct ip6_hdr *ip6h; 10827 10828 #endif 10829 int fullsz = 0, extra = 0; 10830 long spc; 10831 int offset; 10832 struct sctp_chunkhdr *ch, chunk_buf; 10833 unsigned int chk_length; 10834 10835 if (!stcb) { 10836 return; 10837 } 10838 asoc = &stcb->asoc; 10839 SCTP_TCB_LOCK_ASSERT(stcb); 10840 if (asoc->peer_supports_pktdrop == 0) { 10841 /*- 10842 * peer must declare support before I send one. 10843 */ 10844 return; 10845 } 10846 if (stcb->sctp_socket == NULL) { 10847 return; 10848 } 10849 sctp_alloc_a_chunk(stcb, chk); 10850 if (chk == NULL) { 10851 return; 10852 } 10853 chk->copy_by_ref = 0; 10854 iph = mtod(m, struct ip *); 10855 if (iph == NULL) { 10856 sctp_free_a_chunk(stcb, chk); 10857 return; 10858 } 10859 switch (iph->ip_v) { 10860 case IPVERSION: 10861 /* IPv4 */ 10862 len = chk->send_size = iph->ip_len; 10863 break; 10864 #ifdef INET6 10865 case IPV6_VERSION >> 4: 10866 /* IPv6 */ 10867 ip6h = mtod(m, struct ip6_hdr *); 10868 len = chk->send_size = htons(ip6h->ip6_plen); 10869 break; 10870 #endif 10871 default: 10872 return; 10873 } 10874 /* Validate that we do not have an ABORT in here. 
*/ 10875 offset = iphlen + sizeof(struct sctphdr); 10876 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 10877 sizeof(*ch), (uint8_t *) & chunk_buf); 10878 while (ch != NULL) { 10879 chk_length = ntohs(ch->chunk_length); 10880 if (chk_length < sizeof(*ch)) { 10881 /* break to abort land */ 10882 break; 10883 } 10884 switch (ch->chunk_type) { 10885 case SCTP_PACKET_DROPPED: 10886 case SCTP_ABORT_ASSOCIATION: 10887 case SCTP_INITIATION_ACK: 10888 /** 10889 * We don't respond with an PKT-DROP to an ABORT 10890 * or PKT-DROP. We also do not respond to an 10891 * INIT-ACK, because we can't know if the initiation 10892 * tag is correct or not. 10893 */ 10894 sctp_free_a_chunk(stcb, chk); 10895 return; 10896 default: 10897 break; 10898 } 10899 offset += SCTP_SIZE32(chk_length); 10900 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 10901 sizeof(*ch), (uint8_t *) & chunk_buf); 10902 } 10903 10904 if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) > 10905 min(stcb->asoc.smallest_mtu, MCLBYTES)) { 10906 /* 10907 * only send 1 mtu worth, trim off the excess on the end. 
10908 */ 10909 fullsz = len - extra; 10910 len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD; 10911 was_trunc = 1; 10912 } 10913 chk->asoc = &stcb->asoc; 10914 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 10915 if (chk->data == NULL) { 10916 jump_out: 10917 sctp_free_a_chunk(stcb, chk); 10918 return; 10919 } 10920 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 10921 drp = mtod(chk->data, struct sctp_pktdrop_chunk *); 10922 if (drp == NULL) { 10923 sctp_m_freem(chk->data); 10924 chk->data = NULL; 10925 goto jump_out; 10926 } 10927 chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) + 10928 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD)); 10929 chk->book_size_scale = 0; 10930 if (was_trunc) { 10931 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED; 10932 drp->trunc_len = htons(fullsz); 10933 /* 10934 * Len is already adjusted to size minus overhead above take 10935 * out the pkt_drop chunk itself from it. 10936 */ 10937 chk->send_size = len - sizeof(struct sctp_pktdrop_chunk); 10938 len = chk->send_size; 10939 } else { 10940 /* no truncation needed */ 10941 drp->ch.chunk_flags = 0; 10942 drp->trunc_len = htons(0); 10943 } 10944 if (bad_crc) { 10945 drp->ch.chunk_flags |= SCTP_BADCRC; 10946 } 10947 chk->send_size += sizeof(struct sctp_pktdrop_chunk); 10948 SCTP_BUF_LEN(chk->data) = chk->send_size; 10949 chk->sent = SCTP_DATAGRAM_UNSENT; 10950 chk->snd_count = 0; 10951 if (net) { 10952 /* we should hit here */ 10953 chk->whoTo = net; 10954 } else { 10955 chk->whoTo = asoc->primary_destination; 10956 } 10957 atomic_add_int(&chk->whoTo->ref_count, 1); 10958 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED; 10959 chk->rec.chunk_id.can_take_data = 1; 10960 drp->ch.chunk_type = SCTP_PACKET_DROPPED; 10961 drp->ch.chunk_length = htons(chk->send_size); 10962 spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket); 10963 if (spc < 0) { 10964 spc = 0; 10965 } 10966 drp->bottle_bw = htonl(spc); 10967 if (asoc->my_rwnd) { 10968 
drp->current_onq = htonl(asoc->size_on_reasm_queue + 10969 asoc->size_on_all_streams + 10970 asoc->my_rwnd_control_len + 10971 stcb->sctp_socket->so_rcv.sb_cc); 10972 } else { 10973 /*- 10974 * If my rwnd is 0, possibly from mbuf depletion as well as 10975 * space used, tell the peer there is NO space aka onq == bw 10976 */ 10977 drp->current_onq = htonl(spc); 10978 } 10979 drp->reserved = 0; 10980 datap = drp->data; 10981 m_copydata(m, iphlen, len, (caddr_t)datap); 10982 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next); 10983 asoc->ctrl_queue_cnt++; 10984 } 10985 10986 void 10987 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override) 10988 { 10989 struct sctp_association *asoc; 10990 struct sctp_cwr_chunk *cwr; 10991 struct sctp_tmit_chunk *chk; 10992 10993 asoc = &stcb->asoc; 10994 SCTP_TCB_LOCK_ASSERT(stcb); 10995 10996 10997 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 10998 if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) { 10999 /* 11000 * found a previous CWR queued to same destination 11001 * update it if needed 11002 */ 11003 uint32_t ctsn; 11004 11005 cwr = mtod(chk->data, struct sctp_cwr_chunk *); 11006 ctsn = ntohl(cwr->tsn); 11007 if (SCTP_TSN_GT(high_tsn, ctsn)) { 11008 cwr->tsn = htonl(high_tsn); 11009 } 11010 if (override & SCTP_CWR_REDUCE_OVERRIDE) { 11011 /* Make sure override is carried */ 11012 cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE; 11013 } 11014 return; 11015 } 11016 } 11017 sctp_alloc_a_chunk(stcb, chk); 11018 if (chk == NULL) { 11019 return; 11020 } 11021 chk->copy_by_ref = 0; 11022 chk->rec.chunk_id.id = SCTP_ECN_CWR; 11023 chk->rec.chunk_id.can_take_data = 1; 11024 chk->asoc = &stcb->asoc; 11025 chk->send_size = sizeof(struct sctp_cwr_chunk); 11026 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER); 11027 if (chk->data == NULL) { 11028 sctp_free_a_chunk(stcb, chk); 11029 return; 11030 } 11031 
/*
 * Append an Outgoing-SSN-Reset request parameter to the stream-reset
 * chunk in chk->data, listing number_entries stream ids from "list"
 * (all streams when number_entries == 0).  Updates the chunk length and
 * the chunk's book/send sizes.  chk->data must have room for the
 * parameter (caller allocates an MCLBYTES cluster).
 */
void
sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
    int number_entries, uint16_t * list,
    uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
{
	int len, old_len, i;
	struct sctp_stream_reset_out_request *req_out;
	struct sctp_chunkhdr *ch;

	ch = mtod(chk->data, struct sctp_chunkhdr *);

	/* Current padded chunk length = offset of the new parameter. */
	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
	/* now how long will this param be? (unpadded) */
	len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
	req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
	req_out->ph.param_length = htons(len);
	req_out->request_seq = htonl(seq);
	req_out->response_seq = htonl(resp_seq);
	req_out->send_reset_at_tsn = htonl(last_sent);
	if (number_entries) {
		for (i = 0; i < number_entries; i++) {
			req_out->list_of_streams[i] = htons(list[i]);
		}
	}
	if (SCTP_SIZE32(len) > len) {
		/*-
		 * Need to worry about the pad we may end up adding to the
		 * end. This is easy since the struct is either aligned to 4
		 * bytes or 2 bytes off.
		 */
		req_out->list_of_streams[number_entries] = 0;
	}
	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	chk->book_size = len + old_len;
	chk->book_size_scale = 0;
	chk->send_size = SCTP_SIZE32(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	return;
}
11079 */ 11080 req_out->list_of_streams[number_entries] = 0; 11081 } 11082 /* now fix the chunk length */ 11083 ch->chunk_length = htons(len + old_len); 11084 chk->book_size = len + old_len; 11085 chk->book_size_scale = 0; 11086 chk->send_size = SCTP_SIZE32(chk->book_size); 11087 SCTP_BUF_LEN(chk->data) = chk->send_size; 11088 return; 11089 } 11090 11091 11092 void 11093 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk, 11094 int number_entries, uint16_t * list, 11095 uint32_t seq) 11096 { 11097 int len, old_len, i; 11098 struct sctp_stream_reset_in_request *req_in; 11099 struct sctp_chunkhdr *ch; 11100 11101 ch = mtod(chk->data, struct sctp_chunkhdr *); 11102 11103 11104 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 11105 11106 /* get to new offset for the param. */ 11107 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len); 11108 /* now how long will this param be? */ 11109 len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries)); 11110 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST); 11111 req_in->ph.param_length = htons(len); 11112 req_in->request_seq = htonl(seq); 11113 if (number_entries) { 11114 for (i = 0; i < number_entries; i++) { 11115 req_in->list_of_streams[i] = htons(list[i]); 11116 } 11117 } 11118 if (SCTP_SIZE32(len) > len) { 11119 /*- 11120 * Need to worry about the pad we may end up adding to the 11121 * end. This is easy since the struct is either aligned to 4 11122 * bytes or 2 bytes off. 
11123 */ 11124 req_in->list_of_streams[number_entries] = 0; 11125 } 11126 /* now fix the chunk length */ 11127 ch->chunk_length = htons(len + old_len); 11128 chk->book_size = len + old_len; 11129 chk->book_size_scale = 0; 11130 chk->send_size = SCTP_SIZE32(chk->book_size); 11131 SCTP_BUF_LEN(chk->data) = chk->send_size; 11132 return; 11133 } 11134 11135 11136 void 11137 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk, 11138 uint32_t seq) 11139 { 11140 int len, old_len; 11141 struct sctp_stream_reset_tsn_request *req_tsn; 11142 struct sctp_chunkhdr *ch; 11143 11144 ch = mtod(chk->data, struct sctp_chunkhdr *); 11145 11146 11147 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 11148 11149 /* get to new offset for the param. */ 11150 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len); 11151 /* now how long will this param be? */ 11152 len = sizeof(struct sctp_stream_reset_tsn_request); 11153 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST); 11154 req_tsn->ph.param_length = htons(len); 11155 req_tsn->request_seq = htonl(seq); 11156 11157 /* now fix the chunk length */ 11158 ch->chunk_length = htons(len + old_len); 11159 chk->send_size = len + old_len; 11160 chk->book_size = SCTP_SIZE32(chk->send_size); 11161 chk->book_size_scale = 0; 11162 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); 11163 return; 11164 } 11165 11166 void 11167 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk, 11168 uint32_t resp_seq, uint32_t result) 11169 { 11170 int len, old_len; 11171 struct sctp_stream_reset_response *resp; 11172 struct sctp_chunkhdr *ch; 11173 11174 ch = mtod(chk->data, struct sctp_chunkhdr *); 11175 11176 11177 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 11178 11179 /* get to new offset for the param. */ 11180 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len); 11181 /* now how long will this param be? 
*/ 11182 len = sizeof(struct sctp_stream_reset_response); 11183 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE); 11184 resp->ph.param_length = htons(len); 11185 resp->response_seq = htonl(resp_seq); 11186 resp->result = ntohl(result); 11187 11188 /* now fix the chunk length */ 11189 ch->chunk_length = htons(len + old_len); 11190 chk->book_size = len + old_len; 11191 chk->book_size_scale = 0; 11192 chk->send_size = SCTP_SIZE32(chk->book_size); 11193 SCTP_BUF_LEN(chk->data) = chk->send_size; 11194 return; 11195 11196 } 11197 11198 11199 void 11200 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk, 11201 uint32_t resp_seq, uint32_t result, 11202 uint32_t send_una, uint32_t recv_next) 11203 { 11204 int len, old_len; 11205 struct sctp_stream_reset_response_tsn *resp; 11206 struct sctp_chunkhdr *ch; 11207 11208 ch = mtod(chk->data, struct sctp_chunkhdr *); 11209 11210 11211 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 11212 11213 /* get to new offset for the param. */ 11214 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len); 11215 /* now how long will this param be? 
/*
 * Append an Add-Outgoing-Streams request parameter (asking the peer to
 * accept "adding" more streams) to the stream-reset chunk in chk->data,
 * updating the chunk length and book/send sizes.
 */
static void
sctp_add_a_stream(struct sctp_tmit_chunk *chk,
    uint32_t seq,
    uint16_t adding)
{
	int len, old_len;
	struct sctp_chunkhdr *ch;
	struct sctp_stream_reset_add_strm *addstr;

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	/* Current padded chunk length = offset of the new parameter. */
	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
	/* now how long will this param be? */
	len = sizeof(struct sctp_stream_reset_add_strm);

	/* Fill it out. */
	addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_STREAMS);
	addstr->ph.param_length = htons(len);
	addstr->request_seq = htonl(seq);
	addstr->number_of_streams = htons(adding);
	addstr->reserved = 0;

	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	chk->send_size = len + old_len;
	chk->book_size = SCTP_SIZE32(chk->send_size);
	chk->book_size_scale = 0;
	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
	return;
}
/*
 * Build a STREAM-RESET chunk containing the requested combination of
 * parameters (outgoing reset, add-streams, incoming reset, TSN reset)
 * and queue it on the control send queue, starting the STRRESET timer.
 *
 * number_entries/list: streams affected (0/NULL = all streams).
 * resp_seq:            sequence echoed in an outgoing-reset request.
 * Only one reset may be outstanding at a time, and a TSN reset may not
 * be combined with in/out stream resets.
 *
 * Returns 0 on success; EBUSY if a reset is already outstanding; EINVAL
 * for an empty or conflicting request; ENOMEM on allocation failure.
 */
int
sctp_send_str_reset_req(struct sctp_tcb *stcb,
    int number_entries, uint16_t * list,
    uint8_t send_out_req,
    uint32_t resp_seq,
    uint8_t send_in_req,
    uint8_t send_tsn_req,
    uint8_t add_stream,
    uint16_t adding
)
{

	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_chunkhdr *ch;
	uint32_t seq;

	asoc = &stcb->asoc;
	if (asoc->stream_reset_outstanding) {
		/*-
		 * Already one pending, must get ACK back to clear the flag.
		 */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
		return (EBUSY);
	}
	if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0) &&
	    (add_stream == 0)) {
		/* nothing to do */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		return (EINVAL);
	}
	if (send_tsn_req && (send_out_req || send_in_req)) {
		/* error, can't do that */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		return (EINVAL);
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		return (ENOMEM);
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = &stcb->asoc;
	/* Start with just the chunk header; params are appended below. */
	chk->book_size = sizeof(struct sctp_chunkhdr);
	chk->send_size = SCTP_SIZE32(chk->book_size);
	chk->book_size_scale = 0;

	/* A full cluster so the append helpers never run out of room. */
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		return (ENOMEM);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);

	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = asoc->primary_destination;
	atomic_add_int(&chk->whoTo->ref_count, 1);

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	ch->chunk_type = SCTP_STREAM_RESET;
	ch->chunk_flags = 0;
	ch->chunk_length = htons(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;

	/* Each request parameter consumes its own sequence number. */
	seq = stcb->asoc.str_reset_seq_out;
	if (send_out_req) {
		sctp_add_stream_reset_out(chk, number_entries, list,
		    seq, resp_seq, (stcb->asoc.sending_seq - 1));
		asoc->stream_reset_out_is_outstanding = 1;
		seq++;
		asoc->stream_reset_outstanding++;
	}
	if (add_stream) {
		sctp_add_a_stream(chk, seq, adding);
		seq++;
		asoc->stream_reset_outstanding++;
	}
	if (send_in_req) {
		sctp_add_stream_reset_in(chk, number_entries, list, seq);
		asoc->stream_reset_outstanding++;
	}
	if (send_tsn_req) {
		sctp_add_stream_reset_tsn(chk, seq);
		asoc->stream_reset_outstanding++;
	}
	asoc->str_reset = chk;

	/* insert the chunk for sending */
	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
	    chk,
	    sctp_next);
	asoc->ctrl_queue_cnt++;
	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
	return (0);
}
/*
 * Build an out-of-the-blue ABORT in response to packet "m" and transmit
 * it directly (no TCB/association required).  The reply's addresses and
 * ports are the reverse of the incoming packet's; err_cause (an mbuf
 * chain of error causes, may be NULL) is appended as the chunk payload
 * and is consumed by this function on every path.  When "port" is set,
 * the ABORT is UDP-encapsulated to that destination port.
 */
void
sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
    struct mbuf *err_cause, uint32_t vrf_id, uint16_t port)
{
	/*-
	 * Formulate the abort message, and send it back down.
	 */
	struct mbuf *o_pak;
	struct mbuf *mout;
	struct sctp_abort_msg *abm;
	struct ip *iph, *iph_out;
	struct udphdr *udp;

#ifdef INET6
	struct ip6_hdr *ip6, *ip6_out;

#endif
	int iphlen_out, len;

	/* don't respond to ABORT with ABORT */
	if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
		if (err_cause)
			sctp_m_freem(err_cause);
		return;
	}
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
	case IPVERSION:
		len = (sizeof(struct ip) + sizeof(struct sctp_abort_msg));
		break;
#ifdef INET6
	case IPV6_VERSION >> 4:
		len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_abort_msg));
		break;
#endif
	default:
		if (err_cause) {
			sctp_m_freem(err_cause);
		}
		return;
	}
	if (port) {
		/* Room for the UDP encapsulation header. */
		len += sizeof(struct udphdr);
	}
	mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
	if (mout == NULL) {
		if (err_cause) {
			sctp_m_freem(err_cause);
		}
		return;
	}
	SCTP_BUF_RESV_UF(mout, max_linkhdr);
	SCTP_BUF_LEN(mout) = len;
	/* err_cause (if any) becomes the tail of the reply chain. */
	SCTP_BUF_NEXT(mout) = err_cause;
	if (m->m_flags & M_FLOWID) {
		/* Keep the reply on the same flow as the offending packet. */
		mout->m_pkthdr.flowid = m->m_pkthdr.flowid;
		mout->m_flags |= M_FLOWID;
	}
	iph_out = NULL;
#ifdef INET6
	ip6_out = NULL;
#endif
	switch (iph->ip_v) {
	case IPVERSION:
		iph_out = mtod(mout, struct ip *);

		/* Fill in the IP header for the ABORT */
		iph_out->ip_v = IPVERSION;
		iph_out->ip_hl = (sizeof(struct ip) / 4);
		iph_out->ip_tos = (u_char)0;
		iph_out->ip_id = 0;
		iph_out->ip_off = 0;
		iph_out->ip_ttl = MAXTTL;
		if (port) {
			iph_out->ip_p = IPPROTO_UDP;
		} else {
			iph_out->ip_p = IPPROTO_SCTP;
		}
		/* Swap source and destination from the incoming packet. */
		iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
		iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
		/* let IP layer calculate this */
		iph_out->ip_sum = 0;

		iphlen_out = sizeof(*iph_out);
		abm = (struct sctp_abort_msg *)((caddr_t)iph_out + iphlen_out);
		break;
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = (struct ip6_hdr *)iph;
		ip6_out = mtod(mout, struct ip6_hdr *);

		/* Fill in the IP6 header for the ABORT */
		ip6_out->ip6_flow = ip6->ip6_flow;
		ip6_out->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
		if (port) {
			ip6_out->ip6_nxt = IPPROTO_UDP;
		} else {
			ip6_out->ip6_nxt = IPPROTO_SCTP;
		}
		ip6_out->ip6_src = ip6->ip6_dst;
		ip6_out->ip6_dst = ip6->ip6_src;

		iphlen_out = sizeof(*ip6_out);
		abm = (struct sctp_abort_msg *)((caddr_t)ip6_out + iphlen_out);
		break;
#endif				/* INET6 */
	default:
		/* Currently not supported */
		sctp_m_freem(mout);
		return;
	}

	udp = (struct udphdr *)abm;
	if (port) {
		/* UDP header sits between IP header and SCTP common header. */
		udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
		udp->uh_dport = port;
		/* set udp->uh_ulen later */
		udp->uh_sum = 0;
		iphlen_out += sizeof(struct udphdr);
		abm = (struct sctp_abort_msg *)((caddr_t)abm + sizeof(struct udphdr));
	}
	abm->sh.src_port = sh->dest_port;
	abm->sh.dest_port = sh->src_port;
	abm->sh.checksum = 0;
	if (vtag == 0) {
		/* No known tag: reflect theirs and flag T-bit semantics. */
		abm->sh.v_tag = sh->v_tag;
		abm->msg.ch.chunk_flags = SCTP_HAD_NO_TCB;
	} else {
		abm->sh.v_tag = htonl(vtag);
		abm->msg.ch.chunk_flags = 0;
	}
	abm->msg.ch.chunk_type = SCTP_ABORT_ASSOCIATION;

	if (err_cause) {
		struct mbuf *m_tmp = err_cause;
		int err_len = 0;

		/* get length of the err_cause chain */
		while (m_tmp != NULL) {
			err_len += SCTP_BUF_LEN(m_tmp);
			m_tmp = SCTP_BUF_NEXT(m_tmp);
		}
		len = SCTP_BUF_LEN(mout) + err_len;
		if (err_len % 4) {
			/* need pad at end of chunk */
			uint32_t cpthis = 0;
			int padlen;

			padlen = 4 - (len % 4);
			m_copyback(mout, len, padlen, (caddr_t)&cpthis);
			len += padlen;
		}
		/* Chunk length excludes the pad, per SCTP rules. */
		abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch) + err_len);
	} else {
		len = SCTP_BUF_LEN(mout);
		abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch));
	}

	if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
		/* no mbuf's */
		sctp_m_freem(mout);
		return;
	}
	if (iph_out != NULL) {
		sctp_route_t ro;
		int ret;

		/* zap the stack pointer to the route */
		bzero(&ro, sizeof ro);
		if (port) {
			udp->uh_ulen = htons(len - sizeof(struct ip));
			udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
		}
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip_output:\n");
		SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, iph_out, &abm->sh);
		/* set IPv4 length */
		iph_out->ip_len = len;
		/* out it goes */
#ifdef SCTP_PACKET_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
			sctp_packet_log(mout, len);
#endif
		SCTP_ATTACH_CHAIN(o_pak, mout, len);
		if (port) {
			/* Encapsulated: software SCTP CRC, UDP csum by stack. */
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
			abm->sh.checksum = sctp_calculate_cksum(mout, iphlen_out);
			SCTP_STAT_INCR(sctps_sendswcrc);
#endif
			SCTP_ENABLE_UDP_CSUM(o_pak);
		} else {
			/* Plain SCTP: offload the CRC32c to hardware. */
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
			mout->m_pkthdr.csum_flags = CSUM_SCTP;
			mout->m_pkthdr.csum_data = 0;
			SCTP_STAT_INCR(sctps_sendhwcrc);
#endif
		}
		SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);

		/* Free the route if we got one back */
		if (ro.ro_rt)
			RTFREE(ro.ro_rt);
	}
#ifdef INET6
	if (ip6_out != NULL) {
		struct route_in6 ro;
		int ret;
		struct ifnet *ifp = NULL;

		/* zap the stack pointer to the route */
		bzero(&ro, sizeof(ro));
		if (port) {
			udp->uh_ulen = htons(len - sizeof(struct ip6_hdr));
		}
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip6_output:\n");
		SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, (struct ip *)ip6_out, &abm->sh);
		ip6_out->ip6_plen = len - sizeof(*ip6_out);
#ifdef SCTP_PACKET_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
			sctp_packet_log(mout, len);
#endif
		SCTP_ATTACH_CHAIN(o_pak, mout, len);
		if (port) {
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
			abm->sh.checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
			SCTP_STAT_INCR(sctps_sendswcrc);
#endif
			/* UDP checksum of 0 means "no checksum"; use 0xffff. */
			if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
				udp->uh_sum = 0xffff;
			}
		} else {
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
			mout->m_pkthdr.csum_flags = CSUM_SCTP;
			mout->m_pkthdr.csum_data = 0;
			SCTP_STAT_INCR(sctps_sendhwcrc);
#endif
		}
		SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, NULL, vrf_id);

		/* Free the route if we got one back */
		if (ro.ro_rt)
			RTFREE(ro.ro_rt);
	}
#endif
	SCTP_STAT_INCR(sctps_sendpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
}
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
			/*
			 * Not UDP encapsulated: hand the SCTP CRC32c off to
			 * the NIC via the checksum-offload fields.
			 */
			mout->m_pkthdr.csum_flags = CSUM_SCTP;
			mout->m_pkthdr.csum_data = 0;
			SCTP_STAT_INCR(sctps_sendhwcrc);
#endif
		}
		SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, NULL, vrf_id);

		/* Free the route if we got one back */
		if (ro.ro_rt)
			RTFREE(ro.ro_rt);
	}
#endif
	/* Account the outgoing packet and its control chunk. */
	SCTP_STAT_INCR(sctps_sendpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
}

/*
 * Build and transmit an OPERATION-ERROR chunk back to the sender of the
 * inbound packet 'm'.  'iphlen' is the length of the IP header within 'm',
 * 'scm' (may be NULL) is an mbuf chain carrying the error cause(s) to
 * append behind the chunk header, 'vtag' is the verification tag placed in
 * the outgoing common header (stored as-is, without byte-order conversion),
 * and a non-zero 'port' requests UDP encapsulation to that destination
 * port.  Consumes 'scm' on every path (chained into the reply or freed).
 */
void
sctp_send_operr_to(struct mbuf *m, int iphlen, struct mbuf *scm, uint32_t vtag,
    uint32_t vrf_id, uint16_t port)
{
	struct mbuf *o_pak;
	struct sctphdr *sh, *sh_out;
	struct sctp_chunkhdr *ch;
	struct ip *iph, *iph_out;
	struct udphdr *udp = NULL;
	struct mbuf *mout;

#ifdef INET6
	struct ip6_hdr *ip6, *ip6_out;

#endif
	int iphlen_out, len;

	/* Locate the inbound IP and SCTP headers. */
	iph = mtod(m, struct ip *);
	sh = (struct sctphdr *)((caddr_t)iph + iphlen);
	/*
	 * Fixed-size reply header block: IP header + SCTP common header +
	 * one chunk header.  The causes in 'scm' ride behind it.
	 */
	switch (iph->ip_v) {
	case IPVERSION:
		len = (sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr));
		break;
#ifdef INET6
	case IPV6_VERSION >> 4:
		len = (sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr));
		break;
#endif
	default:
		/* Unknown IP version: drop the causes and give up. */
		if (scm) {
			sctp_m_freem(scm);
		}
		return;
	}
	if (port) {
		/* Room for the UDP encapsulation header. */
		len += sizeof(struct udphdr);
	}
	mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
	if (mout == NULL) {
		if (scm) {
			sctp_m_freem(scm);
		}
		return;
	}
	SCTP_BUF_RESV_UF(mout, max_linkhdr);
	SCTP_BUF_LEN(mout) = len;
	/* Chain the error causes behind the header mbuf. */
	SCTP_BUF_NEXT(mout) = scm;
	if (m->m_flags & M_FLOWID) {
		/* Keep the reply on the same flow as the inbound packet. */
		mout->m_pkthdr.flowid = m->m_pkthdr.flowid;
		mout->m_flags |= M_FLOWID;
	}
	iph_out = NULL;
#ifdef INET6
	ip6_out = NULL;
#endif
	/* Build the network-layer header, mirroring the inbound packet. */
	switch (iph->ip_v) {
	case IPVERSION:
		iph_out = mtod(mout, struct ip *);

		/* Fill in the IP header for the ABORT */
		iph_out->ip_v = IPVERSION;
		iph_out->ip_hl = (sizeof(struct ip) / 4);
		iph_out->ip_tos = (u_char)0;
		iph_out->ip_id = 0;
		iph_out->ip_off = 0;
		iph_out->ip_ttl = MAXTTL;
		if (port) {
			iph_out->ip_p = IPPROTO_UDP;
		} else {
			iph_out->ip_p = IPPROTO_SCTP;
		}
		/* Reply goes back the way it came: swap src/dst. */
		iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
		iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
		/* let IP layer calculate this */
		iph_out->ip_sum = 0;

		iphlen_out = sizeof(struct ip);
		sh_out = (struct sctphdr *)((caddr_t)iph_out + iphlen_out);
		break;
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = (struct ip6_hdr *)iph;
		ip6_out = mtod(mout, struct ip6_hdr *);

		/* Fill in the IP6 header for the ABORT */
		ip6_out->ip6_flow = ip6->ip6_flow;
		ip6_out->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
		if (port) {
			ip6_out->ip6_nxt = IPPROTO_UDP;
		} else {
			ip6_out->ip6_nxt = IPPROTO_SCTP;
		}
		/* Reply goes back the way it came: swap src/dst. */
		ip6_out->ip6_src = ip6->ip6_dst;
		ip6_out->ip6_dst = ip6->ip6_src;

		iphlen_out = sizeof(struct ip6_hdr);
		sh_out = (struct sctphdr *)((caddr_t)ip6_out + iphlen_out);
		break;
#endif				/* INET6 */
	default:
		/* Currently not supported */
		sctp_m_freem(mout);
		return;
	}

	udp = (struct udphdr *)sh_out;
	if (port) {
		/*
		 * UDP encapsulation requested: insert the UDP header ahead
		 * of the SCTP common header.  uh_ulen is filled in once the
		 * total length is known, just before transmit.
		 */
		udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
		udp->uh_dport = port;
		/* set udp->uh_ulen later */
		udp->uh_sum = 0;
		iphlen_out += sizeof(struct udphdr);
		sh_out = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
	}
	/* SCTP common header: mirror the peer's ports, caller-supplied vtag. */
	sh_out->src_port = sh->dest_port;
	sh_out->dest_port = sh->src_port;
	sh_out->v_tag = vtag;
	sh_out->checksum = 0;

	ch = (struct sctp_chunkhdr *)((caddr_t)sh_out + sizeof(struct sctphdr));
	ch->chunk_type = SCTP_OPERATION_ERROR;
	ch->chunk_flags = 0;

	if (scm) {
		struct mbuf *m_tmp = scm;
		int cause_len = 0;

		/* get length of the err_cause chain */
		while (m_tmp != NULL) {
			cause_len += SCTP_BUF_LEN(m_tmp);
			m_tmp = SCTP_BUF_NEXT(m_tmp);
		}
		len = SCTP_BUF_LEN(mout) + cause_len;
		if (cause_len % 4) {
			/* need pad at end of chunk */
			uint32_t cpthis = 0;
			int padlen;

			padlen = 4 - (len % 4);
			m_copyback(mout, len, padlen, (caddr_t)&cpthis);
			len += padlen;
		}
		/* Chunk length covers the header plus the unpadded causes. */
		ch->chunk_length = htons(sizeof(struct sctp_chunkhdr) + cause_len);
	} else {
		len = SCTP_BUF_LEN(mout);
		ch->chunk_length = htons(sizeof(struct sctp_chunkhdr));
	}

	if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
		/* no mbuf's */
		sctp_m_freem(mout);
		return;
	}
	if (iph_out != NULL) {
		/* IPv4 transmit path. */
		sctp_route_t ro;
		int ret;

		/* zap the stack pointer to the route */
		bzero(&ro, sizeof ro);
		if (port) {
			/* Final UDP length and pseudo-header checksum. */
			udp->uh_ulen = htons(len - sizeof(struct ip));
			udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
		}
		/* set IPv4 length */
		iph_out->ip_len = len;
		/* out it goes */
#ifdef SCTP_PACKET_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
			sctp_packet_log(mout, len);
#endif
		SCTP_ATTACH_CHAIN(o_pak, mout, len);
		if (port) {
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
			/* UDP encapsulated: compute SCTP CRC32c in software. */
			sh_out->checksum = sctp_calculate_cksum(mout, iphlen_out);
			SCTP_STAT_INCR(sctps_sendswcrc);
#endif
			SCTP_ENABLE_UDP_CSUM(o_pak);
		} else {
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
			/* Offload the SCTP CRC32c to the hardware. */
			mout->m_pkthdr.csum_flags = CSUM_SCTP;
			mout->m_pkthdr.csum_data = 0;
			SCTP_STAT_INCR(sctps_sendhwcrc);
#endif
		}
		SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);

		/* Free the route if we got one back */
		if (ro.ro_rt)
			RTFREE(ro.ro_rt);
	}
#ifdef INET6
	if (ip6_out != NULL) {
		/* IPv6 transmit path. */
		struct route_in6 ro;
		int ret;
		struct ifnet *ifp = NULL;

		/* zap the stack pointer to the route */
		bzero(&ro, sizeof(ro));
		if (port) {
			/* Final UDP length now that the total is known. */
			udp->uh_ulen = htons(len - sizeof(struct ip6_hdr));
		}
		ip6_out->ip6_plen = len - sizeof(*ip6_out);
#ifdef SCTP_PACKET_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
			sctp_packet_log(mout, len);
#endif
		SCTP_ATTACH_CHAIN(o_pak, mout, len);
		if (port) {
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
			/* UDP encapsulated: compute SCTP CRC32c in software. */
			sh_out->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
			SCTP_STAT_INCR(sctps_sendswcrc);
#endif
			/* A computed UDP checksum of zero is sent as 0xffff. */
			if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
				udp->uh_sum = 0xffff;
			}
		} else {
#if defined(SCTP_WITH_NO_CSUM)
			SCTP_STAT_INCR(sctps_sendnocrc);
#else
			/* Offload the SCTP CRC32c to the hardware. */
			mout->m_pkthdr.csum_flags = CSUM_SCTP;
			mout->m_pkthdr.csum_data = 0;
			SCTP_STAT_INCR(sctps_sendhwcrc);
#endif
		}
		SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, NULL, vrf_id);

		/* Free the route if we got one back */
		if (ro.ro_rt)
			RTFREE(ro.ro_rt);
	}
#endif
	/* Account the outgoing packet and its control chunk. */
	SCTP_STAT_INCR(sctps_sendpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
}

/*
 * Copy up to 'max_send_len' bytes from the user buffer described by 'uio'
 * into a freshly allocated mbuf chain.  On success, *sndout is set to the
 * number of bytes copied and *new_tail to the last mbuf of the chain; on
 * allocation failure, *error is set to ENOMEM and NULL is returned.
 */
static struct mbuf *
sctp_copy_resume(struct sctp_stream_queue_pending *sp,
    struct uio *uio,
    struct sctp_sndrcvinfo *srcv,
    int max_send_len,
    int user_marks_eor,
    int *error,
uint32_t * sndout, 11873 struct mbuf **new_tail) 11874 { 11875 struct mbuf *m; 11876 11877 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0, 11878 (M_PKTHDR | (user_marks_eor ? M_EOR : 0))); 11879 if (m == NULL) { 11880 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 11881 *error = ENOMEM; 11882 } else { 11883 *sndout = m_length(m, NULL); 11884 *new_tail = m_last(m); 11885 } 11886 return (m); 11887 } 11888 11889 static int 11890 sctp_copy_one(struct sctp_stream_queue_pending *sp, 11891 struct uio *uio, 11892 int resv_upfront) 11893 { 11894 int left; 11895 11896 left = sp->length; 11897 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length, 11898 resv_upfront, 0); 11899 if (sp->data == NULL) { 11900 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 11901 return (ENOMEM); 11902 } 11903 sp->tail_mbuf = m_last(sp->data); 11904 return (0); 11905 } 11906 11907 11908 11909 static struct sctp_stream_queue_pending * 11910 sctp_copy_it_in(struct sctp_tcb *stcb, 11911 struct sctp_association *asoc, 11912 struct sctp_sndrcvinfo *srcv, 11913 struct uio *uio, 11914 struct sctp_nets *net, 11915 int max_send_len, 11916 int user_marks_eor, 11917 int *error, 11918 int non_blocking) 11919 { 11920 /*- 11921 * This routine must be very careful in its work. Protocol 11922 * processing is up and running so care must be taken to spl...() 11923 * when you need to do something that may effect the stcb/asoc. The 11924 * sb is locked however. When data is copied the protocol processing 11925 * should be enabled since this is a slower operation... 11926 */ 11927 struct sctp_stream_queue_pending *sp = NULL; 11928 int resv_in_first; 11929 11930 *error = 0; 11931 /* Now can we send this? 
*/ 11932 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) || 11933 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 11934 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) || 11935 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) { 11936 /* got data while shutting down */ 11937 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET); 11938 *error = ECONNRESET; 11939 goto out_now; 11940 } 11941 sctp_alloc_a_strmoq(stcb, sp); 11942 if (sp == NULL) { 11943 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 11944 *error = ENOMEM; 11945 goto out_now; 11946 } 11947 sp->act_flags = 0; 11948 sp->sender_all_done = 0; 11949 sp->sinfo_flags = srcv->sinfo_flags; 11950 sp->timetolive = srcv->sinfo_timetolive; 11951 sp->ppid = srcv->sinfo_ppid; 11952 sp->context = srcv->sinfo_context; 11953 sp->strseq = 0; 11954 (void)SCTP_GETTIME_TIMEVAL(&sp->ts); 11955 11956 sp->stream = srcv->sinfo_stream; 11957 sp->length = min(uio->uio_resid, max_send_len); 11958 if ((sp->length == (uint32_t) uio->uio_resid) && 11959 ((user_marks_eor == 0) || 11960 (srcv->sinfo_flags & SCTP_EOF) || 11961 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) { 11962 sp->msg_is_complete = 1; 11963 } else { 11964 sp->msg_is_complete = 0; 11965 } 11966 sp->sender_all_done = 0; 11967 sp->some_taken = 0; 11968 sp->put_last_out = 0; 11969 resv_in_first = sizeof(struct sctp_data_chunk); 11970 sp->data = sp->tail_mbuf = NULL; 11971 if (sp->length == 0) { 11972 *error = 0; 11973 goto skip_copy; 11974 } 11975 sp->auth_keyid = stcb->asoc.authinfo.active_keyid; 11976 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) { 11977 sctp_auth_key_acquire(stcb, stcb->asoc.authinfo.active_keyid); 11978 sp->holds_key_ref = 1; 11979 } 11980 *error = sctp_copy_one(sp, uio, resv_in_first); 11981 skip_copy: 11982 if (*error) { 11983 sctp_free_a_strmoq(stcb, sp); 11984 sp = NULL; 11985 } else { 11986 if (sp->sinfo_flags & SCTP_ADDR_OVER) { 11987 sp->net = net; 
			atomic_add_int(&sp->net->ref_count, 1);
		} else {
			sp->net = NULL;
		}
		sctp_set_prsctp_policy(sp);
	}
out_now:
	return (sp);
}


/*
 * Socket-layer send entry point for SCTP.  Extracts any SCTP_SNDRCV cmsg
 * from 'control', rewrites an IPv4-mapped IPv6 destination as plain IPv4
 * when both families are compiled in, and hands the send off to
 * sctp_lower_sosend().  Returns the error code from the lower send.
 */
int
sctp_sosend(struct socket *so,
    struct sockaddr *addr,
    struct uio *uio,
    struct mbuf *top,
    struct mbuf *control,
    int flags,
    struct thread *p
)
{
	int error, use_rcvinfo = 0;
	struct sctp_sndrcvinfo srcv;
	struct sockaddr *addr_to_use;

#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;

#endif

	if (control) {
		/* process cmsg snd/rcv info (maybe a assoc-id) */
		if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&srcv, control,
		    sizeof(srcv))) {
			/* got one */
			use_rcvinfo = 1;
		}
	}
	addr_to_use = addr;
#if defined(INET) && defined(INET6)
	if ((addr) && (addr->sa_family == AF_INET6)) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)addr;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			/* V4-mapped destination: send as native IPv4. */
			in6_sin6_2_sin(&sin, sin6);
			addr_to_use = (struct sockaddr *)&sin;
		}
	}
#endif
	error = sctp_lower_sosend(so, addr_to_use, uio, top,
	    control,
	    flags,
	    use_rcvinfo ?
&srcv : NULL 12042 ,p 12043 ); 12044 return (error); 12045 } 12046 12047 12048 int 12049 sctp_lower_sosend(struct socket *so, 12050 struct sockaddr *addr, 12051 struct uio *uio, 12052 struct mbuf *i_pak, 12053 struct mbuf *control, 12054 int flags, 12055 struct sctp_sndrcvinfo *srcv 12056 , 12057 struct thread *p 12058 ) 12059 { 12060 unsigned int sndlen = 0, max_len; 12061 int error, len; 12062 struct mbuf *top = NULL; 12063 int queue_only = 0, queue_only_for_init = 0; 12064 int free_cnt_applied = 0; 12065 int un_sent; 12066 int now_filled = 0; 12067 unsigned int inqueue_bytes = 0; 12068 struct sctp_block_entry be; 12069 struct sctp_inpcb *inp; 12070 struct sctp_tcb *stcb = NULL; 12071 struct timeval now; 12072 struct sctp_nets *net; 12073 struct sctp_association *asoc; 12074 struct sctp_inpcb *t_inp; 12075 int user_marks_eor; 12076 int create_lock_applied = 0; 12077 int nagle_applies = 0; 12078 int some_on_control = 0; 12079 int got_all_of_the_send = 0; 12080 int hold_tcblock = 0; 12081 int non_blocking = 0; 12082 uint32_t local_add_more, local_soresv = 0; 12083 uint16_t port; 12084 uint16_t sinfo_flags; 12085 sctp_assoc_t sinfo_assoc_id; 12086 12087 error = 0; 12088 net = NULL; 12089 stcb = NULL; 12090 asoc = NULL; 12091 12092 t_inp = inp = (struct sctp_inpcb *)so->so_pcb; 12093 if (inp == NULL) { 12094 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); 12095 error = EINVAL; 12096 if (i_pak) { 12097 SCTP_RELEASE_PKT(i_pak); 12098 } 12099 return (error); 12100 } 12101 if ((uio == NULL) && (i_pak == NULL)) { 12102 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 12103 return (EINVAL); 12104 } 12105 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 12106 atomic_add_int(&inp->total_sends, 1); 12107 if (uio) { 12108 if (uio->uio_resid < 0) { 12109 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 12110 return (EINVAL); 12111 } 12112 sndlen = uio->uio_resid; 12113 } else { 12114 top = 
SCTP_HEADER_TO_CHAIN(i_pak); 12115 sndlen = SCTP_HEADER_LEN(i_pak); 12116 } 12117 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n", 12118 addr, 12119 sndlen); 12120 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 12121 (inp->sctp_socket->so_qlimit)) { 12122 /* The listener can NOT send */ 12123 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN); 12124 error = ENOTCONN; 12125 goto out_unlocked; 12126 } 12127 /** 12128 * Pre-screen address, if one is given the sin-len 12129 * must be set correctly! 12130 */ 12131 if (addr) { 12132 union sctp_sockstore *raddr = (union sctp_sockstore *)addr; 12133 12134 switch (raddr->sa.sa_family) { 12135 #if defined(INET) 12136 case AF_INET: 12137 if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) { 12138 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 12139 error = EINVAL; 12140 goto out_unlocked; 12141 } 12142 port = raddr->sin.sin_port; 12143 break; 12144 #endif 12145 #if defined(INET6) 12146 case AF_INET6: 12147 if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) { 12148 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 12149 error = EINVAL; 12150 goto out_unlocked; 12151 } 12152 port = raddr->sin6.sin6_port; 12153 break; 12154 #endif 12155 default: 12156 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT); 12157 error = EAFNOSUPPORT; 12158 goto out_unlocked; 12159 } 12160 } else 12161 port = 0; 12162 12163 if (srcv) { 12164 sinfo_flags = srcv->sinfo_flags; 12165 sinfo_assoc_id = srcv->sinfo_assoc_id; 12166 if (INVALID_SINFO_FLAG(sinfo_flags) || 12167 PR_SCTP_INVALID_POLICY(sinfo_flags)) { 12168 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 12169 error = EINVAL; 12170 goto out_unlocked; 12171 } 12172 if (srcv->sinfo_flags) 12173 SCTP_STAT_INCR(sctps_sends_with_flags); 12174 } else { 12175 sinfo_flags = inp->def_send.sinfo_flags; 12176 sinfo_assoc_id = inp->def_send.sinfo_assoc_id; 12177 } 12178 if 
(sinfo_flags & SCTP_SENDALL) { 12179 /* its a sendall */ 12180 error = sctp_sendall(inp, uio, top, srcv); 12181 top = NULL; 12182 goto out_unlocked; 12183 } 12184 if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) { 12185 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 12186 error = EINVAL; 12187 goto out_unlocked; 12188 } 12189 /* now we must find the assoc */ 12190 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) || 12191 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 12192 SCTP_INP_RLOCK(inp); 12193 stcb = LIST_FIRST(&inp->sctp_asoc_list); 12194 if (stcb == NULL) { 12195 SCTP_INP_RUNLOCK(inp); 12196 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN); 12197 error = ENOTCONN; 12198 goto out_unlocked; 12199 } 12200 SCTP_TCB_LOCK(stcb); 12201 hold_tcblock = 1; 12202 SCTP_INP_RUNLOCK(inp); 12203 } else if (sinfo_assoc_id) { 12204 stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 0); 12205 } else if (addr) { 12206 /*- 12207 * Since we did not use findep we must 12208 * increment it, and if we don't find a tcb 12209 * decrement it. 12210 */ 12211 SCTP_INP_WLOCK(inp); 12212 SCTP_INP_INCR_REF(inp); 12213 SCTP_INP_WUNLOCK(inp); 12214 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL); 12215 if (stcb == NULL) { 12216 SCTP_INP_WLOCK(inp); 12217 SCTP_INP_DECR_REF(inp); 12218 SCTP_INP_WUNLOCK(inp); 12219 } else { 12220 hold_tcblock = 1; 12221 } 12222 } 12223 if ((stcb == NULL) && (addr)) { 12224 /* Possible implicit send? */ 12225 SCTP_ASOC_CREATE_LOCK(inp); 12226 create_lock_applied = 1; 12227 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 12228 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 12229 /* Should I really unlock ? 
*/ 12230 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); 12231 error = EINVAL; 12232 goto out_unlocked; 12233 12234 } 12235 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && 12236 (addr->sa_family == AF_INET6)) { 12237 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 12238 error = EINVAL; 12239 goto out_unlocked; 12240 } 12241 SCTP_INP_WLOCK(inp); 12242 SCTP_INP_INCR_REF(inp); 12243 SCTP_INP_WUNLOCK(inp); 12244 /* With the lock applied look again */ 12245 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL); 12246 if (stcb == NULL) { 12247 SCTP_INP_WLOCK(inp); 12248 SCTP_INP_DECR_REF(inp); 12249 SCTP_INP_WUNLOCK(inp); 12250 } else { 12251 hold_tcblock = 1; 12252 } 12253 if (t_inp != inp) { 12254 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN); 12255 error = ENOTCONN; 12256 goto out_unlocked; 12257 } 12258 } 12259 if (stcb == NULL) { 12260 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 12261 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 12262 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN); 12263 error = ENOTCONN; 12264 goto out_unlocked; 12265 } 12266 if (addr == NULL) { 12267 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT); 12268 error = ENOENT; 12269 goto out_unlocked; 12270 } else { 12271 /* 12272 * UDP style, we must go ahead and start the INIT 12273 * process 12274 */ 12275 uint32_t vrf_id; 12276 12277 if ((sinfo_flags & SCTP_ABORT) || 12278 ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) { 12279 /*- 12280 * User asks to abort a non-existant assoc, 12281 * or EOF a non-existant assoc with no data 12282 */ 12283 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT); 12284 error = ENOENT; 12285 goto out_unlocked; 12286 } 12287 /* get an asoc/stcb struct */ 12288 vrf_id = inp->def_vrf_id; 12289 #ifdef INVARIANTS 12290 if (create_lock_applied == 0) { 12291 panic("Error, should hold create lock and I don't?"); 12292 } 12293 
#endif 12294 stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id, 12295 p 12296 ); 12297 if (stcb == NULL) { 12298 /* Error is setup for us in the call */ 12299 goto out_unlocked; 12300 } 12301 if (create_lock_applied) { 12302 SCTP_ASOC_CREATE_UNLOCK(inp); 12303 create_lock_applied = 0; 12304 } else { 12305 SCTP_PRINTF("Huh-3? create lock should have been on??\n"); 12306 } 12307 /* 12308 * Turn on queue only flag to prevent data from 12309 * being sent 12310 */ 12311 queue_only = 1; 12312 asoc = &stcb->asoc; 12313 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT); 12314 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered); 12315 12316 /* initialize authentication params for the assoc */ 12317 sctp_initialize_auth_params(inp, stcb); 12318 12319 if (control) { 12320 /* 12321 * see if a init structure exists in cmsg 12322 * headers 12323 */ 12324 struct sctp_initmsg initm; 12325 int i; 12326 12327 if (sctp_find_cmsg(SCTP_INIT, (void *)&initm, control, 12328 sizeof(initm))) { 12329 /* 12330 * we have an INIT override of the 12331 * default 12332 */ 12333 if (initm.sinit_max_attempts) 12334 asoc->max_init_times = initm.sinit_max_attempts; 12335 if (initm.sinit_num_ostreams) 12336 asoc->pre_open_streams = initm.sinit_num_ostreams; 12337 if (initm.sinit_max_instreams) 12338 asoc->max_inbound_streams = initm.sinit_max_instreams; 12339 if (initm.sinit_max_init_timeo) 12340 asoc->initial_init_rto_max = initm.sinit_max_init_timeo; 12341 if (asoc->streamoutcnt < asoc->pre_open_streams) { 12342 struct sctp_stream_out *tmp_str; 12343 int had_lock = 0; 12344 12345 /* Default is NOT correct */ 12346 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, defout:%d pre_open:%d\n", 12347 asoc->streamoutcnt, asoc->pre_open_streams); 12348 /* 12349 * What happens if this 12350 * fails? we panic ... 
12351 */ 12352 12353 if (hold_tcblock) { 12354 had_lock = 1; 12355 SCTP_TCB_UNLOCK(stcb); 12356 } 12357 SCTP_MALLOC(tmp_str, 12358 struct sctp_stream_out *, 12359 (asoc->pre_open_streams * 12360 sizeof(struct sctp_stream_out)), 12361 SCTP_M_STRMO); 12362 if (had_lock) { 12363 SCTP_TCB_LOCK(stcb); 12364 } 12365 if (tmp_str != NULL) { 12366 SCTP_FREE(asoc->strmout, SCTP_M_STRMO); 12367 asoc->strmout = tmp_str; 12368 asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams; 12369 } else { 12370 asoc->pre_open_streams = asoc->streamoutcnt; 12371 } 12372 for (i = 0; i < asoc->streamoutcnt; i++) { 12373 /*- 12374 * inbound side must be set 12375 * to 0xffff, also NOTE when 12376 * we get the INIT-ACK back 12377 * (for INIT sender) we MUST 12378 * reduce the count 12379 * (streamoutcnt) but first 12380 * check if we sent to any 12381 * of the upper streams that 12382 * were dropped (if some 12383 * were). Those that were 12384 * dropped must be notified 12385 * to the upper layer as 12386 * failed to send. 12387 */ 12388 asoc->strmout[i].next_sequence_sent = 0x0; 12389 TAILQ_INIT(&asoc->strmout[i].outqueue); 12390 asoc->strmout[i].stream_no = i; 12391 asoc->strmout[i].last_msg_incomplete = 0; 12392 asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL); 12393 } 12394 } 12395 } 12396 } 12397 hold_tcblock = 1; 12398 /* out with the INIT */ 12399 queue_only_for_init = 1; 12400 /*- 12401 * we may want to dig in after this call and adjust the MTU 12402 * value. It defaulted to 1500 (constant) but the ro 12403 * structure may now have an update and thus we may need to 12404 * change it BEFORE we append the message. 
12405 */ 12406 } 12407 } else 12408 asoc = &stcb->asoc; 12409 if (srcv == NULL) 12410 srcv = (struct sctp_sndrcvinfo *)&asoc->def_send; 12411 if (srcv->sinfo_flags & SCTP_ADDR_OVER) { 12412 if (addr) 12413 net = sctp_findnet(stcb, addr); 12414 else 12415 net = NULL; 12416 if ((net == NULL) || 12417 ((port != 0) && (port != stcb->rport))) { 12418 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 12419 error = EINVAL; 12420 goto out_unlocked; 12421 } 12422 } else { 12423 net = stcb->asoc.primary_destination; 12424 } 12425 atomic_add_int(&stcb->total_sends, 1); 12426 /* Keep the stcb from being freed under our feet */ 12427 atomic_add_int(&asoc->refcnt, 1); 12428 free_cnt_applied = 1; 12429 12430 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) { 12431 if (sndlen > asoc->smallest_mtu) { 12432 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE); 12433 error = EMSGSIZE; 12434 goto out_unlocked; 12435 } 12436 } 12437 if ((SCTP_SO_IS_NBIO(so) 12438 || (flags & MSG_NBIO) 12439 )) { 12440 non_blocking = 1; 12441 } 12442 /* would we block? 
*/ 12443 if (non_blocking) { 12444 if (hold_tcblock == 0) { 12445 SCTP_TCB_LOCK(stcb); 12446 hold_tcblock = 1; 12447 } 12448 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); 12449 if ((SCTP_SB_LIMIT_SND(so) < (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) || 12450 (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) { 12451 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK); 12452 if (sndlen > SCTP_SB_LIMIT_SND(so)) 12453 error = EMSGSIZE; 12454 else 12455 error = EWOULDBLOCK; 12456 goto out_unlocked; 12457 } 12458 stcb->asoc.sb_send_resv += sndlen; 12459 SCTP_TCB_UNLOCK(stcb); 12460 hold_tcblock = 0; 12461 } else { 12462 atomic_add_int(&stcb->asoc.sb_send_resv, sndlen); 12463 } 12464 local_soresv = sndlen; 12465 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 12466 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET); 12467 error = ECONNRESET; 12468 goto out_unlocked; 12469 } 12470 if (create_lock_applied) { 12471 SCTP_ASOC_CREATE_UNLOCK(inp); 12472 create_lock_applied = 0; 12473 } 12474 if (asoc->stream_reset_outstanding) { 12475 /* 12476 * Can't queue any data while stream reset is underway. 
12477 */ 12478 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAGAIN); 12479 error = EAGAIN; 12480 goto out_unlocked; 12481 } 12482 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 12483 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) { 12484 queue_only = 1; 12485 } 12486 /* we are now done with all control */ 12487 if (control) { 12488 sctp_m_freem(control); 12489 control = NULL; 12490 } 12491 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) || 12492 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) || 12493 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 12494 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) { 12495 if (srcv->sinfo_flags & SCTP_ABORT) { 12496 ; 12497 } else { 12498 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET); 12499 error = ECONNRESET; 12500 goto out_unlocked; 12501 } 12502 } 12503 /* Ok, we will attempt a msgsnd :> */ 12504 if (p) { 12505 p->td_ru.ru_msgsnd++; 12506 } 12507 /* Are we aborting? */ 12508 if (srcv->sinfo_flags & SCTP_ABORT) { 12509 struct mbuf *mm; 12510 int tot_demand, tot_out = 0, max_out; 12511 12512 SCTP_STAT_INCR(sctps_sends_with_abort); 12513 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 12514 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) { 12515 /* It has to be up before we abort */ 12516 /* how big is the user initiated abort? 
*/ 12517 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 12518 error = EINVAL; 12519 goto out; 12520 } 12521 if (hold_tcblock) { 12522 SCTP_TCB_UNLOCK(stcb); 12523 hold_tcblock = 0; 12524 } 12525 if (top) { 12526 struct mbuf *cntm = NULL; 12527 12528 mm = sctp_get_mbuf_for_msg(1, 0, M_WAIT, 1, MT_DATA); 12529 if (sndlen != 0) { 12530 cntm = top; 12531 while (cntm) { 12532 tot_out += SCTP_BUF_LEN(cntm); 12533 cntm = SCTP_BUF_NEXT(cntm); 12534 } 12535 } 12536 tot_demand = (tot_out + sizeof(struct sctp_paramhdr)); 12537 } else { 12538 /* Must fit in a MTU */ 12539 tot_out = sndlen; 12540 tot_demand = (tot_out + sizeof(struct sctp_paramhdr)); 12541 if (tot_demand > SCTP_DEFAULT_ADD_MORE) { 12542 /* To big */ 12543 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE); 12544 error = EMSGSIZE; 12545 goto out; 12546 } 12547 mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAIT, 1, MT_DATA); 12548 } 12549 if (mm == NULL) { 12550 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 12551 error = ENOMEM; 12552 goto out; 12553 } 12554 max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr); 12555 max_out -= sizeof(struct sctp_abort_msg); 12556 if (tot_out > max_out) { 12557 tot_out = max_out; 12558 } 12559 if (mm) { 12560 struct sctp_paramhdr *ph; 12561 12562 /* now move forward the data pointer */ 12563 ph = mtod(mm, struct sctp_paramhdr *); 12564 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 12565 ph->param_length = htons((sizeof(struct sctp_paramhdr) + tot_out)); 12566 ph++; 12567 SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr); 12568 if (top == NULL) { 12569 error = uiomove((caddr_t)ph, (int)tot_out, uio); 12570 if (error) { 12571 /*- 12572 * Here if we can't get his data we 12573 * still abort we just don't get to 12574 * send the users note :-0 12575 */ 12576 sctp_m_freem(mm); 12577 mm = NULL; 12578 } 12579 } else { 12580 if (sndlen != 0) { 12581 SCTP_BUF_NEXT(mm) = top; 12582 } 12583 } 12584 } 12585 
if (hold_tcblock == 0) { 12586 SCTP_TCB_LOCK(stcb); 12587 hold_tcblock = 1; 12588 } 12589 atomic_add_int(&stcb->asoc.refcnt, -1); 12590 free_cnt_applied = 0; 12591 /* release this lock, otherwise we hang on ourselves */ 12592 sctp_abort_an_association(stcb->sctp_ep, stcb, 12593 SCTP_RESPONSE_TO_USER_REQ, 12594 mm, SCTP_SO_LOCKED); 12595 /* now relock the stcb so everything is sane */ 12596 hold_tcblock = 0; 12597 stcb = NULL; 12598 /* 12599 * In this case top is already chained to mm avoid double 12600 * free, since we free it below if top != NULL and driver 12601 * would free it after sending the packet out 12602 */ 12603 if (sndlen != 0) { 12604 top = NULL; 12605 } 12606 goto out_unlocked; 12607 } 12608 /* Calculate the maximum we can send */ 12609 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); 12610 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) { 12611 if (non_blocking) { 12612 /* we already checked for non-blocking above. */ 12613 max_len = sndlen; 12614 } else { 12615 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes; 12616 } 12617 } else { 12618 max_len = 0; 12619 } 12620 if (hold_tcblock) { 12621 SCTP_TCB_UNLOCK(stcb); 12622 hold_tcblock = 0; 12623 } 12624 /* Is the stream no. valid? */ 12625 if (srcv->sinfo_stream >= asoc->streamoutcnt) { 12626 /* Invalid stream number */ 12627 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 12628 error = EINVAL; 12629 goto out_unlocked; 12630 } 12631 if (asoc->strmout == NULL) { 12632 /* huh? software error */ 12633 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT); 12634 error = EFAULT; 12635 goto out_unlocked; 12636 } 12637 /* Unless E_EOR mode is on, we must make a send FIT in one call. 
*/ 12638 if ((user_marks_eor == 0) && 12639 (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) { 12640 /* It will NEVER fit */ 12641 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE); 12642 error = EMSGSIZE; 12643 goto out_unlocked; 12644 } 12645 if ((uio == NULL) && user_marks_eor) { 12646 /*- 12647 * We do not support eeor mode for 12648 * sending with mbuf chains (like sendfile). 12649 */ 12650 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 12651 error = EINVAL; 12652 goto out_unlocked; 12653 } 12654 if (user_marks_eor) { 12655 local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold)); 12656 } else { 12657 /*- 12658 * For non-eeor the whole message must fit in 12659 * the socket send buffer. 12660 */ 12661 local_add_more = sndlen; 12662 } 12663 len = 0; 12664 if (non_blocking) { 12665 goto skip_preblock; 12666 } 12667 if (((max_len <= local_add_more) && 12668 (SCTP_SB_LIMIT_SND(so) >= local_add_more)) || 12669 (max_len == 0) || 12670 ((stcb->asoc.chunks_on_out_queue + stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) { 12671 /* No room right now ! 
*/ 12672 SOCKBUF_LOCK(&so->so_snd); 12673 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); 12674 while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) || 12675 ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) { 12676 SCTPDBG(SCTP_DEBUG_OUTPUT1, "pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n", 12677 (unsigned int)SCTP_SB_LIMIT_SND(so), 12678 inqueue_bytes, 12679 local_add_more, 12680 stcb->asoc.stream_queue_cnt, 12681 stcb->asoc.chunks_on_out_queue, 12682 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)); 12683 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { 12684 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, so, asoc, sndlen); 12685 } 12686 be.error = 0; 12687 stcb->block_entry = &be; 12688 error = sbwait(&so->so_snd); 12689 stcb->block_entry = NULL; 12690 if (error || so->so_error || be.error) { 12691 if (error == 0) { 12692 if (so->so_error) 12693 error = so->so_error; 12694 if (be.error) { 12695 error = be.error; 12696 } 12697 } 12698 SOCKBUF_UNLOCK(&so->so_snd); 12699 goto out_unlocked; 12700 } 12701 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { 12702 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK, 12703 so, asoc, stcb->asoc.total_output_queue_size); 12704 } 12705 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 12706 goto out_unlocked; 12707 } 12708 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); 12709 } 12710 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) { 12711 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes; 12712 } else { 12713 max_len = 0; 12714 } 12715 SOCKBUF_UNLOCK(&so->so_snd); 12716 } 12717 skip_preblock: 12718 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 12719 goto out_unlocked; 12720 } 12721 /* 12722 * sndlen covers for mbuf case uio_resid covers for the non-mbuf 12723 * case NOTE: uio 
will be null when top/mbuf is passed 12724 */ 12725 if (sndlen == 0) { 12726 if (srcv->sinfo_flags & SCTP_EOF) { 12727 got_all_of_the_send = 1; 12728 goto dataless_eof; 12729 } else { 12730 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 12731 error = EINVAL; 12732 goto out; 12733 } 12734 } 12735 if (top == NULL) { 12736 struct sctp_stream_queue_pending *sp; 12737 struct sctp_stream_out *strm; 12738 uint32_t sndout; 12739 12740 SCTP_TCB_SEND_LOCK(stcb); 12741 if ((asoc->stream_locked) && 12742 (asoc->stream_locked_on != srcv->sinfo_stream)) { 12743 SCTP_TCB_SEND_UNLOCK(stcb); 12744 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 12745 error = EINVAL; 12746 goto out; 12747 } 12748 SCTP_TCB_SEND_UNLOCK(stcb); 12749 12750 strm = &stcb->asoc.strmout[srcv->sinfo_stream]; 12751 if (strm->last_msg_incomplete == 0) { 12752 do_a_copy_in: 12753 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error, non_blocking); 12754 if ((sp == NULL) || (error)) { 12755 goto out; 12756 } 12757 SCTP_TCB_SEND_LOCK(stcb); 12758 if (sp->msg_is_complete) { 12759 strm->last_msg_incomplete = 0; 12760 asoc->stream_locked = 0; 12761 } else { 12762 /* 12763 * Just got locked to this guy in case of an 12764 * interrupt. 
12765 */ 12766 strm->last_msg_incomplete = 1; 12767 asoc->stream_locked = 1; 12768 asoc->stream_locked_on = srcv->sinfo_stream; 12769 sp->sender_all_done = 0; 12770 } 12771 sctp_snd_sb_alloc(stcb, sp->length); 12772 atomic_add_int(&asoc->stream_queue_cnt, 1); 12773 if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) { 12774 sp->strseq = strm->next_sequence_sent; 12775 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_SCTP) { 12776 sctp_misc_ints(SCTP_STRMOUT_LOG_ASSIGN, 12777 (uintptr_t) stcb, sp->length, 12778 (uint32_t) ((srcv->sinfo_stream << 16) | sp->strseq), 0); 12779 } 12780 strm->next_sequence_sent++; 12781 } else { 12782 SCTP_STAT_INCR(sctps_sends_with_unord); 12783 } 12784 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next); 12785 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1); 12786 SCTP_TCB_SEND_UNLOCK(stcb); 12787 } else { 12788 SCTP_TCB_SEND_LOCK(stcb); 12789 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead); 12790 SCTP_TCB_SEND_UNLOCK(stcb); 12791 if (sp == NULL) { 12792 /* ???? Huh ??? last msg is gone */ 12793 #ifdef INVARIANTS 12794 panic("Warning: Last msg marked incomplete, yet nothing left?"); 12795 #else 12796 SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n"); 12797 strm->last_msg_incomplete = 0; 12798 #endif 12799 goto do_a_copy_in; 12800 12801 } 12802 } 12803 while (uio->uio_resid > 0) { 12804 /* How much room do we have? 
*/ 12805 struct mbuf *new_tail, *mm; 12806 12807 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size) 12808 max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size; 12809 else 12810 max_len = 0; 12811 12812 if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) || 12813 (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) || 12814 (uio->uio_resid && (uio->uio_resid <= (int)max_len))) { 12815 sndout = 0; 12816 new_tail = NULL; 12817 if (hold_tcblock) { 12818 SCTP_TCB_UNLOCK(stcb); 12819 hold_tcblock = 0; 12820 } 12821 mm = sctp_copy_resume(sp, uio, srcv, max_len, user_marks_eor, &error, &sndout, &new_tail); 12822 if ((mm == NULL) || error) { 12823 if (mm) { 12824 sctp_m_freem(mm); 12825 } 12826 goto out; 12827 } 12828 /* Update the mbuf and count */ 12829 SCTP_TCB_SEND_LOCK(stcb); 12830 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 12831 /* 12832 * we need to get out. Peer probably 12833 * aborted. 12834 */ 12835 sctp_m_freem(mm); 12836 if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) { 12837 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET); 12838 error = ECONNRESET; 12839 } 12840 SCTP_TCB_SEND_UNLOCK(stcb); 12841 goto out; 12842 } 12843 if (sp->tail_mbuf) { 12844 /* tack it to the end */ 12845 SCTP_BUF_NEXT(sp->tail_mbuf) = mm; 12846 sp->tail_mbuf = new_tail; 12847 } else { 12848 /* A stolen mbuf */ 12849 sp->data = mm; 12850 sp->tail_mbuf = new_tail; 12851 } 12852 sctp_snd_sb_alloc(stcb, sndout); 12853 atomic_add_int(&sp->length, sndout); 12854 len += sndout; 12855 12856 /* Did we reach EOR? */ 12857 if ((uio->uio_resid == 0) && 12858 ((user_marks_eor == 0) || 12859 (srcv->sinfo_flags & SCTP_EOF) || 12860 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) { 12861 sp->msg_is_complete = 1; 12862 } else { 12863 sp->msg_is_complete = 0; 12864 } 12865 SCTP_TCB_SEND_UNLOCK(stcb); 12866 } 12867 if (uio->uio_resid == 0) { 12868 /* got it all? 
*/ 12869 continue; 12870 } 12871 /* PR-SCTP? */ 12872 if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) { 12873 /* 12874 * This is ugly but we must assure locking 12875 * order 12876 */ 12877 if (hold_tcblock == 0) { 12878 SCTP_TCB_LOCK(stcb); 12879 hold_tcblock = 1; 12880 } 12881 sctp_prune_prsctp(stcb, asoc, srcv, sndlen); 12882 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); 12883 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size) 12884 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes; 12885 else 12886 max_len = 0; 12887 if (max_len > 0) { 12888 continue; 12889 } 12890 SCTP_TCB_UNLOCK(stcb); 12891 hold_tcblock = 0; 12892 } 12893 /* wait for space now */ 12894 if (non_blocking) { 12895 /* Non-blocking io in place out */ 12896 goto skip_out_eof; 12897 } 12898 /* What about the INIT, send it maybe */ 12899 if (queue_only_for_init) { 12900 if (hold_tcblock == 0) { 12901 SCTP_TCB_LOCK(stcb); 12902 hold_tcblock = 1; 12903 } 12904 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) { 12905 /* a collision took us forward? 
*/ 12906 queue_only = 0; 12907 } else { 12908 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 12909 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT); 12910 queue_only = 1; 12911 } 12912 } 12913 if ((net->flight_size > net->cwnd) && 12914 (asoc->sctp_cmt_on_off == 0)) { 12915 SCTP_STAT_INCR(sctps_send_cwnd_avoid); 12916 queue_only = 1; 12917 } else if (asoc->ifp_had_enobuf) { 12918 SCTP_STAT_INCR(sctps_ifnomemqueued); 12919 if (net->flight_size > (2 * net->mtu)) { 12920 queue_only = 1; 12921 } 12922 asoc->ifp_had_enobuf = 0; 12923 } 12924 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 12925 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk))); 12926 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && 12927 (stcb->asoc.total_flight > 0) && 12928 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) && 12929 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) { 12930 12931 /*- 12932 * Ok, Nagle is set on and we have data outstanding. 12933 * Don't send anything and let SACKs drive out the 12934 * data unless wen have a "full" segment to send. 
12935 */ 12936 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) { 12937 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED); 12938 } 12939 SCTP_STAT_INCR(sctps_naglequeued); 12940 nagle_applies = 1; 12941 } else { 12942 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) { 12943 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) 12944 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED); 12945 } 12946 SCTP_STAT_INCR(sctps_naglesent); 12947 nagle_applies = 0; 12948 } 12949 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { 12950 12951 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only, 12952 nagle_applies, un_sent); 12953 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size, 12954 stcb->asoc.total_flight, 12955 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count); 12956 } 12957 if (queue_only_for_init) 12958 queue_only_for_init = 0; 12959 if ((queue_only == 0) && (nagle_applies == 0)) { 12960 /*- 12961 * need to start chunk output 12962 * before blocking.. note that if 12963 * a lock is already applied, then 12964 * the input via the net is happening 12965 * and I don't need to start output :-D 12966 */ 12967 if (hold_tcblock == 0) { 12968 if (SCTP_TCB_TRYLOCK(stcb)) { 12969 hold_tcblock = 1; 12970 sctp_chunk_output(inp, 12971 stcb, 12972 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); 12973 } 12974 } else { 12975 sctp_chunk_output(inp, 12976 stcb, 12977 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); 12978 } 12979 if (hold_tcblock == 1) { 12980 SCTP_TCB_UNLOCK(stcb); 12981 hold_tcblock = 0; 12982 } 12983 } 12984 SOCKBUF_LOCK(&so->so_snd); 12985 /*- 12986 * This is a bit strange, but I think it will 12987 * work. The total_output_queue_size is locked and 12988 * protected by the TCB_LOCK, which we just released. 
12989 * There is a race that can occur between releasing it 12990 * above, and me getting the socket lock, where sacks 12991 * come in but we have not put the SB_WAIT on the 12992 * so_snd buffer to get the wakeup. After the LOCK 12993 * is applied the sack_processing will also need to 12994 * LOCK the so->so_snd to do the actual sowwakeup(). So 12995 * once we have the socket buffer lock if we recheck the 12996 * size we KNOW we will get to sleep safely with the 12997 * wakeup flag in place. 12998 */ 12999 if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size + 13000 min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) { 13001 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { 13002 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK, 13003 so, asoc, uio->uio_resid); 13004 } 13005 be.error = 0; 13006 stcb->block_entry = &be; 13007 error = sbwait(&so->so_snd); 13008 stcb->block_entry = NULL; 13009 13010 if (error || so->so_error || be.error) { 13011 if (error == 0) { 13012 if (so->so_error) 13013 error = so->so_error; 13014 if (be.error) { 13015 error = be.error; 13016 } 13017 } 13018 SOCKBUF_UNLOCK(&so->so_snd); 13019 goto out_unlocked; 13020 } 13021 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { 13022 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK, 13023 so, asoc, stcb->asoc.total_output_queue_size); 13024 } 13025 } 13026 SOCKBUF_UNLOCK(&so->so_snd); 13027 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 13028 goto out_unlocked; 13029 } 13030 } 13031 SCTP_TCB_SEND_LOCK(stcb); 13032 if (sp) { 13033 if (sp->msg_is_complete == 0) { 13034 strm->last_msg_incomplete = 1; 13035 asoc->stream_locked = 1; 13036 asoc->stream_locked_on = srcv->sinfo_stream; 13037 } else { 13038 sp->sender_all_done = 1; 13039 strm->last_msg_incomplete = 0; 13040 asoc->stream_locked = 0; 13041 } 13042 } else { 13043 SCTP_PRINTF("Huh no sp TSNH?\n"); 13044 strm->last_msg_incomplete = 0; 13045 asoc->stream_locked = 0; 13046 } 13047 
SCTP_TCB_SEND_UNLOCK(stcb); 13048 if (uio->uio_resid == 0) { 13049 got_all_of_the_send = 1; 13050 } 13051 } else { 13052 /* We send in a 0, since we do NOT have any locks */ 13053 error = sctp_msg_append(stcb, net, top, srcv, 0); 13054 top = NULL; 13055 if (srcv->sinfo_flags & SCTP_EOF) { 13056 /* 13057 * This should only happen for Panda for the mbuf 13058 * send case, which does NOT yet support EEOR mode. 13059 * Thus, we can just set this flag to do the proper 13060 * EOF handling. 13061 */ 13062 got_all_of_the_send = 1; 13063 } 13064 } 13065 if (error) { 13066 goto out; 13067 } 13068 dataless_eof: 13069 /* EOF thing ? */ 13070 if ((srcv->sinfo_flags & SCTP_EOF) && 13071 (got_all_of_the_send == 1) && 13072 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) { 13073 int cnt; 13074 13075 SCTP_STAT_INCR(sctps_sends_with_eof); 13076 error = 0; 13077 if (hold_tcblock == 0) { 13078 SCTP_TCB_LOCK(stcb); 13079 hold_tcblock = 1; 13080 } 13081 cnt = sctp_is_there_unsent_data(stcb); 13082 if (TAILQ_EMPTY(&asoc->send_queue) && 13083 TAILQ_EMPTY(&asoc->sent_queue) && 13084 (cnt == 0)) { 13085 if (asoc->locked_on_sending) { 13086 goto abort_anyway; 13087 } 13088 /* there is nothing queued to send, so I'm done... 
*/ 13089 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 13090 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 13091 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 13092 /* only send SHUTDOWN the first time through */ 13093 sctp_send_shutdown(stcb, stcb->asoc.primary_destination); 13094 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 13095 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 13096 } 13097 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 13098 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 13099 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, 13100 asoc->primary_destination); 13101 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 13102 asoc->primary_destination); 13103 } 13104 } else { 13105 /*- 13106 * we still got (or just got) data to send, so set 13107 * SHUTDOWN_PENDING 13108 */ 13109 /*- 13110 * XXX sockets draft says that SCTP_EOF should be 13111 * sent with no data. currently, we will allow user 13112 * data to be sent first and move to 13113 * SHUTDOWN-PENDING 13114 */ 13115 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 13116 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 13117 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 13118 if (hold_tcblock == 0) { 13119 SCTP_TCB_LOCK(stcb); 13120 hold_tcblock = 1; 13121 } 13122 if (asoc->locked_on_sending) { 13123 /* Locked to send out the data */ 13124 struct sctp_stream_queue_pending *sp; 13125 13126 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); 13127 if (sp) { 13128 if ((sp->length == 0) && (sp->msg_is_complete == 0)) 13129 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 13130 } 13131 } 13132 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; 13133 if (TAILQ_EMPTY(&asoc->send_queue) && 13134 TAILQ_EMPTY(&asoc->sent_queue) && 13135 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 13136 abort_anyway: 13137 if (free_cnt_applied) { 13138 atomic_add_int(&stcb->asoc.refcnt, -1); 13139 
free_cnt_applied = 0; 13140 } 13141 sctp_abort_an_association(stcb->sctp_ep, stcb, 13142 SCTP_RESPONSE_TO_USER_REQ, 13143 NULL, SCTP_SO_LOCKED); 13144 /* 13145 * now relock the stcb so everything 13146 * is sane 13147 */ 13148 hold_tcblock = 0; 13149 stcb = NULL; 13150 goto out; 13151 } 13152 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 13153 asoc->primary_destination); 13154 sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY); 13155 } 13156 } 13157 } 13158 skip_out_eof: 13159 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) { 13160 some_on_control = 1; 13161 } 13162 if (queue_only_for_init) { 13163 if (hold_tcblock == 0) { 13164 SCTP_TCB_LOCK(stcb); 13165 hold_tcblock = 1; 13166 } 13167 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) { 13168 /* a collision took us forward? */ 13169 queue_only = 0; 13170 } else { 13171 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 13172 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT); 13173 queue_only = 1; 13174 } 13175 } 13176 if ((net->flight_size > net->cwnd) && 13177 (stcb->asoc.sctp_cmt_on_off == 0)) { 13178 SCTP_STAT_INCR(sctps_send_cwnd_avoid); 13179 queue_only = 1; 13180 } else if (asoc->ifp_had_enobuf) { 13181 SCTP_STAT_INCR(sctps_ifnomemqueued); 13182 if (net->flight_size > (2 * net->mtu)) { 13183 queue_only = 1; 13184 } 13185 asoc->ifp_had_enobuf = 0; 13186 } 13187 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 13188 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk))); 13189 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && 13190 (stcb->asoc.total_flight > 0) && 13191 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) && 13192 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) { 13193 /*- 13194 * Ok, Nagle is set on and we have data outstanding. 13195 * Don't send anything and let SACKs drive out the 13196 * data unless wen have a "full" segment to send. 
13197 */ 13198 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) { 13199 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED); 13200 } 13201 SCTP_STAT_INCR(sctps_naglequeued); 13202 nagle_applies = 1; 13203 } else { 13204 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) { 13205 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) 13206 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED); 13207 } 13208 SCTP_STAT_INCR(sctps_naglesent); 13209 nagle_applies = 0; 13210 } 13211 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { 13212 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only, 13213 nagle_applies, un_sent); 13214 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size, 13215 stcb->asoc.total_flight, 13216 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count); 13217 } 13218 if (queue_only_for_init) 13219 queue_only_for_init = 0; 13220 if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) { 13221 /* we can attempt to send too. 
*/ 13222 if (hold_tcblock == 0) { 13223 /* 13224 * If there is activity recv'ing sacks no need to 13225 * send 13226 */ 13227 if (SCTP_TCB_TRYLOCK(stcb)) { 13228 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); 13229 hold_tcblock = 1; 13230 } 13231 } else { 13232 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); 13233 } 13234 } else if ((queue_only == 0) && 13235 (stcb->asoc.peers_rwnd == 0) && 13236 (stcb->asoc.total_flight == 0)) { 13237 /* We get to have a probe outstanding */ 13238 if (hold_tcblock == 0) { 13239 hold_tcblock = 1; 13240 SCTP_TCB_LOCK(stcb); 13241 } 13242 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); 13243 } else if (some_on_control) { 13244 int num_out, reason, frag_point; 13245 13246 /* Here we do control only */ 13247 if (hold_tcblock == 0) { 13248 hold_tcblock = 1; 13249 SCTP_TCB_LOCK(stcb); 13250 } 13251 frag_point = sctp_get_frag_point(stcb, &stcb->asoc); 13252 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out, 13253 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED); 13254 } 13255 SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n", 13256 queue_only, stcb->asoc.peers_rwnd, un_sent, 13257 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue, 13258 stcb->asoc.total_output_queue_size, error); 13259 13260 out: 13261 out_unlocked: 13262 13263 if (local_soresv && stcb) { 13264 atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen); 13265 local_soresv = 0; 13266 } 13267 if (create_lock_applied) { 13268 SCTP_ASOC_CREATE_UNLOCK(inp); 13269 create_lock_applied = 0; 13270 } 13271 if ((stcb) && hold_tcblock) { 13272 SCTP_TCB_UNLOCK(stcb); 13273 } 13274 if (stcb && free_cnt_applied) { 13275 atomic_add_int(&stcb->asoc.refcnt, -1); 13276 } 13277 #ifdef INVARIANTS 13278 if (stcb) { 13279 if (mtx_owned(&stcb->tcb_mtx)) { 13280 panic("Leaving with tcb mtx owned?"); 13281 } 13282 if 
(mtx_owned(&stcb->tcb_send_mtx)) {
			panic("Leaving with tcb send mtx owned?");
		}
	}
#endif
#ifdef INVARIANTS
	if (inp) {
		sctp_validate_no_locks(inp);
	} else {
		printf("Warning - inp is NULL so cant validate locks\n");
	}
#endif
	/* Free any caller-supplied chains we did not hand off to the stack. */
	if (top) {
		sctp_m_freem(top);
	}
	if (control) {
		sctp_m_freem(control);
	}
	return (error);
}


/*
 * Generate an AUTHentication chunk, if required, and append it to the
 * mbuf chain 'm' (which may be NULL, meaning the AUTH chunk starts a new
 * chain).
 *
 * m        - head of the outgoing chunk chain (may be NULL)
 * m_end    - in/out pointer to the tail of the chain, updated by
 *            sctp_copy_mbufchain()
 * auth_ret - on success, set to point at the AUTH chunk header so the
 *            caller can fill in key id / digest later
 * offset   - on success, set to the byte offset of the AUTH chunk within
 *            the chain (i.e. the total length of 'm' before the append)
 * stcb     - association; supplies the negotiated peer HMAC id and the
 *            peer's set of chunks requiring authentication
 * chunk    - the chunk type about to be sent
 *
 * Returns the (possibly new) head of the chain.  If auth is disabled,
 * unsupported by the peer, not required for this chunk type, or an mbuf
 * cannot be allocated, the chain is returned unchanged.  Note that the
 * HMAC digest itself is NOT computed here; only the chunk header is laid
 * out, and the digest is filled in at send time.
 */
struct mbuf *
sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
    struct sctp_auth_chunk **auth_ret, uint32_t * offset,
    struct sctp_tcb *stcb, uint8_t chunk)
{
	struct mbuf *m_auth;
	struct sctp_auth_chunk *auth;
	int chunk_len;

	/* Guard against callers passing NULL out-parameters. */
	if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
	    (stcb == NULL))
		return (m);

	/* sysctl disabled auth? */
	if (SCTP_BASE_SYSCTL(sctp_auth_disable))
		return (m);

	/* peer doesn't do auth... */
	if (!stcb->asoc.peer_supports_auth) {
		return (m);
	}
	/* does the requested chunk require auth? */
	if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
		return (m);
	}
	m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_DONTWAIT, 1, MT_HEADER);
	if (m_auth == NULL) {
		/* no mbuf's: send unauthenticated rather than fail */
		return (m);
	}
	/* reserve some space if this will be the first mbuf in the chain */
	if (m == NULL)
		SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
	/* fill in the AUTH chunk details */
	auth = mtod(m_auth, struct sctp_auth_chunk *);
	bzero(auth, sizeof(*auth));
	auth->ch.chunk_type = SCTP_AUTHENTICATION;
	auth->ch.chunk_flags = 0;
	/* chunk length covers the header plus the digest for the peer's HMAC */
	chunk_len = sizeof(*auth) +
	    sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
	auth->ch.chunk_length = htons(chunk_len);
	auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
	/* key id and hmac digest will be computed and filled in upon send */

	/*
	 * save the offset where the auth was inserted into the chain: the
	 * AUTH chunk is appended, so its offset is the current total length
	 * of the existing chain.
	 */
	if (m != NULL) {
		struct mbuf *cn;

		*offset = 0;
		cn = m;
		while (cn) {
			*offset += SCTP_BUF_LEN(cn);
			cn = SCTP_BUF_NEXT(cn);
		}
	} else
		*offset = 0;

	/* update length and return pointer to the auth chunk */
	SCTP_BUF_LEN(m_auth) = chunk_len;
	m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
	if (auth_ret != NULL)	/* always true here; checked on entry */
		*auth_ret = auth;

	return (m);
}

#ifdef INET6
/*
 * Check whether the next hop of route 'ro' is one of the routers that
 * advertised the on-link prefix covering IPv6 source address 'src6'
 * (i.e. the chosen source address and the route's gateway agree, per the
 * ND prefix/default-router lists).  Returns 1 if the route's gateway is
 * an advertising router for the address's prefix, 0 otherwise (including
 * when no matching, attached prefix entry exists).
 */
int
sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t * ro)
{
	struct nd_prefix *pfx = NULL;
	struct nd_pfxrouter *pfxrtr = NULL;
	struct sockaddr_in6 gw6;

	if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
		return (0);

	/* get prefix entry of address */
	LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
		/* skip prefixes with no reachable advertising router */
		if (pfx->ndpr_stateflags & NDPRF_DETACHED)
			continue;
		if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
		    &src6->sin6_addr, &pfx->ndpr_mask))
			break;
	}
	/* no prefix entry in the prefix list */
	if (pfx == NULL) {
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
		return (0);
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);

	/* search installed gateway from prefix entry */
	for (pfxrtr = pfx->ndpr_advrtrs.lh_first; pfxrtr; pfxrtr =
	    pfxrtr->pfr_next) {
		/* build a sockaddr_in6 for the advertising router's address */
		memset(&gw6, 0, sizeof(struct sockaddr_in6));
		gw6.sin6_family = AF_INET6;
		gw6.sin6_len = sizeof(struct sockaddr_in6);
		memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
		    sizeof(struct in6_addr));
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
		if (sctp_cmpaddr((struct sockaddr *)&gw6,
		    ro->ro_rt->rt_gateway)) {
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
			return (1);
		}
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
	return (0);
}

#endif

/*
 * Check whether the IPv4 source interface address 'sifa' and the next
 * hop (gateway) of route 'ro' are on the same subnet, using the source
 * interface's netmask for both comparisons.  Returns 1 when the masked
 * network portions match, 0 otherwise (or when the route/gateway is
 * missing or the address is not AF_INET).
 */
int
sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t * ro)
{
	struct sockaddr_in *sin, *mask;
	struct ifaddr *ifa;
	struct in_addr srcnetaddr, gwnetaddr;

	if (ro == NULL || ro->ro_rt == NULL ||
	    sifa->address.sa.sa_family != AF_INET) {
		return (0);
	}
	ifa = (struct ifaddr *)sifa->ifa;
	mask = (struct sockaddr_in *)(ifa->ifa_netmask);
	sin = (struct sockaddr_in *)&sifa->address.sin;
	/* network part of the source address, under the ifa's netmask */
	srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);

	/*
	 * network part of the gateway address, masked with the SOURCE
	 * interface's netmask (the gateway's own mask is not consulted)
	 */
	sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
	gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
	if (srcnetaddr.s_addr == gwnetaddr.s_addr) {
		return (1);
	}
	return (0);
}