// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright Red Hat Inc. 2017
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions implement SCTP stream message interleaving, mostly
 * the processing of I-DATA and I-FORWARD-TSN chunks.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Xin Long <lucien.xin@gmail.com>
 */

#include <net/busy_poll.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/ulpevent.h>
#include <linux/sctp.h>

static struct sctp_chunk *sctp_make_idatafrag_empty(
					const struct sctp_association *asoc,
					const struct sctp_sndrcvinfo *sinfo,
					int len, __u8 flags, gfp_t gfp)
{
	struct sctp_chunk *retval;
	struct sctp_idatahdr dp;

	memset(&dp, 0, sizeof(dp));
	dp.stream = htons(sinfo->sinfo_stream);

	if (sinfo->sinfo_flags & SCTP_UNORDERED)
		flags |= SCTP_DATA_UNORDERED;

	retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp);
	if (!retval)
		return NULL;

	retval->subh.idata_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
	memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));

	return retval;
}
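
/* Assign a Message Identifier (MID) to every fragment of a message, a
 * PPID to the first fragment and a Fragment Sequence Number (FSN) to
 * the rest.  Non-final fragments only peek at the stream's MID counter
 * so that all fragments carry the same value; the last fragment is the
 * one that advances it.
 */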
static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	struct sctp_chunk *lchunk;
	__u32 cfsn = 0;
	__u16 sid;

	if (chunk->has_mid)
		return;

	sid = sctp_chunk_stream_no(chunk);
	stream = &chunk->asoc->stream;

	list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) {
		struct sctp_idatahdr *hdr;
		__u32 mid;

		lchunk->has_mid = 1;

		hdr = lchunk->subh.idata_hdr;

		if (lchunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG)
			hdr->ppid = lchunk->sinfo.sinfo_ppid;
		else
			hdr->fsn = htonl(cfsn++);

		if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
				sctp_mid_uo_next(stream, out, sid) :
				sctp_mid_uo_peek(stream, out, sid);
		} else {
			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
				sctp_mid_next(stream, out, sid) :
				sctp_mid_peek(stream, out, sid);
		}
		hdr->mid = htonl(mid);
	}
}

static bool sctp_validate_data(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	__u16 sid, ssn;

	if (chunk->chunk_hdr->type != SCTP_CID_DATA)
		return false;

	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
		return true;

	stream = &chunk->asoc->stream;
	sid = sctp_chunk_stream_no(chunk);
	ssn = ntohs(chunk->subh.data_hdr->ssn);

	return !SSN_lt(ssn, sctp_ssn_peek(stream, in, sid));
}

static bool sctp_validate_idata(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	__u32 mid;
	__u16 sid;

	if (chunk->chunk_hdr->type != SCTP_CID_I_DATA)
		return false;

	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
		return true;

	stream = &chunk->asoc->stream;
	sid = sctp_chunk_stream_no(chunk);
	mid = ntohl(chunk->subh.idata_hdr->mid);

	return !MID_lt(mid, sctp_mid_peek(stream, in, sid));
}

static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos, *loc;

	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	cevent = sctp_skb2event(pos);

	if (event->stream == cevent->stream &&
	    event->mid == cevent->mid &&
	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
	      event->fsn > cevent->fsn))) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	if ((event->stream == cevent->stream &&
	     MID_lt(cevent->mid, event->mid)) ||
	    event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	loc = NULL;
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);

		if (event->stream < cevent->stream ||
		    (event->stream == cevent->stream &&
		     MID_lt(event->mid, cevent->mid))) {
			loc = pos;
			break;
		}
		if (event->stream == cevent->stream &&
		    event->mid == cevent->mid &&
		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
		     event->fsn < cevent->fsn)) {
			loc = pos;
			break;
		}
	}

	if (!loc)
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
	else
		__skb_queue_before(&ulpq->reasm, loc, sctp_event2skb(event));
}
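
/* Pull further in-sequence fragments of the message currently being
 * partially delivered on this stream (sin->mid, starting at sin->fsn)
 * out of the reassembly queue, stopping at the first gap, at a new
 * first fragment, or after the final fragment.
 */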
static struct sctp_ulpevent *sctp_intl_retrieve_partial(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sctp_stream_in *sin;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	int is_last = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;

		if (cevent->stream > event->stream ||
		    cevent->mid != sin->mid)
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			goto out;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = cevent->fsn + 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn++;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = 0;
					is_last = 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn = 0;
				is_last = 1;
			}
			goto out;
		default:
			goto out;
		}
	}

out:
	if (!first_frag)
		return NULL;

	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn = next_fsn;
		if (is_last) {
			retval->msg_flags |= MSG_EOR;
			sin->pd_mode = 0;
		}
	}

	return retval;
}

static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_association *asoc = ulpq->asoc;
	struct sk_buff *pos, *first_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	struct sctp_stream_in *sin;
	__u32 next_fsn = 0;
	__u32 pd_point = 0;
	__u32 pd_len = 0;
	__u32 mid = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, event->mid))
			continue;
		if (MID_lt(event->mid, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (cevent->mid == sin->mid) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			}

			first_frag = pos;
			next_fsn = 0;
			mid = cevent->mid;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else {
				first_frag = NULL;
			}
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn)
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	if (!pd_first)
		goto out;

	pd_point = sctp_sk(asoc->base.sk)->pd_point;
	if (pd_point && pd_point <= pd_len) {
		retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
						     &ulpq->reasm,
						     pd_first, pd_last);
		if (retval) {
			sin->fsn = next_fsn;
			sin->pd_mode = 1;
		}
	}
	goto out;

found:
	retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
					     &ulpq->reasm,
					     first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;

out:
	return retval;
}

static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;
	struct sctp_stream_in *sin;

	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_intl_store_reasm(ulpq, event);

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
	if (sin->pd_mode && event->mid == sin->mid &&
	    event->fsn == sin->fsn)
		retval = sctp_intl_retrieve_partial(ulpq, event);

	if (!retval)
		retval = sctp_intl_retrieve_reassembled(ulpq, event);

	return retval;
}
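
/* Insert an ordered event that cannot be delivered yet into the lobby,
 * kept sorted by stream id first and by MID within each stream.
 */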
static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos, *loc;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	cevent = (struct sctp_ulpevent *)pos->cb;
	if (event->stream == cevent->stream &&
	    MID_lt(cevent->mid, event->mid)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if (event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	loc = NULL;
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *)pos->cb;

		if (cevent->stream > event->stream) {
			loc = pos;
			break;
		}
		if (cevent->stream == event->stream &&
		    MID_lt(event->mid, cevent->mid)) {
			loc = pos;
			break;
		}
	}

	if (!loc)
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
	else
		__skb_queue_before(&ulpq->lobby, loc, sctp_event2skb(event));
}

static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sctp_stream *stream;
	struct sk_buff *pos, *tmp;
	__u16 sid = event->stream;

	stream = &ulpq->asoc->stream;
	event_list = (struct sk_buff_head *)sctp_event2skb(event)->prev;

	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		struct sctp_ulpevent *cevent = (struct sctp_ulpevent *)pos->cb;

		if (cevent->stream > sid)
			break;

		if (cevent->stream < sid)
			continue;

		if (cevent->mid != sctp_mid_peek(stream, in, sid))
			break;

		sctp_mid_next(stream, in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		__skb_queue_tail(event_list, pos);
	}
}

static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_stream *stream;
	__u16 sid;

	stream = &ulpq->asoc->stream;
	sid = event->stream;

	if (event->mid != sctp_mid_peek(stream, in, sid)) {
		sctp_intl_store_ordered(ulpq, event);
		return NULL;
	}

	sctp_mid_next(stream, in, sid);

	sctp_intl_retrieve_ordered(ulpq, event);

	return event;
}

static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
			      struct sk_buff_head *skb_list)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_ulpevent *event;
	struct sk_buff *skb;

	skb = __skb_peek(skb_list);
	event = sctp_skb2event(skb);

	if (sk->sk_shutdown & RCV_SHUTDOWN &&
	    (sk->sk_shutdown & SEND_SHUTDOWN ||
	     !sctp_ulpevent_is_notification(event)))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event)) {
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}

	if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
		goto out_free;

	if (skb_list)
		skb_queue_splice_tail_init(skb_list,
					   &sk->sk_receive_queue);
	else
		__skb_queue_tail(&sk->sk_receive_queue, skb);

	if (!sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}

	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}
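
/* Unordered counterpart of sctp_intl_store_reasm(): queue the fragment
 * into the unordered reassembly queue, kept sorted by stream, MID and
 * FSN.
 */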
static void sctp_intl_store_reasm_uo(struct sctp_ulpq *ulpq,
				     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos;

	pos = skb_peek_tail(&ulpq->reasm_uo);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	cevent = sctp_skb2event(pos);

	if (event->stream == cevent->stream &&
	    event->mid == cevent->mid &&
	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
	      event->fsn > cevent->fsn))) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	if ((event->stream == cevent->stream &&
	     MID_lt(cevent->mid, event->mid)) ||
	    event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		cevent = sctp_skb2event(pos);

		if (event->stream < cevent->stream ||
		    (event->stream == cevent->stream &&
		     MID_lt(event->mid, cevent->mid)))
			break;

		if (event->stream == cevent->stream &&
		    event->mid == cevent->mid &&
		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
		     event->fsn < cevent->fsn))
			break;
	}

	__skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event));
}

static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sctp_stream_in *sin;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	int is_last = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, sin->mid_uo))
			continue;
		if (MID_lt(sin->mid_uo, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			goto out;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn_uo) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = cevent->fsn + 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn++;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn_uo) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = 0;
					is_last = 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn = 0;
				is_last = 1;
			}
			goto out;
		default:
			goto out;
		}
	}

out:
	if (!first_frag)
		return NULL;

	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm_uo, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn_uo = next_fsn;
		if (is_last) {
			retval->msg_flags |= MSG_EOR;
			sin->pd_mode_uo = 0;
		}
	}

	return retval;
}
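
/* Try to reassemble a complete unordered message around the new
 * fragment; if none is complete, consider starting unordered partial
 * delivery once at least pd_point bytes are queued in sequence from
 * the first fragment.
 */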
static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_association *asoc = ulpq->asoc;
	struct sk_buff *pos, *first_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	struct sctp_stream_in *sin;
	__u32 next_fsn = 0;
	__u32 pd_point = 0;
	__u32 pd_len = 0;
	__u32 mid = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, event->mid))
			continue;
		if (MID_lt(event->mid, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!sin->pd_mode_uo) {
				sin->mid_uo = cevent->mid;
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			}

			first_frag = pos;
			next_fsn = 0;
			mid = cevent->mid;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else {
				first_frag = NULL;
			}
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn)
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	if (!pd_first)
		goto out;

	pd_point = sctp_sk(asoc->base.sk)->pd_point;
	if (pd_point && pd_point <= pd_len) {
		retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
						     &ulpq->reasm_uo,
						     pd_first, pd_last);
		if (retval) {
			sin->fsn_uo = next_fsn;
			sin->pd_mode_uo = 1;
		}
	}
	goto out;

found:
	retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
					     &ulpq->reasm_uo,
					     first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;

out:
	return retval;
}

static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;
	struct sctp_stream_in *sin;

	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_intl_store_reasm_uo(ulpq, event);

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
	if (sin->pd_mode_uo && event->mid == sin->mid_uo &&
	    event->fsn == sin->fsn_uo)
		retval = sctp_intl_retrieve_partial_uo(ulpq, event);

	if (!retval)
		retval = sctp_intl_retrieve_reassembled_uo(ulpq, event);

	return retval;
}
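
/* Kick off unordered partial delivery: take the first stream that is
 * not already in unordered PD mode and has a first fragment followed
 * by a run of in-sequence middle fragments, deliver that run and mark
 * the stream as being in unordered PD mode.
 */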
static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
{
	struct sctp_stream_in *csin, *sin = NULL;
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	__u16 sid = 0;

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
		if (csin->pd_mode_uo)
			continue;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (first_frag)
				goto out;
			first_frag = pos;
			last_frag = pos;
			next_fsn = 0;
			sin = csin;
			sid = cevent->stream;
			sin->mid_uo = cevent->mid;
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				break;
			if (cevent->stream == sid &&
			    cevent->mid == sin->mid_uo &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				last_frag = pos;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (first_frag)
				goto out;
			break;
		default:
			break;
		}
	}

	if (!first_frag)
		return NULL;

out:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm_uo, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn_uo = next_fsn;
		sin->pd_mode_uo = 1;
	}

	return retval;
}

static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
			       struct sctp_chunk *chunk, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sk_buff_head temp;
	int event_eor = 0;

	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	event->mid = ntohl(chunk->subh.idata_hdr->mid);
	if (event->msg_flags & SCTP_DATA_FIRST_FRAG)
		event->ppid = chunk->subh.idata_hdr->ppid;
	else
		event->fsn = ntohl(chunk->subh.idata_hdr->fsn);

	if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
		event = sctp_intl_reasm(ulpq, event);
		if (event) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			if (event->msg_flags & MSG_EOR)
				event = sctp_intl_order(ulpq, event);
		}
	} else {
		event = sctp_intl_reasm_uo(ulpq, event);
		if (event) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));
		}
	}

	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_enqueue_event(ulpq, &temp);
	}

	return event_eor;
}

static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sctp_stream_in *csin, *sin = NULL;
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	__u16 sid = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
		if (csin->pd_mode)
			continue;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (first_frag)
				goto out;
			if (cevent->mid == csin->mid) {
				first_frag = pos;
				last_frag = pos;
				next_fsn = 0;
				sin = csin;
				sid = cevent->stream;
			}
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				break;
			if (cevent->stream == sid &&
			    cevent->mid == sin->mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				last_frag = pos;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (first_frag)
				goto out;
			break;
		default:
			break;
		}
	}

	if (!first_frag)
		return NULL;

out:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn = next_fsn;
		sin->pd_mode = 1;
	}

	return retval;
}

static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sk_buff_head temp;

	if (!skb_queue_empty(&ulpq->reasm)) {
		do {
			event = sctp_intl_retrieve_first(ulpq);
			if (event) {
				skb_queue_head_init(&temp);
				__skb_queue_tail(&temp, sctp_event2skb(event));
				sctp_enqueue_event(ulpq, &temp);
			}
		} while (event);
	}

	if (!skb_queue_empty(&ulpq->reasm_uo)) {
		do {
			event = sctp_intl_retrieve_first_uo(ulpq);
			if (event) {
				skb_queue_head_init(&temp);
				__skb_queue_tail(&temp, sctp_event2skb(event));
				sctp_enqueue_event(ulpq, &temp);
			}
		} while (event);
	}
}
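
/* An incoming I-DATA chunk may need events already held in the lobby
 * and reassembly queues to be reneged to free enough memory; if so,
 * renege, process the chunk, and start partial delivery when it did
 * not complete a message.
 */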
static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			       gfp_t gfp)
{
	struct sctp_association *asoc = ulpq->asoc;
	__u32 freed = 0;
	__u16 needed;

	needed = ntohs(chunk->chunk_hdr->length) -
		 sizeof(struct sctp_idata_chunk);

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm,
						       needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm_uo,
						       needed);
	}

	if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
		sctp_intl_start_pd(ulpq, gfp);

	sk_mem_reclaim(asoc->base.sk);
}

static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
				      __u32 mid, __u16 flags, gfp_t gfp)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_ulpevent *ev = NULL;

	if (!sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
					SCTP_PARTIAL_DELIVERY_EVENT))
		return;

	ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
				      sid, mid, flags, gfp);
	if (ev) {
		struct sctp_sock *sp = sctp_sk(sk);

		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

		if (!sp->data_ready_signalled) {
			sp->data_ready_signalled = 1;
			sk->sk_data_ready(sk);
		}
	}
}

static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	struct sctp_ulpevent *cevent, *event = NULL;
	struct sk_buff_head *lobby = &ulpq->lobby;
	struct sk_buff *pos, *tmp;
	struct sk_buff_head temp;
	__u16 csid;
	__u32 cmid;

	skb_queue_head_init(&temp);
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		if (csid > sid)
			break;

		if (csid < sid)
			continue;

		if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			event = sctp_skb2event(pos);

		__skb_queue_tail(&temp, pos);
	}

	if (!event && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) {
			sctp_mid_next(stream, in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	if (event) {
		sctp_intl_retrieve_ordered(ulpq, event);
		sctp_enqueue_event(ulpq, &temp);
	}
}

static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	__u16 sid;

	for (sid = 0; sid < stream->incnt; sid++) {
		struct sctp_stream_in *sin = SCTP_SI(stream, sid);
		__u32 mid;

		if (sin->pd_mode_uo) {
			sin->pd_mode_uo = 0;

			mid = sin->mid_uo;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp);
		}

		if (sin->pd_mode) {
			sin->pd_mode = 0;

			mid = sin->mid;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
			sctp_mid_skip(stream, in, sid, mid);

			sctp_intl_reap_ordered(ulpq, sid);
		}
	}

	/* intl abort pd happens only when all data needs to be cleaned */
	sctp_ulpq_flush(ulpq);
}
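
/* Find the slot in the I-FORWARD-TSN skip list that already matches
 * this stream and flags; returns nskips when there is no match, i.e.
 * the next free slot.
 */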
static inline int sctp_get_skip_pos(struct sctp_ifwdtsn_skip *skiplist,
				    int nskips, __be16 stream, __u8 flags)
{
	int i;

	for (i = 0; i < nskips; i++)
		if (skiplist[i].stream == stream &&
		    skiplist[i].flags == flags)
			return i;

	return i;
}

#define SCTP_FTSN_U_BIT	0x1
static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
{
	struct sctp_ifwdtsn_skip ftsn_skip_arr[10];
	struct sctp_association *asoc = q->asoc;
	struct sctp_chunk *ftsn_chunk = NULL;
	struct list_head *lchunk, *temp;
	int nskips = 0, skip_pos;
	struct sctp_chunk *chunk;
	__u32 tsn;

	if (!asoc->peer.prsctp_capable)
		return;

	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
		asoc->adv_peer_ack_point = ctsn;

	list_for_each_safe(lchunk, temp, &q->abandoned) {
		chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		if (TSN_lte(tsn, ctsn)) {
			list_del_init(lchunk);
			sctp_chunk_free(chunk);
		} else if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
			__be16 sid = chunk->subh.idata_hdr->stream;
			__be32 mid = chunk->subh.idata_hdr->mid;
			__u8 flags = 0;

			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
				flags |= SCTP_FTSN_U_BIT;

			asoc->adv_peer_ack_point = tsn;
			skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], nskips,
						     sid, flags);
			ftsn_skip_arr[skip_pos].stream = sid;
			ftsn_skip_arr[skip_pos].reserved = 0;
			ftsn_skip_arr[skip_pos].flags = flags;
			ftsn_skip_arr[skip_pos].mid = mid;
			if (skip_pos == nskips)
				nskips++;
			if (nskips == 10)
				break;
		} else {
			break;
		}
	}

	if (asoc->adv_peer_ack_point > ctsn)
		ftsn_chunk = sctp_make_ifwdtsn(asoc, asoc->adv_peer_ack_point,
					       nskips, &ftsn_skip_arr[0]);

	if (ftsn_chunk) {
		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
	}
}

#define _sctp_walk_ifwdtsn(pos, chunk, end) \
	for (pos = chunk->subh.ifwdtsn_hdr->skip; \
	     (void *)pos < (void *)chunk->subh.ifwdtsn_hdr->skip + (end); pos++)

#define sctp_walk_ifwdtsn(pos, ch) \
	_sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \
					sizeof(struct sctp_ifwdtsn_chunk))

static bool sctp_validate_fwdtsn(struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;
	__u16 incnt;

	if (chunk->chunk_hdr->type != SCTP_CID_FWD_TSN)
		return false;

	incnt = chunk->asoc->stream.incnt;
	sctp_walk_fwdtsn(skip, chunk)
		if (ntohs(skip->stream) >= incnt)
			return false;

	return true;
}

static bool sctp_validate_iftsn(struct sctp_chunk *chunk)
{
	struct sctp_ifwdtsn_skip *skip;
	__u16 incnt;

	if (chunk->chunk_hdr->type != SCTP_CID_I_FWD_TSN)
		return false;

	incnt = chunk->asoc->stream.incnt;
	sctp_walk_ifwdtsn(skip, chunk)
		if (ntohs(skip->stream) >= incnt)
			return false;

	return true;
}
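
/* Process a peer FORWARD-TSN: everything up to the new cumulative TSN
 * ack point is gone, so the reassembly state and any partial delivery
 * it covered must be cleaned up as well.
 */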
static void sctp_report_fwdtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	/* Move the Cumulative TSN Ack ahead. */
	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
	/* purge the fragmentation queue */
	sctp_ulpq_reasm_flushtsn(ulpq, ftsn);
	/* Abort any in-progress partial delivery. */
	sctp_ulpq_abort_pd(ulpq, GFP_ATOMIC);
}

static void sctp_intl_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	struct sk_buff *pos, *tmp;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		struct sctp_ulpevent *event = sctp_skb2event(pos);
		__u32 tsn = event->tsn;

		if (TSN_lte(tsn, ftsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		}
	}

	skb_queue_walk_safe(&ulpq->reasm_uo, pos, tmp) {
		struct sctp_ulpevent *event = sctp_skb2event(pos);
		__u32 tsn = event->tsn;

		if (TSN_lte(tsn, ftsn)) {
			__skb_unlink(pos, &ulpq->reasm_uo);
			sctp_ulpevent_free(event);
		}
	}
}

static void sctp_report_iftsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	/* Move the Cumulative TSN Ack ahead. */
	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
	/* purge the fragmentation queue */
	sctp_intl_reasm_flushtsn(ulpq, ftsn);
	/* abort only when it's for all data */
	if (ftsn == sctp_tsnmap_get_max_tsn_seen(&ulpq->asoc->peer.tsn_map))
		sctp_intl_abort_pd(ulpq, GFP_ATOMIC);
}

static void sctp_handle_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;

	/* Walk through all the skipped SSNs */
	sctp_walk_fwdtsn(skip, chunk)
		sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
}

static void sctp_intl_skip(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid,
			   __u8 flags)
{
	struct sctp_stream_in *sin = sctp_stream_in(&ulpq->asoc->stream, sid);
	struct sctp_stream *stream = &ulpq->asoc->stream;

	if (flags & SCTP_FTSN_U_BIT) {
		if (sin->pd_mode_uo && MID_lt(sin->mid_uo, mid)) {
			sin->pd_mode_uo = 0;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1,
						  GFP_ATOMIC);
		}
		return;
	}

	if (MID_lt(mid, sctp_mid_peek(stream, in, sid)))
		return;

	if (sin->pd_mode) {
		sin->pd_mode = 0;
		sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x0, GFP_ATOMIC);
	}

	sctp_mid_skip(stream, in, sid, mid);

	sctp_intl_reap_ordered(ulpq, sid);
}

static void sctp_handle_iftsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
{
	struct sctp_ifwdtsn_skip *skip;

	/* Walk through all the skipped MIDs and abort stream pd if possible */
	sctp_walk_ifwdtsn(skip, chunk)
		sctp_intl_skip(ulpq, ntohs(skip->stream),
			       ntohl(skip->mid), skip->flags);
}

static int do_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sk_buff_head temp;

	skb_queue_head_init(&temp);
	__skb_queue_tail(&temp, sctp_event2skb(event));
	return sctp_ulpq_tail_event(ulpq, &temp);
}
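
/* Operations used when the peer does not support interleaving: plain
 * DATA and FORWARD-TSN processing on top of the original ulpq code.
 */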
static struct sctp_stream_interleave sctp_stream_interleave_0 = {
	.data_chunk_len		= sizeof(struct sctp_data_chunk),
	.ftsn_chunk_len		= sizeof(struct sctp_fwdtsn_chunk),
	/* DATA process functions */
	.make_datafrag		= sctp_make_datafrag_empty,
	.assign_number		= sctp_chunk_assign_ssn,
	.validate_data		= sctp_validate_data,
	.ulpevent_data		= sctp_ulpq_tail_data,
	.enqueue_event		= do_ulpq_tail_event,
	.renege_events		= sctp_ulpq_renege,
	.start_pd		= sctp_ulpq_partial_delivery,
	.abort_pd		= sctp_ulpq_abort_pd,
	/* FORWARD-TSN process functions */
	.generate_ftsn		= sctp_generate_fwdtsn,
	.validate_ftsn		= sctp_validate_fwdtsn,
	.report_ftsn		= sctp_report_fwdtsn,
	.handle_ftsn		= sctp_handle_fwdtsn,
};

static int do_sctp_enqueue_event(struct sctp_ulpq *ulpq,
				 struct sctp_ulpevent *event)
{
	struct sk_buff_head temp;

	skb_queue_head_init(&temp);
	__skb_queue_tail(&temp, sctp_event2skb(event));
	return sctp_enqueue_event(ulpq, &temp);
}

static struct sctp_stream_interleave sctp_stream_interleave_1 = {
	.data_chunk_len		= sizeof(struct sctp_idata_chunk),
	.ftsn_chunk_len		= sizeof(struct sctp_ifwdtsn_chunk),
	/* I-DATA process functions */
	.make_datafrag		= sctp_make_idatafrag_empty,
	.assign_number		= sctp_chunk_assign_mid,
	.validate_data		= sctp_validate_idata,
	.ulpevent_data		= sctp_ulpevent_idata,
	.enqueue_event		= do_sctp_enqueue_event,
	.renege_events		= sctp_renege_events,
	.start_pd		= sctp_intl_start_pd,
	.abort_pd		= sctp_intl_abort_pd,
	/* I-FORWARD-TSN process functions */
	.generate_ftsn		= sctp_generate_iftsn,
	.validate_ftsn		= sctp_validate_iftsn,
	.report_ftsn		= sctp_report_iftsn,
	.handle_ftsn		= sctp_handle_iftsn,
};

void sctp_stream_interleave_init(struct sctp_stream *stream)
{
	struct sctp_association *asoc;

	asoc = container_of(stream, struct sctp_association, stream);
	stream->si = asoc->peer.intl_capable ? &sctp_stream_interleave_1
					     : &sctp_stream_interleave_0;
}