/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>

#define SCTP_CALC_TSN_TO_GAP(gap, tsn, mapping_tsn) do { \
	if ((compare_with_wrap(tsn, mapping_tsn, MAX_TSN)) || \
	    (tsn == mapping_tsn)) { \
		gap = tsn - mapping_tsn; \
	} else { \
		gap = (MAX_TSN - mapping_tsn) + tsn + 1; \
	} \
} while (0)

#define SCTP_REVERSE_OUT_TSN_PRES(nr_gap, tsn, asoc) do { \
	if (asoc->mapping_array_base_tsn == asoc->nr_mapping_array_base_tsn) { \
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, nr_gap); \
	} else { \
		int lgap; \
		SCTP_CALC_TSN_TO_GAP(lgap, tsn, asoc->mapping_array_base_tsn); \
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, lgap); \
	} \
} while (0)
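/*
 * Editorial worked example (not part of the original source): TSNs live in
 * a wrapping 32-bit space, so the offset of a TSN from the mapping array
 * base must respect the wrap. With mapping_tsn = 0xFFFFFFFE and tsn = 1,
 * the TSN is logically ahead of the base and the gap works out to 3: the
 * TSNs 0xFFFFFFFE, 0xFFFFFFFF, 0x0 and 0x1 sit at offsets 0..3 in the map.
 * Since MAX_TSN is 0xFFFFFFFF, the else branch's
 * (MAX_TSN - mapping_tsn) + tsn + 1 equals tsn - mapping_tsn modulo 2^32,
 * which keeps the computed gap correct even when the plain subtraction
 * would wrap.
 */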
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is) and will be sending it ... for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket.
	 * Since the sb_cc is the count that everyone has put up. When we
	 * re-write sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * take out what has NOT been put on the socket queue and that we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0. SWS engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}


/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}
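/*
 * Editorial note (an assumption, not in the original source): the
 * (flags << 8) above is how the wire-level DATA chunk flags are exposed
 * through sinfo_flags; e.g. SCTP_DATA_UNORDERED (0x04) shifted into the
 * high byte lines up with the user-visible SCTP_UNORDERED (0x0400), so a
 * receiver can test sinfo_flags & SCTP_UNORDERED directly.
 */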
/*
 * Build out our readq entry based on the incoming chunk.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}

	ret = sctp_get_mbuf_for_msg(len,
	    0, M_DONTWAIT, 1, MT_DATA);

	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		memcpy(outinfo, sinfo, len);
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	SCTP_BUF_LEN(ret) = cmh->cmsg_len;
	return (ret);
}
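/*
 * Editorial note (not in the original source): the mbuf built above holds
 * one control message laid out the way CMSG_LEN() prescribes:
 *
 *	[ struct cmsghdr ][ padding ][ struct sctp_sndrcvinfo
 *	                               or struct sctp_extrcvinfo ]
 *
 * cmsg_len covers the header plus the payload, which is why SCTP_BUF_LEN()
 * is set from cmsg_len rather than from the size of the info struct alone.
 */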
char *
sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
    int *control_len,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	char *buf;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}
	SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
	if (buf == NULL) {
		/* No space */
		return (buf);
	}
	/* We need a CMSG header followed by the struct */
	cmh = (struct cmsghdr *)buf;
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		memcpy(outinfo, sinfo, len);
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	*control_len = len;
	return (buf);
}
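/*
 * Editorial usage sketch (an assumption, not in the original source):
 * unlike sctp_build_ctl_nchunk(), which returns an mbuf, this variant
 * returns a plain buffer allocated with SCTP_MALLOC(..., SCTP_M_CMSG), so
 * a hypothetical caller owns it and would release it with the matching
 * malloc tag, e.g.:
 *
 *	int clen;
 *	char *ctl = sctp_build_ctl_cchunk(inp, &clen, sinfo);
 *
 *	if (ctl != NULL) {
 *		... copy clen bytes out to the user ...
 *		SCTP_FREE(ctl, SCTP_M_CMSG);
 *	}
 */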
/*
 * We are delivering currently from the reassembly queue. We must continue
 * to deliver until we either: 1) run out of space, 2) run out of sequential
 * TSN's, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;

	/* EY if any out-of-order delivered, then tag it nr on nr_map */
	uint32_t nr_tsn, nr_gap;

	struct sctp_queued_to_read *control, *ctl, *ctlat;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket above is long gone or going... */
abandon:
		asoc->fragmented_delivery_inprogress = 0;
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		while (chk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk);
			/* sa_ignore FREED_MEMORY */
			chk = TAILQ_FIRST(&asoc->reasmqueue);
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	do {
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		if (chk == NULL) {
			return;
		}
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream,
			 * and it is not unordered
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end,
			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong: either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was an EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue, this will probably hose the association\n");

#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it off, we delivered it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * EY this is the chunk that should be tagged nr gapped:
		 * calculate the gap and such, then tag this TSN nr
		 * chk->rec.data.TSN_seq
		 */
		/*
		 * EY!-TODO- this tsn should be tagged nr only if it is
		 * out-of-order; the if statement should be modified
		 */
		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
		    asoc->peer_supports_nr_sack) {
			nr_tsn = chk->rec.data.TSN_seq;
			SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
			if ((nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
				/*
				 * EY The 1st should never happen, as in
				 * process_a_data_chunk method this check
				 * should be done
				 */
				/*
				 * EY The 2nd should never happen, because
				 * nr_mapping_array is always expanded when
				 * mapping_array is expanded
				 */
				printf("Impossible nr_gap ack range failed\n");
			} else {
				SCTP_TCB_LOCK_ASSERT(stcb);
				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
				SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
				if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
					asoc->highest_tsn_inside_nr_map = nr_tsn;
			}
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now let's see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			ctl = TAILQ_FIRST(&strm->inqueue);
			if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
				while (ctl != NULL) {
					/* Deliver more if we can. */
					if (nxt_todel == ctl->sinfo_ssn) {
						ctlat = TAILQ_NEXT(ctl, next);
						TAILQ_REMOVE(&strm->inqueue, ctl, next);
						asoc->size_on_all_streams -= ctl->length;
						sctp_ucount_decr(asoc->cnt_on_all_streams);
						strm->last_sequence_delivered++;
						/*
						 * EY will be used to
						 * calculate nr-gap
						 */
						nr_tsn = ctl->sinfo_tsn;
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    ctl,
						    &stcb->sctp_socket->so_rcv, 1,
						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
						/*
						 * EY now something is
						 * delivered, calculate
						 * nr_gap and tag this tsn
						 * NR
						 */
						if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
						    asoc->peer_supports_nr_sack) {
							SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
							if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
							    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
								/*
								 * EY The 1st should never
								 * happen, as in
								 * process_a_data_chunk
								 * method this check should
								 * be done
								 */
								/*
								 * EY The 2nd should never
								 * happen, because
								 * nr_mapping_array is
								 * always expanded when
								 * mapping_array is expanded
								 */
							} else {
								SCTP_TCB_LOCK_ASSERT(stcb);
								SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
								SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
								if (compare_with_wrap(nr_tsn,
								    asoc->highest_tsn_inside_nr_map,
								    MAX_TSN))
									asoc->highest_tsn_inside_nr_map = nr_tsn;
							}
						}
						ctl = ctlat;
					} else {
						break;
					}
					nxt_todel = strm->last_sequence_delivered + 1;
				}
			}
			break;
		}
		/* sa_ignore FREED_MEMORY */
		chk = TAILQ_FIRST(&asoc->reasmqueue);
	} while (chk);
}
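/*
 * Editorial note (not in the original source): the delivery loop above only
 * moves forward while the invariants from the header comment hold: the head
 * chunk's TSN must be exactly tsn_last_delivered + 1, its SSN must be the
 * next to deliver for its stream (unless the message is unordered), and a
 * SCTP_DATA_LAST_FRAG flag ends the in-progress message. For example, with
 * tsn_last_delivered = 100, a queued fragment with TSN 102 stalls delivery
 * until TSN 101 arrives.
 */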
/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains: what to do when the socket buffer is FULL??
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	/* EY- will be used to calculate nr-gap for a tsn */
	uint32_t nr_tsn, nr_gap;

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
	if (compare_with_wrap(strm->last_sequence_delivered,
	    control->sinfo_ssn, MAX_SEQ) ||
	    (strm->last_sequence_delivered == control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (sizeof(uint32_t) * 3);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

		*abort_flag = 1;
		return;

	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it could be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;
		/* EY will be used to calculate nr-gap */
		nr_tsn = control->sinfo_tsn;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		/*
		 * EY this is the chunk that should be tagged nr gapped:
		 * calculate the gap and such, then tag this TSN nr
		 * chk->rec.data.TSN_seq
		 */
		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
		    asoc->peer_supports_nr_sack) {
			SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
			if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
			    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
				printf("Impossible nr_tsn set 2?\n");
				/*
				 * EY The 1st should never happen, as in
				 * process_a_data_chunk method this check
				 * should be done
				 */
				/*
				 * EY The 2nd should never happen, because
				 * nr_mapping_array is always expanded when
				 * mapping_array is expanded
				 */
			} else {
				SCTP_TCB_LOCK_ASSERT(stcb);
				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
				SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
				if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
					asoc->highest_tsn_inside_nr_map = nr_tsn;
			}
		}
		control = TAILQ_FIRST(&strm->inqueue);
		while (control != NULL) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				at = TAILQ_NEXT(control, next);
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				/* EY will be used to calculate nr-gap */
				nr_tsn = control->sinfo_tsn;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_NOT_LOCKED);
				/*
				 * EY this is the chunk that should be
				 * tagged nr gapped: calculate the gap and
				 * such, then tag this TSN nr
				 * chk->rec.data.TSN_seq
				 */
				if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
				    asoc->peer_supports_nr_sack) {
					SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
					if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
					    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
						/*
						 * EY The 1st should never
						 * happen, as in
						 * process_a_data_chunk
						 * method this check should
						 * be done
						 */
						/*
						 * EY The 2nd should never
						 * happen, because
						 * nr_mapping_array is
						 * always expanded when
						 * mapping_array is expanded
						 */
					} else {
						SCTP_TCB_LOCK_ASSERT(stcb);
						SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
						SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
						if (compare_with_wrap(nr_tsn,
						    asoc->highest_tsn_inside_nr_map,
						    MAX_TSN))
							asoc->highest_tsn_inside_nr_map = nr_tsn;
					}
				}
				control = at;
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if ((compare_with_wrap(asoc->cumulative_tsn,
		    control->sinfo_tsn, MAX_TSN)) ||
		    (control->sinfo_tsn == asoc->cumulative_tsn)) {
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (compare_with_wrap(at->sinfo_ssn,
				    control->sinfo_ssn, MAX_SEQ)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, he sent me a duplicate stream
					 * seq number.
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy; should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSN's have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh, for now just blow
					 * away the chunk!
					 */

					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom)
						sctp_free_remote_addr(control->whoFrom);
					control->whoFrom = NULL;
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}
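/*
 * Editorial note (not in the original source): the insertion scan above
 * keeps the per-stream inqueue sorted by SSN, using the wraparound compare.
 * For example, with queued SSNs {5, 6, 9}, an arriving SSN 7 is inserted
 * before 9, an arriving SSN 6 is detected as a duplicate and freed, and an
 * arriving SSN 12 lands at the tail.
 */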
/*
 * Returns two things: you get the total size of the deliverable parts of
 * the first fragmented message on the reassembly queue, and you get a 1
 * back if all of the message is ready, or a 0 back if the message is still
 * incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	while (chk) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
		chk = TAILQ_NEXT(chk, sctp_next);
	}
	return (0);
}
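/*
 * Editorial example (not in the original source): with a reassembly queue
 * holding TSNs 10 (FIRST), 11 and 12, and no LAST flag yet, the walk above
 * accumulates t_size over the three contiguous fragments and returns 0;
 * once 13 (LAST) arrives, the same walk returns 1 with t_size covering
 * TSNs 10..13. A hole (10, 12, 13) stops the walk at the first TSN
 * mismatch and returns 0.
 */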
static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize, pd_point;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep, the first one is here and it's ok to
			 * deliver, but should we?
			 */
			if (stcb->sctp_socket) {
				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
				    stcb->sctp_ep->partial_delivery_point);
			} else {
				pd_point = stcb->sctp_ep->partial_delivery_point;
			}
			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {

				/*
				 * Yes, we set up to start reception by
				 * backing down the TSN just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery... but it won't know to go
		 * back and call itself again... we do that here with the
		 * goto doit_again.
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our fragmented delivery, could be more
			 * waiting?
			 */
			goto doit_again;
		}
	}
}
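/*
 * Editorial example (not in the original source): pd_point caps how much
 * must be queued before a partial delivery starts. With a receive buffer
 * limit of 64 kB and a partial_delivery_point of 4 kB, pd_point is 4 kB,
 * so an incomplete message becomes eligible for PD-API delivery once at
 * least 4 kB of contiguous fragments sit on the reassembly queue; a fully
 * reassembled message is delivered regardless of size.
 */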
/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so, pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
	u_char last_flags;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery in progress;
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);

				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (sizeof(uint32_t) * 3);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (sizeof(uint32_t) * 3);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
				    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq !=
				    asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(at->rec.data.TSN_seq,
		    chk->rec.data.TSN_seq, MAX_TSN)) {
			/*
			 * one in queue is bigger than the new one; insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, he sent me a duplicate TSN */
			/*
			 * foo bar, I guess I will just free this new guy;
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSN's have wrapped. Maybe I should
			 * compare to TSN somehow... sigh, for now just blow
			 * away the chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk);
			return;
		} else {
			last_flags = at->rec.data.rcv_flags;
			last_tsn = at->rec.data.TSN_seq;
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok, the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    prev->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR Seq
					 * here, they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    prev->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok, the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG ||
			    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR Seq
					 * here, they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}
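/*
 * Editorial summary (not in the original source): the audits above enforce
 * the only legal neighbor patterns in TSN order. A fragment directly after
 * a FIRST or MIDDLE must be a MIDDLE or LAST of the same stream (and the
 * same SSN when ordered); a fragment directly after a LAST must be a
 * FIRST; a fragment directly before a FIRST must be a LAST; and a fragment
 * directly before a MIDDLE or LAST cannot itself be a LAST. Any other
 * adjacency is treated as a protocol violation and aborts the association.
 */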
/*
 * This is an unfortunate routine. It checks to make sure an evil guy is
 * not stuffing us full of bad packet fragments. A broken peer could also
 * do this, but this is doubtful. It is too bad I must worry about evil
 * crackers, sigh :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(TSN_seq,
		    at->rec.data.TSN_seq, MAX_TSN)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok, this guy belongs next to a
					 * guy that is NOT last; it should
					 * be a middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since it's a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than the new chunk, but does
			 * it need to be right before it?
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, it better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}
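/*
 * Editorial example (not in the original source): if the queue holds TSN
 * 50 flagged MIDDLE, an arriving complete (unfragmented) chunk with TSN 51
 * "belongs" to that in-progress message, since 50's successor must be a
 * MIDDLE or LAST, so this routine returns 1 and the chunk is treated as
 * bogus. If TSN 50 were flagged LAST instead, TSN 51 may legitimately
 * start a new message and 0 is returned.
 */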
static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
	/* Process a data chunk */
	struct sctp_tmit_chunk *chk;
	uint32_t tsn, gap;

	/* EY - for nr_sack */
	uint32_t nr_gap;
	struct mbuf *dmbuf;
	int indx, the_len;
	int need_reasm_check = 0;
	uint16_t strmno, strmseq;
	struct mbuf *oper;
	struct sctp_queued_to_read *control;
	int ordered;
	uint32_t protocol_id;
	uint8_t chunk_flags;
	struct sctp_stream_reset_list *liste;

	chk = NULL;
	tsn = ntohl(ch->dp.tsn);
	chunk_flags = ch->ch.chunk_flags;
	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
		asoc->send_sack = 1;
	}
	protocol_id = ch->dp.protocol_id;
	ordered = ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
	}
	if (stcb == NULL) {
		return (0);
	}
	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
	if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
	    asoc->cumulative_tsn == tsn) {
		/* It is a duplicate */
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/* Calculate the number of TSN's between the base and this TSN */
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
		/* Can't hold the bit in the mapping at max array, toss it */
		return (0);
	}
	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_expand_mapping_array(asoc, gap)) {
			/* Can't expand, drop it */
			return (0);
		}
	}
	/* EY - for nr_sack */
	nr_gap = gap;

	if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
		*high_tsn = tsn;
	}
	/* See if we have received this one already */
	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/*
	 * Check to see about the GONE flag; duplicates would cause a sack
	 * to be sent up above
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
	    ) {
		/*
		 * wait a minute, this guy is gone, there is no longer a
		 * receiver. Send peer an ABORT!
		 */
		struct mbuf *op_err;

		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
	/*
	 * Now before going further we see if there is room. If NOT then we
	 * MAY let one through only IF this TSN is the one we are waiting
	 * for on a partial delivery API.
	 */

	/* now do the tests */
	if (((asoc->cnt_on_all_streams +
	    asoc->cnt_on_reasm_queue +
	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
	    (((int)asoc->my_rwnd) <= 0)) {
		/*
		 * When we have NO room in the rwnd we check to make sure
		 * the reader is doing its job...
		 */
		if (stcb->sctp_socket->so_rcv.sb_cc) {
			/* some to read, wake-up */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		/* now is it in the mapping array of what we have accepted? */
		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
			/* Nope, not in the valid range; dump it */
			sctp_set_rwnd(stcb, asoc);
			if ((asoc->cnt_on_all_streams +
			    asoc->cnt_on_reasm_queue +
			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
				SCTP_STAT_INCR(sctps_datadropchklmt);
			} else {
				SCTP_STAT_INCR(sctps_datadroprwnd);
			}
			indx = *break_flag;
			*break_flag = 1;
			return (0);
		}
	}
	strmno = ntohs(ch->dp.stream_id);
	if (strmno >= asoc->streamincnt) {
		struct sctp_paramhdr *phdr;
		struct mbuf *mb;

		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (mb != NULL) {
			/* add some space up front so prepend will work well */
			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
			phdr = mtod(mb, struct sctp_paramhdr *);
			/*
			 * Error causes are just param's and this one has
			 * two back to back phdr: one with the error type
			 * and size, the other with the streamid and a rsvd
			 */
			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
			phdr->param_length =
			    htons(sizeof(struct sctp_paramhdr) * 2);
			phdr++;
			/* We insert the stream in the type field */
			phdr->param_type = ch->dp.stream_id;
			/* And set the length to 0 for the rsvd field */
			phdr->param_length = 0;
			sctp_queue_op_err(stcb, mb);
		}
		SCTP_STAT_INCR(sctps_badsid);
		SCTP_TCB_LOCK_ASSERT(stcb);
		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
		/* EY set this tsn present in nr_sack's nr_mapping_array */
		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
		    asoc->peer_supports_nr_sack) {
			SCTP_TCB_LOCK_ASSERT(stcb);
			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
			SCTP_REVERSE_OUT_TSN_PRES(gap, tsn, asoc);
		}
		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
			/* we have a new high score */
			asoc->highest_tsn_inside_map = tsn;
			/* EY nr_sack version of the above */
			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack)
				asoc->highest_tsn_inside_nr_map = tsn;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
			}
		}
		if (tsn == (asoc->cumulative_tsn + 1)) {
			/* Update cum-ack */
			asoc->cumulative_tsn = tsn;
		}
		return (0);
	}
	/*
	 * Before we continue, let's validate that we are not being fooled
	 * by an evil attacker. We can only have 4k chunks based on our TSN
	 * spread allowed by the mapping array (512 * 8 bits), so there is
	 * no way our stream sequence numbers could have wrapped. We of
	 * course only validate the FIRST fragment, so the bit must be set.
	 */
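	/*
	 * Editorial arithmetic (not in the original source): the mapping
	 * array is SCTP_MAPPING_ARRAY (512) bytes, and each byte tracks 8
	 * TSNs, so at most 512 * 8 = 4096 TSNs can be outstanding at once.
	 * A 16-bit stream sequence space (65536 values) therefore cannot
	 * wrap within one map's worth of data, which is what the
	 * FIRST-fragment validation below relies on.
	 */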
	strmseq = ntohs(ch->dp.stream_sequence);
#ifdef SCTP_ASOCLOG_OF_TSNS
	SCTP_TCB_LOCK_ASSERT(stcb);
	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
		asoc->tsn_in_at = 0;
		asoc->tsn_in_wrapped = 1;
	}
	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
	asoc->tsn_in_at++;
#endif
	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
	    (TAILQ_EMPTY(&asoc->resetHead)) &&
	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
	    (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
	    strmseq, MAX_SEQ) ||
	    asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (3 * sizeof(uint32_t));
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
			ippp++;
			*ippp = tsn;
			ippp++;
			*ippp = ((strmno << 16) | strmseq);

		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
	/************************************
	 * From here down we may find ch-> invalid
	 * so it's a good idea NOT to use it.
	 *************************************/
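	/*
	 * The reason: below we either copy the payload out of *m or steal
	 * the chain with m_adj(), and ch points into that same chain, so
	 * the chunk header may no longer be addressable afterwards.
	 */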
1846 *************************************/ 1847 1848 the_len = (chk_length - sizeof(struct sctp_data_chunk)); 1849 if (last_chunk == 0) { 1850 dmbuf = SCTP_M_COPYM(*m, 1851 (offset + sizeof(struct sctp_data_chunk)), 1852 the_len, M_DONTWAIT); 1853 #ifdef SCTP_MBUF_LOGGING 1854 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 1855 struct mbuf *mat; 1856 1857 mat = dmbuf; 1858 while (mat) { 1859 if (SCTP_BUF_IS_EXTENDED(mat)) { 1860 sctp_log_mb(mat, SCTP_MBUF_ICOPY); 1861 } 1862 mat = SCTP_BUF_NEXT(mat); 1863 } 1864 } 1865 #endif 1866 } else { 1867 /* We can steal the last chunk */ 1868 int l_len; 1869 1870 dmbuf = *m; 1871 /* lop off the top part */ 1872 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk))); 1873 if (SCTP_BUF_NEXT(dmbuf) == NULL) { 1874 l_len = SCTP_BUF_LEN(dmbuf); 1875 } else { 1876 /* 1877 * need to count up the size hopefully does not hit 1878 * this to often :-0 1879 */ 1880 struct mbuf *lat; 1881 1882 l_len = 0; 1883 lat = dmbuf; 1884 while (lat) { 1885 l_len += SCTP_BUF_LEN(lat); 1886 lat = SCTP_BUF_NEXT(lat); 1887 } 1888 } 1889 if (l_len > the_len) { 1890 /* Trim the end round bytes off too */ 1891 m_adj(dmbuf, -(l_len - the_len)); 1892 } 1893 } 1894 if (dmbuf == NULL) { 1895 SCTP_STAT_INCR(sctps_nomem); 1896 return (0); 1897 } 1898 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG && 1899 asoc->fragmented_delivery_inprogress == 0 && 1900 TAILQ_EMPTY(&asoc->resetHead) && 1901 ((ordered == 0) || 1902 ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq && 1903 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) { 1904 /* Candidate for express delivery */ 1905 /* 1906 * Its not fragmented, No PD-API is up, Nothing in the 1907 * delivery queue, Its un-ordered OR ordered and the next to 1908 * deliver AND nothing else is stuck on the stream queue, 1909 * And there is room for it in the socket buffer. Lets just 1910 * stuff it up the buffer.... 
1911 */ 1912 1913 /* It would be nice to avoid this copy if we could :< */ 1914 sctp_alloc_a_readq(stcb, control); 1915 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 1916 protocol_id, 1917 stcb->asoc.context, 1918 strmno, strmseq, 1919 chunk_flags, 1920 dmbuf); 1921 if (control == NULL) { 1922 goto failed_express_del; 1923 } 1924 sctp_add_to_readq(stcb->sctp_ep, stcb, 1925 control, &stcb->sctp_socket->so_rcv, 1926 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 1927 1928 /* 1929 * EY here I should check if this delivered tsn is 1930 * out_of_order, if yes then update the nr_map 1931 */ 1932 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) { 1933 /* 1934 * EY check if the mapping_array and nr_mapping 1935 * array are consistent 1936 */ 1937 if (asoc->mapping_array_base_tsn != asoc->nr_mapping_array_base_tsn) 1938 /* 1939 * printf("EY-IN 1940 * sctp_process_a_data_chunk(5): Something 1941 * is wrong the map base tsn" "\nEY-and 1942 * nr_map base tsn should be equal."); 1943 */ 1944 /* EY debugging block */ 1945 { 1946 /* 1947 * printf("\nEY-Calculating an 1948 * nr_gap!!\nmapping_array_size = %d 1949 * nr_mapping_array_size = %d" 1950 * "\nEY-mapping_array_base = %d 1951 * nr_mapping_array_base = 1952 * %d\nEY-highest_tsn_inside_map = %d" 1953 * "highest_tsn_inside_nr_map = %d\nEY-TSN = 1954 * %d nr_gap = %d",asoc->mapping_array_size, 1955 * asoc->nr_mapping_array_size, 1956 * asoc->mapping_array_base_tsn, 1957 * asoc->nr_mapping_array_base_tsn, 1958 * asoc->highest_tsn_inside_map, 1959 * asoc->highest_tsn_inside_nr_map,tsn,nr_gap 1960 * ); 1961 */ 1962 } 1963 /* EY - not %100 sure about the lock thing */ 1964 SCTP_TCB_LOCK_ASSERT(stcb); 1965 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap); 1966 SCTP_REVERSE_OUT_TSN_PRES(nr_gap, tsn, asoc); 1967 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) 1968 asoc->highest_tsn_inside_nr_map = tsn; 1969 } 1970 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) { 1971 /* for ordered, bump what we delivered */ 1972 asoc->strmin[strmno].last_sequence_delivered++; 1973 } 1974 SCTP_STAT_INCR(sctps_recvexpress); 1975 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 1976 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, 1977 SCTP_STR_LOG_FROM_EXPRS_DEL); 1978 } 1979 control = NULL; 1980 goto finish_express_del; 1981 } 1982 failed_express_del: 1983 /* If we reach here this is a new chunk */ 1984 chk = NULL; 1985 control = NULL; 1986 /* Express for fragmented delivery? */ 1987 if ((asoc->fragmented_delivery_inprogress) && 1988 (stcb->asoc.control_pdapi) && 1989 (asoc->str_of_pdapi == strmno) && 1990 (asoc->ssn_of_pdapi == strmseq) 1991 ) { 1992 control = stcb->asoc.control_pdapi; 1993 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) { 1994 /* Can't be another first? 
		if (tsn == (control->sinfo_tsn + 1)) {
			/* Yep, we can add it on */
			int end = 0;
			uint32_t cumack;

			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
				end = 1;
			}
			cumack = asoc->cumulative_tsn;
			if ((cumack + 1) == tsn)
				cumack = tsn;

			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
			    tsn,
			    &stcb->sctp_socket->so_rcv)) {
				SCTP_PRINTF("Append fails end:%d\n", end);
				goto failed_pdapi_express_del;
			}
			/*
			 * EY: it was appended to the read queue in the
			 * previous if-block; here I should check if this
			 * delivered tsn is out_of_order, and if yes then
			 * update the nr_map
			 */
			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
			    asoc->peer_supports_nr_sack) {
				/* EY debugging block */
				{
					/*
					 * printf("\nEY-Calculating an nr_gap!!\nEY-mapping_array_size = %d nr_mapping_array_size = %d"
					 * "\nEY-mapping_array_base = %d nr_mapping_array_base = %d\nEY-highest_tsn_inside_map = %d"
					 * "highest_tsn_inside_nr_map = %d\nEY-TSN = %d nr_gap = %d", asoc->mapping_array_size,
					 * asoc->nr_mapping_array_size, asoc->mapping_array_base_tsn, asoc->nr_mapping_array_base_tsn,
					 * asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map, tsn, nr_gap);
					 */
				}
				/* EY - not 100% sure about the lock thing */
				SCTP_TCB_LOCK_ASSERT(stcb);
				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
				SCTP_REVERSE_OUT_TSN_PRES(nr_gap, tsn, asoc);
				if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
					asoc->highest_tsn_inside_nr_map = tsn;
			}
			SCTP_STAT_INCR(sctps_recvexpressm);
			control->sinfo_tsn = tsn;
			asoc->tsn_last_delivered = tsn;
			asoc->fragment_flags = chunk_flags;
			asoc->tsn_of_pdapi_last_delivered = tsn;
			asoc->last_flags_delivered = chunk_flags;
			asoc->last_strm_seq_delivered = strmseq;
			asoc->last_strm_no_delivered = strmno;
			if (end) {
				/* clean up the flags and such */
				asoc->fragmented_delivery_inprogress = 0;
				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
					asoc->strmin[strmno].last_sequence_delivered++;
				}
				stcb->asoc.control_pdapi = NULL;
				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
					/*
					 * There could be another message
					 * ready
					 */
					need_reasm_check = 1;
				}
			}
			control = NULL;
			goto finish_express_del;
		}
	}
failed_pdapi_express_del:
	control = NULL;
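	/*
	 * Queueing decision: a fragment gets a sctp_tmit_chunk destined for
	 * the reassembly queue, while a complete (NOT_FRAG) message gets a
	 * readq entry headed for the socket buffer, a stream queue, or the
	 * pending_reply_queue.
	 */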
	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
		sctp_alloc_a_chunk(stcb, chk);
		if (chk == NULL) {
			/* No memory so we drop the chunk */
			SCTP_STAT_INCR(sctps_nomem);
			if (last_chunk == 0) {
				/* we copied it, free the copy */
				sctp_m_freem(dmbuf);
			}
			return (0);
		}
		chk->rec.data.TSN_seq = tsn;
		chk->no_fr_allowed = 0;
		chk->rec.data.stream_seq = strmseq;
		chk->rec.data.stream_number = strmno;
		chk->rec.data.payloadtype = protocol_id;
		chk->rec.data.context = stcb->asoc.context;
		chk->rec.data.doing_fast_retransmit = 0;
		chk->rec.data.rcv_flags = chunk_flags;
		chk->asoc = asoc;
		chk->send_size = the_len;
		chk->whoTo = net;
		atomic_add_int(&net->ref_count, 1);
		chk->data = dmbuf;
	} else {
		sctp_alloc_a_readq(stcb, control);
		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
		    protocol_id,
		    stcb->asoc.context,
		    strmno, strmseq,
		    chunk_flags,
		    dmbuf);
		if (control == NULL) {
			/* No memory so we drop the chunk */
			SCTP_STAT_INCR(sctps_nomem);
			if (last_chunk == 0) {
				/* we copied it, free the copy */
				sctp_m_freem(dmbuf);
			}
			return (0);
		}
		control->length = the_len;
	}

	/* Mark it as received */
	/* Now queue it where it belongs */
	if (control != NULL) {
		/* First a sanity check */
		if (asoc->fragmented_delivery_inprogress) {
			/*
			 * Ok, we have a fragmented delivery in progress; if
			 * this chunk is next to deliver OR belongs in our
			 * view to the reassembly, the peer is evil or
			 * broken.
			 */
			uint32_t estimate_tsn;

			estimate_tsn = asoc->tsn_last_delivered + 1;
			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
			    (estimate_tsn == control->sinfo_tsn)) {
				/* Evil/Broken peer */
				sctp_m_freem(control->data);
				control->data = NULL;
				if (control->whoFrom) {
					sctp_free_remote_addr(control->whoFrom);
					control->whoFrom = NULL;
				}
				sctp_free_a_readq(stcb, control);
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
					ippp++;
					*ippp = tsn;
					ippp++;
					*ippp = ((strmno << 16) | strmseq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

				*abort_flag = 1;
				return (0);
			} else {
				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
					sctp_m_freem(control->data);
					control->data = NULL;
					if (control->whoFrom) {
						sctp_free_remote_addr(control->whoFrom);
						control->whoFrom = NULL;
					}
					sctp_free_a_readq(stcb, control);

					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
						ippp++;
						*ippp = tsn;
						ippp++;
						*ippp = ((strmno << 16) | strmseq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return (0);
				}
			}
		} else {
			/* No PDAPI running */
			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
				/*
				 * Reassembly queue is NOT empty; validate
				 * that this tsn does not need to be in the
				 * reassembly queue. If it does then our
				 * peer is broken or evil.
				 */
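				/*
				 * The three 32-bit words appended to the
				 * PROTOCOL_VIOLATION causes above and below
				 * are an internal location code, the
				 * offending TSN, and (strmno << 16) |
				 * strmseq, purely to aid debugging aborts.
				 */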
				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
					sctp_m_freem(control->data);
					control->data = NULL;
					if (control->whoFrom) {
						sctp_free_remote_addr(control->whoFrom);
						control->whoFrom = NULL;
					}
					sctp_free_a_readq(stcb, control);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
						ippp++;
						*ippp = tsn;
						ippp++;
						*ippp = ((strmno << 16) | strmseq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return (0);
				}
			}
		}
		/* ok, if we reach here we have passed the sanity checks */
		if (chunk_flags & SCTP_DATA_UNORDERED) {
			/* queue directly into socket buffer */
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			/*
			 * EY: it was added to the read queue in the
			 * previous if-block; here I should check if this
			 * delivered tsn is out_of_order, and if yes then
			 * update the nr_map
			 */
			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
			    asoc->peer_supports_nr_sack) {
				/*
				 * EY: check if the mapping_array and
				 * nr_mapping array are consistent
				 */
				if (asoc->mapping_array_base_tsn != asoc->nr_mapping_array_base_tsn)
					/*
					 * printf("EY-IN sctp_process_a_data_chunk(6): Something is wrong, the map base tsn"
					 * "\nEY-and nr_map base tsn should be equal.");
					 */
					/*
					 * EY - not 100% sure about the lock
					 * thing, I think we don't need the
					 * below,
					 */
					/* SCTP_TCB_LOCK_ASSERT(stcb); */
				{
					/*
					 * printf("\nEY-Calculating an nr_gap!!\nEY-mapping_array_size = %d nr_mapping_array_size = %d"
					 * "\nEY-mapping_array_base = %d nr_mapping_array_base = %d\nEY-highest_tsn_inside_map = %d"
					 * "highest_tsn_inside_nr_map = %d\nEY-TSN = %d nr_gap = %d", asoc->mapping_array_size,
					 * asoc->nr_mapping_array_size, asoc->mapping_array_base_tsn, asoc->nr_mapping_array_base_tsn,
					 * asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map, tsn, nr_gap);
					 */
				}
				SCTP_TCB_LOCK_ASSERT(stcb);
				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
				SCTP_REVERSE_OUT_TSN_PRES(nr_gap, tsn, asoc);
				if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
					asoc->highest_tsn_inside_nr_map = tsn;
			}
		} else {
			/*
			 * Special check for when streams are resetting. We
			 * could be smarter about this and check the actual
			 * stream to see if it is not being reset.. that way
			 * we would not create head-of-line blocking amongst
			 * streams being reset and those not being reset.
			 *
2323 * 2324 * We take complete messages that have a stream reset 2325 * intervening (aka the TSN is after where our 2326 * cum-ack needs to be) off and put them on a 2327 * pending_reply_queue. The reassembly ones we do 2328 * not have to worry about since they are all sorted 2329 * and proceessed by TSN order. It is only the 2330 * singletons I must worry about. 2331 */ 2332 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2333 ((compare_with_wrap(tsn, liste->tsn, MAX_TSN))) 2334 ) { 2335 /* 2336 * yep its past where we need to reset... go 2337 * ahead and queue it. 2338 */ 2339 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) { 2340 /* first one on */ 2341 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2342 } else { 2343 struct sctp_queued_to_read *ctlOn; 2344 unsigned char inserted = 0; 2345 2346 ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue); 2347 while (ctlOn) { 2348 if (compare_with_wrap(control->sinfo_tsn, 2349 ctlOn->sinfo_tsn, MAX_TSN)) { 2350 ctlOn = TAILQ_NEXT(ctlOn, next); 2351 } else { 2352 /* found it */ 2353 TAILQ_INSERT_BEFORE(ctlOn, control, next); 2354 inserted = 1; 2355 break; 2356 } 2357 } 2358 if (inserted == 0) { 2359 /* 2360 * must be put at end, use 2361 * prevP (all setup from 2362 * loop) to setup nextP. 2363 */ 2364 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2365 } 2366 } 2367 } else { 2368 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag); 2369 if (*abort_flag) { 2370 return (0); 2371 } 2372 } 2373 } 2374 } else { 2375 /* Into the re-assembly queue */ 2376 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag); 2377 if (*abort_flag) { 2378 /* 2379 * the assoc is now gone and chk was put onto the 2380 * reasm queue, which has all been freed. 2381 */ 2382 *m = NULL; 2383 return (0); 2384 } 2385 } 2386 finish_express_del: 2387 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) { 2388 /* we have a new high score */ 2389 asoc->highest_tsn_inside_map = tsn; 2390 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2391 sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 2392 } 2393 } 2394 if (tsn == (asoc->cumulative_tsn + 1)) { 2395 /* Update cum-ack */ 2396 asoc->cumulative_tsn = tsn; 2397 } 2398 if (last_chunk) { 2399 *m = NULL; 2400 } 2401 if (ordered) { 2402 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); 2403 } else { 2404 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); 2405 } 2406 SCTP_STAT_INCR(sctps_recvdata); 2407 /* Set it present please */ 2408 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 2409 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN); 2410 } 2411 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2412 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2413 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); 2414 } 2415 SCTP_TCB_LOCK_ASSERT(stcb); 2416 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); 2417 2418 /* 2419 * EY - set tsn present in nr-map if doing nr-sacks and the tsn is 2420 * non-renegable 2421 */ 2422 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && 2423 asoc->peer_supports_nr_sack && 2424 (SCTP_BASE_SYSCTL(sctp_do_drain) == 0)) { 2425 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 2426 SCTP_REVERSE_OUT_TSN_PRES(nr_gap, tsn, asoc); 2427 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) { 2428 asoc->highest_tsn_inside_nr_map = tsn; 2429 } 2430 } 2431 /* check the special flag for stream resets */ 2432 if (((liste = 
	/* check the special flag for stream resets */
	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
	    ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
	    (asoc->cumulative_tsn == liste->tsn))
	    ) {
		/*
		 * we have finished working through the backlogged TSNs;
		 * now it is time to reset streams. 1: call reset function.
		 * 2: free pending_reply space. 3: distribute any chunks in
		 * pending_reply_queue.
		 */
		struct sctp_queued_to_read *ctl;

		sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
		SCTP_FREE(liste, SCTP_M_STRESET);
		/* sa_ignore FREED_MEMORY */
		liste = TAILQ_FIRST(&asoc->resetHead);
		ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
		if (ctl && (liste == NULL)) {
			/* All can be removed */
			while (ctl) {
				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
				if (*abort_flag) {
					return (0);
				}
				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
			}
		} else if (ctl) {
			/* more than one in queue */
			while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
				/*
				 * if ctl->sinfo_tsn is <= liste->tsn we can
				 * process it, which is the NOT of
				 * ctl->sinfo_tsn > liste->tsn
				 */
				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
				if (*abort_flag) {
					return (0);
				}
				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
			}
		}
		/*
		 * Now service re-assembly to pick up anything that has been
		 * held on the reassembly queue.
		 */
		sctp_deliver_reasm_check(stcb, asoc);
		need_reasm_check = 0;
	}
	if (need_reasm_check) {
		/* Another one waits ? */
		sctp_deliver_reasm_check(stcb, asoc);
	}
	return (1);
}

int8_t sctp_map_lookup_tab[256] = {
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 4,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 5,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 4,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 6,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 4,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 5,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 4,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 7,
};
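
/*
 * For a byte b, sctp_map_lookup_tab[b] is one less than the number of
 * consecutive one-bits starting at bit 0, or -1 when bit 0 is clear:
 * tab[0x01] = 0, tab[0x07] = 2, tab[0xff] = 7. The scan in
 * sctp_sack_check() adds the one back after the loop.
 */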
2532 */ 2533 struct sctp_association *asoc; 2534 int at; 2535 uint8_t comb_byte; 2536 int last_all_ones = 0; 2537 int slide_from, slide_end, lgap, distance; 2538 2539 /* EY nr_mapping array variables */ 2540 /* int nr_at; */ 2541 /* int nr_last_all_ones = 0; */ 2542 /* int nr_slide_from, nr_slide_end, nr_lgap, nr_distance; */ 2543 2544 uint32_t old_cumack, old_base, old_highest; 2545 unsigned char aux_array[64]; 2546 2547 /* 2548 * EY! Don't think this is required but I am immitating the code for 2549 * map just to make sure 2550 */ 2551 unsigned char nr_aux_array[64]; 2552 2553 asoc = &stcb->asoc; 2554 at = 0; 2555 2556 old_cumack = asoc->cumulative_tsn; 2557 old_base = asoc->mapping_array_base_tsn; 2558 old_highest = asoc->highest_tsn_inside_map; 2559 if (asoc->mapping_array_size < 64) 2560 memcpy(aux_array, asoc->mapping_array, 2561 asoc->mapping_array_size); 2562 else 2563 memcpy(aux_array, asoc->mapping_array, 64); 2564 /* EY do the same for nr_mapping_array */ 2565 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) { 2566 if (asoc->nr_mapping_array_size != asoc->mapping_array_size) { 2567 /* 2568 * printf("\nEY-IN sack_check method: \nEY-" "The 2569 * size of map and nr_map are inconsitent") 2570 */ ; 2571 } 2572 if (asoc->nr_mapping_array_base_tsn != asoc->mapping_array_base_tsn) { 2573 /* 2574 * printf("\nEY-IN sack_check method VERY CRUCIAL 2575 * error: \nEY-" "The base tsns of map and nr_map 2576 * are inconsitent") 2577 */ ; 2578 } 2579 /* EY! just immitating the above code */ 2580 if (asoc->nr_mapping_array_size < 64) 2581 memcpy(nr_aux_array, asoc->nr_mapping_array, 2582 asoc->nr_mapping_array_size); 2583 else 2584 memcpy(aux_array, asoc->nr_mapping_array, 64); 2585 } 2586 /* 2587 * We could probably improve this a small bit by calculating the 2588 * offset of the current cum-ack as the starting point. 
2589 */ 2590 at = 0; 2591 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) { 2592 /* 2593 * We must combine the renegable and non-renegable arrays 2594 * here to form a unified view of what is acked right now 2595 * (since they are kept separate 2596 */ 2597 comb_byte = asoc->mapping_array[slide_from] | asoc->nr_mapping_array[slide_from]; 2598 if (comb_byte == 0xff) { 2599 at += 8; 2600 last_all_ones = 1; 2601 } else { 2602 /* there is a 0 bit */ 2603 at += sctp_map_lookup_tab[comb_byte]; 2604 last_all_ones = 0; 2605 break; 2606 } 2607 } 2608 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - last_all_ones); 2609 /* at is one off, since in the table a embedded -1 is present */ 2610 at++; 2611 2612 if (compare_with_wrap(asoc->cumulative_tsn, 2613 asoc->highest_tsn_inside_map, 2614 MAX_TSN)) { 2615 #ifdef INVARIANTS 2616 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map", 2617 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2618 #else 2619 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n", 2620 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2621 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2622 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 2623 } 2624 asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2625 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn; 2626 #endif 2627 } 2628 if ((asoc->cumulative_tsn == asoc->highest_tsn_inside_map) && (at >= 8)) { 2629 /* The complete array was completed by a single FR */ 2630 /* higest becomes the cum-ack */ 2631 int clr; 2632 2633 asoc->cumulative_tsn = asoc->highest_tsn_inside_map; 2634 /* clear the array */ 2635 clr = (at >> 3) + 1; 2636 if (clr > asoc->mapping_array_size) { 2637 clr = asoc->mapping_array_size; 2638 } 2639 memset(asoc->mapping_array, 0, clr); 2640 /* base becomes one ahead of the cum-ack */ 2641 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; 2642 2643 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) { 2644 2645 if (clr > asoc->nr_mapping_array_size) 2646 clr = asoc->nr_mapping_array_size; 2647 2648 memset(asoc->nr_mapping_array, 0, clr); 2649 /* base becomes one ahead of the cum-ack */ 2650 asoc->nr_mapping_array_base_tsn = asoc->cumulative_tsn + 1; 2651 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn; 2652 } 2653 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2654 sctp_log_map(old_base, old_cumack, old_highest, 2655 SCTP_MAP_PREPARE_SLIDE); 2656 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2657 asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED); 2658 } 2659 } else if (at >= 8) { 2660 /* we can slide the mapping array down */ 2661 /* slide_from holds where we hit the first NON 0xff byte */ 2662 2663 /* 2664 * now calculate the ceiling of the move using our highest 2665 * TSN value 2666 */ 2667 if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) { 2668 lgap = asoc->highest_tsn_inside_map - 2669 asoc->mapping_array_base_tsn; 2670 } else { 2671 lgap = (MAX_TSN - asoc->mapping_array_base_tsn) + 2672 asoc->highest_tsn_inside_map + 1; 2673 } 2674 slide_end = lgap >> 3; 2675 if (slide_end < slide_from) { 2676 #ifdef INVARIANTS 2677 panic("impossible slide"); 2678 #else 2679 printf("impossible slide?\n"); 2680 return; 2681 #endif 2682 } 2683 if (slide_end > asoc->mapping_array_size) { 2684 #ifdef INVARIANTS 2685 panic("would overrun buffer"); 2686 #else 2687 printf("Gak, would have overrun 
		distance = (slide_end - slide_from) + 1;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(old_base, old_cumack, old_highest,
			    SCTP_MAP_PREPARE_SLIDE);
			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
		}
		if (distance + slide_from > asoc->mapping_array_size ||
		    distance < 0) {
			/*
			 * Here we do NOT slide forward the array so that
			 * hopefully when more data comes in to fill it up
			 * we will be able to slide it forward. Really I
			 * don't think this should happen :-0
			 */

			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
				    (uint32_t) asoc->mapping_array_size,
				    SCTP_MAP_SLIDE_NONE);
			}
		} else {
			int ii;

			for (ii = 0; ii < distance; ii++) {
				asoc->mapping_array[ii] =
				    asoc->mapping_array[slide_from + ii];
			}
			for (ii = distance; ii <= slide_end; ii++) {
				asoc->mapping_array[ii] = 0;
			}
			asoc->mapping_array_base_tsn += (slide_from << 3);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map(asoc->mapping_array_base_tsn,
				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
				    SCTP_MAP_SLIDE_RESULT);
			}
			/*
			 * EY: if doing nr_sacks then slide the
			 * nr_mapping_array accordingly please
			 */
			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
				for (ii = 0; ii < distance; ii++) {
					asoc->nr_mapping_array[ii] =
					    asoc->nr_mapping_array[slide_from + ii];
				}
				for (ii = distance; ii <= slide_end; ii++) {
					asoc->nr_mapping_array[ii] = 0;
				}
				asoc->nr_mapping_array_base_tsn += (slide_from << 3);
			}
		}
	}
	/*
	 * Now we need to see if we need to queue a sack or just start the
	 * timer (if allowed).
	 */
	if (ok_to_sack) {
		if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
			/*
			 * Ok, special case: in the SHUTDOWN-SENT state we
			 * make sure the SACK timer is off and instead send
			 * a SHUTDOWN and a SACK.
			 */
			if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
				sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
				    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
			}
			sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
			/*
			 * EY: if nr_sacks are used then send an nr-sack,
			 * a sack otherwise
			 */
			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
				sctp_send_nr_sack(stcb);
			else
				sctp_send_sack(stcb);
		} else {
			int is_a_gap;

			/* is there a gap now ? */
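			/*
			 * A gap exists when the highest TSN in the mapping
			 * array is beyond the cumulative ack point, i.e. at
			 * least one hole remains.
			 */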
			is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
			    stcb->asoc.cumulative_tsn, MAX_TSN);

			/*
			 * CMT DAC algorithm: increase number of packets
			 * received since last ack
			 */
			stcb->asoc.cmt_dac_pkts_rcvd++;

			if ((stcb->asoc.send_sack == 1) ||	/* We need to send a SACK */
			    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no longer is one */
			    (stcb->asoc.numduptsns) ||		/* we have dup's */
			    (is_a_gap) ||			/* is still a gap */
			    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
			    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
			    ) {

				if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off)) &&
				    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
				    (stcb->asoc.send_sack == 0) &&
				    (stcb->asoc.numduptsns == 0) &&
				    (stcb->asoc.delayed_ack) &&
				    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {

					/*
					 * CMT DAC algorithm: With CMT,
					 * delay acks even in the face of
					 * reordering. Therefore, acks that
					 * do not have to be sent because of
					 * the above reasons will be
					 * delayed. That is, acks that would
					 * have been sent due to gap reports
					 * will be delayed with DAC. Start
					 * the delayed ack timer.
					 */
					sctp_timer_start(SCTP_TIMER_TYPE_RECV,
					    stcb->sctp_ep, stcb, NULL);
				} else {
					/*
					 * Ok, we must build a SACK since
					 * the timer is pending, we got our
					 * first packet OR there are gaps or
					 * duplicates.
					 */
					(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
					/*
					 * EY: if nr_sacks are used then
					 * send an nr-sack, a sack otherwise
					 */
					if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
						sctp_send_nr_sack(stcb);
					else
						sctp_send_sack(stcb);
				}
			} else {
				if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
					sctp_timer_start(SCTP_TIMER_TYPE_RECV,
					    stcb->sctp_ep, stcb, NULL);
				}
			}
		}
	}
}

void
sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsize, pd_point;
	uint16_t nxt_todel;

	if (asoc->fragmented_delivery_inprogress) {
		sctp_service_reassembly(stcb, asoc);
	}
	/* Can we proceed further, i.e. is the PD-API complete? */
	if (asoc->fragmented_delivery_inprogress) {
		/* no */
		return;
	}
	/*
	 * Now is there some other chunk I can deliver from the reassembly
	 * queue?
	 */
doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
	    ((nxt_todel == chk->rec.data.stream_seq) ||
	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
		/*
		 * Yep, the first one is here. We set up to start reception
		 * by backing down the TSN just in case we can't deliver.
		 */

		/*
		 * Before we start though, either all of the message should
		 * be here, or the socket buffer max should be reached, or
		 * nothing should be on the delivery queue and something can
		 * be delivered.
		 */
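		/*
		 * pd_point is the threshold for engaging the partial
		 * delivery API: the smaller of the socket's receive buffer
		 * limit and the endpoint's configured
		 * partial_delivery_point.
		 */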
2879 */ 2880 if (stcb->sctp_socket) { 2881 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), 2882 stcb->sctp_ep->partial_delivery_point); 2883 } else { 2884 pd_point = stcb->sctp_ep->partial_delivery_point; 2885 } 2886 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) { 2887 asoc->fragmented_delivery_inprogress = 1; 2888 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1; 2889 asoc->str_of_pdapi = chk->rec.data.stream_number; 2890 asoc->ssn_of_pdapi = chk->rec.data.stream_seq; 2891 asoc->pdapi_ppid = chk->rec.data.payloadtype; 2892 asoc->fragment_flags = chk->rec.data.rcv_flags; 2893 sctp_service_reassembly(stcb, asoc); 2894 if (asoc->fragmented_delivery_inprogress == 0) { 2895 goto doit_again; 2896 } 2897 } 2898 } 2899 } 2900 2901 int 2902 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, 2903 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2904 struct sctp_nets *net, uint32_t * high_tsn) 2905 { 2906 struct sctp_data_chunk *ch, chunk_buf; 2907 struct sctp_association *asoc; 2908 int num_chunks = 0; /* number of control chunks processed */ 2909 int stop_proc = 0; 2910 int chk_length, break_flag, last_chunk; 2911 int abort_flag = 0, was_a_gap = 0; 2912 struct mbuf *m; 2913 2914 /* set the rwnd */ 2915 sctp_set_rwnd(stcb, &stcb->asoc); 2916 2917 m = *mm; 2918 SCTP_TCB_LOCK_ASSERT(stcb); 2919 asoc = &stcb->asoc; 2920 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map, 2921 stcb->asoc.cumulative_tsn, MAX_TSN)) { 2922 /* there was a gap before this data was processed */ 2923 was_a_gap = 1; 2924 } 2925 /* 2926 * setup where we got the last DATA packet from for any SACK that 2927 * may need to go out. Don't bump the net. This is done ONLY when a 2928 * chunk is assigned. 2929 */ 2930 asoc->last_data_chunk_from = net; 2931 2932 /*- 2933 * Now before we proceed we must figure out if this is a wasted 2934 * cluster... i.e. it is a small packet sent in and yet the driver 2935 * underneath allocated a full cluster for it. If so we must copy it 2936 * to a smaller mbuf and free up the cluster mbuf. This will help 2937 * with cluster starvation. Note for __Panda__ we don't do this 2938 * since it has clusters all the way down to 64 bytes. 2939 */ 2940 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) { 2941 /* we only handle mbufs that are singletons.. not chains */ 2942 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA); 2943 if (m) { 2944 /* ok lets see if we can copy the data up */ 2945 caddr_t *from, *to; 2946 2947 /* get the pointers and copy */ 2948 to = mtod(m, caddr_t *); 2949 from = mtod((*mm), caddr_t *); 2950 memcpy(to, from, SCTP_BUF_LEN((*mm))); 2951 /* copy the length and free up the old */ 2952 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm)); 2953 sctp_m_freem(*mm); 2954 /* sucess, back copy */ 2955 *mm = m; 2956 } else { 2957 /* We are in trouble in the mbuf world .. yikes */ 2958 m = *mm; 2959 } 2960 } 2961 /* get pointer to the first chunk header */ 2962 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset, 2963 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf); 2964 if (ch == NULL) { 2965 return (1); 2966 } 2967 /* 2968 * process all DATA chunks... 
2969 */ 2970 *high_tsn = asoc->cumulative_tsn; 2971 break_flag = 0; 2972 asoc->data_pkts_seen++; 2973 while (stop_proc == 0) { 2974 /* validate chunk length */ 2975 chk_length = ntohs(ch->ch.chunk_length); 2976 if (length - *offset < chk_length) { 2977 /* all done, mutulated chunk */ 2978 stop_proc = 1; 2979 break; 2980 } 2981 if (ch->ch.chunk_type == SCTP_DATA) { 2982 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) { 2983 /* 2984 * Need to send an abort since we had a 2985 * invalid data chunk. 2986 */ 2987 struct mbuf *op_err; 2988 2989 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)), 2990 0, M_DONTWAIT, 1, MT_DATA); 2991 2992 if (op_err) { 2993 struct sctp_paramhdr *ph; 2994 uint32_t *ippp; 2995 2996 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) + 2997 (2 * sizeof(uint32_t)); 2998 ph = mtod(op_err, struct sctp_paramhdr *); 2999 ph->param_type = 3000 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 3001 ph->param_length = htons(SCTP_BUF_LEN(op_err)); 3002 ippp = (uint32_t *) (ph + 1); 3003 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19); 3004 ippp++; 3005 *ippp = asoc->cumulative_tsn; 3006 3007 } 3008 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19; 3009 sctp_abort_association(inp, stcb, m, iphlen, sh, 3010 op_err, 0, net->port); 3011 return (2); 3012 } 3013 #ifdef SCTP_AUDITING_ENABLED 3014 sctp_audit_log(0xB1, 0); 3015 #endif 3016 if (SCTP_SIZE32(chk_length) == (length - *offset)) { 3017 last_chunk = 1; 3018 } else { 3019 last_chunk = 0; 3020 } 3021 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch, 3022 chk_length, net, high_tsn, &abort_flag, &break_flag, 3023 last_chunk)) { 3024 num_chunks++; 3025 } 3026 if (abort_flag) 3027 return (2); 3028 3029 if (break_flag) { 3030 /* 3031 * Set because of out of rwnd space and no 3032 * drop rep space left. 3033 */ 3034 stop_proc = 1; 3035 break; 3036 } 3037 } else { 3038 /* not a data chunk in the data region */ 3039 switch (ch->ch.chunk_type) { 3040 case SCTP_INITIATION: 3041 case SCTP_INITIATION_ACK: 3042 case SCTP_SELECTIVE_ACK: 3043 case SCTP_NR_SELECTIVE_ACK: /* EY */ 3044 case SCTP_HEARTBEAT_REQUEST: 3045 case SCTP_HEARTBEAT_ACK: 3046 case SCTP_ABORT_ASSOCIATION: 3047 case SCTP_SHUTDOWN: 3048 case SCTP_SHUTDOWN_ACK: 3049 case SCTP_OPERATION_ERROR: 3050 case SCTP_COOKIE_ECHO: 3051 case SCTP_COOKIE_ACK: 3052 case SCTP_ECN_ECHO: 3053 case SCTP_ECN_CWR: 3054 case SCTP_SHUTDOWN_COMPLETE: 3055 case SCTP_AUTHENTICATION: 3056 case SCTP_ASCONF_ACK: 3057 case SCTP_PACKET_DROPPED: 3058 case SCTP_STREAM_RESET: 3059 case SCTP_FORWARD_CUM_TSN: 3060 case SCTP_ASCONF: 3061 /* 3062 * Now, what do we do with KNOWN chunks that 3063 * are NOT in the right place? 3064 * 3065 * For now, I do nothing but ignore them. We 3066 * may later want to add sysctl stuff to 3067 * switch out and do either an ABORT() or 3068 * possibly process them. 
3069 */ 3070 if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) { 3071 struct mbuf *op_err; 3072 3073 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION); 3074 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port); 3075 return (2); 3076 } 3077 break; 3078 default: 3079 /* unknown chunk type, use bit rules */ 3080 if (ch->ch.chunk_type & 0x40) { 3081 /* Add a error report to the queue */ 3082 struct mbuf *merr; 3083 struct sctp_paramhdr *phd; 3084 3085 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA); 3086 if (merr) { 3087 phd = mtod(merr, struct sctp_paramhdr *); 3088 /* 3089 * We cheat and use param 3090 * type since we did not 3091 * bother to define a error 3092 * cause struct. They are 3093 * the same basic format 3094 * with different names. 3095 */ 3096 phd->param_type = 3097 htons(SCTP_CAUSE_UNRECOG_CHUNK); 3098 phd->param_length = 3099 htons(chk_length + sizeof(*phd)); 3100 SCTP_BUF_LEN(merr) = sizeof(*phd); 3101 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, 3102 SCTP_SIZE32(chk_length), 3103 M_DONTWAIT); 3104 if (SCTP_BUF_NEXT(merr)) { 3105 sctp_queue_op_err(stcb, merr); 3106 } else { 3107 sctp_m_freem(merr); 3108 } 3109 } 3110 } 3111 if ((ch->ch.chunk_type & 0x80) == 0) { 3112 /* discard the rest of this packet */ 3113 stop_proc = 1; 3114 } /* else skip this bad chunk and 3115 * continue... */ 3116 break; 3117 }; /* switch of chunk type */ 3118 } 3119 *offset += SCTP_SIZE32(chk_length); 3120 if ((*offset >= length) || stop_proc) { 3121 /* no more data left in the mbuf chain */ 3122 stop_proc = 1; 3123 continue; 3124 } 3125 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset, 3126 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf); 3127 if (ch == NULL) { 3128 *offset = length; 3129 stop_proc = 1; 3130 break; 3131 3132 } 3133 } /* while */ 3134 if (break_flag) { 3135 /* 3136 * we need to report rwnd overrun drops. 3137 */ 3138 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0); 3139 } 3140 if (num_chunks) { 3141 /* 3142 * Did we get data, if so update the time for auto-close and 3143 * give peer credit for being alive. 
3144 */ 3145 SCTP_STAT_INCR(sctps_recvpktwithdata); 3146 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 3147 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 3148 stcb->asoc.overall_error_count, 3149 0, 3150 SCTP_FROM_SCTP_INDATA, 3151 __LINE__); 3152 } 3153 stcb->asoc.overall_error_count = 0; 3154 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd); 3155 } 3156 /* now service all of the reassm queue if needed */ 3157 if (!(TAILQ_EMPTY(&asoc->reasmqueue))) 3158 sctp_service_queues(stcb, asoc); 3159 3160 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 3161 /* Assure that we ack right away */ 3162 stcb->asoc.send_sack = 1; 3163 } 3164 /* Start a sack timer or QUEUE a SACK for sending */ 3165 if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) && 3166 (stcb->asoc.mapping_array[0] != 0xff)) { 3167 if ((stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) || 3168 (stcb->asoc.delayed_ack == 0) || 3169 (stcb->asoc.numduptsns) || 3170 (stcb->asoc.send_sack == 1)) { 3171 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 3172 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 3173 } 3174 /* 3175 * EY if nr_sacks used then send an nr-sack , a sack 3176 * otherwise 3177 */ 3178 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack) 3179 sctp_send_nr_sack(stcb); 3180 else 3181 sctp_send_sack(stcb); 3182 } else { 3183 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 3184 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 3185 stcb->sctp_ep, stcb, NULL); 3186 } 3187 } 3188 } else { 3189 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag); 3190 } 3191 if (abort_flag) 3192 return (2); 3193 3194 return (0); 3195 } 3196 3197 static int 3198 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn, 3199 uint16_t frag_strt, uint16_t frag_end, int nr_sacking, 3200 int *num_frs, 3201 uint32_t * biggest_newly_acked_tsn, 3202 uint32_t * this_sack_lowest_newack, 3203 int *ecn_seg_sums) 3204 { 3205 struct sctp_tmit_chunk *tp1; 3206 unsigned int theTSN; 3207 int j, wake_him = 0; 3208 3209 /* Recover the tp1 we last saw */ 3210 tp1 = *p_tp1; 3211 if (tp1 == NULL) { 3212 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 3213 } 3214 for (j = frag_strt; j <= frag_end; j++) { 3215 theTSN = j + last_tsn; 3216 while (tp1) { 3217 if (tp1->rec.data.doing_fast_retransmit) 3218 (*num_frs) += 1; 3219 3220 /*- 3221 * CMT: CUCv2 algorithm. For each TSN being 3222 * processed from the sent queue, track the 3223 * next expected pseudo-cumack, or 3224 * rtx_pseudo_cumack, if required. Separate 3225 * cumack trackers for first transmissions, 3226 * and retransmissions. 3227 */ 3228 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) && 3229 (tp1->snd_count == 1)) { 3230 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq; 3231 tp1->whoTo->find_pseudo_cumack = 0; 3232 } 3233 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) && 3234 (tp1->snd_count > 1)) { 3235 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq; 3236 tp1->whoTo->find_rtx_pseudo_cumack = 0; 3237 } 3238 if (tp1->rec.data.TSN_seq == theTSN) { 3239 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 3240 /*- 3241 * must be held until 3242 * cum-ack passes 3243 */ 3244 /*- 3245 * ECN Nonce: Add the nonce 3246 * value to the sender's 3247 * nonce sum 3248 */ 3249 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3250 /*- 3251 * If it is less than RESEND, it is 3252 * now no-longer in flight. 
						 * Higher values may already be set
						 * via previous Gap Ack Blocks...
						 * i.e. ACKED or RESEND.
						 */
						if (compare_with_wrap(tp1->rec.data.TSN_seq,
						    *biggest_newly_acked_tsn, MAX_TSN)) {
							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
						}
						/*-
						 * CMT: SFR algo (and HTNA) - set
						 * saw_newack to 1 for dest being
						 * newly acked. update
						 * this_sack_highest_newack if
						 * appropriate.
						 */
						if (tp1->rec.data.chunk_was_revoked == 0)
							tp1->whoTo->saw_newack = 1;

						if (compare_with_wrap(tp1->rec.data.TSN_seq,
						    tp1->whoTo->this_sack_highest_newack,
						    MAX_TSN)) {
							tp1->whoTo->this_sack_highest_newack =
							    tp1->rec.data.TSN_seq;
						}
						/*-
						 * CMT DAC algo: also update
						 * this_sack_lowest_newack
						 */
						if (*this_sack_lowest_newack == 0) {
							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
								sctp_log_sack(*this_sack_lowest_newack,
								    last_tsn,
								    tp1->rec.data.TSN_seq,
								    0,
								    0,
								    SCTP_LOG_TSN_ACKED);
							}
							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
						}
						/*-
						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
						 * Separate pseudo_cumack trackers for first transmissions and
						 * retransmissions.
						 */
						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
							if (tp1->rec.data.chunk_was_revoked == 0) {
								tp1->whoTo->new_pseudo_cumack = 1;
							}
							tp1->whoTo->find_pseudo_cumack = 1;
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
						}
						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
							if (tp1->rec.data.chunk_was_revoked == 0) {
								tp1->whoTo->new_pseudo_cumack = 1;
							}
							tp1->whoTo->find_rtx_pseudo_cumack = 1;
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
							sctp_log_sack(*biggest_newly_acked_tsn,
							    last_tsn,
							    tp1->rec.data.TSN_seq,
							    frag_strt,
							    frag_end,
							    SCTP_LOG_TSN_ACKED);
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
							    tp1->whoTo->flight_size,
							    tp1->book_size,
							    (uintptr_t) tp1->whoTo,
							    tp1->rec.data.TSN_seq);
						}
						sctp_flight_size_decrease(tp1);
						sctp_total_flight_decrease(stcb, tp1);

						tp1->whoTo->net_ack += tp1->send_size;
						if (tp1->snd_count < 2) {
							/*-
							 * True non-retransmitted chunk
							 */
							tp1->whoTo->net_ack2 += tp1->send_size;

							/*-
							 * update RTO too?
							 */
3342 */ 3343 if (tp1->do_rtt) { 3344 tp1->whoTo->RTO = 3345 sctp_calculate_rto(stcb, 3346 &stcb->asoc, 3347 tp1->whoTo, 3348 &tp1->sent_rcv_time, 3349 sctp_align_safe_nocopy); 3350 tp1->do_rtt = 0; 3351 } 3352 } 3353 } 3354 if (tp1->sent <= SCTP_DATAGRAM_RESEND) { 3355 (*ecn_seg_sums) += tp1->rec.data.ect_nonce; 3356 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM; 3357 if (compare_with_wrap(tp1->rec.data.TSN_seq, 3358 stcb->asoc.this_sack_highest_gap, 3359 MAX_TSN)) { 3360 stcb->asoc.this_sack_highest_gap = 3361 tp1->rec.data.TSN_seq; 3362 } 3363 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3364 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt); 3365 #ifdef SCTP_AUDITING_ENABLED 3366 sctp_audit_log(0xB2, 3367 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff)); 3368 #endif 3369 } 3370 } 3371 /*- 3372 * All chunks NOT UNSENT fall through here and are marked 3373 * (leave PR-SCTP ones that are to skip alone though) 3374 */ 3375 if (tp1->sent != SCTP_FORWARD_TSN_SKIP) 3376 tp1->sent = SCTP_DATAGRAM_MARKED; 3377 3378 if (tp1->rec.data.chunk_was_revoked) { 3379 /* deflate the cwnd */ 3380 tp1->whoTo->cwnd -= tp1->book_size; 3381 tp1->rec.data.chunk_was_revoked = 0; 3382 } 3383 /* NR Sack code here */ 3384 if (nr_sacking) { 3385 if (tp1->sent != SCTP_FORWARD_TSN_SKIP) 3386 tp1->sent = SCTP_DATAGRAM_NR_MARKED; 3387 /* 3388 * TAILQ_REMOVE(&asoc->sent_q 3389 * ueue, tp1, sctp_next); 3390 */ 3391 if (tp1->data) { 3392 /* 3393 * sa_ignore 3394 * NO_NULL_CHK 3395 */ 3396 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 3397 sctp_m_freem(tp1->data); 3398 } 3399 tp1->data = NULL; 3400 /* asoc->sent_queue_cnt--; */ 3401 /* 3402 * sctp_free_a_chunk(stcb, 3403 * tp1); 3404 */ 3405 wake_him++; 3406 } 3407 } 3408 break; 3409 } /* if (tp1->TSN_seq == theTSN) */ 3410 if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN, 3411 MAX_TSN)) 3412 break; 3413 3414 tp1 = TAILQ_NEXT(tp1, sctp_next); 3415 } /* end while (tp1) */ 3416 /* In case the fragments were not in order we must reset */ 3417 if (tp1 == NULL) { 3418 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 3419 } 3420 } /* end for (j = fragStart */ 3421 *p_tp1 = tp1; 3422 return (wake_him); /* Return value only used for nr-sack */ 3423 } 3424 3425 3426 static int 3427 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc, 3428 uint32_t last_tsn, uint32_t * biggest_tsn_acked, 3429 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack, 3430 int num_seg, int num_nr_seg, int *ecn_seg_sums) 3431 { 3432 struct sctp_gap_ack_block *frag, block; 3433 struct sctp_tmit_chunk *tp1; 3434 int i; 3435 int num_frs = 0; 3436 int chunk_freed; 3437 int non_revocable; 3438 uint16_t frag_strt, frag_end; 3439 uint32_t last_frag_high; 3440 3441 tp1 = NULL; 3442 last_frag_high = 0; 3443 chunk_freed = 0; 3444 3445 for (i = 0; i < (num_seg + num_nr_seg); i++) { 3446 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset, 3447 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block); 3448 *offset += sizeof(block); 3449 if (frag == NULL) { 3450 return (chunk_freed); 3451 } 3452 frag_strt = ntohs(frag->start); 3453 frag_end = ntohs(frag->end); 3454 /* some sanity checks on the fragment offsets */ 3455 if (frag_strt > frag_end) { 3456 /* this one is malformed, skip */ 3457 continue; 3458 } 3459 if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked, 3460 MAX_TSN)) 3461 *biggest_tsn_acked = frag_end + last_tsn; 3462 3463 /* mark acked dgs and find out the highestTSN being acked */ 3464 if (tp1 == NULL) { 3465 tp1 = 

static int
sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
    uint32_t last_tsn, uint32_t * biggest_tsn_acked,
    uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
    int num_seg, int num_nr_seg, int *ecn_seg_sums)
{
	struct sctp_gap_ack_block *frag, block;
	struct sctp_tmit_chunk *tp1;
	int i;
	int num_frs = 0;
	int chunk_freed;
	int non_revocable;
	uint16_t frag_strt, frag_end;
	uint32_t last_frag_high;

	tp1 = NULL;
	last_frag_high = 0;
	chunk_freed = 0;

	for (i = 0; i < (num_seg + num_nr_seg); i++) {
		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
		*offset += sizeof(block);
		if (frag == NULL) {
			return (chunk_freed);
		}
		frag_strt = ntohs(frag->start);
		frag_end = ntohs(frag->end);
		/* some sanity checks on the fragment offsets */
		if (frag_strt > frag_end) {
			/* this one is malformed, skip */
			continue;
		}
		if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
		    MAX_TSN))
			*biggest_tsn_acked = frag_end + last_tsn;

		/* mark acked dgs and find out the highestTSN being acked */
		if (tp1 == NULL) {
			tp1 = TAILQ_FIRST(&asoc->sent_queue);
			/* save the locations of the last frags */
			last_frag_high = frag_end + last_tsn;
		} else {
			/*
			 * now let's see if we need to reset the queue due to
			 * an out-of-order SACK fragment
			 */
			if (compare_with_wrap(frag_strt + last_tsn,
			    last_frag_high, MAX_TSN)) {
				/*
				 * if the new frag starts after the last TSN
				 * frag covered, we are ok and this one is
				 * beyond the last one
				 */
				;
			} else {
				/*
				 * ok, they have reset us, so we need to
				 * reset the queue; this will cause extra
				 * hunting but hey, they chose the
				 * performance hit when they failed to order
				 * their gaps
				 */
				tp1 = TAILQ_FIRST(&asoc->sent_queue);
			}
			last_frag_high = frag_end + last_tsn;
		}
		if (i < num_seg) {
			non_revocable = 0;
		} else {
			non_revocable = 1;
		}
		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
		    non_revocable, &num_frs, biggest_newly_acked_tsn,
		    this_sack_lowest_newack, ecn_seg_sums)) {
			chunk_freed = 1;
		}
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
		if (num_frs)
			sctp_log_fr(*biggest_tsn_acked,
			    *biggest_newly_acked_tsn,
			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
	}
	return (chunk_freed);
}

static void
sctp_check_for_revoked(struct sctp_tcb *stcb,
    struct sctp_association *asoc, uint32_t cumack,
    uint32_t biggest_tsn_acked)
{
	struct sctp_tmit_chunk *tp1;
	int tot_revoked = 0;

	tp1 = TAILQ_FIRST(&asoc->sent_queue);
	while (tp1) {
		if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
		    MAX_TSN)) {
			/*
			 * ok this guy is either ACK or MARKED. If it is
			 * ACKED it has been previously acked but not this
			 * time i.e. revoked. If it is MARKED it was ACK'ed
			 * again.
			 */
			if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
			    MAX_TSN))
				break;


			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
				/* it has been revoked */
				tp1->sent = SCTP_DATAGRAM_SENT;
				tp1->rec.data.chunk_was_revoked = 1;
				/*
				 * We must add this stuff back in to assure
				 * timers and such get started.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
					    tp1->whoTo->flight_size,
					    tp1->book_size,
					    (uintptr_t) tp1->whoTo,
					    tp1->rec.data.TSN_seq);
				}
				sctp_flight_size_increase(tp1);
				sctp_total_flight_increase(stcb, tp1);
				/*
				 * We inflate the cwnd to compensate for our
				 * artificial inflation of the flight_size.
				 */
				tp1->whoTo->cwnd += tp1->book_size;
				tot_revoked++;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
					sctp_log_sack(asoc->last_acked_seq,
					    cumack,
					    tp1->rec.data.TSN_seq,
					    0,
					    0,
					    SCTP_LOG_TSN_REVOKED);
				}
			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
				/* it has been re-acked in this SACK */
				tp1->sent = SCTP_DATAGRAM_ACKED;
			}
		}
		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
			break;
		tp1 = TAILQ_NEXT(tp1, sctp_next);
	}
	if (tot_revoked > 0) {
		/*
		 * Setup the ecn nonce re-sync point. We do this since once
		 * data is revoked we begin to retransmit things, which do
		 * NOT have the ECN bits set. This means we are now out of
		 * sync and must wait until we get back in sync with the
		 * peer to check ECN bits.
		 */
3583 */ 3584 tp1 = TAILQ_FIRST(&asoc->send_queue); 3585 if (tp1 == NULL) { 3586 asoc->nonce_resync_tsn = asoc->sending_seq; 3587 } else { 3588 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq; 3589 } 3590 asoc->nonce_wait_for_ecne = 0; 3591 asoc->nonce_sum_check = 0; 3592 } 3593 } 3594 3595 3596 static void 3597 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, 3598 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved) 3599 { 3600 struct sctp_tmit_chunk *tp1; 3601 int strike_flag = 0; 3602 struct timeval now; 3603 int tot_retrans = 0; 3604 uint32_t sending_seq; 3605 struct sctp_nets *net; 3606 int num_dests_sacked = 0; 3607 3608 /* 3609 * select the sending_seq, this is either the next thing ready to be 3610 * sent but not transmitted, OR, the next seq we assign. 3611 */ 3612 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 3613 if (tp1 == NULL) { 3614 sending_seq = asoc->sending_seq; 3615 } else { 3616 sending_seq = tp1->rec.data.TSN_seq; 3617 } 3618 3619 /* CMT DAC algo: finding out if SACK is a mixed SACK */ 3620 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3621 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3622 if (net->saw_newack) 3623 num_dests_sacked++; 3624 } 3625 } 3626 if (stcb->asoc.peer_supports_prsctp) { 3627 (void)SCTP_GETTIME_TIMEVAL(&now); 3628 } 3629 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3630 while (tp1) { 3631 strike_flag = 0; 3632 if (tp1->no_fr_allowed) { 3633 /* this one had a timeout or something */ 3634 tp1 = TAILQ_NEXT(tp1, sctp_next); 3635 continue; 3636 } 3637 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3638 if (tp1->sent < SCTP_DATAGRAM_RESEND) 3639 sctp_log_fr(biggest_tsn_newly_acked, 3640 tp1->rec.data.TSN_seq, 3641 tp1->sent, 3642 SCTP_FR_LOG_CHECK_STRIKE); 3643 } 3644 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked, 3645 MAX_TSN) || 3646 tp1->sent == SCTP_DATAGRAM_UNSENT) { 3647 /* done */ 3648 break; 3649 } 3650 if (stcb->asoc.peer_supports_prsctp) { 3651 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3652 /* Is it expired? */ 3653 if ( 3654 /* 3655 * TODO sctp_constants.h needs alternative 3656 * time macros when _KERNEL is undefined. 3657 */ 3658 (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) 3659 ) { 3660 /* Yes so drop it */ 3661 if (tp1->data != NULL) { 3662 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 3663 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3664 SCTP_SO_NOT_LOCKED); 3665 } 3666 tp1 = TAILQ_NEXT(tp1, sctp_next); 3667 continue; 3668 } 3669 } 3670 } 3671 if (compare_with_wrap(tp1->rec.data.TSN_seq, 3672 asoc->this_sack_highest_gap, MAX_TSN)) { 3673 /* we are beyond the tsn in the sack */ 3674 break; 3675 } 3676 if (tp1->sent >= SCTP_DATAGRAM_RESEND) { 3677 /* either a RESEND, ACKED, or MARKED */ 3678 /* skip */ 3679 tp1 = TAILQ_NEXT(tp1, sctp_next); 3680 continue; 3681 } 3682 /* 3683 * CMT : SFR algo (covers part of DAC and HTNA as well) 3684 */ 3685 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) { 3686 /* 3687 * No new acks were receieved for data sent to this 3688 * dest. Therefore, according to the SFR algo for 3689 * CMT, no data sent to this dest can be marked for 3690 * FR using this SACK. 
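 *
 * (saw_newack is the per-destination gate set during the cum-ack and
 * gap-ack walks whenever this SACK newly acked a chunk sent to that
 * address. A reduced sketch of the two SFR gates applied to each
 * candidate chunk, using the fields tested here:
 *
 *	if (net->saw_newack == 0)                          skip chunk
 *	else if (tsn "serially >" net->this_sack_highest_newack)
 *	                                                   skip chunk
 *	else                                               may strike
 *
 * where "serially >" is the wrap-aware compare_with_wrap() test.)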
3691 */ 3692 tp1 = TAILQ_NEXT(tp1, sctp_next); 3693 continue; 3694 } else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq, 3695 tp1->whoTo->this_sack_highest_newack, MAX_TSN)) { 3696 /* 3697 * CMT: New acks were receieved for data sent to 3698 * this dest. But no new acks were seen for data 3699 * sent after tp1. Therefore, according to the SFR 3700 * algo for CMT, tp1 cannot be marked for FR using 3701 * this SACK. This step covers part of the DAC algo 3702 * and the HTNA algo as well. 3703 */ 3704 tp1 = TAILQ_NEXT(tp1, sctp_next); 3705 continue; 3706 } 3707 /* 3708 * Here we check to see if we were have already done a FR 3709 * and if so we see if the biggest TSN we saw in the sack is 3710 * smaller than the recovery point. If so we don't strike 3711 * the tsn... otherwise we CAN strike the TSN. 3712 */ 3713 /* 3714 * @@@ JRI: Check for CMT if (accum_moved && 3715 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 3716 * 0)) { 3717 */ 3718 if (accum_moved && asoc->fast_retran_loss_recovery) { 3719 /* 3720 * Strike the TSN if in fast-recovery and cum-ack 3721 * moved. 3722 */ 3723 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3724 sctp_log_fr(biggest_tsn_newly_acked, 3725 tp1->rec.data.TSN_seq, 3726 tp1->sent, 3727 SCTP_FR_LOG_STRIKE_CHUNK); 3728 } 3729 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3730 tp1->sent++; 3731 } 3732 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3733 /* 3734 * CMT DAC algorithm: If SACK flag is set to 3735 * 0, then lowest_newack test will not pass 3736 * because it would have been set to the 3737 * cumack earlier. If not already to be 3738 * rtx'd, If not a mixed sack and if tp1 is 3739 * not between two sacked TSNs, then mark by 3740 * one more. NOTE that we are marking by one 3741 * additional time since the SACK DAC flag 3742 * indicates that two packets have been 3743 * received after this missing TSN. 3744 */ 3745 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3746 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) { 3747 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3748 sctp_log_fr(16 + num_dests_sacked, 3749 tp1->rec.data.TSN_seq, 3750 tp1->sent, 3751 SCTP_FR_LOG_STRIKE_CHUNK); 3752 } 3753 tp1->sent++; 3754 } 3755 } 3756 } else if ((tp1->rec.data.doing_fast_retransmit) && (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) { 3757 /* 3758 * For those that have done a FR we must take 3759 * special consideration if we strike. I.e the 3760 * biggest_newly_acked must be higher than the 3761 * sending_seq at the time we did the FR. 3762 */ 3763 if ( 3764 #ifdef SCTP_FR_TO_ALTERNATE 3765 /* 3766 * If FR's go to new networks, then we must only do 3767 * this for singly homed asoc's. However if the FR's 3768 * go to the same network (Armando's work) then its 3769 * ok to FR multiple times. 3770 */ 3771 (asoc->numnets < 2) 3772 #else 3773 (1) 3774 #endif 3775 ) { 3776 3777 if ((compare_with_wrap(biggest_tsn_newly_acked, 3778 tp1->rec.data.fast_retran_tsn, MAX_TSN)) || 3779 (biggest_tsn_newly_acked == 3780 tp1->rec.data.fast_retran_tsn)) { 3781 /* 3782 * Strike the TSN, since this ack is 3783 * beyond where things were when we 3784 * did a FR. 
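 *
 * Put differently: a chunk that already caused one FR may only be
 * struck again once the peer newly acks at or beyond the
 * sending-sequence snapshot taken when that FR fired, i.e. the test
 * evaluated just below:
 *
 *	biggest_tsn_newly_acked "serially >=" tp1->rec.data.fast_retran_tsn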
3785 */ 3786 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3787 sctp_log_fr(biggest_tsn_newly_acked, 3788 tp1->rec.data.TSN_seq, 3789 tp1->sent, 3790 SCTP_FR_LOG_STRIKE_CHUNK); 3791 } 3792 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3793 tp1->sent++; 3794 } 3795 strike_flag = 1; 3796 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3797 /* 3798 * CMT DAC algorithm: If 3799 * SACK flag is set to 0, 3800 * then lowest_newack test 3801 * will not pass because it 3802 * would have been set to 3803 * the cumack earlier. If 3804 * not already to be rtx'd, 3805 * If not a mixed sack and 3806 * if tp1 is not between two 3807 * sacked TSNs, then mark by 3808 * one more. NOTE that we 3809 * are marking by one 3810 * additional time since the 3811 * SACK DAC flag indicates 3812 * that two packets have 3813 * been received after this 3814 * missing TSN. 3815 */ 3816 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 3817 (num_dests_sacked == 1) && 3818 compare_with_wrap(this_sack_lowest_newack, 3819 tp1->rec.data.TSN_seq, MAX_TSN)) { 3820 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3821 sctp_log_fr(32 + num_dests_sacked, 3822 tp1->rec.data.TSN_seq, 3823 tp1->sent, 3824 SCTP_FR_LOG_STRIKE_CHUNK); 3825 } 3826 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3827 tp1->sent++; 3828 } 3829 } 3830 } 3831 } 3832 } 3833 /* 3834 * JRI: TODO: remove code for HTNA algo. CMT's SFR 3835 * algo covers HTNA. 3836 */ 3837 } else if (compare_with_wrap(tp1->rec.data.TSN_seq, 3838 biggest_tsn_newly_acked, MAX_TSN)) { 3839 /* 3840 * We don't strike these: This is the HTNA 3841 * algorithm i.e. we don't strike If our TSN is 3842 * larger than the Highest TSN Newly Acked. 3843 */ 3844 ; 3845 } else { 3846 /* Strike the TSN */ 3847 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3848 sctp_log_fr(biggest_tsn_newly_acked, 3849 tp1->rec.data.TSN_seq, 3850 tp1->sent, 3851 SCTP_FR_LOG_STRIKE_CHUNK); 3852 } 3853 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3854 tp1->sent++; 3855 } 3856 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3857 /* 3858 * CMT DAC algorithm: If SACK flag is set to 3859 * 0, then lowest_newack test will not pass 3860 * because it would have been set to the 3861 * cumack earlier. If not already to be 3862 * rtx'd, If not a mixed sack and if tp1 is 3863 * not between two sacked TSNs, then mark by 3864 * one more. NOTE that we are marking by one 3865 * additional time since the SACK DAC flag 3866 * indicates that two packets have been 3867 * received after this missing TSN. 3868 */ 3869 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3870 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) { 3871 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3872 sctp_log_fr(48 + num_dests_sacked, 3873 tp1->rec.data.TSN_seq, 3874 tp1->sent, 3875 SCTP_FR_LOG_STRIKE_CHUNK); 3876 } 3877 tp1->sent++; 3878 } 3879 } 3880 } 3881 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3882 struct sctp_nets *alt; 3883 3884 /* fix counts and things */ 3885 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3886 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND, 3887 (tp1->whoTo ? 
(tp1->whoTo->flight_size) : 0), 3888 tp1->book_size, 3889 (uintptr_t) tp1->whoTo, 3890 tp1->rec.data.TSN_seq); 3891 } 3892 if (tp1->whoTo) { 3893 tp1->whoTo->net_ack++; 3894 sctp_flight_size_decrease(tp1); 3895 } 3896 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 3897 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, 3898 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3899 } 3900 /* add back to the rwnd */ 3901 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3902 3903 /* remove from the total flight */ 3904 sctp_total_flight_decrease(stcb, tp1); 3905 3906 if ((stcb->asoc.peer_supports_prsctp) && 3907 (PR_SCTP_RTX_ENABLED(tp1->flags))) { 3908 /* 3909 * Has it been retransmitted tv_sec times? - 3910 * we store the retran count there. 3911 */ 3912 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) { 3913 /* Yes, so drop it */ 3914 if (tp1->data != NULL) { 3915 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 3916 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3917 SCTP_SO_NOT_LOCKED); 3918 } 3919 /* Make sure to flag we had a FR */ 3920 tp1->whoTo->net_ack++; 3921 tp1 = TAILQ_NEXT(tp1, sctp_next); 3922 continue; 3923 } 3924 } 3925 /* printf("OK, we are now ready to FR this guy\n"); */ 3926 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3927 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count, 3928 0, SCTP_FR_MARKED); 3929 } 3930 if (strike_flag) { 3931 /* This is a subsequent FR */ 3932 SCTP_STAT_INCR(sctps_sendmultfastretrans); 3933 } 3934 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3935 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) { 3936 /* 3937 * CMT: Using RTX_SSTHRESH policy for CMT. 3938 * If CMT is being used, then pick dest with 3939 * largest ssthresh for any retransmission. 3940 */ 3941 tp1->no_fr_allowed = 1; 3942 alt = tp1->whoTo; 3943 /* sa_ignore NO_NULL_CHK */ 3944 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) { 3945 /* 3946 * JRS 5/18/07 - If CMT PF is on, 3947 * use the PF version of 3948 * find_alt_net() 3949 */ 3950 alt = sctp_find_alternate_net(stcb, alt, 2); 3951 } else { 3952 /* 3953 * JRS 5/18/07 - If only CMT is on, 3954 * use the CMT version of 3955 * find_alt_net() 3956 */ 3957 /* sa_ignore NO_NULL_CHK */ 3958 alt = sctp_find_alternate_net(stcb, alt, 1); 3959 } 3960 if (alt == NULL) { 3961 alt = tp1->whoTo; 3962 } 3963 /* 3964 * CUCv2: If a different dest is picked for 3965 * the retransmission, then new 3966 * (rtx-)pseudo_cumack needs to be tracked 3967 * for orig dest. Let CUCv2 track new (rtx-) 3968 * pseudo-cumack always. 3969 */ 3970 if (tp1->whoTo) { 3971 tp1->whoTo->find_pseudo_cumack = 1; 3972 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3973 } 3974 } else {/* CMT is OFF */ 3975 3976 #ifdef SCTP_FR_TO_ALTERNATE 3977 /* Can we find an alternate? */ 3978 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); 3979 #else 3980 /* 3981 * default behavior is to NOT retransmit 3982 * FR's to an alternate. Armando Caro's 3983 * paper details why. 3984 */ 3985 alt = tp1->whoTo; 3986 #endif 3987 } 3988 3989 tp1->rec.data.doing_fast_retransmit = 1; 3990 tot_retrans++; 3991 /* mark the sending seq for possible subsequent FR's */ 3992 /* 3993 * printf("Marking TSN for FR new value %x\n", 3994 * (uint32_t)tpi->rec.data.TSN_seq); 3995 */ 3996 if (TAILQ_EMPTY(&asoc->send_queue)) { 3997 /* 3998 * If the queue of send is empty then its 3999 * the next sequence number that will be 4000 * assigned so we subtract one from this to 4001 * get the one we last sent. 
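 *
 * (What the assignments below actually store is the snapshot
 * itself -- sending_seq here, or the lowest TSN still sitting on
 * the send queue in the else branch -- and the re-strike guard
 * above then compares against it with a serial-number ">=".)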
4002 */ 4003 tp1->rec.data.fast_retran_tsn = sending_seq; 4004 } else { 4005 /* 4006 * If there are chunks on the send queue 4007 * (unsent data that has made it from the 4008 * stream queues but not out the door, we 4009 * take the first one (which will have the 4010 * lowest TSN) and subtract one to get the 4011 * one we last sent. 4012 */ 4013 struct sctp_tmit_chunk *ttt; 4014 4015 ttt = TAILQ_FIRST(&asoc->send_queue); 4016 tp1->rec.data.fast_retran_tsn = 4017 ttt->rec.data.TSN_seq; 4018 } 4019 4020 if (tp1->do_rtt) { 4021 /* 4022 * this guy had a RTO calculation pending on 4023 * it, cancel it 4024 */ 4025 tp1->do_rtt = 0; 4026 } 4027 if (alt != tp1->whoTo) { 4028 /* yes, there is an alternate. */ 4029 sctp_free_remote_addr(tp1->whoTo); 4030 /* sa_ignore FREED_MEMORY */ 4031 tp1->whoTo = alt; 4032 atomic_add_int(&alt->ref_count, 1); 4033 } 4034 } 4035 tp1 = TAILQ_NEXT(tp1, sctp_next); 4036 } /* while (tp1) */ 4037 4038 if (tot_retrans > 0) { 4039 /* 4040 * Setup the ecn nonce re-sync point. We do this since once 4041 * we go to FR something we introduce a Karn's rule scenario 4042 * and won't know the totals for the ECN bits. 4043 */ 4044 asoc->nonce_resync_tsn = sending_seq; 4045 asoc->nonce_wait_for_ecne = 0; 4046 asoc->nonce_sum_check = 0; 4047 } 4048 } 4049 4050 struct sctp_tmit_chunk * 4051 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb, 4052 struct sctp_association *asoc) 4053 { 4054 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL; 4055 struct timeval now; 4056 int now_filled = 0; 4057 4058 if (asoc->peer_supports_prsctp == 0) { 4059 return (NULL); 4060 } 4061 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4062 while (tp1) { 4063 if (tp1->sent != SCTP_FORWARD_TSN_SKIP && 4064 tp1->sent != SCTP_DATAGRAM_RESEND) { 4065 /* no chance to advance, out of here */ 4066 break; 4067 } 4068 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 4069 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) { 4070 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 4071 asoc->advanced_peer_ack_point, 4072 tp1->rec.data.TSN_seq, 0, 0); 4073 } 4074 } 4075 if (!PR_SCTP_ENABLED(tp1->flags)) { 4076 /* 4077 * We can't fwd-tsn past any that are reliable aka 4078 * retransmitted until the asoc fails. 4079 */ 4080 break; 4081 } 4082 if (!now_filled) { 4083 (void)SCTP_GETTIME_TIMEVAL(&now); 4084 now_filled = 1; 4085 } 4086 tp2 = TAILQ_NEXT(tp1, sctp_next); 4087 /* 4088 * now we got a chunk which is marked for another 4089 * retransmission to a PR-stream but has run out its chances 4090 * already maybe OR has been marked to skip now. Can we skip 4091 * it if its a resend? 4092 */ 4093 if (tp1->sent == SCTP_DATAGRAM_RESEND && 4094 (PR_SCTP_TTL_ENABLED(tp1->flags))) { 4095 /* 4096 * Now is this one marked for resend and its time is 4097 * now up? 4098 */ 4099 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 4100 /* Yes so drop it */ 4101 if (tp1->data) { 4102 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 4103 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 4104 SCTP_SO_NOT_LOCKED); 4105 } 4106 } else { 4107 /* 4108 * No, we are done when hit one for resend 4109 * whos time as not expired. 4110 */ 4111 break; 4112 } 4113 } 4114 /* 4115 * Ok now if this chunk is marked to drop it we can clean up 4116 * the chunk, advance our peer ack point and we can check 4117 * the next chunk. 
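 *
 * Hypothetical walk: with a cum-ack of 100 and a sent queue of
 * 101(SKIP) 102(SKIP) 103(RESEND, timer not yet expired), the loop
 * advances advanced_peer_ack_point to 102 and stops at 103, so a
 * subsequent FORWARD-TSN tells the peer to act as if everything
 * through 102 had arrived.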
4118 */ 4119 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) { 4120 /* advance PeerAckPoint goes forward */ 4121 if (compare_with_wrap(tp1->rec.data.TSN_seq, 4122 asoc->advanced_peer_ack_point, 4123 MAX_TSN)) { 4124 4125 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq; 4126 a_adv = tp1; 4127 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) { 4128 /* No update but we do save the chk */ 4129 a_adv = tp1; 4130 } 4131 } else { 4132 /* 4133 * If it is still in RESEND we can advance no 4134 * further 4135 */ 4136 break; 4137 } 4138 /* 4139 * If we hit here we just dumped tp1, move to next tsn on 4140 * sent queue. 4141 */ 4142 tp1 = tp2; 4143 } 4144 return (a_adv); 4145 } 4146 4147 static int 4148 sctp_fs_audit(struct sctp_association *asoc) 4149 { 4150 struct sctp_tmit_chunk *chk; 4151 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0; 4152 int entry_flight, entry_cnt, ret; 4153 4154 entry_flight = asoc->total_flight; 4155 entry_cnt = asoc->total_flight_count; 4156 ret = 0; 4157 4158 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt) 4159 return (0); 4160 4161 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 4162 if (chk->sent < SCTP_DATAGRAM_RESEND) { 4163 printf("Chk TSN:%u size:%d inflight cnt:%d\n", 4164 chk->rec.data.TSN_seq, 4165 chk->send_size, 4166 chk->snd_count 4167 ); 4168 inflight++; 4169 } else if (chk->sent == SCTP_DATAGRAM_RESEND) { 4170 resend++; 4171 } else if (chk->sent < SCTP_DATAGRAM_ACKED) { 4172 inbetween++; 4173 } else if (chk->sent > SCTP_DATAGRAM_ACKED) { 4174 above++; 4175 } else { 4176 acked++; 4177 } 4178 } 4179 4180 if ((inflight > 0) || (inbetween > 0)) { 4181 #ifdef INVARIANTS 4182 panic("Flight size-express incorrect? \n"); 4183 #else 4184 printf("asoc->total_flight:%d cnt:%d\n", 4185 entry_flight, entry_cnt); 4186 4187 SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n", 4188 inflight, inbetween, resend, above, acked); 4189 ret = 1; 4190 #endif 4191 } 4192 return (ret); 4193 } 4194 4195 4196 static void 4197 sctp_window_probe_recovery(struct sctp_tcb *stcb, 4198 struct sctp_association *asoc, 4199 struct sctp_nets *net, 4200 struct sctp_tmit_chunk *tp1) 4201 { 4202 tp1->window_probe = 0; 4203 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) { 4204 /* TSN's skipped we do NOT move back. 
*/ 4205 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD, 4206 tp1->whoTo->flight_size, 4207 tp1->book_size, 4208 (uintptr_t) tp1->whoTo, 4209 tp1->rec.data.TSN_seq); 4210 return; 4211 } 4212 /* First setup this by shrinking flight */ 4213 sctp_flight_size_decrease(tp1); 4214 sctp_total_flight_decrease(stcb, tp1); 4215 /* Now mark for resend */ 4216 tp1->sent = SCTP_DATAGRAM_RESEND; 4217 asoc->sent_queue_retran_cnt++; 4218 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4219 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP, 4220 tp1->whoTo->flight_size, 4221 tp1->book_size, 4222 (uintptr_t) tp1->whoTo, 4223 tp1->rec.data.TSN_seq); 4224 } 4225 } 4226 4227 void 4228 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, 4229 uint32_t rwnd, int nonce_sum_flag, int *abort_now) 4230 { 4231 struct sctp_nets *net; 4232 struct sctp_association *asoc; 4233 struct sctp_tmit_chunk *tp1, *tp2; 4234 uint32_t old_rwnd; 4235 int win_probe_recovery = 0; 4236 int win_probe_recovered = 0; 4237 int j, done_once = 0; 4238 4239 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 4240 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack, 4241 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 4242 } 4243 SCTP_TCB_LOCK_ASSERT(stcb); 4244 #ifdef SCTP_ASOCLOG_OF_TSNS 4245 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack; 4246 stcb->asoc.cumack_log_at++; 4247 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 4248 stcb->asoc.cumack_log_at = 0; 4249 } 4250 #endif 4251 asoc = &stcb->asoc; 4252 old_rwnd = asoc->peers_rwnd; 4253 if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) { 4254 /* old ack */ 4255 return; 4256 } else if (asoc->last_acked_seq == cumack) { 4257 /* Window update sack */ 4258 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 4259 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 4260 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4261 /* SWS sender side engages */ 4262 asoc->peers_rwnd = 0; 4263 } 4264 if (asoc->peers_rwnd > old_rwnd) { 4265 goto again; 4266 } 4267 return; 4268 } 4269 /* First setup for CC stuff */ 4270 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4271 net->prev_cwnd = net->cwnd; 4272 net->net_ack = 0; 4273 net->net_ack2 = 0; 4274 4275 /* 4276 * CMT: Reset CUC and Fast recovery algo variables before 4277 * SACK processing 4278 */ 4279 net->new_pseudo_cumack = 0; 4280 net->will_exit_fast_recovery = 0; 4281 } 4282 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) { 4283 uint32_t send_s; 4284 4285 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 4286 tp1 = TAILQ_LAST(&asoc->sent_queue, 4287 sctpchunk_listhead); 4288 send_s = tp1->rec.data.TSN_seq + 1; 4289 } else { 4290 send_s = asoc->sending_seq; 4291 } 4292 if ((cumack == send_s) || 4293 compare_with_wrap(cumack, send_s, MAX_TSN)) { 4294 #ifndef INVARIANTS 4295 struct mbuf *oper; 4296 4297 #endif 4298 #ifdef INVARIANTS 4299 panic("Impossible sack 1"); 4300 #else 4301 *abort_now = 1; 4302 /* XXX */ 4303 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4304 0, M_DONTWAIT, 1, MT_DATA); 4305 if (oper) { 4306 struct sctp_paramhdr *ph; 4307 uint32_t *ippp; 4308 4309 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 4310 sizeof(uint32_t); 4311 ph = mtod(oper, struct sctp_paramhdr *); 4312 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 4313 ph->param_length = htons(SCTP_BUF_LEN(oper)); 4314 ippp = (uint32_t *) (ph + 1); 4315 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25); 4316 } 4317 
stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 4318 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED); 4319 return; 4320 #endif 4321 } 4322 } 4323 asoc->this_sack_highest_gap = cumack; 4324 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4325 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4326 stcb->asoc.overall_error_count, 4327 0, 4328 SCTP_FROM_SCTP_INDATA, 4329 __LINE__); 4330 } 4331 stcb->asoc.overall_error_count = 0; 4332 if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) { 4333 /* process the new consecutive TSN first */ 4334 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4335 while (tp1) { 4336 tp2 = TAILQ_NEXT(tp1, sctp_next); 4337 if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq, 4338 MAX_TSN) || 4339 cumack == tp1->rec.data.TSN_seq) { 4340 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 4341 printf("Warning, an unsent is now acked?\n"); 4342 } 4343 /* 4344 * ECN Nonce: Add the nonce to the sender's 4345 * nonce sum 4346 */ 4347 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce; 4348 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4349 /* 4350 * If it is less than ACKED, it is 4351 * now no-longer in flight. Higher 4352 * values may occur during marking 4353 */ 4354 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4355 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4356 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4357 tp1->whoTo->flight_size, 4358 tp1->book_size, 4359 (uintptr_t) tp1->whoTo, 4360 tp1->rec.data.TSN_seq); 4361 } 4362 sctp_flight_size_decrease(tp1); 4363 /* sa_ignore NO_NULL_CHK */ 4364 sctp_total_flight_decrease(stcb, tp1); 4365 } 4366 tp1->whoTo->net_ack += tp1->send_size; 4367 if (tp1->snd_count < 2) { 4368 /* 4369 * True non-retransmited 4370 * chunk 4371 */ 4372 tp1->whoTo->net_ack2 += 4373 tp1->send_size; 4374 4375 /* update RTO too? */ 4376 if (tp1->do_rtt) { 4377 tp1->whoTo->RTO = 4378 /* 4379 * sa_ignore 4380 * NO_NULL_CHK 4381 */ 4382 sctp_calculate_rto(stcb, 4383 asoc, tp1->whoTo, 4384 &tp1->sent_rcv_time, 4385 sctp_align_safe_nocopy); 4386 tp1->do_rtt = 0; 4387 } 4388 } 4389 /* 4390 * CMT: CUCv2 algorithm. From the 4391 * cumack'd TSNs, for each TSN being 4392 * acked for the first time, set the 4393 * following variables for the 4394 * corresp destination. 4395 * new_pseudo_cumack will trigger a 4396 * cwnd update. 4397 * find_(rtx_)pseudo_cumack will 4398 * trigger search for the next 4399 * expected (rtx-)pseudo-cumack. 
4400 */ 4401 tp1->whoTo->new_pseudo_cumack = 1; 4402 tp1->whoTo->find_pseudo_cumack = 1; 4403 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4404 4405 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 4406 /* sa_ignore NO_NULL_CHK */ 4407 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 4408 } 4409 } 4410 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4411 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4412 } 4413 if (tp1->rec.data.chunk_was_revoked) { 4414 /* deflate the cwnd */ 4415 tp1->whoTo->cwnd -= tp1->book_size; 4416 tp1->rec.data.chunk_was_revoked = 0; 4417 } 4418 tp1->sent = SCTP_DATAGRAM_ACKED; 4419 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4420 if (tp1->data) { 4421 /* sa_ignore NO_NULL_CHK */ 4422 sctp_free_bufspace(stcb, asoc, tp1, 1); 4423 sctp_m_freem(tp1->data); 4424 } 4425 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4426 sctp_log_sack(asoc->last_acked_seq, 4427 cumack, 4428 tp1->rec.data.TSN_seq, 4429 0, 4430 0, 4431 SCTP_LOG_FREE_SENT); 4432 } 4433 tp1->data = NULL; 4434 asoc->sent_queue_cnt--; 4435 sctp_free_a_chunk(stcb, tp1); 4436 tp1 = tp2; 4437 } else { 4438 break; 4439 } 4440 } 4441 4442 } 4443 /* sa_ignore NO_NULL_CHK */ 4444 if (stcb->sctp_socket) { 4445 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4446 struct socket *so; 4447 4448 #endif 4449 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4450 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4451 /* sa_ignore NO_NULL_CHK */ 4452 sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK); 4453 } 4454 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4455 so = SCTP_INP_SO(stcb->sctp_ep); 4456 atomic_add_int(&stcb->asoc.refcnt, 1); 4457 SCTP_TCB_UNLOCK(stcb); 4458 SCTP_SOCKET_LOCK(so, 1); 4459 SCTP_TCB_LOCK(stcb); 4460 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4461 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4462 /* assoc was freed while we were unlocked */ 4463 SCTP_SOCKET_UNLOCK(so, 1); 4464 return; 4465 } 4466 #endif 4467 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4468 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4469 SCTP_SOCKET_UNLOCK(so, 1); 4470 #endif 4471 } else { 4472 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4473 sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK); 4474 } 4475 } 4476 4477 /* JRS - Use the congestion control given in the CC module */ 4478 if (asoc->last_acked_seq != cumack) 4479 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0); 4480 4481 asoc->last_acked_seq = cumack; 4482 4483 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4484 /* nothing left in-flight */ 4485 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4486 net->flight_size = 0; 4487 net->partial_bytes_acked = 0; 4488 } 4489 asoc->total_flight = 0; 4490 asoc->total_flight_count = 0; 4491 } 4492 /* ECN Nonce updates */ 4493 if (asoc->ecn_nonce_allowed) { 4494 if (asoc->nonce_sum_check) { 4495 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) { 4496 if (asoc->nonce_wait_for_ecne == 0) { 4497 struct sctp_tmit_chunk *lchk; 4498 4499 lchk = TAILQ_FIRST(&asoc->send_queue); 4500 asoc->nonce_wait_for_ecne = 1; 4501 if (lchk) { 4502 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq; 4503 } else { 4504 asoc->nonce_wait_tsn = asoc->sending_seq; 4505 } 4506 } else { 4507 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) || 4508 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) { 4509 /* 4510 * Misbehaving peer. 
We need 4511 * to react to this guy 4512 */ 4513 asoc->ecn_allowed = 0; 4514 asoc->ecn_nonce_allowed = 0; 4515 } 4516 } 4517 } 4518 } else { 4519 /* See if Resynchronization Possible */ 4520 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) { 4521 asoc->nonce_sum_check = 1; 4522 /* 4523 * Now we must calculate what the base is. 4524 * We do this based on two things, we know 4525 * the total's for all the segments 4526 * gap-acked in the SACK (none). We also 4527 * know the SACK's nonce sum, its in 4528 * nonce_sum_flag. So we can build a truth 4529 * table to back-calculate the new value of 4530 * asoc->nonce_sum_expect_base: 4531 * 4532 * SACK-flag-Value Seg-Sums Base 0 0 0 4533 * 1 0 1 0 1 1 1 4534 * 1 0 4535 */ 4536 asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM; 4537 } 4538 } 4539 } 4540 /* RWND update */ 4541 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 4542 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 4543 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4544 /* SWS sender side engages */ 4545 asoc->peers_rwnd = 0; 4546 } 4547 if (asoc->peers_rwnd > old_rwnd) { 4548 win_probe_recovery = 1; 4549 } 4550 /* Now assure a timer where data is queued at */ 4551 again: 4552 j = 0; 4553 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4554 int to_ticks; 4555 4556 if (win_probe_recovery && (net->window_probe)) { 4557 win_probe_recovered = 1; 4558 /* 4559 * Find first chunk that was used with window probe 4560 * and clear the sent 4561 */ 4562 /* sa_ignore FREED_MEMORY */ 4563 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4564 if (tp1->window_probe) { 4565 /* move back to data send queue */ 4566 sctp_window_probe_recovery(stcb, asoc, net, tp1); 4567 break; 4568 } 4569 } 4570 } 4571 if (net->RTO == 0) { 4572 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 4573 } else { 4574 to_ticks = MSEC_TO_TICKS(net->RTO); 4575 } 4576 if (net->flight_size) { 4577 j++; 4578 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks, 4579 sctp_timeout_handler, &net->rxt_timer); 4580 if (net->window_probe) { 4581 net->window_probe = 0; 4582 } 4583 } else { 4584 if (net->window_probe) { 4585 /* 4586 * In window probes we must assure a timer 4587 * is still running there 4588 */ 4589 net->window_probe = 0; 4590 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4591 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks, 4592 sctp_timeout_handler, &net->rxt_timer); 4593 } 4594 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4595 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4596 stcb, net, 4597 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22); 4598 } 4599 if (SCTP_BASE_SYSCTL(sctp_early_fr)) { 4600 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 4601 SCTP_STAT_INCR(sctps_earlyfrstpidsck4); 4602 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 4603 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23); 4604 } 4605 } 4606 } 4607 } 4608 if ((j == 0) && 4609 (!TAILQ_EMPTY(&asoc->sent_queue)) && 4610 (asoc->sent_queue_retran_cnt == 0) && 4611 (win_probe_recovered == 0) && 4612 (done_once == 0)) { 4613 /* 4614 * huh, this should not happen unless all packets are 4615 * PR-SCTP and marked to skip of course. 
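 *
 * The recovery below simply re-derives the invariant the audit
 * checks -- total_flight equals the sum of book_size over chunks
 * with sent < SCTP_DATAGRAM_RESEND -- by zeroing each net's
 * flight_size and re-adding every still-outstanding chunk (while
 * recounting the RESEND ones).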
4616 */ 4617 if (sctp_fs_audit(asoc)) { 4618 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4619 net->flight_size = 0; 4620 } 4621 asoc->total_flight = 0; 4622 asoc->total_flight_count = 0; 4623 asoc->sent_queue_retran_cnt = 0; 4624 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4625 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4626 sctp_flight_size_increase(tp1); 4627 sctp_total_flight_increase(stcb, tp1); 4628 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4629 asoc->sent_queue_retran_cnt++; 4630 } 4631 } 4632 } 4633 done_once = 1; 4634 goto again; 4635 } 4636 /**********************************/ 4637 /* Now what about shutdown issues */ 4638 /**********************************/ 4639 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4640 /* nothing left on sendqueue.. consider done */ 4641 /* clean up */ 4642 if ((asoc->stream_queue_cnt == 1) && 4643 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4644 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 4645 (asoc->locked_on_sending) 4646 ) { 4647 struct sctp_stream_queue_pending *sp; 4648 4649 /* 4650 * I may be in a state where we got all across.. but 4651 * cannot write more due to a shutdown... we abort 4652 * since the user did not indicate EOR in this case. 4653 * The sp will be cleaned during free of the asoc. 4654 */ 4655 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), 4656 sctp_streamhead); 4657 if ((sp) && (sp->length == 0)) { 4658 /* Let cleanup code purge it */ 4659 if (sp->msg_is_complete) { 4660 asoc->stream_queue_cnt--; 4661 } else { 4662 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4663 asoc->locked_on_sending = NULL; 4664 asoc->stream_queue_cnt--; 4665 } 4666 } 4667 } 4668 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4669 (asoc->stream_queue_cnt == 0)) { 4670 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4671 /* Need to abort here */ 4672 struct mbuf *oper; 4673 4674 abort_out_now: 4675 *abort_now = 1; 4676 /* XXX */ 4677 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4678 0, M_DONTWAIT, 1, MT_DATA); 4679 if (oper) { 4680 struct sctp_paramhdr *ph; 4681 uint32_t *ippp; 4682 4683 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 4684 sizeof(uint32_t); 4685 ph = mtod(oper, struct sctp_paramhdr *); 4686 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 4687 ph->param_length = htons(SCTP_BUF_LEN(oper)); 4688 ippp = (uint32_t *) (ph + 1); 4689 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24); 4690 } 4691 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; 4692 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED); 4693 } else { 4694 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 4695 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4696 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4697 } 4698 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 4699 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4700 sctp_stop_timers_for_shutdown(stcb); 4701 sctp_send_shutdown(stcb, 4702 stcb->asoc.primary_destination); 4703 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4704 stcb->sctp_ep, stcb, asoc->primary_destination); 4705 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4706 stcb->sctp_ep, stcb, asoc->primary_destination); 4707 } 4708 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4709 (asoc->stream_queue_cnt == 0)) { 4710 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4711 goto abort_out_now; 4712 } 4713 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4714 SCTP_SET_STATE(asoc, 
SCTP_STATE_SHUTDOWN_ACK_SENT); 4715 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4716 sctp_send_shutdown_ack(stcb, 4717 stcb->asoc.primary_destination); 4718 4719 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4720 stcb->sctp_ep, stcb, asoc->primary_destination); 4721 } 4722 } 4723 /*********************************************/ 4724 /* Here we perform PR-SCTP procedures */ 4725 /* (section 4.2) */ 4726 /*********************************************/ 4727 /* C1. update advancedPeerAckPoint */ 4728 if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) { 4729 asoc->advanced_peer_ack_point = cumack; 4730 } 4731 /* PR-Sctp issues need to be addressed too */ 4732 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) { 4733 struct sctp_tmit_chunk *lchk; 4734 uint32_t old_adv_peer_ack_point; 4735 4736 old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 4737 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 4738 /* C3. See if we need to send a Fwd-TSN */ 4739 if (compare_with_wrap(asoc->advanced_peer_ack_point, cumack, 4740 MAX_TSN)) { 4741 /* 4742 * ISSUE with ECN, see FWD-TSN processing for notes 4743 * on issues that will occur when the ECN NONCE 4744 * stuff is put into SCTP for cross checking. 4745 */ 4746 if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point, 4747 MAX_TSN)) { 4748 send_forward_tsn(stcb, asoc); 4749 /* 4750 * ECN Nonce: Disable Nonce Sum check when 4751 * FWD TSN is sent and store resync tsn 4752 */ 4753 asoc->nonce_sum_check = 0; 4754 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point; 4755 } else if (lchk) { 4756 /* try to FR fwd-tsn's that get lost too */ 4757 lchk->rec.data.fwd_tsn_cnt++; 4758 if (lchk->rec.data.fwd_tsn_cnt > 3) { 4759 send_forward_tsn(stcb, asoc); 4760 lchk->rec.data.fwd_tsn_cnt = 0; 4761 } 4762 } 4763 } 4764 if (lchk) { 4765 /* Assure a timer is up */ 4766 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4767 stcb->sctp_ep, stcb, lchk->whoTo); 4768 } 4769 } 4770 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 4771 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 4772 rwnd, 4773 stcb->asoc.peers_rwnd, 4774 stcb->asoc.total_flight, 4775 stcb->asoc.total_output_queue_size); 4776 } 4777 } 4778 4779 /* EY- nr_sack */ 4780 /* Identifies the non-renegable tsns that are revoked*/ 4781 static void 4782 sctp_check_for_nr_revoked(struct sctp_tcb *stcb, 4783 struct sctp_association *asoc, uint32_t cumack, 4784 uint32_t biggest_tsn_acked) 4785 { 4786 struct sctp_tmit_chunk *tp1; 4787 4788 for (tp1 = TAILQ_FIRST(&asoc->sent_queue); tp1; tp1 = TAILQ_NEXT(tp1, sctp_next)) { 4789 if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack, 4790 MAX_TSN)) { 4791 /* 4792 * ok this guy is either ACK or MARKED. If it is 4793 * ACKED it has been previously acked but not this 4794 * time i.e. revoked. If it is MARKED it was ACK'ed 4795 * again. 4796 */ 4797 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked, 4798 MAX_TSN)) 4799 break; 4800 4801 4802 if (tp1->sent == SCTP_DATAGRAM_NR_ACKED) { 4803 /* 4804 * EY! a non-renegable TSN is revoked, need 4805 * to abort the association 4806 */ 4807 /* 4808 * EY TODO: put in the code to abort the 4809 * assoc. 
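 *
 * (Why abort rather than retransmit: once a TSN is NR-acked, the
 * nr_sacking branch of the segment-range processing has already
 * freed its data via sctp_m_freem(), so if the peer now revokes
 * that TSN there is nothing left to send again.)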
4810 */ 4811 return; 4812 } else if (tp1->sent == SCTP_DATAGRAM_NR_MARKED) { 4813 /* it has been re-acked in this SACK */ 4814 tp1->sent = SCTP_DATAGRAM_NR_ACKED; 4815 } 4816 } 4817 if (tp1->sent == SCTP_DATAGRAM_UNSENT) 4818 break; 4819 } 4820 return; 4821 } 4822 4823 void 4824 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, 4825 struct sctp_tcb *stcb, struct sctp_nets *net_from, 4826 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup, 4827 int *abort_now, uint8_t flags, 4828 uint32_t cum_ack, uint32_t rwnd) 4829 { 4830 struct sctp_association *asoc; 4831 struct sctp_tmit_chunk *tp1, *tp2; 4832 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack; 4833 uint32_t sav_cum_ack; 4834 uint16_t wake_him = 0; 4835 uint32_t send_s = 0; 4836 long j; 4837 int accum_moved = 0; 4838 int will_exit_fast_recovery = 0; 4839 uint32_t a_rwnd, old_rwnd; 4840 int win_probe_recovery = 0; 4841 int win_probe_recovered = 0; 4842 struct sctp_nets *net = NULL; 4843 int nonce_sum_flag, ecn_seg_sums = 0; 4844 int done_once; 4845 uint8_t reneged_all = 0; 4846 uint8_t cmt_dac_flag; 4847 4848 /* 4849 * we take any chance we can to service our queues since we cannot 4850 * get awoken when the socket is read from :< 4851 */ 4852 /* 4853 * Now perform the actual SACK handling: 1) Verify that it is not an 4854 * old sack, if so discard. 2) If there is nothing left in the send 4855 * queue (cum-ack is equal to last acked) then you have a duplicate 4856 * too, update any rwnd change and verify no timers are running, 4857 * then return. 3) Process any new consecutive data i.e. cum-ack 4858 * moved; process these first and note that it moved. 4) Process any 4859 * sack blocks. 5) Drop any acked from the queue. 6) Check for any 4860 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left, 4861 * sync up flightsizes and things, stop all timers and also check 4862 * for shutdown_pending state. If so then go ahead and send off the 4863 * shutdown. If in shutdown recv, send off the shutdown-ack and 4864 * start that timer, then return. 9) Strike any non-acked things and do FR 4865 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp 4866 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK 4867 * if in shutdown_recv state.
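 *
 * Every TSN ordering test in these steps is serial-number
 * arithmetic (RFC 1982 style) via compare_with_wrap(). A
 * self-contained equivalent for 32-bit TSNs, shown only to pin
 * the semantics down (illustrative, not the kernel helper
 * itself):
 *
 *	static int
 *	tsn_gt(uint32_t a, uint32_t b)
 *	{
 *		return ((a != b) && ((uint32_t)(a - b) < (1U << 31)));
 *	}
 *
 * so that, e.g., tsn_gt(5, 0xfffffffe) holds across the wrap.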
4868 */ 4869 SCTP_TCB_LOCK_ASSERT(stcb); 4870 /* CMT DAC algo */ 4871 this_sack_lowest_newack = 0; 4872 j = 0; 4873 SCTP_STAT_INCR(sctps_slowpath_sack); 4874 last_tsn = cum_ack; 4875 nonce_sum_flag = flags & SCTP_SACK_NONCE_SUM; 4876 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC; 4877 #ifdef SCTP_ASOCLOG_OF_TSNS 4878 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack; 4879 stcb->asoc.cumack_log_at++; 4880 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 4881 stcb->asoc.cumack_log_at = 0; 4882 } 4883 #endif 4884 a_rwnd = rwnd; 4885 4886 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 4887 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack, 4888 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 4889 } 4890 old_rwnd = stcb->asoc.peers_rwnd; 4891 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4892 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4893 stcb->asoc.overall_error_count, 4894 0, 4895 SCTP_FROM_SCTP_INDATA, 4896 __LINE__); 4897 } 4898 stcb->asoc.overall_error_count = 0; 4899 asoc = &stcb->asoc; 4900 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4901 sctp_log_sack(asoc->last_acked_seq, 4902 cum_ack, 4903 0, 4904 num_seg, 4905 num_dup, 4906 SCTP_LOG_NEW_SACK); 4907 } 4908 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) { 4909 uint16_t i; 4910 uint32_t *dupdata, dblock; 4911 4912 for (i = 0; i < num_dup; i++) { 4913 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t), 4914 sizeof(uint32_t), (uint8_t *) & dblock); 4915 if (dupdata == NULL) { 4916 break; 4917 } 4918 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); 4919 } 4920 } 4921 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) { 4922 /* reality check */ 4923 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 4924 tp1 = TAILQ_LAST(&asoc->sent_queue, 4925 sctpchunk_listhead); 4926 send_s = tp1->rec.data.TSN_seq + 1; 4927 } else { 4928 send_s = asoc->sending_seq; 4929 } 4930 if (cum_ack == send_s || 4931 compare_with_wrap(cum_ack, send_s, MAX_TSN)) { 4932 #ifndef INVARIANTS 4933 struct mbuf *oper; 4934 4935 #endif 4936 #ifdef INVARIANTS 4937 hopeless_peer: 4938 panic("Impossible sack 1"); 4939 #else 4940 /* 4941 * no way, we have not even sent this TSN out yet. 4942 * Peer is hopelessly messed up with us. 
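 *
 * (send_s computed above is one past the highest TSN ever queued
 * for the wire -- the last sent-queue TSN + 1, or sending_seq
 * when that queue is empty -- so a cum-ack serially at or beyond
 * send_s acknowledges data that cannot exist, and we abort rather
 * than corrupt our accounting.)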
4943 */ 4944 hopeless_peer: 4945 *abort_now = 1; 4946 /* XXX */ 4947 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4948 0, M_DONTWAIT, 1, MT_DATA); 4949 if (oper) { 4950 struct sctp_paramhdr *ph; 4951 uint32_t *ippp; 4952 4953 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 4954 sizeof(uint32_t); 4955 ph = mtod(oper, struct sctp_paramhdr *); 4956 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 4957 ph->param_length = htons(SCTP_BUF_LEN(oper)); 4958 ippp = (uint32_t *) (ph + 1); 4959 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25); 4960 } 4961 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 4962 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED); 4963 return; 4964 #endif 4965 } 4966 } 4967 /**********************/ 4968 /* 1) check the range */ 4969 /**********************/ 4970 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) { 4971 /* acking something behind */ 4972 return; 4973 } 4974 sav_cum_ack = asoc->last_acked_seq; 4975 4976 /* update the Rwnd of the peer */ 4977 if (TAILQ_EMPTY(&asoc->sent_queue) && 4978 TAILQ_EMPTY(&asoc->send_queue) && 4979 (asoc->stream_queue_cnt == 0)) { 4980 /* nothing left on send/sent and strmq */ 4981 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4982 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4983 asoc->peers_rwnd, 0, 0, a_rwnd); 4984 } 4985 asoc->peers_rwnd = a_rwnd; 4986 if (asoc->sent_queue_retran_cnt) { 4987 asoc->sent_queue_retran_cnt = 0; 4988 } 4989 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4990 /* SWS sender side engages */ 4991 asoc->peers_rwnd = 0; 4992 } 4993 /* stop any timers */ 4994 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4995 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4996 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); 4997 if (SCTP_BASE_SYSCTL(sctp_early_fr)) { 4998 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 4999 SCTP_STAT_INCR(sctps_earlyfrstpidsck1); 5000 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 5001 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); 5002 } 5003 } 5004 net->partial_bytes_acked = 0; 5005 net->flight_size = 0; 5006 } 5007 asoc->total_flight = 0; 5008 asoc->total_flight_count = 0; 5009 return; 5010 } 5011 /* 5012 * We init netAckSz and netAckSz2 to 0. These are used to track 2 5013 * things. The total byte count acked is tracked in netAckSz AND 5014 * netAck2 is used to track the total bytes acked that are un- 5015 * amibguious and were never retransmitted. We track these on a per 5016 * destination address basis. 5017 */ 5018 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5019 net->prev_cwnd = net->cwnd; 5020 net->net_ack = 0; 5021 net->net_ack2 = 0; 5022 5023 /* 5024 * CMT: Reset CUC and Fast recovery algo variables before 5025 * SACK processing 5026 */ 5027 net->new_pseudo_cumack = 0; 5028 net->will_exit_fast_recovery = 0; 5029 } 5030 /* process the new consecutive TSN first */ 5031 tp1 = TAILQ_FIRST(&asoc->sent_queue); 5032 while (tp1) { 5033 if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq, 5034 MAX_TSN) || 5035 last_tsn == tp1->rec.data.TSN_seq) { 5036 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 5037 /* 5038 * ECN Nonce: Add the nonce to the sender's 5039 * nonce sum 5040 */ 5041 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce; 5042 accum_moved = 1; 5043 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 5044 /* 5045 * If it is less than ACKED, it is 5046 * now no-longer in flight. 
Higher 5047 * values may occur during marking 5048 */ 5049 if ((tp1->whoTo->dest_state & 5050 SCTP_ADDR_UNCONFIRMED) && 5051 (tp1->snd_count < 2)) { 5052 /* 5053 * If there was no retran 5054 * and the address is 5055 * un-confirmed and we sent 5056 * there and are now 5057 * sacked.. its confirmed, 5058 * mark it so. 5059 */ 5060 tp1->whoTo->dest_state &= 5061 ~SCTP_ADDR_UNCONFIRMED; 5062 } 5063 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5064 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 5065 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 5066 tp1->whoTo->flight_size, 5067 tp1->book_size, 5068 (uintptr_t) tp1->whoTo, 5069 tp1->rec.data.TSN_seq); 5070 } 5071 sctp_flight_size_decrease(tp1); 5072 sctp_total_flight_decrease(stcb, tp1); 5073 } 5074 tp1->whoTo->net_ack += tp1->send_size; 5075 5076 /* CMT SFR and DAC algos */ 5077 this_sack_lowest_newack = tp1->rec.data.TSN_seq; 5078 tp1->whoTo->saw_newack = 1; 5079 5080 if (tp1->snd_count < 2) { 5081 /* 5082 * True non-retransmited 5083 * chunk 5084 */ 5085 tp1->whoTo->net_ack2 += 5086 tp1->send_size; 5087 5088 /* update RTO too? */ 5089 if (tp1->do_rtt) { 5090 tp1->whoTo->RTO = 5091 sctp_calculate_rto(stcb, 5092 asoc, tp1->whoTo, 5093 &tp1->sent_rcv_time, 5094 sctp_align_safe_nocopy); 5095 tp1->do_rtt = 0; 5096 } 5097 } 5098 /* 5099 * CMT: CUCv2 algorithm. From the 5100 * cumack'd TSNs, for each TSN being 5101 * acked for the first time, set the 5102 * following variables for the 5103 * corresp destination. 5104 * new_pseudo_cumack will trigger a 5105 * cwnd update. 5106 * find_(rtx_)pseudo_cumack will 5107 * trigger search for the next 5108 * expected (rtx-)pseudo-cumack. 5109 */ 5110 tp1->whoTo->new_pseudo_cumack = 1; 5111 tp1->whoTo->find_pseudo_cumack = 1; 5112 tp1->whoTo->find_rtx_pseudo_cumack = 1; 5113 5114 5115 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 5116 sctp_log_sack(asoc->last_acked_seq, 5117 cum_ack, 5118 tp1->rec.data.TSN_seq, 5119 0, 5120 0, 5121 SCTP_LOG_TSN_ACKED); 5122 } 5123 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 5124 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 5125 } 5126 } 5127 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 5128 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 5129 #ifdef SCTP_AUDITING_ENABLED 5130 sctp_audit_log(0xB3, 5131 (asoc->sent_queue_retran_cnt & 0x000000ff)); 5132 #endif 5133 } 5134 if (tp1->rec.data.chunk_was_revoked) { 5135 /* deflate the cwnd */ 5136 tp1->whoTo->cwnd -= tp1->book_size; 5137 tp1->rec.data.chunk_was_revoked = 0; 5138 } 5139 tp1->sent = SCTP_DATAGRAM_ACKED; 5140 } 5141 } else { 5142 break; 5143 } 5144 tp1 = TAILQ_NEXT(tp1, sctp_next); 5145 } 5146 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; 5147 /* always set this up to cum-ack */ 5148 asoc->this_sack_highest_gap = last_tsn; 5149 5150 if ((num_seg > 0) || (num_nr_seg > 0)) { 5151 5152 /* 5153 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has 5154 * to be greater than the cumack. Also reset saw_newack to 0 5155 * for all dests. 5156 */ 5157 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5158 net->saw_newack = 0; 5159 net->this_sack_highest_newack = last_tsn; 5160 } 5161 5162 /* 5163 * thisSackHighestGap will increase while handling NEW 5164 * segments this_sack_highest_newack will increase while 5165 * handling NEWLY ACKED chunks. this_sack_lowest_newack is 5166 * used for CMT DAC algo. saw_newack will also change. 
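 *
 * Recap of the trackers handed to the strike phase below:
 * this_sack_highest_gap    - highest TSN covered by this SACK at all;
 * this_sack_highest_newack - per net, highest newly acked TSN that
 *                            was sent to that net (SFR/HTNA);
 * this_sack_lowest_newack  - lowest newly acked TSN, feeding the
 *                            DAC "between two sacked TSNs" test.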
5167 */ 5168 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked, 5169 &biggest_tsn_newly_acked, &this_sack_lowest_newack, 5170 num_seg, num_nr_seg, &ecn_seg_sums)) { 5171 wake_him++; 5172 } 5173 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) { 5174 /* 5175 * validate the biggest_tsn_acked in the gap acks if 5176 * strict adherence is wanted. 5177 */ 5178 if ((biggest_tsn_acked == send_s) || 5179 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) { 5180 /* 5181 * peer is either confused or we are under 5182 * attack. We must abort. 5183 */ 5184 goto hopeless_peer; 5185 } 5186 } 5187 } 5188 /*******************************************/ 5189 /* cancel ALL T3-send timer if accum moved */ 5190 /*******************************************/ 5191 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) { 5192 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5193 if (net->new_pseudo_cumack) 5194 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 5195 stcb, net, 5196 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27); 5197 5198 } 5199 } else { 5200 if (accum_moved) { 5201 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5202 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 5203 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28); 5204 } 5205 } 5206 } 5207 /********************************************/ 5208 /* drop the acked chunks from the sendqueue */ 5209 /********************************************/ 5210 asoc->last_acked_seq = cum_ack; 5211 5212 tp1 = TAILQ_FIRST(&asoc->sent_queue); 5213 if (tp1 == NULL) 5214 goto done_with_it; 5215 do { 5216 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack, 5217 MAX_TSN)) { 5218 break; 5219 } 5220 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 5221 /* no more sent on list */ 5222 printf("Warning, tp1->sent == %d and its now acked?\n", 5223 tp1->sent); 5224 } 5225 tp2 = TAILQ_NEXT(tp1, sctp_next); 5226 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 5227 if (tp1->pr_sctp_on) { 5228 if (asoc->pr_sctp_cnt != 0) 5229 asoc->pr_sctp_cnt--; 5230 } 5231 if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) && 5232 (asoc->total_flight > 0)) { 5233 #ifdef INVARIANTS 5234 panic("Warning flight size is postive and should be 0"); 5235 #else 5236 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n", 5237 asoc->total_flight); 5238 #endif 5239 asoc->total_flight = 0; 5240 } 5241 if (tp1->data) { 5242 /* sa_ignore NO_NULL_CHK */ 5243 sctp_free_bufspace(stcb, asoc, tp1, 1); 5244 sctp_m_freem(tp1->data); 5245 if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) { 5246 asoc->sent_queue_cnt_removeable--; 5247 } 5248 } 5249 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 5250 sctp_log_sack(asoc->last_acked_seq, 5251 cum_ack, 5252 tp1->rec.data.TSN_seq, 5253 0, 5254 0, 5255 SCTP_LOG_FREE_SENT); 5256 } 5257 tp1->data = NULL; 5258 asoc->sent_queue_cnt--; 5259 sctp_free_a_chunk(stcb, tp1); 5260 wake_him++; 5261 tp1 = tp2; 5262 } while (tp1 != NULL); 5263 5264 done_with_it: 5265 /* sa_ignore NO_NULL_CHK */ 5266 if ((wake_him) && (stcb->sctp_socket)) { 5267 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5268 struct socket *so; 5269 5270 #endif 5271 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 5272 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 5273 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK); 5274 } 5275 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5276 so = SCTP_INP_SO(stcb->sctp_ep); 5277 atomic_add_int(&stcb->asoc.refcnt, 1); 5278 SCTP_TCB_UNLOCK(stcb); 5279 SCTP_SOCKET_LOCK(so, 1); 5280 
SCTP_TCB_LOCK(stcb); 5281 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5282 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 5283 /* assoc was freed while we were unlocked */ 5284 SCTP_SOCKET_UNLOCK(so, 1); 5285 return; 5286 } 5287 #endif 5288 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 5289 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5290 SCTP_SOCKET_UNLOCK(so, 1); 5291 #endif 5292 } else { 5293 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 5294 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK); 5295 } 5296 } 5297 5298 if (asoc->fast_retran_loss_recovery && accum_moved) { 5299 if (compare_with_wrap(asoc->last_acked_seq, 5300 asoc->fast_recovery_tsn, MAX_TSN) || 5301 asoc->last_acked_seq == asoc->fast_recovery_tsn) { 5302 /* Setup so we will exit RFC2582 fast recovery */ 5303 will_exit_fast_recovery = 1; 5304 } 5305 } 5306 /* 5307 * Check for revoked fragments: 5308 * 5309 * if Previous sack - Had no frags then we can't have any revoked if 5310 * Previous sack - Had frag's then - If we now have frags aka 5311 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked 5312 * some of them. else - The peer revoked all ACKED fragments, since 5313 * we had some before and now we have NONE. 5314 */ 5315 5316 if (num_seg) 5317 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked); 5318 else if (asoc->saw_sack_with_frags) { 5319 int cnt_revoked = 0; 5320 5321 tp1 = TAILQ_FIRST(&asoc->sent_queue); 5322 if (tp1 != NULL) { 5323 /* Peer revoked all dg's marked or acked */ 5324 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5325 /* 5326 * EY- maybe check only if it is nr_acked 5327 * nr_marked may not be possible 5328 */ 5329 if ((tp1->sent == SCTP_DATAGRAM_NR_ACKED) || 5330 (tp1->sent == SCTP_DATAGRAM_NR_MARKED)) { 5331 /* 5332 * EY! - TODO: Something previously 5333 * nr_gapped is reneged, abort the 5334 * association 5335 */ 5336 return; 5337 } 5338 if ((tp1->sent > SCTP_DATAGRAM_RESEND) && 5339 (tp1->sent < SCTP_FORWARD_TSN_SKIP)) { 5340 tp1->sent = SCTP_DATAGRAM_SENT; 5341 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 5342 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 5343 tp1->whoTo->flight_size, 5344 tp1->book_size, 5345 (uintptr_t) tp1->whoTo, 5346 tp1->rec.data.TSN_seq); 5347 } 5348 sctp_flight_size_increase(tp1); 5349 sctp_total_flight_increase(stcb, tp1); 5350 tp1->rec.data.chunk_was_revoked = 1; 5351 /* 5352 * To ensure that this increase in 5353 * flightsize, which is artificial, 5354 * does not throttle the sender, we 5355 * also increase the cwnd 5356 * artificially. 5357 */ 5358 tp1->whoTo->cwnd += tp1->book_size; 5359 cnt_revoked++; 5360 } 5361 } 5362 if (cnt_revoked) { 5363 reneged_all = 1; 5364 } 5365 } 5366 asoc->saw_sack_with_frags = 0; 5367 } 5368 if (num_seg) 5369 asoc->saw_sack_with_frags = 1; 5370 else 5371 asoc->saw_sack_with_frags = 0; 5372 5373 /* EY! 
- not sure whether there should be an IF */ 5374 if (num_nr_seg > 0) 5375 sctp_check_for_nr_revoked(stcb, asoc, cum_ack, biggest_tsn_acked); 5376 5377 /* JRS - Use the congestion control given in the CC module */ 5378 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery); 5379 5380 if (TAILQ_EMPTY(&asoc->sent_queue)) { 5381 /* nothing left in-flight */ 5382 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5383 /* stop all timers */ 5384 if (SCTP_BASE_SYSCTL(sctp_early_fr)) { 5385 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 5386 SCTP_STAT_INCR(sctps_earlyfrstpidsck4); 5387 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 5388 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29); 5389 } 5390 } 5391 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 5392 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30); 5393 net->flight_size = 0; 5394 net->partial_bytes_acked = 0; 5395 } 5396 asoc->total_flight = 0; 5397 asoc->total_flight_count = 0; 5398 } 5399 /**********************************/ 5400 /* Now what about shutdown issues */ 5401 /**********************************/ 5402 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 5403 /* nothing left on sendqueue.. consider done */ 5404 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 5405 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 5406 asoc->peers_rwnd, 0, 0, a_rwnd); 5407 } 5408 asoc->peers_rwnd = a_rwnd; 5409 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 5410 /* SWS sender side engages */ 5411 asoc->peers_rwnd = 0; 5412 } 5413 /* clean up */ 5414 if ((asoc->stream_queue_cnt == 1) && 5415 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 5416 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 5417 (asoc->locked_on_sending) 5418 ) { 5419 struct sctp_stream_queue_pending *sp; 5420 5421 /* 5422 * I may be in a state where we got all across.. but 5423 * cannot write more due to a shutdown... we abort 5424 * since the user did not indicate EOR in this case.
        /* clean up */
        if ((asoc->stream_queue_cnt == 1) &&
            ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
            (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
            (asoc->locked_on_sending)) {
            struct sctp_stream_queue_pending *sp;

            /*
             * We may be in a state where everything has gotten across,
             * but we cannot write more due to a shutdown; abort, since
             * the user did not indicate EOR in this case.
             */
            sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
                sctp_streamhead);
            if ((sp) && (sp->length == 0)) {
                asoc->locked_on_sending = NULL;
                if (sp->msg_is_complete) {
                    asoc->stream_queue_cnt--;
                } else {
                    asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
                    asoc->stream_queue_cnt--;
                }
            }
        }
        if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
            (asoc->stream_queue_cnt == 0)) {
            if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
                /* Need to abort here */
                struct mbuf *oper;

        abort_out_now:
                *abort_now = 1;
                /* XXX */
                oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
                    0, M_DONTWAIT, 1, MT_DATA);
                if (oper) {
                    struct sctp_paramhdr *ph;
                    uint32_t *ippp;

                    SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
                        sizeof(uint32_t);
                    ph = mtod(oper, struct sctp_paramhdr *);
                    ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
                    ph->param_length = htons(SCTP_BUF_LEN(oper));
                    ippp = (uint32_t *) (ph + 1);
                    *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
                }
                stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
                sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
                return;
            } else {
                if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
                    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
                    SCTP_STAT_DECR_GAUGE32(sctps_currestab);
                }
                SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
                SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
                sctp_stop_timers_for_shutdown(stcb);
                sctp_send_shutdown(stcb,
                    stcb->asoc.primary_destination);
                sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
                    stcb->sctp_ep, stcb, asoc->primary_destination);
                sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
                    stcb->sctp_ep, stcb, asoc->primary_destination);
            }
            return;
        } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
            (asoc->stream_queue_cnt == 0)) {
            if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
                goto abort_out_now;
            }
            SCTP_STAT_DECR_GAUGE32(sctps_currestab);
            SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
            SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
            sctp_send_shutdown_ack(stcb,
                stcb->asoc.primary_destination);

            sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
                stcb->sctp_ep, stcb, asoc->primary_destination);
            return;
        }
    }
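    /*
     * Editor's note (illustrative, not part of the original source): the
     * user-initiated abort built above carries an 8-byte error cause: a
     * struct sctp_paramhdr (param_type = SCTP_CAUSE_USER_INITIATED_ABT,
     * param_length = 8) followed by one uint32_t tagging where in the
     * code the abort originated (SCTP_FROM_SCTP_INDATA + SCTP_LOC_31),
     * all multi-byte fields in network byte order.
     */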
    /*
     * Now here we are going to recycle net_ack for a different use...
     * HEADS UP.
     */
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        net->net_ack = 0;
    }

    /*
     * CMT DAC algorithm: if the SACK's DAC flag was 0, then no extra
     * marking is to be done.  Setting this_sack_lowest_newack to the
     * cum_ack will automatically ensure that.
     */
    if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
        this_sack_lowest_newack = cum_ack;
    }
    if ((num_seg > 0) || (num_nr_seg > 0)) {
        sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
            biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
    }
    /* JRS - Use the congestion control given in the CC module */
    asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);

    /******************************************************************
     * Here we do the stuff with ECN Nonce checking.
     * We basically check to see if the nonce sum flag was incorrect
     * or if resynchronization needs to be done.  Also, if we catch a
     * misbehaving receiver we give him the kick.
     ******************************************************************/

    if (asoc->ecn_nonce_allowed) {
        if (asoc->nonce_sum_check) {
            if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
                if (asoc->nonce_wait_for_ecne == 0) {
                    struct sctp_tmit_chunk *lchk;

                    lchk = TAILQ_FIRST(&asoc->send_queue);
                    asoc->nonce_wait_for_ecne = 1;
                    if (lchk) {
                        asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
                    } else {
                        asoc->nonce_wait_tsn = asoc->sending_seq;
                    }
                } else {
                    if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
                        (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
                        /*
                         * Misbehaving peer.  We need
                         * to react to this guy.
                         */
                        asoc->ecn_allowed = 0;
                        asoc->ecn_nonce_allowed = 0;
                    }
                }
            }
        } else {
            /* See if resynchronization is possible */
            if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
                asoc->nonce_sum_check = 1;
                /*
                 * Now we must calculate what the base is.
                 * We do this based on two things: we know
                 * the totals for all the segments gap-acked
                 * in the SACK (stored in ecn_seg_sums), and
                 * we know the SACK's nonce sum flag
                 * (nonce_sum_flag).  So we can build a truth
                 * table to back-calculate the new value of
                 * asoc->nonce_sum_expect_base:
                 *
                 *    SACK-flag-Value   Seg-Sums   Base
                 *          0              0        0
                 *          1              0        1
                 *          0              1        1
                 *          1              1        0
                 *
                 * i.e. Base = SACK-flag XOR Seg-Sums.
                 */
                asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
            }
        }
    }
    /* Now are we exiting loss recovery ? */
    if (will_exit_fast_recovery) {
        /* Ok, we must exit fast recovery */
        asoc->fast_retran_loss_recovery = 0;
    }
    if ((asoc->sat_t3_loss_recovery) &&
        ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
        MAX_TSN) ||
        (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
        /* end satellite t3 loss recovery */
        asoc->sat_t3_loss_recovery = 0;
    }
    /*
     * CMT Fast recovery
     */
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        if (net->will_exit_fast_recovery) {
            /* Ok, we must exit fast recovery */
            net->fast_retran_loss_recovery = 0;
        }
    }

    /* Adjust and set the new rwnd value */
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
        sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
            asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
    }
    asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
        (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
    if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
        /* SWS sender side engages */
        asoc->peers_rwnd = 0;
    }
    if (asoc->peers_rwnd > old_rwnd) {
        win_probe_recovery = 1;
    }
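    /*
     * Editor's note (illustrative, not part of the original source; all
     * numbers hypothetical): the rwnd update above debits what is still
     * outstanding.  E.g. with a_rwnd = 64000, total_flight = 60000,
     * sent_queue_cnt = 10 and sctp_peer_chunk_oh = 256, peers_rwnd =
     * 64000 - (60000 + 2560) = 1440; had that fallen below
     * sctp_sws_sender, it would be clamped to 0 to avoid
     * silly-window-syndrome sends.
     */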
    /*
     * Now we must setup so we have a timer up for anyone with
     * outstanding data.
     */
    done_once = 0;
again:
    j = 0;
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        if (win_probe_recovery && (net->window_probe)) {
            win_probe_recovered = 1;
            /*-
             * Find the first chunk that was used with a window
             * probe and clear the event.  Put it back into the
             * send queue as if it had not been sent.
             */
            TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
                if (tp1->window_probe) {
                    sctp_window_probe_recovery(stcb, asoc, net, tp1);
                    break;
                }
            }
        }
        if (net->flight_size) {
            j++;
            if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
                sctp_timer_start(SCTP_TIMER_TYPE_SEND,
                    stcb->sctp_ep, stcb, net);
            }
            if (net->window_probe) {
                net->window_probe = 0;
            }
        } else {
            if (net->window_probe) {
                /*
                 * In window probes we must ensure a timer
                 * is still running there.
                 */
                if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
                    sctp_timer_start(SCTP_TIMER_TYPE_SEND,
                        stcb->sctp_ep, stcb, net);
                }
            } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
                sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
                    stcb, net,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
            }
            if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
                if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
                    SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
                    sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
                }
            }
        }
    }
    if ((j == 0) &&
        (!TAILQ_EMPTY(&asoc->sent_queue)) &&
        (asoc->sent_queue_retran_cnt == 0) &&
        (win_probe_recovered == 0) &&
        (done_once == 0)) {
        /*
         * Huh, this should not happen unless all packets are
         * PR-SCTP and marked to skip, of course.
         */
        if (sctp_fs_audit(asoc)) {
            TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
                net->flight_size = 0;
            }
            asoc->total_flight = 0;
            asoc->total_flight_count = 0;
            asoc->sent_queue_retran_cnt = 0;
            TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
                if (tp1->sent < SCTP_DATAGRAM_RESEND) {
                    sctp_flight_size_increase(tp1);
                    sctp_total_flight_increase(stcb, tp1);
                } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
                    asoc->sent_queue_retran_cnt++;
                }
            }
        }
        done_once = 1;
        goto again;
    }
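    /*
     * Editor's note (illustrative, not part of the original source; the
     * TSN values are hypothetical): the PR-SCTP section below advances
     * the "advanced peer ack point".  E.g. with cum_ack = 100 and
     * expired/skipped PR-SCTP chunks for TSNs 101-103 at the head of the
     * sent queue, the ack point moves to 103 and a FWD-TSN chunk is sent
     * so the receiver can skip over the abandoned TSNs.
     */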
5717 */ 5718 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 5719 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 5720 0xee, cum_ack, asoc->advanced_peer_ack_point, 5721 old_adv_peer_ack_point); 5722 } 5723 if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point, 5724 MAX_TSN)) { 5725 send_forward_tsn(stcb, asoc); 5726 /* 5727 * ECN Nonce: Disable Nonce Sum check when 5728 * FWD TSN is sent and store resync tsn 5729 */ 5730 asoc->nonce_sum_check = 0; 5731 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point; 5732 } else if (lchk) { 5733 /* try to FR fwd-tsn's that get lost too */ 5734 lchk->rec.data.fwd_tsn_cnt++; 5735 if (lchk->rec.data.fwd_tsn_cnt > 3) { 5736 send_forward_tsn(stcb, asoc); 5737 lchk->rec.data.fwd_tsn_cnt = 0; 5738 } 5739 } 5740 } 5741 if (lchk) { 5742 /* Assure a timer is up */ 5743 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5744 stcb->sctp_ep, stcb, lchk->whoTo); 5745 } 5746 } 5747 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 5748 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 5749 a_rwnd, 5750 stcb->asoc.peers_rwnd, 5751 stcb->asoc.total_flight, 5752 stcb->asoc.total_output_queue_size); 5753 } 5754 } 5755 5756 void 5757 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, 5758 struct sctp_nets *netp, int *abort_flag) 5759 { 5760 /* Copy cum-ack */ 5761 uint32_t cum_ack, a_rwnd; 5762 5763 cum_ack = ntohl(cp->cumulative_tsn_ack); 5764 /* Arrange so a_rwnd does NOT change */ 5765 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight; 5766 5767 /* Now call the express sack handling */ 5768 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag); 5769 } 5770 5771 static void 5772 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, 5773 struct sctp_stream_in *strmin) 5774 { 5775 struct sctp_queued_to_read *ctl, *nctl; 5776 struct sctp_association *asoc; 5777 int tt; 5778 5779 /* EY -used to calculate nr_gap information */ 5780 uint32_t nr_tsn, nr_gap; 5781 5782 asoc = &stcb->asoc; 5783 tt = strmin->last_sequence_delivered; 5784 /* 5785 * First deliver anything prior to and including the stream no that 5786 * came in 5787 */ 5788 ctl = TAILQ_FIRST(&strmin->inqueue); 5789 while (ctl) { 5790 nctl = TAILQ_NEXT(ctl, next); 5791 if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) || 5792 (tt == ctl->sinfo_ssn)) { 5793 /* this is deliverable now */ 5794 TAILQ_REMOVE(&strmin->inqueue, ctl, next); 5795 /* subtract pending on streams */ 5796 asoc->size_on_all_streams -= ctl->length; 5797 sctp_ucount_decr(asoc->cnt_on_all_streams); 5798 /* deliver it to at least the delivery-q */ 5799 if (stcb->sctp_socket) { 5800 /* EY need the tsn info for calculating nr */ 5801 nr_tsn = ctl->sinfo_tsn; 5802 sctp_add_to_readq(stcb->sctp_ep, stcb, 5803 ctl, 5804 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED); 5805 /* 5806 * EY this is the chunk that should be 5807 * tagged nr gapped calculate the gap and 5808 * such then tag this TSN nr 5809 * chk->rec.data.TSN_seq 5810 */ 5811 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && 5812 asoc->peer_supports_nr_sack) { 5813 SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn); 5814 if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) || 5815 (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) { 5816 /* 5817 * EY These should never 5818 * happen- explained before 5819 */ 5820 } else { 5821 SCTP_TCB_LOCK_ASSERT(stcb); 5822 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap); 5823 SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc); 5824 if 
static void
sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
    struct sctp_stream_in *strmin)
{
    struct sctp_queued_to_read *ctl, *nctl;
    struct sctp_association *asoc;
    int tt;

    /* EY - used to calculate nr_gap information */
    uint32_t nr_tsn, nr_gap;

    asoc = &stcb->asoc;
    tt = strmin->last_sequence_delivered;
    /*
     * First deliver anything prior to and including the stream sequence
     * number that came in.
     */
    ctl = TAILQ_FIRST(&strmin->inqueue);
    while (ctl) {
        nctl = TAILQ_NEXT(ctl, next);
        if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
            (tt == ctl->sinfo_ssn)) {
            /* this is deliverable now */
            TAILQ_REMOVE(&strmin->inqueue, ctl, next);
            /* subtract pending on streams */
            asoc->size_on_all_streams -= ctl->length;
            sctp_ucount_decr(asoc->cnt_on_all_streams);
            /* deliver it to at least the delivery-q */
            if (stcb->sctp_socket) {
                /* EY - need the tsn info for calculating nr */
                nr_tsn = ctl->sinfo_tsn;
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    ctl,
                    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
                /*
                 * EY - this is the chunk that should be
                 * tagged nr-gapped: calculate the gap and
                 * then tag this TSN as non-renegable.
                 */
                if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
                    asoc->peer_supports_nr_sack) {
                    SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
                    if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
                        (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
                        /*
                         * EY - these should never
                         * happen (explained earlier).
                         */
                    } else {
                        SCTP_TCB_LOCK_ASSERT(stcb);
                        SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
                        SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
                        if (compare_with_wrap(nr_tsn,
                            asoc->highest_tsn_inside_nr_map,
                            MAX_TSN))
                            asoc->highest_tsn_inside_nr_map = nr_tsn;
                    }
                    if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, nr_gap)) {
                        /*
                         * EY - debug-only sanity check
                         * (printfs removed): a TSN being
                         * tagged NR should already be
                         * present in the mapping_array;
                         * otherwise map and nr_map are
                         * inconsistent.  Not 100% sure
                         * the TCB lock is required here.
                         */
                    }
                }
            }
        } else {
            /* no more delivery now. */
            break;
        }
        ctl = nctl;
    }
    /*
     * Now we must deliver things in queue the normal way if any are now
     * ready (i.e., consecutive SSNs following last_sequence_delivered).
     */
    tt = strmin->last_sequence_delivered + 1;
    ctl = TAILQ_FIRST(&strmin->inqueue);
    while (ctl) {
        nctl = TAILQ_NEXT(ctl, next);
        if (tt == ctl->sinfo_ssn) {
            /* this is deliverable now */
            TAILQ_REMOVE(&strmin->inqueue, ctl, next);
            /* subtract pending on streams */
            asoc->size_on_all_streams -= ctl->length;
            sctp_ucount_decr(asoc->cnt_on_all_streams);
            /* deliver it to at least the delivery-q */
            strmin->last_sequence_delivered = ctl->sinfo_ssn;
            if (stcb->sctp_socket) {
                /* EY */
                nr_tsn = ctl->sinfo_tsn;
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    ctl,
                    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
                /*
                 * EY - this is the chunk that should be
                 * tagged nr-gapped: calculate the gap and
                 * then tag this TSN as non-renegable.
                 */
                if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
                    asoc->peer_supports_nr_sack) {
                    SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
                    if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
                        (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
                        /*
                         * EY - these should never
                         * happen (explained earlier).
                         */
                    } else {
                        SCTP_TCB_LOCK_ASSERT(stcb);
                        SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
                        SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
                        if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map,
                            MAX_TSN))
                            asoc->highest_tsn_inside_nr_map = nr_tsn;
                    }
                    if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, nr_gap)) {
                        /*
                         * EY - debug-only sanity check
                         * (printfs removed), as in the
                         * first delivery loop above.
                         */
                    }
                }
            }
            tt = strmin->last_sequence_delivered + 1;
        } else {
            break;
        }
        ctl = nctl;
    }
}
static void
sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    uint16_t stream, uint16_t seq)
{
    struct sctp_tmit_chunk *chk, *at;

    if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
        /* For each one on here see if we need to toss it */
        /*
         * For now large messages held on the reasmqueue that are
         * complete will be tossed too.  We could in theory do more
         * work to spin through and stop after dumping one msg, aka
         * seeing the start of a new msg at the head, and call the
         * delivery function... to see if it can be delivered... But
         * for now we just dump everything on the queue.
         */
        chk = TAILQ_FIRST(&asoc->reasmqueue);
        while (chk) {
            at = TAILQ_NEXT(chk, sctp_next);
            /*
             * Do not toss it if it is on a different stream or
             * marked for unordered delivery, in which case the
             * stream sequence number has no meaning.
             */
            if ((chk->rec.data.stream_number != stream) ||
                ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
                chk = at;
                continue;
            }
            if (chk->rec.data.stream_seq == seq) {
                /* It needs to be tossed */
                TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
                if (compare_with_wrap(chk->rec.data.TSN_seq,
                    asoc->tsn_last_delivered, MAX_TSN)) {
                    asoc->tsn_last_delivered =
                        chk->rec.data.TSN_seq;
                    asoc->str_of_pdapi =
                        chk->rec.data.stream_number;
                    asoc->ssn_of_pdapi =
                        chk->rec.data.stream_seq;
                    asoc->fragment_flags =
                        chk->rec.data.rcv_flags;
                }
                asoc->size_on_reasm_queue -= chk->send_size;
                sctp_ucount_decr(asoc->cnt_on_reasm_queue);

                /* Clear up any stream problem */
                if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
                    SCTP_DATA_UNORDERED &&
                    (compare_with_wrap(chk->rec.data.stream_seq,
                    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
                    MAX_SEQ))) {
                    /*
                     * We must advance this stream's
                     * sequence number if the chunk being
                     * skipped is not unordered.  There is
                     * a chance that, if the peer does not
                     * include the last fragment in its
                     * FWD-TSN, we WILL have a problem
                     * here, since a partial chunk that
                     * may never become deliverable would
                     * sit in the queue.  Also, if a
                     * partial delivery API read has
                     * started, the user may get a partial
                     * chunk and the next read returns a
                     * new chunk...  Really ugly, but I
                     * see no way around it!  Maybe a
                     * notify??
                     */
                    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
                        chk->rec.data.stream_seq;
                }
                if (chk->data) {
                    sctp_m_freem(chk->data);
                    chk->data = NULL;
                }
                sctp_free_a_chunk(stcb, chk);
            } else if (compare_with_wrap(chk->rec.data.stream_seq, seq, MAX_SEQ)) {
                /*
                 * If the stream_seq is > than the purging
                 * one, we are done.
                 */
                break;
            }
            chk = at;
        }
    }
}


void
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
    struct sctp_forward_tsn_chunk *fwd, int *abort_flag, struct mbuf *m, int offset)
{
    /*
     * ISSUES that MUST be fixed for ECN!  When we are the sender of the
     * forward TSN, when the SACK comes back that acknowledges the
     * FWD-TSN we must reset the NONCE sum to match correctly.  This
     * will get quite tricky since we may have sent more data
     * intervening and must carefully account for what the SACK says on
     * the nonce and any gaps that are reported.  This work will NOT be
     * done here, but I note it here since it is really related to
     * PR-SCTP and FWD-TSN's.
     */

    /* The pr-sctp fwd tsn */
    /*
     * Here we will perform all the data receiver side steps for
     * processing FwdTSN, as required by the PR-SCTP draft:
     *
     * Assume we get FwdTSN(x):
     *
     * 1) update local cumTSN to x
     * 2) try to further advance cumTSN to x + others we have
     * 3) examine and update re-ordering queue on pr-in-streams
     * 4) clean up re-assembly queue
     * 5) Send a sack to report where we are.
     */
    struct sctp_association *asoc;
    uint32_t new_cum_tsn, gap;
    unsigned int i, fwd_sz, cumack_set_flag, m_size;
    uint32_t str_seq;
    struct sctp_stream_in *strm;
    struct sctp_tmit_chunk *chk, *at;
    struct sctp_queued_to_read *ctl, *sv;

    cumack_set_flag = 0;
    asoc = &stcb->asoc;
    if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
        SCTPDBG(SCTP_DEBUG_INDATA1,
            "Bad size too small/big fwd-tsn\n");
        return;
    }
    m_size = (stcb->asoc.mapping_array_size << 3);
    /*************************************************************/
    /* 1. Here we update local cumTSN and shift the bitmap array */
    /*************************************************************/
    new_cum_tsn = ntohl(fwd->new_cumulative_tsn);

    if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
        asoc->cumulative_tsn == new_cum_tsn) {
        /* Already got there ... */
        return;
    }
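    /*
     * Editor's note (illustrative, not part of the original source; the
     * values are hypothetical): the gap computed below is the new
     * cumulative TSN's offset into the mapping bitmap, using serial
     * arithmetic.  E.g. with mapping_array_base_tsn = 0xFFFFFFF0 and
     * new_cum_tsn = 0x0000000F, the TSN space has wrapped and the gap
     * works out to (0x0000000F - 0xFFFFFFF0) mod 2^32 = 31, i.e. bit 31
     * of the map.
     */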
    if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
        MAX_TSN)) {
        asoc->highest_tsn_inside_map = new_cum_tsn;
        /* EY - nr_mapping_array version of the above */
        /*
         * if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
         * asoc->peer_supports_nr_sack)
         */
        asoc->highest_tsn_inside_nr_map = new_cum_tsn;
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
            sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
        }
    }
    /*
     * Now we know the new TSN is more advanced; let's find the actual
     * gap.
     */
    SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
    if (gap >= m_size) {
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
            sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
        }
        if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
            struct mbuf *oper;

            /*
             * Out of range (of the single-byte chunks in the
             * rwnd I give out); this must be an attacker.
             */
            *abort_flag = 1;
            oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                0, M_DONTWAIT, 1, MT_DATA);
            if (oper) {
                struct sctp_paramhdr *ph;
                uint32_t *ippp;

                SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
                    (sizeof(uint32_t) * 3);
                ph = mtod(oper, struct sctp_paramhdr *);
                ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                ph->param_length = htons(SCTP_BUF_LEN(oper));
                ippp = (uint32_t *) (ph + 1);
                *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
                ippp++;
                *ippp = asoc->highest_tsn_inside_map;
                ippp++;
                *ippp = new_cum_tsn;
            }
            stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
            sctp_abort_an_association(stcb->sctp_ep, stcb,
                SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
            return;
        }
        SCTP_STAT_INCR(sctps_fwdtsn_map_over);
        memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
        cumack_set_flag = 1;
        asoc->mapping_array_base_tsn = new_cum_tsn + 1;
        asoc->cumulative_tsn = asoc->highest_tsn_inside_map = new_cum_tsn;
        /* EY - nr_sack: nr_mapping_array version of the above */
        if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
            memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.nr_mapping_array_size);
            asoc->nr_mapping_array_base_tsn = new_cum_tsn + 1;
            asoc->highest_tsn_inside_nr_map = new_cum_tsn;
            if (asoc->nr_mapping_array_size != asoc->mapping_array_size) {
                /*
                 * EY - debug-only sanity check (printf
                 * removed): the sizes of map and nr_map
                 * should always be equal.
                 */
            }
        }
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
            sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
        }
        asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
    } else {
        SCTP_TCB_LOCK_ASSERT(stcb);
        for (i = 0; i <= gap; i++) {
            if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack
                && SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
                SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
            } else {
                SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
            }
        }
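        /*
         * Editor's note (illustrative, not part of the original
         * source): the loop above marks every TSN from the bitmap base
         * through the new cumulative TSN as received.  When the peer
         * supports NR-SACK and draining is disabled, the marks go into
         * nr_mapping_array instead, flagging those TSNs as
         * non-renegable.
         */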
        /*
         * Now, after marking all, slide things forward but send no
         * sack, please.
         */
        sctp_sack_check(stcb, 0, 0, abort_flag);
        if (*abort_flag)
            return;
    }
    /*************************************************************/
    /* 2. Clear up re-assembly queue                             */
    /*************************************************************/
    /*
     * First service it if pd-api is up, just in case we can progress it
     * forward.
     */
    if (asoc->fragmented_delivery_inprogress) {
        sctp_service_reassembly(stcb, asoc);
    }
    if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
        /* For each one on here see if we need to toss it */
        /*
         * For now large messages held on the reasmqueue that are
         * complete will be tossed too.  We could in theory do more
         * work to spin through and stop after dumping one msg, aka
         * seeing the start of a new msg at the head, and call the
         * delivery function... to see if it can be delivered... But
         * for now we just dump everything on the queue.
         */
        chk = TAILQ_FIRST(&asoc->reasmqueue);
        while (chk) {
            at = TAILQ_NEXT(chk, sctp_next);
            if ((compare_with_wrap(new_cum_tsn,
                chk->rec.data.TSN_seq, MAX_TSN)) ||
                (new_cum_tsn == chk->rec.data.TSN_seq)) {
                /* It needs to be tossed */
                TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
                if (compare_with_wrap(chk->rec.data.TSN_seq,
                    asoc->tsn_last_delivered, MAX_TSN)) {
                    asoc->tsn_last_delivered =
                        chk->rec.data.TSN_seq;
                    asoc->str_of_pdapi =
                        chk->rec.data.stream_number;
                    asoc->ssn_of_pdapi =
                        chk->rec.data.stream_seq;
                    asoc->fragment_flags =
                        chk->rec.data.rcv_flags;
                }
                asoc->size_on_reasm_queue -= chk->send_size;
                sctp_ucount_decr(asoc->cnt_on_reasm_queue);

                /* Clear up any stream problem */
                if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
                    SCTP_DATA_UNORDERED &&
                    (compare_with_wrap(chk->rec.data.stream_seq,
                    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
                    MAX_SEQ))) {
                    /*
                     * We must advance this stream's
                     * sequence number if the chunk being
                     * skipped is not unordered, for the
                     * same reasons given in
                     * sctp_flush_reassm_for_str_seq()
                     * above.
                     */
                    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
                        chk->rec.data.stream_seq;
                }
                if (chk->data) {
                    sctp_m_freem(chk->data);
                    chk->data = NULL;
                }
                sctp_free_a_chunk(stcb, chk);
            } else {
                /*
                 * Ok, we have gone beyond the end of the
                 * fwd-tsn's mark.
                 */
                break;
            }
            chk = at;
        }
    }
    /*******************************************************/
    /* 3. Update the PR-stream re-ordering queues and fix  */
    /*    delivery issues as needed.                       */
    /*******************************************************/
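    /*
     * Editor's note (illustrative, not part of the original source):
     * after the fixed header, a FWD-TSN chunk carries zero or more
     * (stream, sequence) pairs, one struct sctp_strseq (two uint16_t
     * fields, network byte order) per skipped stream:
     *
     *    +-------------------+-------------------+
     *    |  stream (16 bit)  | sequence (16 bit) |   x num_str
     *    +-------------------+-------------------+
     *
     * The loop below walks those pairs and flushes each stream's
     * re-ordering and re-assembly state up to the given sequence.
     */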
    fwd_sz -= sizeof(*fwd);
    if (m && fwd_sz) {
        /* New method. */
        unsigned int num_str;
        struct sctp_strseq *stseq, strseqbuf;

        offset += sizeof(*fwd);

        SCTP_INP_READ_LOCK(stcb->sctp_ep);
        num_str = fwd_sz / sizeof(struct sctp_strseq);
        for (i = 0; i < num_str; i++) {
            uint16_t st;

            stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
                sizeof(struct sctp_strseq),
                (uint8_t *)&strseqbuf);
            offset += sizeof(struct sctp_strseq);
            if (stseq == NULL) {
                break;
            }
            /* Convert */
            st = ntohs(stseq->stream);
            stseq->stream = st;
            st = ntohs(stseq->sequence);
            stseq->sequence = st;

            /* now process */

            /*
             * Ok, we now look for the stream/seq on the read
             * queue where it is not all delivered.  If we find
             * it, we transmute the read entry into a
             * PDI_ABORTED.
             */
            if (stseq->stream >= asoc->streamincnt) {
                /* screwed up streams, stop! */
                break;
            }
            if ((asoc->str_of_pdapi == stseq->stream) &&
                (asoc->ssn_of_pdapi == stseq->sequence)) {
                /*
                 * If this is the one we were partially
                 * delivering now, then we no longer are.
                 * Note this will change with the reassembly
                 * re-write.
                 */
                asoc->fragmented_delivery_inprogress = 0;
            }
            sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
            TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
                if ((ctl->sinfo_stream == stseq->stream) &&
                    (ctl->sinfo_ssn == stseq->sequence)) {
                    str_seq = (stseq->stream << 16) | stseq->sequence;
                    ctl->end_added = 1;
                    ctl->pdapi_aborted = 1;
                    sv = stcb->asoc.control_pdapi;
                    stcb->asoc.control_pdapi = ctl;
                    sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
                        stcb,
                        SCTP_PARTIAL_DELIVERY_ABORTED,
                        (void *)&str_seq,
                        SCTP_SO_NOT_LOCKED);
                    stcb->asoc.control_pdapi = sv;
                    break;
                } else if ((ctl->sinfo_stream == stseq->stream) &&
                    (compare_with_wrap(ctl->sinfo_ssn, stseq->sequence, MAX_SEQ))) {
                    /* We are past our victim SSN */
                    break;
                }
            }
            strm = &asoc->strmin[stseq->stream];
            if (compare_with_wrap(stseq->sequence,
                strm->last_sequence_delivered, MAX_SEQ)) {
                /* Update the sequence number */
                strm->last_sequence_delivered =
                    stseq->sequence;
            }
            /* now kick the stream the new way */
            /* sa_ignore NO_NULL_CHK */
            sctp_kick_prsctp_reorder_queue(stcb, strm);
        }
        SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
    }
    if (TAILQ_FIRST(&asoc->reasmqueue)) {
        /* now let's kick out and check for more fragmented delivery */
        /* sa_ignore NO_NULL_CHK */
        sctp_deliver_reasm_check(stcb, &stcb->asoc);
    }
}