1 /*- 2 * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * a) Redistributions of source code must retain the above copyright notice, 8 * this list of conditions and the following disclaimer. 9 * 10 * b) Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the distribution. 13 * 14 * c) Neither the name of Cisco Systems, Inc. nor the names of its 15 * contributors may be used to endorse or promote products derived 16 * from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 29 */ 30 31 /* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include <netinet/sctp_os.h> 37 #include <netinet/sctp_var.h> 38 #include <netinet/sctp_pcb.h> 39 #include <netinet/sctp_header.h> 40 #include <netinet/sctputil.h> 41 #include <netinet/sctp_output.h> 42 #include <netinet/sctp_input.h> 43 #include <netinet/sctp_indata.h> 44 #include <netinet/sctp_uio.h> 45 #include <netinet/sctp_timer.h> 46 47 48 #ifdef SCTP_DEBUG 49 extern uint32_t sctp_debug_on; 50 51 #endif 52 53 /* 54 * NOTES: On the outbound side of things I need to check the sack timer to 55 * see if I should generate a sack into the chunk queue (if I have data to 56 * send that is and will be sending it .. for bundling. 57 * 58 * The callback in sctp_usrreq.c will get called when the socket is read from. 59 * This will cause sctp_service_queues() to get called on the top entry in 60 * the list. 61 */ 62 63 extern int sctp_strict_sacks; 64 65 __inline void 66 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc) 67 { 68 uint32_t calc, calc_w_oh; 69 70 /* 71 * This is really set wrong with respect to a 1-2-m socket. Since 72 * the sb_cc is the count that everyone as put up. When we re-write 73 * sctp_soreceive then we will fix this so that ONLY this 74 * associations data is taken into account. 75 */ 76 if (stcb->sctp_socket == NULL) 77 return; 78 79 if (stcb->asoc.sb_cc == 0 && 80 asoc->size_on_reasm_queue == 0 && 81 asoc->size_on_all_streams == 0) { 82 /* Full rwnd granted */ 83 asoc->my_rwnd = max(stcb->sctp_socket->so_rcv.sb_hiwat, 84 SCTP_MINIMAL_RWND); 85 return; 86 } 87 /* get actual space */ 88 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv); 89 90 /* 91 * take out what has NOT been put on socket queue and we yet hold 92 * for putting up. 93 */ 94 calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue); 95 calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams); 96 97 if (calc == 0) { 98 /* out of space */ 99 asoc->my_rwnd = 0; 100 return; 101 } 102 /* what is the overhead of all these rwnd's */ 103 calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len); 104 asoc->my_rwnd = calc; 105 if (calc_w_oh == 0) { 106 /* 107 * If our overhead is greater than the advertised rwnd, we 108 * clamp the rwnd to 1. This lets us still accept inbound 109 * segments, but hopefully will shut the sender down when he 110 * finally gets the message. 111 */ 112 asoc->my_rwnd = 1; 113 } else { 114 /* SWS threshold */ 115 if (asoc->my_rwnd && 116 (asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) { 117 /* SWS engaged, tell peer none left */ 118 asoc->my_rwnd = 1; 119 } 120 } 121 } 122 123 /* Calculate what the rwnd would be */ 124 125 __inline uint32_t 126 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc) 127 { 128 uint32_t calc = 0, calc_w_oh; 129 130 /* 131 * This is really set wrong with respect to a 1-2-m socket. Since 132 * the sb_cc is the count that everyone as put up. When we re-write 133 * sctp_soreceive then we will fix this so that ONLY this 134 * associations data is taken into account. 135 */ 136 if (stcb->sctp_socket == NULL) 137 return (calc); 138 139 if (stcb->asoc.sb_cc == 0 && 140 asoc->size_on_reasm_queue == 0 && 141 asoc->size_on_all_streams == 0) { 142 /* Full rwnd granted */ 143 calc = max(stcb->sctp_socket->so_rcv.sb_hiwat, 144 SCTP_MINIMAL_RWND); 145 return (calc); 146 } 147 /* get actual space */ 148 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv); 149 150 /* 151 * take out what has NOT been put on socket queue and we yet hold 152 * for putting up. 153 */ 154 calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue); 155 calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams); 156 157 if (calc == 0) { 158 /* out of space */ 159 return (calc); 160 } 161 /* what is the overhead of all these rwnd's */ 162 calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len); 163 if (calc_w_oh == 0) { 164 /* 165 * If our overhead is greater than the advertised rwnd, we 166 * clamp the rwnd to 1. This lets us still accept inbound 167 * segments, but hopefully will shut the sender down when he 168 * finally gets the message. 169 */ 170 calc = 1; 171 } else { 172 /* SWS threshold */ 173 if (calc && 174 (calc < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) { 175 /* SWS engaged, tell peer none left */ 176 calc = 1; 177 } 178 } 179 return (calc); 180 } 181 182 183 184 /* 185 * Build out our readq entry based on the incoming packet. 186 */ 187 struct sctp_queued_to_read * 188 sctp_build_readq_entry(struct sctp_tcb *stcb, 189 struct sctp_nets *net, 190 uint32_t tsn, uint32_t ppid, 191 uint32_t context, uint16_t stream_no, 192 uint16_t stream_seq, uint8_t flags, 193 struct mbuf *dm) 194 { 195 struct sctp_queued_to_read *read_queue_e = NULL; 196 197 sctp_alloc_a_readq(stcb, read_queue_e); 198 if (read_queue_e == NULL) { 199 goto failed_build; 200 } 201 read_queue_e->sinfo_stream = stream_no; 202 read_queue_e->sinfo_ssn = stream_seq; 203 read_queue_e->sinfo_flags = (flags << 8); 204 read_queue_e->sinfo_ppid = ppid; 205 read_queue_e->sinfo_context = stcb->asoc.context; 206 read_queue_e->sinfo_timetolive = 0; 207 read_queue_e->sinfo_tsn = tsn; 208 read_queue_e->sinfo_cumtsn = tsn; 209 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb); 210 read_queue_e->whoFrom = net; 211 read_queue_e->length = 0; 212 atomic_add_int(&net->ref_count, 1); 213 read_queue_e->data = dm; 214 read_queue_e->spec_flags = 0; 215 read_queue_e->tail_mbuf = NULL; 216 read_queue_e->stcb = stcb; 217 read_queue_e->port_from = stcb->rport; 218 read_queue_e->do_not_ref_stcb = 0; 219 read_queue_e->end_added = 0; 220 read_queue_e->pdapi_aborted = 0; 221 failed_build: 222 return (read_queue_e); 223 } 224 225 226 /* 227 * Build out our readq entry based on the incoming packet. 228 */ 229 static struct sctp_queued_to_read * 230 sctp_build_readq_entry_chk(struct sctp_tcb *stcb, 231 struct sctp_tmit_chunk *chk) 232 { 233 struct sctp_queued_to_read *read_queue_e = NULL; 234 235 sctp_alloc_a_readq(stcb, read_queue_e); 236 if (read_queue_e == NULL) { 237 goto failed_build; 238 } 239 read_queue_e->sinfo_stream = chk->rec.data.stream_number; 240 read_queue_e->sinfo_ssn = chk->rec.data.stream_seq; 241 read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8); 242 read_queue_e->sinfo_ppid = chk->rec.data.payloadtype; 243 read_queue_e->sinfo_context = stcb->asoc.context; 244 read_queue_e->sinfo_timetolive = 0; 245 read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq; 246 read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq; 247 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb); 248 read_queue_e->whoFrom = chk->whoTo; 249 read_queue_e->length = 0; 250 atomic_add_int(&chk->whoTo->ref_count, 1); 251 read_queue_e->data = chk->data; 252 read_queue_e->tail_mbuf = NULL; 253 read_queue_e->stcb = stcb; 254 read_queue_e->port_from = stcb->rport; 255 read_queue_e->spec_flags = 0; 256 read_queue_e->do_not_ref_stcb = 0; 257 read_queue_e->end_added = 0; 258 read_queue_e->pdapi_aborted = 0; 259 failed_build: 260 return (read_queue_e); 261 } 262 263 264 struct mbuf * 265 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, 266 struct sctp_sndrcvinfo *sinfo) 267 { 268 struct sctp_sndrcvinfo *outinfo; 269 struct cmsghdr *cmh; 270 struct mbuf *ret; 271 int len; 272 int use_extended = 0; 273 274 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) { 275 /* user does not want the sndrcv ctl */ 276 return (NULL); 277 } 278 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) { 279 use_extended = 1; 280 len = CMSG_LEN(sizeof(struct sctp_extrcvinfo)); 281 } else { 282 len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo)); 283 } 284 285 286 ret = sctp_get_mbuf_for_msg(len, 287 0, M_DONTWAIT, 1, MT_DATA); 288 289 if (ret == NULL) { 290 /* No space */ 291 return (ret); 292 } 293 /* We need a CMSG header followed by the struct */ 294 cmh = mtod(ret, struct cmsghdr *); 295 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh); 296 cmh->cmsg_level = IPPROTO_SCTP; 297 if (use_extended) { 298 cmh->cmsg_type = SCTP_EXTRCV; 299 cmh->cmsg_len = len; 300 memcpy(outinfo, sinfo, len); 301 } else { 302 cmh->cmsg_type = SCTP_SNDRCV; 303 cmh->cmsg_len = len; 304 *outinfo = *sinfo; 305 } 306 SCTP_BUF_LEN(ret) = cmh->cmsg_len; 307 return (ret); 308 } 309 310 311 /* 312 * We are delivering currently from the reassembly queue. We must continue to 313 * deliver until we either: 1) run out of space. 2) run out of sequential 314 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag. 315 */ 316 static void 317 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc) 318 { 319 struct sctp_tmit_chunk *chk; 320 uint16_t nxt_todel; 321 uint16_t stream_no; 322 int end = 0; 323 int cntDel; 324 struct sctp_queued_to_read *control, *ctl, *ctlat; 325 326 cntDel = stream_no = 0; 327 if (stcb && 328 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 329 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) { 330 /* socket above is long gone */ 331 asoc->fragmented_delivery_inprogress = 0; 332 chk = TAILQ_FIRST(&asoc->reasmqueue); 333 while (chk) { 334 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); 335 asoc->size_on_reasm_queue -= chk->send_size; 336 sctp_ucount_decr(asoc->cnt_on_reasm_queue); 337 /* 338 * Lose the data pointer, since its in the socket 339 * buffer 340 */ 341 if (chk->data) { 342 sctp_m_freem(chk->data); 343 chk->data = NULL; 344 } 345 /* Now free the address and data */ 346 sctp_free_remote_addr(chk->whoTo); 347 sctp_free_a_chunk(stcb, chk); 348 chk = TAILQ_FIRST(&asoc->reasmqueue); 349 } 350 return; 351 } 352 SCTP_TCB_LOCK_ASSERT(stcb); 353 do { 354 chk = TAILQ_FIRST(&asoc->reasmqueue); 355 if (chk == NULL) { 356 return; 357 } 358 if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) { 359 /* Can't deliver more :< */ 360 return; 361 } 362 stream_no = chk->rec.data.stream_number; 363 nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1; 364 if (nxt_todel != chk->rec.data.stream_seq && 365 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) { 366 /* 367 * Not the next sequence to deliver in its stream OR 368 * unordered 369 */ 370 return; 371 } 372 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 373 374 control = sctp_build_readq_entry_chk(stcb, chk); 375 if (control == NULL) { 376 /* out of memory? */ 377 return; 378 } 379 /* save it off for our future deliveries */ 380 stcb->asoc.control_pdapi = control; 381 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) 382 end = 1; 383 else 384 end = 0; 385 sctp_add_to_readq(stcb->sctp_ep, 386 stcb, control, &stcb->sctp_socket->so_rcv, end); 387 cntDel++; 388 } else { 389 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) 390 end = 1; 391 else 392 end = 0; 393 if (sctp_append_to_readq(stcb->sctp_ep, stcb, 394 stcb->asoc.control_pdapi, 395 chk->data, end, chk->rec.data.TSN_seq, 396 &stcb->sctp_socket->so_rcv)) { 397 /* 398 * something is very wrong, either 399 * control_pdapi is NULL, or the tail_mbuf 400 * is corrupt, or there is a EOM already on 401 * the mbuf chain. 402 */ 403 if (stcb->asoc.control_pdapi == NULL) { 404 panic("This should not happen control_pdapi NULL?"); 405 } 406 if (stcb->asoc.control_pdapi->tail_mbuf == NULL) { 407 panic("This should not happen, tail_mbuf not being maintained?"); 408 } 409 /* if we did not panic, it was a EOM */ 410 panic("Bad chunking ??"); 411 } 412 cntDel++; 413 } 414 /* pull it we did it */ 415 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); 416 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 417 asoc->fragmented_delivery_inprogress = 0; 418 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) { 419 asoc->strmin[stream_no].last_sequence_delivered++; 420 } 421 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) { 422 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); 423 } 424 } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 425 /* 426 * turn the flag back on since we just delivered 427 * yet another one. 428 */ 429 asoc->fragmented_delivery_inprogress = 1; 430 } 431 asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq; 432 asoc->last_flags_delivered = chk->rec.data.rcv_flags; 433 asoc->last_strm_seq_delivered = chk->rec.data.stream_seq; 434 asoc->last_strm_no_delivered = chk->rec.data.stream_number; 435 436 asoc->tsn_last_delivered = chk->rec.data.TSN_seq; 437 asoc->size_on_reasm_queue -= chk->send_size; 438 sctp_ucount_decr(asoc->cnt_on_reasm_queue); 439 /* free up the chk */ 440 chk->data = NULL; 441 sctp_free_remote_addr(chk->whoTo); 442 sctp_free_a_chunk(stcb, chk); 443 444 if (asoc->fragmented_delivery_inprogress == 0) { 445 /* 446 * Now lets see if we can deliver the next one on 447 * the stream 448 */ 449 uint16_t nxt_todel; 450 struct sctp_stream_in *strm; 451 452 strm = &asoc->strmin[stream_no]; 453 nxt_todel = strm->last_sequence_delivered + 1; 454 ctl = TAILQ_FIRST(&strm->inqueue); 455 if (ctl && (nxt_todel == ctl->sinfo_ssn)) { 456 while (ctl != NULL) { 457 /* Deliver more if we can. */ 458 if (nxt_todel == ctl->sinfo_ssn) { 459 ctlat = TAILQ_NEXT(ctl, next); 460 TAILQ_REMOVE(&strm->inqueue, ctl, next); 461 asoc->size_on_all_streams -= ctl->length; 462 sctp_ucount_decr(asoc->cnt_on_all_streams); 463 strm->last_sequence_delivered++; 464 sctp_add_to_readq(stcb->sctp_ep, stcb, 465 ctl, 466 &stcb->sctp_socket->so_rcv, 1); 467 ctl = ctlat; 468 } else { 469 break; 470 } 471 nxt_todel = strm->last_sequence_delivered + 1; 472 } 473 } 474 break; 475 } 476 chk = TAILQ_FIRST(&asoc->reasmqueue); 477 } while (chk); 478 } 479 480 /* 481 * Queue the chunk either right into the socket buffer if it is the next one 482 * to go OR put it in the correct place in the delivery queue. If we do 483 * append to the so_buf, keep doing so until we are out of order. One big 484 * question still remains, what to do when the socket buffer is FULL?? 485 */ 486 static void 487 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc, 488 struct sctp_queued_to_read *control, int *abort_flag) 489 { 490 /* 491 * FIX-ME maybe? What happens when the ssn wraps? If we are getting 492 * all the data in one stream this could happen quite rapidly. One 493 * could use the TSN to keep track of things, but this scheme breaks 494 * down in the other type of stream useage that could occur. Send a 495 * single msg to stream 0, send 4Billion messages to stream 1, now 496 * send a message to stream 0. You have a situation where the TSN 497 * has wrapped but not in the stream. Is this worth worrying about 498 * or should we just change our queue sort at the bottom to be by 499 * TSN. 500 * 501 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2 502 * with TSN 1? If the peer is doing some sort of funky TSN/SSN 503 * assignment this could happen... and I don't see how this would be 504 * a violation. So for now I am undecided an will leave the sort by 505 * SSN alone. Maybe a hybred approach is the answer 506 * 507 */ 508 struct sctp_stream_in *strm; 509 struct sctp_queued_to_read *at; 510 int queue_needed; 511 uint16_t nxt_todel; 512 struct mbuf *oper; 513 514 queue_needed = 1; 515 asoc->size_on_all_streams += control->length; 516 sctp_ucount_incr(asoc->cnt_on_all_streams); 517 strm = &asoc->strmin[control->sinfo_stream]; 518 nxt_todel = strm->last_sequence_delivered + 1; 519 #ifdef SCTP_STR_LOGGING 520 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD); 521 #endif 522 #ifdef SCTP_DEBUG 523 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 524 printf("queue to stream called for ssn:%u lastdel:%u nxt:%u\n", 525 (uint32_t) control->sinfo_stream, 526 (uint32_t) strm->last_sequence_delivered, (uint32_t) nxt_todel); 527 } 528 #endif 529 if (compare_with_wrap(strm->last_sequence_delivered, 530 control->sinfo_ssn, MAX_SEQ) || 531 (strm->last_sequence_delivered == control->sinfo_ssn)) { 532 /* The incoming sseq is behind where we last delivered? */ 533 #ifdef SCTP_DEBUG 534 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 535 printf("Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n", 536 control->sinfo_ssn, 537 strm->last_sequence_delivered); 538 } 539 #endif 540 /* 541 * throw it in the stream so it gets cleaned up in 542 * association destruction 543 */ 544 TAILQ_INSERT_HEAD(&strm->inqueue, control, next); 545 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 546 0, M_DONTWAIT, 1, MT_DATA); 547 if (oper) { 548 struct sctp_paramhdr *ph; 549 uint32_t *ippp; 550 551 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 552 (sizeof(uint32_t) * 3); 553 ph = mtod(oper, struct sctp_paramhdr *); 554 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 555 ph->param_length = htons(SCTP_BUF_LEN(oper)); 556 ippp = (uint32_t *) (ph + 1); 557 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1); 558 ippp++; 559 *ippp = control->sinfo_tsn; 560 ippp++; 561 *ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn); 562 } 563 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1; 564 sctp_abort_an_association(stcb->sctp_ep, stcb, 565 SCTP_PEER_FAULTY, oper); 566 567 *abort_flag = 1; 568 return; 569 570 } 571 if (nxt_todel == control->sinfo_ssn) { 572 /* can be delivered right away? */ 573 #ifdef SCTP_STR_LOGGING 574 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL); 575 #endif 576 queue_needed = 0; 577 asoc->size_on_all_streams -= control->length; 578 sctp_ucount_decr(asoc->cnt_on_all_streams); 579 strm->last_sequence_delivered++; 580 sctp_add_to_readq(stcb->sctp_ep, stcb, 581 control, 582 &stcb->sctp_socket->so_rcv, 1); 583 control = TAILQ_FIRST(&strm->inqueue); 584 while (control != NULL) { 585 /* all delivered */ 586 nxt_todel = strm->last_sequence_delivered + 1; 587 if (nxt_todel == control->sinfo_ssn) { 588 at = TAILQ_NEXT(control, next); 589 TAILQ_REMOVE(&strm->inqueue, control, next); 590 asoc->size_on_all_streams -= control->length; 591 sctp_ucount_decr(asoc->cnt_on_all_streams); 592 strm->last_sequence_delivered++; 593 /* 594 * We ignore the return of deliver_data here 595 * since we always can hold the chunk on the 596 * d-queue. And we have a finite number that 597 * can be delivered from the strq. 598 */ 599 #ifdef SCTP_STR_LOGGING 600 sctp_log_strm_del(control, NULL, 601 SCTP_STR_LOG_FROM_IMMED_DEL); 602 #endif 603 sctp_add_to_readq(stcb->sctp_ep, stcb, 604 control, 605 &stcb->sctp_socket->so_rcv, 1); 606 control = at; 607 continue; 608 } 609 break; 610 } 611 } 612 if (queue_needed) { 613 /* 614 * Ok, we did not deliver this guy, find the correct place 615 * to put it on the queue. 616 */ 617 if (TAILQ_EMPTY(&strm->inqueue)) { 618 /* Empty queue */ 619 #ifdef SCTP_STR_LOGGING 620 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD); 621 #endif 622 TAILQ_INSERT_HEAD(&strm->inqueue, control, next); 623 } else { 624 TAILQ_FOREACH(at, &strm->inqueue, next) { 625 if (compare_with_wrap(at->sinfo_ssn, 626 control->sinfo_ssn, MAX_SEQ)) { 627 /* 628 * one in queue is bigger than the 629 * new one, insert before this one 630 */ 631 #ifdef SCTP_STR_LOGGING 632 sctp_log_strm_del(control, at, 633 SCTP_STR_LOG_FROM_INSERT_MD); 634 #endif 635 TAILQ_INSERT_BEFORE(at, control, next); 636 break; 637 } else if (at->sinfo_ssn == control->sinfo_ssn) { 638 /* 639 * Gak, He sent me a duplicate str 640 * seq number 641 */ 642 /* 643 * foo bar, I guess I will just free 644 * this new guy, should we abort 645 * too? FIX ME MAYBE? Or it COULD be 646 * that the SSN's have wrapped. 647 * Maybe I should compare to TSN 648 * somehow... sigh for now just blow 649 * away the chunk! 650 */ 651 652 if (control->data) 653 sctp_m_freem(control->data); 654 control->data = NULL; 655 asoc->size_on_all_streams -= control->length; 656 sctp_ucount_decr(asoc->cnt_on_all_streams); 657 sctp_free_remote_addr(control->whoFrom); 658 sctp_free_a_readq(stcb, control); 659 return; 660 } else { 661 if (TAILQ_NEXT(at, next) == NULL) { 662 /* 663 * We are at the end, insert 664 * it after this one 665 */ 666 #ifdef SCTP_STR_LOGGING 667 sctp_log_strm_del(control, at, 668 SCTP_STR_LOG_FROM_INSERT_TL); 669 #endif 670 TAILQ_INSERT_AFTER(&strm->inqueue, 671 at, control, next); 672 break; 673 } 674 } 675 } 676 } 677 } 678 } 679 680 /* 681 * Returns two things: You get the total size of the deliverable parts of the 682 * first fragmented message on the reassembly queue. And you get a 1 back if 683 * all of the message is ready or a 0 back if the message is still incomplete 684 */ 685 static int 686 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size) 687 { 688 struct sctp_tmit_chunk *chk; 689 uint32_t tsn; 690 691 *t_size = 0; 692 chk = TAILQ_FIRST(&asoc->reasmqueue); 693 if (chk == NULL) { 694 /* nothing on the queue */ 695 return (0); 696 } 697 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) { 698 /* Not a first on the queue */ 699 return (0); 700 } 701 tsn = chk->rec.data.TSN_seq; 702 while (chk) { 703 if (tsn != chk->rec.data.TSN_seq) { 704 return (0); 705 } 706 *t_size += chk->send_size; 707 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 708 return (1); 709 } 710 tsn++; 711 chk = TAILQ_NEXT(chk, sctp_next); 712 } 713 return (0); 714 } 715 716 static void 717 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc) 718 { 719 struct sctp_tmit_chunk *chk; 720 uint16_t nxt_todel; 721 uint32_t tsize; 722 723 doit_again: 724 chk = TAILQ_FIRST(&asoc->reasmqueue); 725 if (chk == NULL) { 726 /* Huh? */ 727 asoc->size_on_reasm_queue = 0; 728 asoc->cnt_on_reasm_queue = 0; 729 return; 730 } 731 if (asoc->fragmented_delivery_inprogress == 0) { 732 nxt_todel = 733 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1; 734 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) && 735 (nxt_todel == chk->rec.data.stream_seq || 736 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) { 737 /* 738 * Yep the first one is here and its ok to deliver 739 * but should we? 740 */ 741 if ((sctp_is_all_msg_on_reasm(asoc, &tsize) || 742 (tsize > stcb->sctp_ep->partial_delivery_point))) { 743 744 /* 745 * Yes, we setup to start reception, by 746 * backing down the TSN just in case we 747 * can't deliver. If we 748 */ 749 asoc->fragmented_delivery_inprogress = 1; 750 asoc->tsn_last_delivered = 751 chk->rec.data.TSN_seq - 1; 752 asoc->str_of_pdapi = 753 chk->rec.data.stream_number; 754 asoc->ssn_of_pdapi = chk->rec.data.stream_seq; 755 asoc->pdapi_ppid = chk->rec.data.payloadtype; 756 asoc->fragment_flags = chk->rec.data.rcv_flags; 757 sctp_service_reassembly(stcb, asoc); 758 } 759 } 760 } else { 761 /* 762 * Service re-assembly will deliver stream data queued at 763 * the end of fragmented delivery.. but it wont know to go 764 * back and call itself again... we do that here with the 765 * got doit_again 766 */ 767 sctp_service_reassembly(stcb, asoc); 768 if (asoc->fragmented_delivery_inprogress == 0) { 769 /* 770 * finished our Fragmented delivery, could be more 771 * waiting? 772 */ 773 goto doit_again; 774 } 775 } 776 } 777 778 /* 779 * Dump onto the re-assembly queue, in its proper place. After dumping on the 780 * queue, see if anthing can be delivered. If so pull it off (or as much as 781 * we can. If we run out of space then we must dump what we can and set the 782 * appropriate flag to say we queued what we could. 783 */ 784 static void 785 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc, 786 struct sctp_tmit_chunk *chk, int *abort_flag) 787 { 788 struct mbuf *oper; 789 uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn; 790 u_char last_flags; 791 struct sctp_tmit_chunk *at, *prev, *next; 792 793 prev = next = NULL; 794 cum_ackp1 = asoc->tsn_last_delivered + 1; 795 if (TAILQ_EMPTY(&asoc->reasmqueue)) { 796 /* This is the first one on the queue */ 797 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next); 798 /* 799 * we do not check for delivery of anything when only one 800 * fragment is here 801 */ 802 asoc->size_on_reasm_queue = chk->send_size; 803 sctp_ucount_incr(asoc->cnt_on_reasm_queue); 804 if (chk->rec.data.TSN_seq == cum_ackp1) { 805 if (asoc->fragmented_delivery_inprogress == 0 && 806 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) != 807 SCTP_DATA_FIRST_FRAG) { 808 /* 809 * An empty queue, no delivery inprogress, 810 * we hit the next one and it does NOT have 811 * a FIRST fragment mark. 812 */ 813 #ifdef SCTP_DEBUG 814 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 815 printf("Gak, Evil plot, its not first, no fragmented delivery in progress\n"); 816 } 817 #endif 818 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 819 0, M_DONTWAIT, 1, MT_DATA); 820 821 if (oper) { 822 struct sctp_paramhdr *ph; 823 uint32_t *ippp; 824 825 SCTP_BUF_LEN(oper) = 826 sizeof(struct sctp_paramhdr) + 827 (sizeof(uint32_t) * 3); 828 ph = mtod(oper, struct sctp_paramhdr *); 829 ph->param_type = 830 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 831 ph->param_length = htons(SCTP_BUF_LEN(oper)); 832 ippp = (uint32_t *) (ph + 1); 833 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2); 834 ippp++; 835 *ippp = chk->rec.data.TSN_seq; 836 ippp++; 837 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 838 839 } 840 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2; 841 sctp_abort_an_association(stcb->sctp_ep, stcb, 842 SCTP_PEER_FAULTY, oper); 843 *abort_flag = 1; 844 } else if (asoc->fragmented_delivery_inprogress && 845 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) { 846 /* 847 * We are doing a partial delivery and the 848 * NEXT chunk MUST be either the LAST or 849 * MIDDLE fragment NOT a FIRST 850 */ 851 #ifdef SCTP_DEBUG 852 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 853 printf("Gak, Evil plot, it IS a first and fragmented delivery in progress\n"); 854 } 855 #endif 856 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 857 0, M_DONTWAIT, 1, MT_DATA); 858 if (oper) { 859 struct sctp_paramhdr *ph; 860 uint32_t *ippp; 861 862 SCTP_BUF_LEN(oper) = 863 sizeof(struct sctp_paramhdr) + 864 (3 * sizeof(uint32_t)); 865 ph = mtod(oper, struct sctp_paramhdr *); 866 ph->param_type = 867 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 868 ph->param_length = htons(SCTP_BUF_LEN(oper)); 869 ippp = (uint32_t *) (ph + 1); 870 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3); 871 ippp++; 872 *ippp = chk->rec.data.TSN_seq; 873 ippp++; 874 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 875 } 876 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3; 877 sctp_abort_an_association(stcb->sctp_ep, stcb, 878 SCTP_PEER_FAULTY, oper); 879 *abort_flag = 1; 880 } else if (asoc->fragmented_delivery_inprogress) { 881 /* 882 * Here we are ok with a MIDDLE or LAST 883 * piece 884 */ 885 if (chk->rec.data.stream_number != 886 asoc->str_of_pdapi) { 887 /* Got to be the right STR No */ 888 #ifdef SCTP_DEBUG 889 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 890 printf("Gak, Evil plot, it IS not same stream number %d vs %d\n", 891 chk->rec.data.stream_number, 892 asoc->str_of_pdapi); 893 } 894 #endif 895 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 896 0, M_DONTWAIT, 1, MT_DATA); 897 if (oper) { 898 struct sctp_paramhdr *ph; 899 uint32_t *ippp; 900 901 SCTP_BUF_LEN(oper) = 902 sizeof(struct sctp_paramhdr) + 903 (sizeof(uint32_t) * 3); 904 ph = mtod(oper, 905 struct sctp_paramhdr *); 906 ph->param_type = 907 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 908 ph->param_length = 909 htons(SCTP_BUF_LEN(oper)); 910 ippp = (uint32_t *) (ph + 1); 911 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4); 912 ippp++; 913 *ippp = chk->rec.data.TSN_seq; 914 ippp++; 915 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 916 } 917 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4; 918 sctp_abort_an_association(stcb->sctp_ep, 919 stcb, SCTP_PEER_FAULTY, oper); 920 *abort_flag = 1; 921 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) != 922 SCTP_DATA_UNORDERED && 923 chk->rec.data.stream_seq != 924 asoc->ssn_of_pdapi) { 925 /* Got to be the right STR Seq */ 926 #ifdef SCTP_DEBUG 927 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 928 printf("Gak, Evil plot, it IS not same stream seq %d vs %d\n", 929 chk->rec.data.stream_seq, 930 asoc->ssn_of_pdapi); 931 } 932 #endif 933 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 934 0, M_DONTWAIT, 1, MT_DATA); 935 if (oper) { 936 struct sctp_paramhdr *ph; 937 uint32_t *ippp; 938 939 SCTP_BUF_LEN(oper) = 940 sizeof(struct sctp_paramhdr) + 941 (3 * sizeof(uint32_t)); 942 ph = mtod(oper, 943 struct sctp_paramhdr *); 944 ph->param_type = 945 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 946 ph->param_length = 947 htons(SCTP_BUF_LEN(oper)); 948 ippp = (uint32_t *) (ph + 1); 949 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5); 950 ippp++; 951 *ippp = chk->rec.data.TSN_seq; 952 ippp++; 953 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 954 955 } 956 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5; 957 sctp_abort_an_association(stcb->sctp_ep, 958 stcb, SCTP_PEER_FAULTY, oper); 959 *abort_flag = 1; 960 } 961 } 962 } 963 return; 964 } 965 /* Find its place */ 966 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) { 967 if (compare_with_wrap(at->rec.data.TSN_seq, 968 chk->rec.data.TSN_seq, MAX_TSN)) { 969 /* 970 * one in queue is bigger than the new one, insert 971 * before this one 972 */ 973 /* A check */ 974 asoc->size_on_reasm_queue += chk->send_size; 975 sctp_ucount_incr(asoc->cnt_on_reasm_queue); 976 next = at; 977 TAILQ_INSERT_BEFORE(at, chk, sctp_next); 978 break; 979 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) { 980 /* Gak, He sent me a duplicate str seq number */ 981 /* 982 * foo bar, I guess I will just free this new guy, 983 * should we abort too? FIX ME MAYBE? Or it COULD be 984 * that the SSN's have wrapped. Maybe I should 985 * compare to TSN somehow... sigh for now just blow 986 * away the chunk! 987 */ 988 if (chk->data) { 989 sctp_m_freem(chk->data); 990 chk->data = NULL; 991 } 992 sctp_free_remote_addr(chk->whoTo); 993 sctp_free_a_chunk(stcb, chk); 994 return; 995 } else { 996 last_flags = at->rec.data.rcv_flags; 997 last_tsn = at->rec.data.TSN_seq; 998 prev = at; 999 if (TAILQ_NEXT(at, sctp_next) == NULL) { 1000 /* 1001 * We are at the end, insert it after this 1002 * one 1003 */ 1004 /* check it first */ 1005 asoc->size_on_reasm_queue += chk->send_size; 1006 sctp_ucount_incr(asoc->cnt_on_reasm_queue); 1007 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next); 1008 break; 1009 } 1010 } 1011 } 1012 /* Now the audits */ 1013 if (prev) { 1014 prev_tsn = chk->rec.data.TSN_seq - 1; 1015 if (prev_tsn == prev->rec.data.TSN_seq) { 1016 /* 1017 * Ok the one I am dropping onto the end is the 1018 * NEXT. A bit of valdiation here. 1019 */ 1020 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 1021 SCTP_DATA_FIRST_FRAG || 1022 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 1023 SCTP_DATA_MIDDLE_FRAG) { 1024 /* 1025 * Insert chk MUST be a MIDDLE or LAST 1026 * fragment 1027 */ 1028 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 1029 SCTP_DATA_FIRST_FRAG) { 1030 #ifdef SCTP_DEBUG 1031 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1032 printf("Prev check - It can be a midlle or last but not a first\n"); 1033 printf("Gak, Evil plot, it's a FIRST!\n"); 1034 } 1035 #endif 1036 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1037 0, M_DONTWAIT, 1, MT_DATA); 1038 if (oper) { 1039 struct sctp_paramhdr *ph; 1040 uint32_t *ippp; 1041 1042 SCTP_BUF_LEN(oper) = 1043 sizeof(struct sctp_paramhdr) + 1044 (3 * sizeof(uint32_t)); 1045 ph = mtod(oper, 1046 struct sctp_paramhdr *); 1047 ph->param_type = 1048 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1049 ph->param_length = 1050 htons(SCTP_BUF_LEN(oper)); 1051 ippp = (uint32_t *) (ph + 1); 1052 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6); 1053 ippp++; 1054 *ippp = chk->rec.data.TSN_seq; 1055 ippp++; 1056 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1057 1058 } 1059 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6; 1060 sctp_abort_an_association(stcb->sctp_ep, 1061 stcb, SCTP_PEER_FAULTY, oper); 1062 *abort_flag = 1; 1063 return; 1064 } 1065 if (chk->rec.data.stream_number != 1066 prev->rec.data.stream_number) { 1067 /* 1068 * Huh, need the correct STR here, 1069 * they must be the same. 1070 */ 1071 #ifdef SCTP_DEBUG 1072 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1073 printf("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n", 1074 chk->rec.data.stream_number, 1075 prev->rec.data.stream_number); 1076 } 1077 #endif 1078 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1079 0, M_DONTWAIT, 1, MT_DATA); 1080 if (oper) { 1081 struct sctp_paramhdr *ph; 1082 uint32_t *ippp; 1083 1084 SCTP_BUF_LEN(oper) = 1085 sizeof(struct sctp_paramhdr) + 1086 (3 * sizeof(uint32_t)); 1087 ph = mtod(oper, 1088 struct sctp_paramhdr *); 1089 ph->param_type = 1090 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1091 ph->param_length = 1092 htons(SCTP_BUF_LEN(oper)); 1093 ippp = (uint32_t *) (ph + 1); 1094 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7); 1095 ippp++; 1096 *ippp = chk->rec.data.TSN_seq; 1097 ippp++; 1098 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1099 } 1100 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7; 1101 sctp_abort_an_association(stcb->sctp_ep, 1102 stcb, SCTP_PEER_FAULTY, oper); 1103 1104 *abort_flag = 1; 1105 return; 1106 } 1107 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 && 1108 chk->rec.data.stream_seq != 1109 prev->rec.data.stream_seq) { 1110 /* 1111 * Huh, need the correct STR here, 1112 * they must be the same. 1113 */ 1114 #ifdef SCTP_DEBUG 1115 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1116 printf("Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n", 1117 chk->rec.data.stream_seq, 1118 prev->rec.data.stream_seq); 1119 } 1120 #endif 1121 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1122 0, M_DONTWAIT, 1, MT_DATA); 1123 if (oper) { 1124 struct sctp_paramhdr *ph; 1125 uint32_t *ippp; 1126 1127 SCTP_BUF_LEN(oper) = 1128 sizeof(struct sctp_paramhdr) + 1129 (3 * sizeof(uint32_t)); 1130 ph = mtod(oper, 1131 struct sctp_paramhdr *); 1132 ph->param_type = 1133 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1134 ph->param_length = 1135 htons(SCTP_BUF_LEN(oper)); 1136 ippp = (uint32_t *) (ph + 1); 1137 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8); 1138 ippp++; 1139 *ippp = chk->rec.data.TSN_seq; 1140 ippp++; 1141 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1142 } 1143 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8; 1144 sctp_abort_an_association(stcb->sctp_ep, 1145 stcb, SCTP_PEER_FAULTY, oper); 1146 1147 *abort_flag = 1; 1148 return; 1149 } 1150 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 1151 SCTP_DATA_LAST_FRAG) { 1152 /* Insert chk MUST be a FIRST */ 1153 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) != 1154 SCTP_DATA_FIRST_FRAG) { 1155 #ifdef SCTP_DEBUG 1156 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1157 printf("Prev check - Gak, evil plot, its not FIRST and it must be!\n"); 1158 } 1159 #endif 1160 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1161 0, M_DONTWAIT, 1, MT_DATA); 1162 if (oper) { 1163 struct sctp_paramhdr *ph; 1164 uint32_t *ippp; 1165 1166 SCTP_BUF_LEN(oper) = 1167 sizeof(struct sctp_paramhdr) + 1168 (3 * sizeof(uint32_t)); 1169 ph = mtod(oper, 1170 struct sctp_paramhdr *); 1171 ph->param_type = 1172 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1173 ph->param_length = 1174 htons(SCTP_BUF_LEN(oper)); 1175 ippp = (uint32_t *) (ph + 1); 1176 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9); 1177 ippp++; 1178 *ippp = chk->rec.data.TSN_seq; 1179 ippp++; 1180 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1181 1182 } 1183 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9; 1184 sctp_abort_an_association(stcb->sctp_ep, 1185 stcb, SCTP_PEER_FAULTY, oper); 1186 1187 *abort_flag = 1; 1188 return; 1189 } 1190 } 1191 } 1192 } 1193 if (next) { 1194 post_tsn = chk->rec.data.TSN_seq + 1; 1195 if (post_tsn == next->rec.data.TSN_seq) { 1196 /* 1197 * Ok the one I am inserting ahead of is my NEXT 1198 * one. A bit of valdiation here. 1199 */ 1200 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 1201 /* Insert chk MUST be a last fragment */ 1202 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) 1203 != SCTP_DATA_LAST_FRAG) { 1204 #ifdef SCTP_DEBUG 1205 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1206 printf("Next chk - Next is FIRST, we must be LAST\n"); 1207 printf("Gak, Evil plot, its not a last!\n"); 1208 } 1209 #endif 1210 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1211 0, M_DONTWAIT, 1, MT_DATA); 1212 if (oper) { 1213 struct sctp_paramhdr *ph; 1214 uint32_t *ippp; 1215 1216 SCTP_BUF_LEN(oper) = 1217 sizeof(struct sctp_paramhdr) + 1218 (3 * sizeof(uint32_t)); 1219 ph = mtod(oper, 1220 struct sctp_paramhdr *); 1221 ph->param_type = 1222 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1223 ph->param_length = 1224 htons(SCTP_BUF_LEN(oper)); 1225 ippp = (uint32_t *) (ph + 1); 1226 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10); 1227 ippp++; 1228 *ippp = chk->rec.data.TSN_seq; 1229 ippp++; 1230 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1231 } 1232 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10; 1233 sctp_abort_an_association(stcb->sctp_ep, 1234 stcb, SCTP_PEER_FAULTY, oper); 1235 1236 *abort_flag = 1; 1237 return; 1238 } 1239 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 1240 SCTP_DATA_MIDDLE_FRAG || 1241 (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 1242 SCTP_DATA_LAST_FRAG) { 1243 /* 1244 * Insert chk CAN be MIDDLE or FIRST NOT 1245 * LAST 1246 */ 1247 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 1248 SCTP_DATA_LAST_FRAG) { 1249 #ifdef SCTP_DEBUG 1250 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1251 printf("Next chk - Next is a MIDDLE/LAST\n"); 1252 printf("Gak, Evil plot, new prev chunk is a LAST\n"); 1253 } 1254 #endif 1255 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1256 0, M_DONTWAIT, 1, MT_DATA); 1257 if (oper) { 1258 struct sctp_paramhdr *ph; 1259 uint32_t *ippp; 1260 1261 SCTP_BUF_LEN(oper) = 1262 sizeof(struct sctp_paramhdr) + 1263 (3 * sizeof(uint32_t)); 1264 ph = mtod(oper, 1265 struct sctp_paramhdr *); 1266 ph->param_type = 1267 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1268 ph->param_length = 1269 htons(SCTP_BUF_LEN(oper)); 1270 ippp = (uint32_t *) (ph + 1); 1271 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11); 1272 ippp++; 1273 *ippp = chk->rec.data.TSN_seq; 1274 ippp++; 1275 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1276 1277 } 1278 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11; 1279 sctp_abort_an_association(stcb->sctp_ep, 1280 stcb, SCTP_PEER_FAULTY, oper); 1281 1282 *abort_flag = 1; 1283 return; 1284 } 1285 if (chk->rec.data.stream_number != 1286 next->rec.data.stream_number) { 1287 /* 1288 * Huh, need the correct STR here, 1289 * they must be the same. 1290 */ 1291 #ifdef SCTP_DEBUG 1292 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1293 printf("Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n", 1294 chk->rec.data.stream_number, 1295 next->rec.data.stream_number); 1296 } 1297 #endif 1298 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1299 0, M_DONTWAIT, 1, MT_DATA); 1300 if (oper) { 1301 struct sctp_paramhdr *ph; 1302 uint32_t *ippp; 1303 1304 SCTP_BUF_LEN(oper) = 1305 sizeof(struct sctp_paramhdr) + 1306 (3 * sizeof(uint32_t)); 1307 ph = mtod(oper, 1308 struct sctp_paramhdr *); 1309 ph->param_type = 1310 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1311 ph->param_length = 1312 htons(SCTP_BUF_LEN(oper)); 1313 ippp = (uint32_t *) (ph + 1); 1314 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12); 1315 ippp++; 1316 *ippp = chk->rec.data.TSN_seq; 1317 ippp++; 1318 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1319 1320 } 1321 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12; 1322 sctp_abort_an_association(stcb->sctp_ep, 1323 stcb, SCTP_PEER_FAULTY, oper); 1324 1325 *abort_flag = 1; 1326 return; 1327 } 1328 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 && 1329 chk->rec.data.stream_seq != 1330 next->rec.data.stream_seq) { 1331 /* 1332 * Huh, need the correct STR here, 1333 * they must be the same. 1334 */ 1335 #ifdef SCTP_DEBUG 1336 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1337 printf("Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n", 1338 chk->rec.data.stream_seq, 1339 next->rec.data.stream_seq); 1340 } 1341 #endif 1342 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1343 0, M_DONTWAIT, 1, MT_DATA); 1344 if (oper) { 1345 struct sctp_paramhdr *ph; 1346 uint32_t *ippp; 1347 1348 SCTP_BUF_LEN(oper) = 1349 sizeof(struct sctp_paramhdr) + 1350 (3 * sizeof(uint32_t)); 1351 ph = mtod(oper, 1352 struct sctp_paramhdr *); 1353 ph->param_type = 1354 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1355 ph->param_length = 1356 htons(SCTP_BUF_LEN(oper)); 1357 ippp = (uint32_t *) (ph + 1); 1358 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13); 1359 ippp++; 1360 *ippp = chk->rec.data.TSN_seq; 1361 ippp++; 1362 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1363 } 1364 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13; 1365 sctp_abort_an_association(stcb->sctp_ep, 1366 stcb, SCTP_PEER_FAULTY, oper); 1367 1368 *abort_flag = 1; 1369 return; 1370 1371 } 1372 } 1373 } 1374 } 1375 /* Do we need to do some delivery? check */ 1376 sctp_deliver_reasm_check(stcb, asoc); 1377 } 1378 1379 /* 1380 * This is an unfortunate routine. It checks to make sure a evil guy is not 1381 * stuffing us full of bad packet fragments. A broken peer could also do this 1382 * but this is doubtful. It is to bad I must worry about evil crackers sigh 1383 * :< more cycles. 1384 */ 1385 static int 1386 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc, 1387 uint32_t TSN_seq) 1388 { 1389 struct sctp_tmit_chunk *at; 1390 uint32_t tsn_est; 1391 1392 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) { 1393 if (compare_with_wrap(TSN_seq, 1394 at->rec.data.TSN_seq, MAX_TSN)) { 1395 /* is it one bigger? */ 1396 tsn_est = at->rec.data.TSN_seq + 1; 1397 if (tsn_est == TSN_seq) { 1398 /* yep. It better be a last then */ 1399 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) != 1400 SCTP_DATA_LAST_FRAG) { 1401 /* 1402 * Ok this guy belongs next to a guy 1403 * that is NOT last, it should be a 1404 * middle/last, not a complete 1405 * chunk. 1406 */ 1407 return (1); 1408 } else { 1409 /* 1410 * This guy is ok since its a LAST 1411 * and the new chunk is a fully 1412 * self- contained one. 1413 */ 1414 return (0); 1415 } 1416 } 1417 } else if (TSN_seq == at->rec.data.TSN_seq) { 1418 /* Software error since I have a dup? */ 1419 return (1); 1420 } else { 1421 /* 1422 * Ok, 'at' is larger than new chunk but does it 1423 * need to be right before it. 1424 */ 1425 tsn_est = TSN_seq + 1; 1426 if (tsn_est == at->rec.data.TSN_seq) { 1427 /* Yep, It better be a first */ 1428 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) != 1429 SCTP_DATA_FIRST_FRAG) { 1430 return (1); 1431 } else { 1432 return (0); 1433 } 1434 } 1435 } 1436 } 1437 return (0); 1438 } 1439 1440 1441 extern unsigned int sctp_max_chunks_on_queue; 1442 static int 1443 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc, 1444 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length, 1445 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag, 1446 int *break_flag, int last_chunk) 1447 { 1448 /* Process a data chunk */ 1449 /* struct sctp_tmit_chunk *chk; */ 1450 struct sctp_tmit_chunk *chk; 1451 uint32_t tsn, gap; 1452 struct mbuf *dmbuf; 1453 int indx, the_len; 1454 int need_reasm_check = 0; 1455 uint16_t strmno, strmseq; 1456 struct mbuf *oper; 1457 struct sctp_queued_to_read *control; 1458 int ordered; 1459 uint32_t protocol_id; 1460 uint8_t chunk_flags; 1461 1462 chk = NULL; 1463 tsn = ntohl(ch->dp.tsn); 1464 chunk_flags = ch->ch.chunk_flags; 1465 protocol_id = ch->dp.protocol_id; 1466 ordered = ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0); 1467 #ifdef SCTP_MAP_LOGGING 1468 sctp_log_map(0, tsn, asoc->cumulative_tsn, SCTP_MAP_PREPARE_SLIDE); 1469 #endif 1470 if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) || 1471 asoc->cumulative_tsn == tsn) { 1472 /* It is a duplicate */ 1473 SCTP_STAT_INCR(sctps_recvdupdata); 1474 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { 1475 /* Record a dup for the next outbound sack */ 1476 asoc->dup_tsns[asoc->numduptsns] = tsn; 1477 asoc->numduptsns++; 1478 } 1479 return (0); 1480 } 1481 /* Calculate the number of TSN's between the base and this TSN */ 1482 if (tsn >= asoc->mapping_array_base_tsn) { 1483 gap = tsn - asoc->mapping_array_base_tsn; 1484 } else { 1485 gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1; 1486 } 1487 if (gap >= (SCTP_MAPPING_ARRAY << 3)) { 1488 /* Can't hold the bit in the mapping at max array, toss it */ 1489 return (0); 1490 } 1491 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) { 1492 if (sctp_expand_mapping_array(asoc)) { 1493 /* Can't expand, drop it */ 1494 return (0); 1495 } 1496 } 1497 if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) { 1498 *high_tsn = tsn; 1499 } 1500 /* See if we have received this one already */ 1501 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) { 1502 SCTP_STAT_INCR(sctps_recvdupdata); 1503 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { 1504 /* Record a dup for the next outbound sack */ 1505 asoc->dup_tsns[asoc->numduptsns] = tsn; 1506 asoc->numduptsns++; 1507 } 1508 if (!SCTP_OS_TIMER_PENDING(&asoc->dack_timer.timer)) { 1509 /* 1510 * By starting the timer we assure that we WILL sack 1511 * at the end of the packet when sctp_sack_check 1512 * gets called. 1513 */ 1514 sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, 1515 stcb, NULL); 1516 } 1517 return (0); 1518 } 1519 /* 1520 * Check to see about the GONE flag, duplicates would cause a sack 1521 * to be sent up above 1522 */ 1523 if (stcb && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 1524 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 1525 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) 1526 ) { 1527 /* 1528 * wait a minute, this guy is gone, there is no longer a 1529 * receiver. Send peer an ABORT! 1530 */ 1531 struct mbuf *op_err; 1532 1533 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 1534 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err); 1535 *abort_flag = 1; 1536 return (0); 1537 } 1538 /* 1539 * Now before going further we see if there is room. If NOT then we 1540 * MAY let one through only IF this TSN is the one we are waiting 1541 * for on a partial delivery API. 1542 */ 1543 1544 /* now do the tests */ 1545 if (((asoc->cnt_on_all_streams + 1546 asoc->cnt_on_reasm_queue + 1547 asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) || 1548 (((int)asoc->my_rwnd) <= 0)) { 1549 /* 1550 * When we have NO room in the rwnd we check to make sure 1551 * the reader is doing its job... 1552 */ 1553 if (stcb->sctp_socket->so_rcv.sb_cc) { 1554 /* some to read, wake-up */ 1555 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 1556 } 1557 /* now is it in the mapping array of what we have accepted? */ 1558 if (compare_with_wrap(tsn, 1559 asoc->highest_tsn_inside_map, MAX_TSN)) { 1560 1561 /* Nope not in the valid range dump it */ 1562 #ifdef SCTP_DEBUG 1563 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1564 printf("My rwnd overrun1:tsn:%lx rwnd %lu sbspace:%ld\n", 1565 (u_long)tsn, (u_long)asoc->my_rwnd, 1566 sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)); 1567 1568 } 1569 #endif 1570 sctp_set_rwnd(stcb, asoc); 1571 if ((asoc->cnt_on_all_streams + 1572 asoc->cnt_on_reasm_queue + 1573 asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) { 1574 SCTP_STAT_INCR(sctps_datadropchklmt); 1575 } else { 1576 SCTP_STAT_INCR(sctps_datadroprwnd); 1577 } 1578 indx = *break_flag; 1579 *break_flag = 1; 1580 return (0); 1581 } 1582 } 1583 strmno = ntohs(ch->dp.stream_id); 1584 if (strmno >= asoc->streamincnt) { 1585 struct sctp_paramhdr *phdr; 1586 struct mbuf *mb; 1587 1588 mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2), 1589 0, M_DONTWAIT, 1, MT_DATA); 1590 if (mb != NULL) { 1591 /* add some space up front so prepend will work well */ 1592 SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr)); 1593 phdr = mtod(mb, struct sctp_paramhdr *); 1594 /* 1595 * Error causes are just param's and this one has 1596 * two back to back phdr, one with the error type 1597 * and size, the other with the streamid and a rsvd 1598 */ 1599 SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2); 1600 phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM); 1601 phdr->param_length = 1602 htons(sizeof(struct sctp_paramhdr) * 2); 1603 phdr++; 1604 /* We insert the stream in the type field */ 1605 phdr->param_type = ch->dp.stream_id; 1606 /* And set the length to 0 for the rsvd field */ 1607 phdr->param_length = 0; 1608 sctp_queue_op_err(stcb, mb); 1609 } 1610 SCTP_STAT_INCR(sctps_badsid); 1611 return (0); 1612 } 1613 /* 1614 * Before we continue lets validate that we are not being fooled by 1615 * an evil attacker. We can only have 4k chunks based on our TSN 1616 * spread allowed by the mapping array 512 * 8 bits, so there is no 1617 * way our stream sequence numbers could have wrapped. We of course 1618 * only validate the FIRST fragment so the bit must be set. 1619 */ 1620 strmseq = ntohs(ch->dp.stream_sequence); 1621 1622 #ifdef SCTP_ASOCLOG_OF_TSNS 1623 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn; 1624 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno; 1625 asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq; 1626 asoc->tsn_in_at++; 1627 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) { 1628 asoc->tsn_in_at = 0; 1629 } 1630 #endif 1631 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) && 1632 (chunk_flags & SCTP_DATA_UNORDERED) == 0 && 1633 (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered, 1634 strmseq, MAX_SEQ) || 1635 asoc->strmin[strmno].last_sequence_delivered == strmseq)) { 1636 /* The incoming sseq is behind where we last delivered? */ 1637 #ifdef SCTP_DEBUG 1638 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1639 printf("EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n", 1640 strmseq, 1641 asoc->strmin[strmno].last_sequence_delivered); 1642 } 1643 #endif 1644 /* 1645 * throw it in the stream so it gets cleaned up in 1646 * association destruction 1647 */ 1648 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1649 0, M_DONTWAIT, 1, MT_DATA); 1650 if (oper) { 1651 struct sctp_paramhdr *ph; 1652 uint32_t *ippp; 1653 1654 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 1655 (3 * sizeof(uint32_t)); 1656 ph = mtod(oper, struct sctp_paramhdr *); 1657 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1658 ph->param_length = htons(SCTP_BUF_LEN(oper)); 1659 ippp = (uint32_t *) (ph + 1); 1660 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14); 1661 ippp++; 1662 *ippp = tsn; 1663 ippp++; 1664 *ippp = ((strmno << 16) | strmseq); 1665 1666 } 1667 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14; 1668 sctp_abort_an_association(stcb->sctp_ep, stcb, 1669 SCTP_PEER_FAULTY, oper); 1670 *abort_flag = 1; 1671 return (0); 1672 } 1673 /************************************ 1674 * From here down we may find ch-> invalid 1675 * so its a good idea NOT to use it. 1676 *************************************/ 1677 1678 the_len = (chk_length - sizeof(struct sctp_data_chunk)); 1679 if (last_chunk == 0) { 1680 dmbuf = SCTP_M_COPYM(*m, 1681 (offset + sizeof(struct sctp_data_chunk)), 1682 the_len, M_DONTWAIT); 1683 #ifdef SCTP_MBUF_LOGGING 1684 { 1685 struct mbuf *mat; 1686 1687 mat = dmbuf; 1688 while (mat) { 1689 if (SCTP_BUF_IS_EXTENDED(mat)) { 1690 sctp_log_mb(mat, SCTP_MBUF_ICOPY); 1691 } 1692 mat = SCTP_BUF_NEXT(mat); 1693 } 1694 } 1695 #endif 1696 } else { 1697 /* We can steal the last chunk */ 1698 int l_len; 1699 1700 dmbuf = *m; 1701 /* lop off the top part */ 1702 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk))); 1703 if (SCTP_BUF_NEXT(dmbuf) == NULL) { 1704 l_len = SCTP_BUF_LEN(dmbuf); 1705 } else { 1706 /* 1707 * need to count up the size hopefully does not hit 1708 * this to often :-0 1709 */ 1710 struct mbuf *lat; 1711 1712 l_len = 0; 1713 lat = dmbuf; 1714 while (lat) { 1715 l_len += SCTP_BUF_LEN(lat); 1716 lat = SCTP_BUF_NEXT(lat); 1717 } 1718 } 1719 if (l_len > the_len) { 1720 /* Trim the end round bytes off too */ 1721 m_adj(dmbuf, -(l_len - the_len)); 1722 } 1723 } 1724 if (dmbuf == NULL) { 1725 SCTP_STAT_INCR(sctps_nomem); 1726 return (0); 1727 } 1728 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG && 1729 asoc->fragmented_delivery_inprogress == 0 && 1730 TAILQ_EMPTY(&asoc->resetHead) && 1731 ((ordered == 0) || 1732 ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq && 1733 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) { 1734 /* Candidate for express delivery */ 1735 /* 1736 * Its not fragmented, No PD-API is up, Nothing in the 1737 * delivery queue, Its un-ordered OR ordered and the next to 1738 * deliver AND nothing else is stuck on the stream queue, 1739 * And there is room for it in the socket buffer. Lets just 1740 * stuff it up the buffer.... 1741 */ 1742 1743 /* It would be nice to avoid this copy if we could :< */ 1744 sctp_alloc_a_readq(stcb, control); 1745 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 1746 protocol_id, 1747 stcb->asoc.context, 1748 strmno, strmseq, 1749 chunk_flags, 1750 dmbuf); 1751 if (control == NULL) { 1752 goto failed_express_del; 1753 } 1754 sctp_add_to_readq(stcb->sctp_ep, stcb, control, &stcb->sctp_socket->so_rcv, 1); 1755 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) { 1756 /* for ordered, bump what we delivered */ 1757 asoc->strmin[strmno].last_sequence_delivered++; 1758 } 1759 SCTP_STAT_INCR(sctps_recvexpress); 1760 #ifdef SCTP_STR_LOGGING 1761 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, 1762 SCTP_STR_LOG_FROM_EXPRS_DEL); 1763 #endif 1764 control = NULL; 1765 goto finish_express_del; 1766 } 1767 failed_express_del: 1768 /* If we reach here this is a new chunk */ 1769 chk = NULL; 1770 control = NULL; 1771 /* Express for fragmented delivery? */ 1772 if ((asoc->fragmented_delivery_inprogress) && 1773 (stcb->asoc.control_pdapi) && 1774 (asoc->str_of_pdapi == strmno) && 1775 (asoc->ssn_of_pdapi == strmseq) 1776 ) { 1777 control = stcb->asoc.control_pdapi; 1778 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) { 1779 /* Can't be another first? */ 1780 goto failed_pdapi_express_del; 1781 } 1782 if (tsn == (control->sinfo_tsn + 1)) { 1783 /* Yep, we can add it on */ 1784 int end = 0; 1785 uint32_t cumack; 1786 1787 if (chunk_flags & SCTP_DATA_LAST_FRAG) { 1788 end = 1; 1789 } 1790 cumack = asoc->cumulative_tsn; 1791 if ((cumack + 1) == tsn) 1792 cumack = tsn; 1793 1794 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end, 1795 tsn, 1796 &stcb->sctp_socket->so_rcv)) { 1797 printf("Append fails end:%d\n", end); 1798 goto failed_pdapi_express_del; 1799 } 1800 SCTP_STAT_INCR(sctps_recvexpressm); 1801 control->sinfo_tsn = tsn; 1802 asoc->tsn_last_delivered = tsn; 1803 asoc->fragment_flags = chunk_flags; 1804 asoc->tsn_of_pdapi_last_delivered = tsn; 1805 asoc->last_flags_delivered = chunk_flags; 1806 asoc->last_strm_seq_delivered = strmseq; 1807 asoc->last_strm_no_delivered = strmno; 1808 if (end) { 1809 /* clean up the flags and such */ 1810 asoc->fragmented_delivery_inprogress = 0; 1811 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) { 1812 asoc->strmin[strmno].last_sequence_delivered++; 1813 } 1814 stcb->asoc.control_pdapi = NULL; 1815 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) { 1816 /* 1817 * There could be another message 1818 * ready 1819 */ 1820 need_reasm_check = 1; 1821 } 1822 } 1823 control = NULL; 1824 goto finish_express_del; 1825 } 1826 } 1827 failed_pdapi_express_del: 1828 control = NULL; 1829 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 1830 sctp_alloc_a_chunk(stcb, chk); 1831 if (chk == NULL) { 1832 /* No memory so we drop the chunk */ 1833 SCTP_STAT_INCR(sctps_nomem); 1834 if (last_chunk == 0) { 1835 /* we copied it, free the copy */ 1836 sctp_m_freem(dmbuf); 1837 } 1838 return (0); 1839 } 1840 chk->rec.data.TSN_seq = tsn; 1841 chk->no_fr_allowed = 0; 1842 chk->rec.data.stream_seq = strmseq; 1843 chk->rec.data.stream_number = strmno; 1844 chk->rec.data.payloadtype = protocol_id; 1845 chk->rec.data.context = stcb->asoc.context; 1846 chk->rec.data.doing_fast_retransmit = 0; 1847 chk->rec.data.rcv_flags = chunk_flags; 1848 chk->asoc = asoc; 1849 chk->send_size = the_len; 1850 chk->whoTo = net; 1851 atomic_add_int(&net->ref_count, 1); 1852 chk->data = dmbuf; 1853 } else { 1854 sctp_alloc_a_readq(stcb, control); 1855 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 1856 protocol_id, 1857 stcb->asoc.context, 1858 strmno, strmseq, 1859 chunk_flags, 1860 dmbuf); 1861 if (control == NULL) { 1862 /* No memory so we drop the chunk */ 1863 SCTP_STAT_INCR(sctps_nomem); 1864 if (last_chunk == 0) { 1865 /* we copied it, free the copy */ 1866 sctp_m_freem(dmbuf); 1867 } 1868 return (0); 1869 } 1870 control->length = the_len; 1871 } 1872 1873 /* Mark it as received */ 1874 /* Now queue it where it belongs */ 1875 if (control != NULL) { 1876 /* First a sanity check */ 1877 if (asoc->fragmented_delivery_inprogress) { 1878 /* 1879 * Ok, we have a fragmented delivery in progress if 1880 * this chunk is next to deliver OR belongs in our 1881 * view to the reassembly, the peer is evil or 1882 * broken. 1883 */ 1884 uint32_t estimate_tsn; 1885 1886 estimate_tsn = asoc->tsn_last_delivered + 1; 1887 if (TAILQ_EMPTY(&asoc->reasmqueue) && 1888 (estimate_tsn == control->sinfo_tsn)) { 1889 /* Evil/Broke peer */ 1890 sctp_m_freem(control->data); 1891 control->data = NULL; 1892 sctp_free_remote_addr(control->whoFrom); 1893 sctp_free_a_readq(stcb, control); 1894 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1895 0, M_DONTWAIT, 1, MT_DATA); 1896 if (oper) { 1897 struct sctp_paramhdr *ph; 1898 uint32_t *ippp; 1899 1900 SCTP_BUF_LEN(oper) = 1901 sizeof(struct sctp_paramhdr) + 1902 (3 * sizeof(uint32_t)); 1903 ph = mtod(oper, struct sctp_paramhdr *); 1904 ph->param_type = 1905 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1906 ph->param_length = htons(SCTP_BUF_LEN(oper)); 1907 ippp = (uint32_t *) (ph + 1); 1908 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15); 1909 ippp++; 1910 *ippp = tsn; 1911 ippp++; 1912 *ippp = ((strmno << 16) | strmseq); 1913 } 1914 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15; 1915 sctp_abort_an_association(stcb->sctp_ep, stcb, 1916 SCTP_PEER_FAULTY, oper); 1917 1918 *abort_flag = 1; 1919 return (0); 1920 } else { 1921 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) { 1922 sctp_m_freem(control->data); 1923 control->data = NULL; 1924 sctp_free_remote_addr(control->whoFrom); 1925 sctp_free_a_readq(stcb, control); 1926 1927 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1928 0, M_DONTWAIT, 1, MT_DATA); 1929 if (oper) { 1930 struct sctp_paramhdr *ph; 1931 uint32_t *ippp; 1932 1933 SCTP_BUF_LEN(oper) = 1934 sizeof(struct sctp_paramhdr) + 1935 (3 * sizeof(uint32_t)); 1936 ph = mtod(oper, 1937 struct sctp_paramhdr *); 1938 ph->param_type = 1939 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1940 ph->param_length = 1941 htons(SCTP_BUF_LEN(oper)); 1942 ippp = (uint32_t *) (ph + 1); 1943 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16); 1944 ippp++; 1945 *ippp = tsn; 1946 ippp++; 1947 *ippp = ((strmno << 16) | strmseq); 1948 } 1949 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16; 1950 sctp_abort_an_association(stcb->sctp_ep, 1951 stcb, SCTP_PEER_FAULTY, oper); 1952 1953 *abort_flag = 1; 1954 return (0); 1955 } 1956 } 1957 } else { 1958 /* No PDAPI running */ 1959 if (!TAILQ_EMPTY(&asoc->reasmqueue)) { 1960 /* 1961 * Reassembly queue is NOT empty validate 1962 * that this tsn does not need to be in 1963 * reasembly queue. If it does then our peer 1964 * is broken or evil. 1965 */ 1966 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) { 1967 sctp_m_freem(control->data); 1968 control->data = NULL; 1969 sctp_free_remote_addr(control->whoFrom); 1970 sctp_free_a_readq(stcb, control); 1971 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1972 0, M_DONTWAIT, 1, MT_DATA); 1973 if (oper) { 1974 struct sctp_paramhdr *ph; 1975 uint32_t *ippp; 1976 1977 SCTP_BUF_LEN(oper) = 1978 sizeof(struct sctp_paramhdr) + 1979 (3 * sizeof(uint32_t)); 1980 ph = mtod(oper, 1981 struct sctp_paramhdr *); 1982 ph->param_type = 1983 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1984 ph->param_length = 1985 htons(SCTP_BUF_LEN(oper)); 1986 ippp = (uint32_t *) (ph + 1); 1987 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17); 1988 ippp++; 1989 *ippp = tsn; 1990 ippp++; 1991 *ippp = ((strmno << 16) | strmseq); 1992 } 1993 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17; 1994 sctp_abort_an_association(stcb->sctp_ep, 1995 stcb, SCTP_PEER_FAULTY, oper); 1996 1997 *abort_flag = 1; 1998 return (0); 1999 } 2000 } 2001 } 2002 /* ok, if we reach here we have passed the sanity checks */ 2003 if (chunk_flags & SCTP_DATA_UNORDERED) { 2004 /* queue directly into socket buffer */ 2005 sctp_add_to_readq(stcb->sctp_ep, stcb, 2006 control, 2007 &stcb->sctp_socket->so_rcv, 1); 2008 } else { 2009 /* 2010 * Special check for when streams are resetting. We 2011 * could be more smart about this and check the 2012 * actual stream to see if it is not being reset.. 2013 * that way we would not create a HOLB when amongst 2014 * streams being reset and those not being reset. 2015 * 2016 * We take complete messages that have a stream reset 2017 * intervening (aka the TSN is after where our 2018 * cum-ack needs to be) off and put them on a 2019 * pending_reply_queue. The reassembly ones we do 2020 * not have to worry about since they are all sorted 2021 * and proceessed by TSN order. It is only the 2022 * singletons I must worry about. 2023 */ 2024 struct sctp_stream_reset_list *liste; 2025 2026 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2027 ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)) || 2028 (tsn == ntohl(liste->tsn))) 2029 ) { 2030 /* 2031 * yep its past where we need to reset... go 2032 * ahead and queue it. 2033 */ 2034 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) { 2035 /* first one on */ 2036 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2037 } else { 2038 struct sctp_queued_to_read *ctlOn; 2039 unsigned char inserted = 0; 2040 2041 ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue); 2042 while (ctlOn) { 2043 if (compare_with_wrap(control->sinfo_tsn, 2044 ctlOn->sinfo_tsn, MAX_TSN)) { 2045 ctlOn = TAILQ_NEXT(ctlOn, next); 2046 } else { 2047 /* found it */ 2048 TAILQ_INSERT_BEFORE(ctlOn, control, next); 2049 inserted = 1; 2050 break; 2051 } 2052 } 2053 if (inserted == 0) { 2054 /* 2055 * must be put at end, use 2056 * prevP (all setup from 2057 * loop) to setup nextP. 2058 */ 2059 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2060 } 2061 } 2062 } else { 2063 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag); 2064 if (*abort_flag) { 2065 return (0); 2066 } 2067 } 2068 } 2069 } else { 2070 /* Into the re-assembly queue */ 2071 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag); 2072 if (*abort_flag) { 2073 /* 2074 * the assoc is now gone and chk was put onto the 2075 * reasm queue, which has all been freed. 2076 */ 2077 *m = NULL; 2078 return (0); 2079 } 2080 } 2081 finish_express_del: 2082 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) { 2083 /* we have a new high score */ 2084 asoc->highest_tsn_inside_map = tsn; 2085 #ifdef SCTP_MAP_LOGGING 2086 sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 2087 #endif 2088 } 2089 if (tsn == (asoc->cumulative_tsn + 1)) { 2090 /* Update cum-ack */ 2091 asoc->cumulative_tsn = tsn; 2092 } 2093 if (last_chunk) { 2094 *m = NULL; 2095 } 2096 if (ordered) { 2097 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); 2098 } else { 2099 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); 2100 } 2101 SCTP_STAT_INCR(sctps_recvdata); 2102 /* Set it present please */ 2103 #ifdef SCTP_STR_LOGGING 2104 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN); 2105 #endif 2106 #ifdef SCTP_MAP_LOGGING 2107 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2108 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); 2109 #endif 2110 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); 2111 if (need_reasm_check) { 2112 /* Another one waits ? */ 2113 sctp_deliver_reasm_check(stcb, asoc); 2114 } 2115 return (1); 2116 } 2117 2118 int8_t sctp_map_lookup_tab[256] = { 2119 -1, 0, -1, 1, -1, 0, -1, 2, 2120 -1, 0, -1, 1, -1, 0, -1, 3, 2121 -1, 0, -1, 1, -1, 0, -1, 2, 2122 -1, 0, -1, 1, -1, 0, -1, 4, 2123 -1, 0, -1, 1, -1, 0, -1, 2, 2124 -1, 0, -1, 1, -1, 0, -1, 3, 2125 -1, 0, -1, 1, -1, 0, -1, 2, 2126 -1, 0, -1, 1, -1, 0, -1, 5, 2127 -1, 0, -1, 1, -1, 0, -1, 2, 2128 -1, 0, -1, 1, -1, 0, -1, 3, 2129 -1, 0, -1, 1, -1, 0, -1, 2, 2130 -1, 0, -1, 1, -1, 0, -1, 4, 2131 -1, 0, -1, 1, -1, 0, -1, 2, 2132 -1, 0, -1, 1, -1, 0, -1, 3, 2133 -1, 0, -1, 1, -1, 0, -1, 2, 2134 -1, 0, -1, 1, -1, 0, -1, 6, 2135 -1, 0, -1, 1, -1, 0, -1, 2, 2136 -1, 0, -1, 1, -1, 0, -1, 3, 2137 -1, 0, -1, 1, -1, 0, -1, 2, 2138 -1, 0, -1, 1, -1, 0, -1, 4, 2139 -1, 0, -1, 1, -1, 0, -1, 2, 2140 -1, 0, -1, 1, -1, 0, -1, 3, 2141 -1, 0, -1, 1, -1, 0, -1, 2, 2142 -1, 0, -1, 1, -1, 0, -1, 5, 2143 -1, 0, -1, 1, -1, 0, -1, 2, 2144 -1, 0, -1, 1, -1, 0, -1, 3, 2145 -1, 0, -1, 1, -1, 0, -1, 2, 2146 -1, 0, -1, 1, -1, 0, -1, 4, 2147 -1, 0, -1, 1, -1, 0, -1, 2, 2148 -1, 0, -1, 1, -1, 0, -1, 3, 2149 -1, 0, -1, 1, -1, 0, -1, 2, 2150 -1, 0, -1, 1, -1, 0, -1, 7, 2151 }; 2152 2153 2154 void 2155 sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag) 2156 { 2157 /* 2158 * Now we also need to check the mapping array in a couple of ways. 2159 * 1) Did we move the cum-ack point? 2160 */ 2161 struct sctp_association *asoc; 2162 int i, at; 2163 int all_ones, last_all_ones = 0; 2164 int slide_from, slide_end, lgap, distance; 2165 2166 #ifdef SCTP_MAP_LOGGING 2167 uint32_t old_cumack, old_base, old_highest; 2168 unsigned char aux_array[64]; 2169 2170 #endif 2171 struct sctp_stream_reset_list *liste; 2172 2173 asoc = &stcb->asoc; 2174 at = 0; 2175 2176 #ifdef SCTP_MAP_LOGGING 2177 old_cumack = asoc->cumulative_tsn; 2178 old_base = asoc->mapping_array_base_tsn; 2179 old_highest = asoc->highest_tsn_inside_map; 2180 if (asoc->mapping_array_size < 64) 2181 memcpy(aux_array, asoc->mapping_array, 2182 asoc->mapping_array_size); 2183 else 2184 memcpy(aux_array, asoc->mapping_array, 64); 2185 #endif 2186 2187 /* 2188 * We could probably improve this a small bit by calculating the 2189 * offset of the current cum-ack as the starting point. 2190 */ 2191 all_ones = 1; 2192 at = 0; 2193 for (i = 0; i < stcb->asoc.mapping_array_size; i++) { 2194 if (asoc->mapping_array[i] == 0xff) { 2195 at += 8; 2196 last_all_ones = 1; 2197 } else { 2198 /* there is a 0 bit */ 2199 all_ones = 0; 2200 at += sctp_map_lookup_tab[asoc->mapping_array[i]]; 2201 last_all_ones = 0; 2202 break; 2203 } 2204 } 2205 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - last_all_ones); 2206 /* at is one off, since in the table a embedded -1 is present */ 2207 at++; 2208 2209 if (compare_with_wrap(asoc->cumulative_tsn, 2210 asoc->highest_tsn_inside_map, 2211 MAX_TSN)) { 2212 #ifdef INVARIANTS 2213 panic("huh, cumack greater than high-tsn in map"); 2214 #else 2215 printf("huh, cumack greater than high-tsn in map - should panic?\n"); 2216 asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2217 #endif 2218 } 2219 if (all_ones || 2220 (asoc->cumulative_tsn == asoc->highest_tsn_inside_map && at >= 8)) { 2221 /* The complete array was completed by a single FR */ 2222 /* higest becomes the cum-ack */ 2223 int clr; 2224 2225 asoc->cumulative_tsn = asoc->highest_tsn_inside_map; 2226 /* clear the array */ 2227 if (all_ones) 2228 clr = asoc->mapping_array_size; 2229 else { 2230 clr = (at >> 3) + 1; 2231 /* 2232 * this should be the allones case but just in case 2233 * :> 2234 */ 2235 if (clr > asoc->mapping_array_size) 2236 clr = asoc->mapping_array_size; 2237 } 2238 memset(asoc->mapping_array, 0, clr); 2239 /* base becomes one ahead of the cum-ack */ 2240 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; 2241 #ifdef SCTP_MAP_LOGGING 2242 sctp_log_map(old_base, old_cumack, old_highest, 2243 SCTP_MAP_PREPARE_SLIDE); 2244 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2245 asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED); 2246 #endif 2247 } else if (at >= 8) { 2248 /* we can slide the mapping array down */ 2249 /* Calculate the new byte postion we can move down */ 2250 slide_from = at >> 3; 2251 /* 2252 * now calculate the ceiling of the move using our highest 2253 * TSN value 2254 */ 2255 if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) { 2256 lgap = asoc->highest_tsn_inside_map - 2257 asoc->mapping_array_base_tsn; 2258 } else { 2259 lgap = (MAX_TSN - asoc->mapping_array_base_tsn) + 2260 asoc->highest_tsn_inside_map + 1; 2261 } 2262 slide_end = lgap >> 3; 2263 if (slide_end < slide_from) { 2264 panic("impossible slide"); 2265 } 2266 distance = (slide_end - slide_from) + 1; 2267 #ifdef SCTP_MAP_LOGGING 2268 sctp_log_map(old_base, old_cumack, old_highest, 2269 SCTP_MAP_PREPARE_SLIDE); 2270 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end, 2271 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM); 2272 #endif 2273 if (distance + slide_from > asoc->mapping_array_size || 2274 distance < 0) { 2275 /* 2276 * Here we do NOT slide forward the array so that 2277 * hopefully when more data comes in to fill it up 2278 * we will be able to slide it forward. Really I 2279 * don't think this should happen :-0 2280 */ 2281 2282 #ifdef SCTP_MAP_LOGGING 2283 sctp_log_map((uint32_t) distance, (uint32_t) slide_from, 2284 (uint32_t) asoc->mapping_array_size, 2285 SCTP_MAP_SLIDE_NONE); 2286 #endif 2287 } else { 2288 int ii; 2289 2290 for (ii = 0; ii < distance; ii++) { 2291 asoc->mapping_array[ii] = 2292 asoc->mapping_array[slide_from + ii]; 2293 } 2294 for (ii = distance; ii <= slide_end; ii++) { 2295 asoc->mapping_array[ii] = 0; 2296 } 2297 asoc->mapping_array_base_tsn += (slide_from << 3); 2298 #ifdef SCTP_MAP_LOGGING 2299 sctp_log_map(asoc->mapping_array_base_tsn, 2300 asoc->cumulative_tsn, asoc->highest_tsn_inside_map, 2301 SCTP_MAP_SLIDE_RESULT); 2302 #endif 2303 } 2304 } 2305 /* check the special flag for stream resets */ 2306 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2307 ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) || 2308 (asoc->cumulative_tsn == liste->tsn)) 2309 ) { 2310 /* 2311 * we have finished working through the backlogged TSN's now 2312 * time to reset streams. 1: call reset function. 2: free 2313 * pending_reply space 3: distribute any chunks in 2314 * pending_reply_queue. 2315 */ 2316 struct sctp_queued_to_read *ctl; 2317 2318 sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams); 2319 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); 2320 SCTP_FREE(liste); 2321 liste = TAILQ_FIRST(&asoc->resetHead); 2322 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2323 if (ctl && (liste == NULL)) { 2324 /* All can be removed */ 2325 while (ctl) { 2326 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 2327 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); 2328 if (*abort_flag) { 2329 return; 2330 } 2331 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2332 } 2333 } else if (ctl) { 2334 /* more than one in queue */ 2335 while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) { 2336 /* 2337 * if ctl->sinfo_tsn is <= liste->tsn we can 2338 * process it which is the NOT of 2339 * ctl->sinfo_tsn > liste->tsn 2340 */ 2341 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 2342 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); 2343 if (*abort_flag) { 2344 return; 2345 } 2346 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2347 } 2348 } 2349 /* 2350 * Now service re-assembly to pick up anything that has been 2351 * held on reassembly queue? 2352 */ 2353 sctp_deliver_reasm_check(stcb, asoc); 2354 } 2355 /* 2356 * Now we need to see if we need to queue a sack or just start the 2357 * timer (if allowed). 2358 */ 2359 if (ok_to_sack) { 2360 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2361 /* 2362 * Ok special case, in SHUTDOWN-SENT case. here we 2363 * maker sure SACK timer is off and instead send a 2364 * SHUTDOWN and a SACK 2365 */ 2366 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2367 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 2368 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18); 2369 } 2370 sctp_send_shutdown(stcb, stcb->asoc.primary_destination); 2371 sctp_send_sack(stcb); 2372 } else { 2373 int is_a_gap; 2374 2375 /* is there a gap now ? */ 2376 is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map, 2377 stcb->asoc.cumulative_tsn, MAX_TSN); 2378 2379 /* 2380 * CMT DAC algorithm: increase number of packets 2381 * received since last ack 2382 */ 2383 stcb->asoc.cmt_dac_pkts_rcvd++; 2384 2385 if ((stcb->asoc.first_ack_sent == 0) || /* First time we send a 2386 * sack */ 2387 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no 2388 * longer is one */ 2389 (stcb->asoc.numduptsns) || /* we have dup's */ 2390 (is_a_gap) || /* is still a gap */ 2391 (stcb->asoc.delayed_ack == 0) || 2392 (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) /* timer was up . second 2393 * packet */ 2394 ) { 2395 2396 if ((sctp_cmt_on_off) && (sctp_cmt_use_dac) && 2397 (stcb->asoc.first_ack_sent == 1) && 2398 (stcb->asoc.numduptsns == 0) && 2399 (stcb->asoc.delayed_ack) && 2400 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) { 2401 2402 /* 2403 * CMT DAC algorithm: With CMT, 2404 * delay acks even in the face of 2405 * 2406 * reordering. Therefore, if acks that 2407 * do not have to be sent because of 2408 * the above reasons, will be 2409 * delayed. That is, acks that would 2410 * have been sent due to gap reports 2411 * will be delayed with DAC. Start 2412 * the delayed ack timer. 2413 */ 2414 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2415 stcb->sctp_ep, stcb, NULL); 2416 } else { 2417 /* 2418 * Ok we must build a SACK since the 2419 * timer is pending, we got our 2420 * first packet OR there are gaps or 2421 * duplicates. 2422 */ 2423 stcb->asoc.first_ack_sent = 1; 2424 SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 2425 sctp_send_sack(stcb); 2426 } 2427 } else { 2428 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2429 stcb->sctp_ep, stcb, NULL); 2430 } 2431 } 2432 } 2433 } 2434 2435 void 2436 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc) 2437 { 2438 struct sctp_tmit_chunk *chk; 2439 uint32_t tsize; 2440 uint16_t nxt_todel; 2441 2442 if (asoc->fragmented_delivery_inprogress) { 2443 sctp_service_reassembly(stcb, asoc); 2444 } 2445 /* Can we proceed further, i.e. the PD-API is complete */ 2446 if (asoc->fragmented_delivery_inprogress) { 2447 /* no */ 2448 return; 2449 } 2450 /* 2451 * Now is there some other chunk I can deliver from the reassembly 2452 * queue. 2453 */ 2454 doit_again: 2455 chk = TAILQ_FIRST(&asoc->reasmqueue); 2456 if (chk == NULL) { 2457 asoc->size_on_reasm_queue = 0; 2458 asoc->cnt_on_reasm_queue = 0; 2459 return; 2460 } 2461 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1; 2462 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) && 2463 ((nxt_todel == chk->rec.data.stream_seq) || 2464 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) { 2465 /* 2466 * Yep the first one is here. We setup to start reception, 2467 * by backing down the TSN just in case we can't deliver. 2468 */ 2469 2470 /* 2471 * Before we start though either all of the message should 2472 * be here or 1/4 the socket buffer max or nothing on the 2473 * delivery queue and something can be delivered. 2474 */ 2475 if ((sctp_is_all_msg_on_reasm(asoc, &tsize) || 2476 (tsize > stcb->sctp_ep->partial_delivery_point))) { 2477 asoc->fragmented_delivery_inprogress = 1; 2478 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1; 2479 asoc->str_of_pdapi = chk->rec.data.stream_number; 2480 asoc->ssn_of_pdapi = chk->rec.data.stream_seq; 2481 asoc->pdapi_ppid = chk->rec.data.payloadtype; 2482 asoc->fragment_flags = chk->rec.data.rcv_flags; 2483 sctp_service_reassembly(stcb, asoc); 2484 if (asoc->fragmented_delivery_inprogress == 0) { 2485 goto doit_again; 2486 } 2487 } 2488 } 2489 } 2490 2491 extern int sctp_strict_data_order; 2492 2493 int 2494 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, 2495 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2496 struct sctp_nets *net, uint32_t * high_tsn) 2497 { 2498 struct sctp_data_chunk *ch, chunk_buf; 2499 struct sctp_association *asoc; 2500 int num_chunks = 0; /* number of control chunks processed */ 2501 int stop_proc = 0; 2502 int chk_length, break_flag, last_chunk; 2503 int abort_flag = 0, was_a_gap = 0; 2504 struct mbuf *m; 2505 2506 /* set the rwnd */ 2507 sctp_set_rwnd(stcb, &stcb->asoc); 2508 2509 m = *mm; 2510 SCTP_TCB_LOCK_ASSERT(stcb); 2511 asoc = &stcb->asoc; 2512 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 2513 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 2514 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 2515 /* 2516 * wait a minute, this guy is gone, there is no longer a 2517 * receiver. Send peer an ABORT! 2518 */ 2519 struct mbuf *op_err; 2520 2521 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 2522 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err); 2523 return (2); 2524 } 2525 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map, 2526 stcb->asoc.cumulative_tsn, MAX_TSN)) { 2527 /* there was a gap before this data was processed */ 2528 was_a_gap = 1; 2529 } 2530 /* 2531 * setup where we got the last DATA packet from for any SACK that 2532 * may need to go out. Don't bump the net. This is done ONLY when a 2533 * chunk is assigned. 2534 */ 2535 asoc->last_data_chunk_from = net; 2536 2537 /* 2538 * Now before we proceed we must figure out if this is a wasted 2539 * cluster... i.e. it is a small packet sent in and yet the driver 2540 * underneath allocated a full cluster for it. If so we must copy it 2541 * to a smaller mbuf and free up the cluster mbuf. This will help 2542 * with cluster starvation. 2543 */ 2544 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) { 2545 /* we only handle mbufs that are singletons.. not chains */ 2546 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA); 2547 if (m) { 2548 /* ok lets see if we can copy the data up */ 2549 caddr_t *from, *to; 2550 2551 /* get the pointers and copy */ 2552 to = mtod(m, caddr_t *); 2553 from = mtod((*mm), caddr_t *); 2554 memcpy(to, from, SCTP_BUF_LEN((*mm))); 2555 /* copy the length and free up the old */ 2556 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm)); 2557 sctp_m_freem(*mm); 2558 /* sucess, back copy */ 2559 *mm = m; 2560 } else { 2561 /* We are in trouble in the mbuf world .. yikes */ 2562 m = *mm; 2563 } 2564 } 2565 /* get pointer to the first chunk header */ 2566 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset, 2567 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf); 2568 if (ch == NULL) { 2569 return (1); 2570 } 2571 /* 2572 * process all DATA chunks... 2573 */ 2574 *high_tsn = asoc->cumulative_tsn; 2575 break_flag = 0; 2576 while (stop_proc == 0) { 2577 /* validate chunk length */ 2578 chk_length = ntohs(ch->ch.chunk_length); 2579 if (length - *offset < chk_length) { 2580 /* all done, mutulated chunk */ 2581 stop_proc = 1; 2582 break; 2583 } 2584 if (ch->ch.chunk_type == SCTP_DATA) { 2585 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) { 2586 /* 2587 * Need to send an abort since we had a 2588 * invalid data chunk. 2589 */ 2590 struct mbuf *op_err; 2591 2592 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)), 2593 0, M_DONTWAIT, 1, MT_DATA); 2594 2595 if (op_err) { 2596 struct sctp_paramhdr *ph; 2597 uint32_t *ippp; 2598 2599 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) + 2600 (2 * sizeof(uint32_t)); 2601 ph = mtod(op_err, struct sctp_paramhdr *); 2602 ph->param_type = 2603 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 2604 ph->param_length = htons(SCTP_BUF_LEN(op_err)); 2605 ippp = (uint32_t *) (ph + 1); 2606 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19); 2607 ippp++; 2608 *ippp = asoc->cumulative_tsn; 2609 2610 } 2611 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19; 2612 sctp_abort_association(inp, stcb, m, iphlen, sh, 2613 op_err); 2614 return (2); 2615 } 2616 #ifdef SCTP_AUDITING_ENABLED 2617 sctp_audit_log(0xB1, 0); 2618 #endif 2619 if (SCTP_SIZE32(chk_length) == (length - *offset)) { 2620 last_chunk = 1; 2621 } else { 2622 last_chunk = 0; 2623 } 2624 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch, 2625 chk_length, net, high_tsn, &abort_flag, &break_flag, 2626 last_chunk)) { 2627 num_chunks++; 2628 } 2629 if (abort_flag) 2630 return (2); 2631 2632 if (break_flag) { 2633 /* 2634 * Set because of out of rwnd space and no 2635 * drop rep space left. 2636 */ 2637 stop_proc = 1; 2638 break; 2639 } 2640 } else { 2641 /* not a data chunk in the data region */ 2642 switch (ch->ch.chunk_type) { 2643 case SCTP_INITIATION: 2644 case SCTP_INITIATION_ACK: 2645 case SCTP_SELECTIVE_ACK: 2646 case SCTP_HEARTBEAT_REQUEST: 2647 case SCTP_HEARTBEAT_ACK: 2648 case SCTP_ABORT_ASSOCIATION: 2649 case SCTP_SHUTDOWN: 2650 case SCTP_SHUTDOWN_ACK: 2651 case SCTP_OPERATION_ERROR: 2652 case SCTP_COOKIE_ECHO: 2653 case SCTP_COOKIE_ACK: 2654 case SCTP_ECN_ECHO: 2655 case SCTP_ECN_CWR: 2656 case SCTP_SHUTDOWN_COMPLETE: 2657 case SCTP_AUTHENTICATION: 2658 case SCTP_ASCONF_ACK: 2659 case SCTP_PACKET_DROPPED: 2660 case SCTP_STREAM_RESET: 2661 case SCTP_FORWARD_CUM_TSN: 2662 case SCTP_ASCONF: 2663 /* 2664 * Now, what do we do with KNOWN chunks that 2665 * are NOT in the right place? 2666 * 2667 * For now, I do nothing but ignore them. We 2668 * may later want to add sysctl stuff to 2669 * switch out and do either an ABORT() or 2670 * possibly process them. 2671 */ 2672 if (sctp_strict_data_order) { 2673 struct mbuf *op_err; 2674 2675 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION); 2676 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err); 2677 return (2); 2678 } 2679 break; 2680 default: 2681 /* unknown chunk type, use bit rules */ 2682 if (ch->ch.chunk_type & 0x40) { 2683 /* Add a error report to the queue */ 2684 struct mbuf *mm; 2685 struct sctp_paramhdr *phd; 2686 2687 mm = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA); 2688 if (mm) { 2689 phd = mtod(mm, struct sctp_paramhdr *); 2690 /* 2691 * We cheat and use param 2692 * type since we did not 2693 * bother to define a error 2694 * cause struct. They are 2695 * the same basic format 2696 * with different names. 2697 */ 2698 phd->param_type = 2699 htons(SCTP_CAUSE_UNRECOG_CHUNK); 2700 phd->param_length = 2701 htons(chk_length + sizeof(*phd)); 2702 SCTP_BUF_LEN(mm) = sizeof(*phd); 2703 SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, 2704 SCTP_SIZE32(chk_length), 2705 M_DONTWAIT); 2706 if (SCTP_BUF_NEXT(mm)) { 2707 sctp_queue_op_err(stcb, mm); 2708 } else { 2709 sctp_m_freem(mm); 2710 } 2711 } 2712 } 2713 if ((ch->ch.chunk_type & 0x80) == 0) { 2714 /* discard the rest of this packet */ 2715 stop_proc = 1; 2716 } /* else skip this bad chunk and 2717 * continue... */ 2718 break; 2719 }; /* switch of chunk type */ 2720 } 2721 *offset += SCTP_SIZE32(chk_length); 2722 if ((*offset >= length) || stop_proc) { 2723 /* no more data left in the mbuf chain */ 2724 stop_proc = 1; 2725 continue; 2726 } 2727 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset, 2728 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf); 2729 if (ch == NULL) { 2730 *offset = length; 2731 stop_proc = 1; 2732 break; 2733 2734 } 2735 } /* while */ 2736 if (break_flag) { 2737 /* 2738 * we need to report rwnd overrun drops. 2739 */ 2740 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0); 2741 } 2742 if (num_chunks) { 2743 /* 2744 * Did we get data, if so update the time for auto-close and 2745 * give peer credit for being alive. 2746 */ 2747 SCTP_STAT_INCR(sctps_recvpktwithdata); 2748 stcb->asoc.overall_error_count = 0; 2749 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd); 2750 } 2751 /* now service all of the reassm queue if needed */ 2752 if (!(TAILQ_EMPTY(&asoc->reasmqueue))) 2753 sctp_service_queues(stcb, asoc); 2754 2755 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2756 /* 2757 * Assure that we ack right away by making sure that a d-ack 2758 * timer is running. So the sack_check will send a sack. 2759 */ 2760 sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, 2761 net); 2762 } 2763 /* Start a sack timer or QUEUE a SACK for sending */ 2764 if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) && 2765 (stcb->asoc.first_ack_sent)) { 2766 /* Everything is in order */ 2767 if (stcb->asoc.mapping_array[0] == 0xff) { 2768 /* need to do the slide */ 2769 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag); 2770 } else { 2771 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2772 stcb->asoc.first_ack_sent = 1; 2773 SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 2774 sctp_send_sack(stcb); 2775 } else { 2776 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2777 stcb->sctp_ep, stcb, NULL); 2778 } 2779 } 2780 } else { 2781 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag); 2782 } 2783 if (abort_flag) 2784 return (2); 2785 2786 return (0); 2787 } 2788 2789 static void 2790 sctp_handle_segments(struct sctp_tcb *stcb, struct sctp_association *asoc, 2791 struct sctp_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked, 2792 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack, 2793 int num_seg, int *ecn_seg_sums) 2794 { 2795 /************************************************/ 2796 /* process fragments and update sendqueue */ 2797 /************************************************/ 2798 struct sctp_sack *sack; 2799 struct sctp_gap_ack_block *frag; 2800 struct sctp_tmit_chunk *tp1; 2801 int i; 2802 unsigned int j; 2803 2804 #ifdef SCTP_FR_LOGGING 2805 int num_frs = 0; 2806 2807 #endif 2808 uint16_t frag_strt, frag_end, primary_flag_set; 2809 u_long last_frag_high; 2810 2811 /* 2812 * @@@ JRI : TODO: This flag is not used anywhere .. remove? 2813 */ 2814 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) { 2815 primary_flag_set = 1; 2816 } else { 2817 primary_flag_set = 0; 2818 } 2819 2820 sack = &ch->sack; 2821 frag = (struct sctp_gap_ack_block *)((caddr_t)sack + 2822 sizeof(struct sctp_sack)); 2823 tp1 = NULL; 2824 last_frag_high = 0; 2825 for (i = 0; i < num_seg; i++) { 2826 frag_strt = ntohs(frag->start); 2827 frag_end = ntohs(frag->end); 2828 /* some sanity checks on the fargment offsets */ 2829 if (frag_strt > frag_end) { 2830 /* this one is malformed, skip */ 2831 frag++; 2832 continue; 2833 } 2834 if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked, 2835 MAX_TSN)) 2836 *biggest_tsn_acked = frag_end + last_tsn; 2837 2838 /* mark acked dgs and find out the highestTSN being acked */ 2839 if (tp1 == NULL) { 2840 tp1 = TAILQ_FIRST(&asoc->sent_queue); 2841 2842 /* save the locations of the last frags */ 2843 last_frag_high = frag_end + last_tsn; 2844 } else { 2845 /* 2846 * now lets see if we need to reset the queue due to 2847 * a out-of-order SACK fragment 2848 */ 2849 if (compare_with_wrap(frag_strt + last_tsn, 2850 last_frag_high, MAX_TSN)) { 2851 /* 2852 * if the new frag starts after the last TSN 2853 * frag covered, we are ok and this one is 2854 * beyond the last one 2855 */ 2856 ; 2857 } else { 2858 /* 2859 * ok, they have reset us, so we need to 2860 * reset the queue this will cause extra 2861 * hunting but hey, they chose the 2862 * performance hit when they failed to order 2863 * there gaps.. 2864 */ 2865 tp1 = TAILQ_FIRST(&asoc->sent_queue); 2866 } 2867 last_frag_high = frag_end + last_tsn; 2868 } 2869 for (j = frag_strt + last_tsn; j <= frag_end + last_tsn; j++) { 2870 while (tp1) { 2871 #ifdef SCTP_FR_LOGGING 2872 if (tp1->rec.data.doing_fast_retransmit) 2873 num_frs++; 2874 #endif 2875 2876 /* 2877 * CMT: CUCv2 algorithm. For each TSN being 2878 * processed from the sent queue, track the 2879 * next expected pseudo-cumack, or 2880 * rtx_pseudo_cumack, if required. Separate 2881 * cumack trackers for first transmissions, 2882 * and retransmissions. 2883 */ 2884 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) && 2885 (tp1->snd_count == 1)) { 2886 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq; 2887 tp1->whoTo->find_pseudo_cumack = 0; 2888 } 2889 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) && 2890 (tp1->snd_count > 1)) { 2891 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq; 2892 tp1->whoTo->find_rtx_pseudo_cumack = 0; 2893 } 2894 if (tp1->rec.data.TSN_seq == j) { 2895 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 2896 /* 2897 * must be held until 2898 * cum-ack passes 2899 */ 2900 /* 2901 * ECN Nonce: Add the nonce 2902 * value to the sender's 2903 * nonce sum 2904 */ 2905 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 2906 /* 2907 * If it is less 2908 * than ACKED, it is 2909 * now no-longer in 2910 * flight. Higher 2911 * values may 2912 * already be set 2913 * via previous Gap 2914 * Ack Blocks... 2915 * i.e. ACKED or 2916 * MARKED. 2917 */ 2918 if (compare_with_wrap(tp1->rec.data.TSN_seq, 2919 *biggest_newly_acked_tsn, MAX_TSN)) { 2920 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq; 2921 } 2922 /* 2923 * CMT: SFR algo 2924 * (and HTNA) - set 2925 * saw_newack to 1 2926 * for dest being 2927 * newly acked. 2928 * update 2929 * this_sack_highest_ 2930 * newack if 2931 * appropriate. 2932 */ 2933 if (tp1->rec.data.chunk_was_revoked == 0) 2934 tp1->whoTo->saw_newack = 1; 2935 2936 if (compare_with_wrap(tp1->rec.data.TSN_seq, 2937 tp1->whoTo->this_sack_highest_newack, 2938 MAX_TSN)) { 2939 tp1->whoTo->this_sack_highest_newack = 2940 tp1->rec.data.TSN_seq; 2941 } 2942 /* 2943 * CMT DAC algo: 2944 * also update 2945 * this_sack_lowest_n 2946 * ewack 2947 */ 2948 if (*this_sack_lowest_newack == 0) { 2949 #ifdef SCTP_SACK_LOGGING 2950 sctp_log_sack(*this_sack_lowest_newack, 2951 last_tsn, 2952 tp1->rec.data.TSN_seq, 2953 0, 2954 0, 2955 SCTP_LOG_TSN_ACKED); 2956 #endif 2957 *this_sack_lowest_newack = tp1->rec.data.TSN_seq; 2958 } 2959 /* 2960 * CMT: CUCv2 2961 * algorithm. If 2962 * (rtx-)pseudo-cumac 2963 * k for corresp 2964 * dest is being 2965 * acked, then we 2966 * have a new 2967 * (rtx-)pseudo-cumac 2968 * k. Set 2969 * new_(rtx_)pseudo_c 2970 * umack to TRUE so 2971 * that the cwnd for 2972 * this dest can be 2973 * updated. Also 2974 * trigger search 2975 * for the next 2976 * expected 2977 * (rtx-)pseudo-cumac 2978 * k. Separate 2979 * pseudo_cumack 2980 * trackers for 2981 * first 2982 * transmissions and 2983 * retransmissions. 2984 */ 2985 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) { 2986 if (tp1->rec.data.chunk_was_revoked == 0) { 2987 tp1->whoTo->new_pseudo_cumack = 1; 2988 } 2989 tp1->whoTo->find_pseudo_cumack = 1; 2990 } 2991 #ifdef SCTP_CWND_LOGGING 2992 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 2993 #endif 2994 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) { 2995 if (tp1->rec.data.chunk_was_revoked == 0) { 2996 tp1->whoTo->new_pseudo_cumack = 1; 2997 } 2998 tp1->whoTo->find_rtx_pseudo_cumack = 1; 2999 } 3000 #ifdef SCTP_SACK_LOGGING 3001 sctp_log_sack(*biggest_newly_acked_tsn, 3002 last_tsn, 3003 tp1->rec.data.TSN_seq, 3004 frag_strt, 3005 frag_end, 3006 SCTP_LOG_TSN_ACKED); 3007 #endif 3008 #ifdef SCTP_FLIGHT_LOGGING 3009 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN, 3010 tp1->whoTo->flight_size, 3011 tp1->book_size, 3012 (uintptr_t) stcb, 3013 tp1->rec.data.TSN_seq); 3014 #endif 3015 if (tp1->whoTo->flight_size >= tp1->book_size) 3016 tp1->whoTo->flight_size -= tp1->book_size; 3017 else 3018 tp1->whoTo->flight_size = 0; 3019 if (asoc->total_flight >= tp1->book_size) { 3020 asoc->total_flight -= tp1->book_size; 3021 if (asoc->total_flight_count > 0) 3022 asoc->total_flight_count--; 3023 } else { 3024 asoc->total_flight = 0; 3025 asoc->total_flight_count = 0; 3026 } 3027 3028 tp1->whoTo->net_ack += tp1->send_size; 3029 3030 if (tp1->snd_count < 2) { 3031 /* 3032 * True 3033 * non-retran 3034 * smited 3035 * chunk */ 3036 tp1->whoTo->net_ack2 += tp1->send_size; 3037 3038 /* 3039 * update RTO 3040 * too ? */ 3041 if (tp1->do_rtt) { 3042 tp1->whoTo->RTO = 3043 sctp_calculate_rto(stcb, 3044 asoc, 3045 tp1->whoTo, 3046 &tp1->sent_rcv_time); 3047 tp1->whoTo->rto_pending = 0; 3048 tp1->do_rtt = 0; 3049 } 3050 } 3051 } 3052 if (tp1->sent <= SCTP_DATAGRAM_RESEND && 3053 tp1->sent != SCTP_DATAGRAM_UNSENT && 3054 compare_with_wrap(tp1->rec.data.TSN_seq, 3055 asoc->this_sack_highest_gap, 3056 MAX_TSN)) { 3057 asoc->this_sack_highest_gap = 3058 tp1->rec.data.TSN_seq; 3059 } 3060 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3061 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 3062 #ifdef SCTP_AUDITING_ENABLED 3063 sctp_audit_log(0xB2, 3064 (asoc->sent_queue_retran_cnt & 0x000000ff)); 3065 #endif 3066 3067 } 3068 (*ecn_seg_sums) += tp1->rec.data.ect_nonce; 3069 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM; 3070 3071 tp1->sent = SCTP_DATAGRAM_MARKED; 3072 } 3073 break; 3074 } /* if (tp1->TSN_seq == j) */ 3075 if (compare_with_wrap(tp1->rec.data.TSN_seq, j, 3076 MAX_TSN)) 3077 break; 3078 3079 tp1 = TAILQ_NEXT(tp1, sctp_next); 3080 } /* end while (tp1) */ 3081 } /* end for (j = fragStart */ 3082 frag++; /* next one */ 3083 } 3084 #ifdef SCTP_FR_LOGGING 3085 /* 3086 * if (num_frs) sctp_log_fr(*biggest_tsn_acked, 3087 * *biggest_newly_acked_tsn, last_tsn, SCTP_FR_LOG_BIGGEST_TSNS); 3088 */ 3089 #endif 3090 } 3091 3092 static void 3093 sctp_check_for_revoked(struct sctp_association *asoc, uint32_t cumack, 3094 u_long biggest_tsn_acked) 3095 { 3096 struct sctp_tmit_chunk *tp1; 3097 int tot_revoked = 0; 3098 3099 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3100 while (tp1) { 3101 if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack, 3102 MAX_TSN)) { 3103 /* 3104 * ok this guy is either ACK or MARKED. If it is 3105 * ACKED it has been previously acked but not this 3106 * time i.e. revoked. If it is MARKED it was ACK'ed 3107 * again. 3108 */ 3109 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 3110 /* it has been revoked */ 3111 3112 if (sctp_cmt_on_off) { 3113 /* 3114 * If CMT is ON, leave "sent" at 3115 * ACKED. CMT causes reordering of 3116 * data and acks (received on 3117 * different interfaces) can be 3118 * persistently reordered. Acking 3119 * followed by apparent revoking and 3120 * re-acking causes unexpected weird 3121 * behavior. So, at this time, CMT 3122 * does not respect renegs. Renegs 3123 * cannot be recovered. I will fix 3124 * this once I am sure that things 3125 * are working right again with CMT. 3126 */ 3127 } else { 3128 tp1->sent = SCTP_DATAGRAM_SENT; 3129 tp1->rec.data.chunk_was_revoked = 1; 3130 /* 3131 * We must add this stuff back in to 3132 * assure timers and such get 3133 * started. 3134 */ 3135 tp1->whoTo->flight_size += tp1->book_size; 3136 asoc->total_flight_count++; 3137 asoc->total_flight += tp1->book_size; 3138 tot_revoked++; 3139 #ifdef SCTP_SACK_LOGGING 3140 sctp_log_sack(asoc->last_acked_seq, 3141 cumack, 3142 tp1->rec.data.TSN_seq, 3143 0, 3144 0, 3145 SCTP_LOG_TSN_REVOKED); 3146 #endif 3147 } 3148 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) { 3149 /* it has been re-acked in this SACK */ 3150 tp1->sent = SCTP_DATAGRAM_ACKED; 3151 } 3152 } 3153 if (tp1->sent == SCTP_DATAGRAM_UNSENT) 3154 break; 3155 tp1 = TAILQ_NEXT(tp1, sctp_next); 3156 } 3157 if (tot_revoked > 0) { 3158 /* 3159 * Setup the ecn nonce re-sync point. We do this since once 3160 * data is revoked we begin to retransmit things, which do 3161 * NOT have the ECN bits set. This means we are now out of 3162 * sync and must wait until we get back in sync with the 3163 * peer to check ECN bits. 3164 */ 3165 tp1 = TAILQ_FIRST(&asoc->send_queue); 3166 if (tp1 == NULL) { 3167 asoc->nonce_resync_tsn = asoc->sending_seq; 3168 } else { 3169 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq; 3170 } 3171 asoc->nonce_wait_for_ecne = 0; 3172 asoc->nonce_sum_check = 0; 3173 } 3174 } 3175 3176 extern int sctp_peer_chunk_oh; 3177 3178 static void 3179 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, 3180 u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved) 3181 { 3182 struct sctp_tmit_chunk *tp1; 3183 int strike_flag = 0; 3184 struct timeval now; 3185 int tot_retrans = 0; 3186 uint32_t sending_seq; 3187 struct sctp_nets *net; 3188 int num_dests_sacked = 0; 3189 3190 /* 3191 * select the sending_seq, this is either the next thing ready to be 3192 * sent but not transmitted, OR, the next seq we assign. 3193 */ 3194 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 3195 if (tp1 == NULL) { 3196 sending_seq = asoc->sending_seq; 3197 } else { 3198 sending_seq = tp1->rec.data.TSN_seq; 3199 } 3200 3201 /* CMT DAC algo: finding out if SACK is a mixed SACK */ 3202 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3203 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3204 if (net->saw_newack) 3205 num_dests_sacked++; 3206 } 3207 } 3208 if (stcb->asoc.peer_supports_prsctp) { 3209 SCTP_GETTIME_TIMEVAL(&now); 3210 } 3211 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3212 while (tp1) { 3213 strike_flag = 0; 3214 if (tp1->no_fr_allowed) { 3215 /* this one had a timeout or something */ 3216 tp1 = TAILQ_NEXT(tp1, sctp_next); 3217 continue; 3218 } 3219 #ifdef SCTP_FR_LOGGING 3220 if (tp1->sent < SCTP_DATAGRAM_RESEND) 3221 sctp_log_fr(biggest_tsn_newly_acked, 3222 tp1->rec.data.TSN_seq, 3223 tp1->sent, 3224 SCTP_FR_LOG_CHECK_STRIKE); 3225 #endif 3226 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked, 3227 MAX_TSN) || 3228 tp1->sent == SCTP_DATAGRAM_UNSENT) { 3229 /* done */ 3230 break; 3231 } 3232 if (stcb->asoc.peer_supports_prsctp) { 3233 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3234 /* Is it expired? */ 3235 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3236 /* Yes so drop it */ 3237 if (tp1->data != NULL) { 3238 sctp_release_pr_sctp_chunk(stcb, tp1, 3239 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3240 &asoc->sent_queue); 3241 } 3242 tp1 = TAILQ_NEXT(tp1, sctp_next); 3243 continue; 3244 } 3245 } 3246 if ((PR_SCTP_RTX_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3247 /* Has it been retransmitted tv_sec times? */ 3248 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) { 3249 /* Yes, so drop it */ 3250 if (tp1->data != NULL) { 3251 sctp_release_pr_sctp_chunk(stcb, tp1, 3252 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3253 &asoc->sent_queue); 3254 } 3255 tp1 = TAILQ_NEXT(tp1, sctp_next); 3256 continue; 3257 } 3258 } 3259 } 3260 if (compare_with_wrap(tp1->rec.data.TSN_seq, 3261 asoc->this_sack_highest_gap, MAX_TSN)) { 3262 /* we are beyond the tsn in the sack */ 3263 break; 3264 } 3265 if (tp1->sent >= SCTP_DATAGRAM_RESEND) { 3266 /* either a RESEND, ACKED, or MARKED */ 3267 /* skip */ 3268 tp1 = TAILQ_NEXT(tp1, sctp_next); 3269 continue; 3270 } 3271 /* 3272 * CMT : SFR algo (covers part of DAC and HTNA as well) 3273 */ 3274 if (tp1->whoTo->saw_newack == 0) { 3275 /* 3276 * No new acks were receieved for data sent to this 3277 * dest. Therefore, according to the SFR algo for 3278 * CMT, no data sent to this dest can be marked for 3279 * FR using this SACK. (iyengar@cis.udel.edu, 3280 * 2005/05/12) 3281 */ 3282 tp1 = TAILQ_NEXT(tp1, sctp_next); 3283 continue; 3284 } else if (compare_with_wrap(tp1->rec.data.TSN_seq, 3285 tp1->whoTo->this_sack_highest_newack, MAX_TSN)) { 3286 /* 3287 * CMT: New acks were receieved for data sent to 3288 * this dest. But no new acks were seen for data 3289 * sent after tp1. Therefore, according to the SFR 3290 * algo for CMT, tp1 cannot be marked for FR using 3291 * this SACK. This step covers part of the DAC algo 3292 * and the HTNA algo as well. 3293 */ 3294 tp1 = TAILQ_NEXT(tp1, sctp_next); 3295 continue; 3296 } 3297 /* 3298 * Here we check to see if we were have already done a FR 3299 * and if so we see if the biggest TSN we saw in the sack is 3300 * smaller than the recovery point. If so we don't strike 3301 * the tsn... otherwise we CAN strike the TSN. 3302 */ 3303 /* 3304 * @@@ JRI: Check for CMT 3305 */ 3306 if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) { 3307 /* 3308 * Strike the TSN if in fast-recovery and cum-ack 3309 * moved. 3310 */ 3311 #ifdef SCTP_FR_LOGGING 3312 sctp_log_fr(biggest_tsn_newly_acked, 3313 tp1->rec.data.TSN_seq, 3314 tp1->sent, 3315 SCTP_FR_LOG_STRIKE_CHUNK); 3316 #endif 3317 tp1->sent++; 3318 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3319 /* 3320 * CMT DAC algorithm: If SACK flag is set to 3321 * 0, then lowest_newack test will not pass 3322 * because it would have been set to the 3323 * cumack earlier. If not already to be 3324 * rtx'd, If not a mixed sack and if tp1 is 3325 * not between two sacked TSNs, then mark by 3326 * one more. 3327 */ 3328 if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3329 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) { 3330 #ifdef SCTP_FR_LOGGING 3331 sctp_log_fr(16 + num_dests_sacked, 3332 tp1->rec.data.TSN_seq, 3333 tp1->sent, 3334 SCTP_FR_LOG_STRIKE_CHUNK); 3335 #endif 3336 tp1->sent++; 3337 } 3338 } 3339 } else if (tp1->rec.data.doing_fast_retransmit) { 3340 /* 3341 * For those that have done a FR we must take 3342 * special consideration if we strike. I.e the 3343 * biggest_newly_acked must be higher than the 3344 * sending_seq at the time we did the FR. 3345 */ 3346 #ifdef SCTP_FR_TO_ALTERNATE 3347 /* 3348 * If FR's go to new networks, then we must only do 3349 * this for singly homed asoc's. However if the FR's 3350 * go to the same network (Armando's work) then its 3351 * ok to FR multiple times. 3352 */ 3353 if (asoc->numnets < 2) 3354 #else 3355 if (1) 3356 #endif 3357 { 3358 if ((compare_with_wrap(biggest_tsn_newly_acked, 3359 tp1->rec.data.fast_retran_tsn, MAX_TSN)) || 3360 (biggest_tsn_newly_acked == 3361 tp1->rec.data.fast_retran_tsn)) { 3362 /* 3363 * Strike the TSN, since this ack is 3364 * beyond where things were when we 3365 * did a FR. 3366 */ 3367 #ifdef SCTP_FR_LOGGING 3368 sctp_log_fr(biggest_tsn_newly_acked, 3369 tp1->rec.data.TSN_seq, 3370 tp1->sent, 3371 SCTP_FR_LOG_STRIKE_CHUNK); 3372 #endif 3373 tp1->sent++; 3374 strike_flag = 1; 3375 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3376 /* 3377 * CMT DAC algorithm: If 3378 * SACK flag is set to 0, 3379 * then lowest_newack test 3380 * will not pass because it 3381 * would have been set to 3382 * the cumack earlier. If 3383 * not already to be rtx'd, 3384 * If not a mixed sack and 3385 * if tp1 is not between two 3386 * sacked TSNs, then mark by 3387 * one more. 3388 */ 3389 if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3390 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) { 3391 #ifdef SCTP_FR_LOGGING 3392 sctp_log_fr(32 + num_dests_sacked, 3393 tp1->rec.data.TSN_seq, 3394 tp1->sent, 3395 SCTP_FR_LOG_STRIKE_CHUNK); 3396 #endif 3397 tp1->sent++; 3398 } 3399 } 3400 } 3401 } 3402 /* 3403 * @@@ JRI: TODO: remove code for HTNA algo. CMT's 3404 * SFR algo covers HTNA. 3405 */ 3406 } else if (compare_with_wrap(tp1->rec.data.TSN_seq, 3407 biggest_tsn_newly_acked, MAX_TSN)) { 3408 /* 3409 * We don't strike these: This is the HTNA 3410 * algorithm i.e. we don't strike If our TSN is 3411 * larger than the Highest TSN Newly Acked. 3412 */ 3413 ; 3414 } else { 3415 /* Strike the TSN */ 3416 #ifdef SCTP_FR_LOGGING 3417 sctp_log_fr(biggest_tsn_newly_acked, 3418 tp1->rec.data.TSN_seq, 3419 tp1->sent, 3420 SCTP_FR_LOG_STRIKE_CHUNK); 3421 #endif 3422 tp1->sent++; 3423 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3424 /* 3425 * CMT DAC algorithm: If SACK flag is set to 3426 * 0, then lowest_newack test will not pass 3427 * because it would have been set to the 3428 * cumack earlier. If not already to be 3429 * rtx'd, If not a mixed sack and if tp1 is 3430 * not between two sacked TSNs, then mark by 3431 * one more. 3432 */ 3433 if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3434 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) { 3435 #ifdef SCTP_FR_LOGGING 3436 sctp_log_fr(48 + num_dests_sacked, 3437 tp1->rec.data.TSN_seq, 3438 tp1->sent, 3439 SCTP_FR_LOG_STRIKE_CHUNK); 3440 #endif 3441 tp1->sent++; 3442 } 3443 } 3444 } 3445 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3446 /* Increment the count to resend */ 3447 struct sctp_nets *alt; 3448 3449 /* printf("OK, we are now ready to FR this guy\n"); */ 3450 #ifdef SCTP_FR_LOGGING 3451 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count, 3452 0, SCTP_FR_MARKED); 3453 #endif 3454 if (strike_flag) { 3455 /* This is a subsequent FR */ 3456 SCTP_STAT_INCR(sctps_sendmultfastretrans); 3457 } 3458 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 3459 3460 if (sctp_cmt_on_off) { 3461 /* 3462 * CMT: Using RTX_SSTHRESH policy for CMT. 3463 * If CMT is being used, then pick dest with 3464 * largest ssthresh for any retransmission. 3465 * (iyengar@cis.udel.edu, 2005/08/12) 3466 */ 3467 tp1->no_fr_allowed = 1; 3468 alt = tp1->whoTo; 3469 alt = sctp_find_alternate_net(stcb, alt, 1); 3470 /* 3471 * CUCv2: If a different dest is picked for 3472 * the retransmission, then new 3473 * (rtx-)pseudo_cumack needs to be tracked 3474 * for orig dest. Let CUCv2 track new (rtx-) 3475 * pseudo-cumack always. 3476 */ 3477 tp1->whoTo->find_pseudo_cumack = 1; 3478 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3479 3480 3481 } else {/* CMT is OFF */ 3482 3483 #ifdef SCTP_FR_TO_ALTERNATE 3484 /* Can we find an alternate? */ 3485 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); 3486 #else 3487 /* 3488 * default behavior is to NOT retransmit 3489 * FR's to an alternate. Armando Caro's 3490 * paper details why. 3491 */ 3492 alt = tp1->whoTo; 3493 #endif 3494 } 3495 3496 tp1->rec.data.doing_fast_retransmit = 1; 3497 tot_retrans++; 3498 /* mark the sending seq for possible subsequent FR's */ 3499 /* 3500 * printf("Marking TSN for FR new value %x\n", 3501 * (uint32_t)tpi->rec.data.TSN_seq); 3502 */ 3503 if (TAILQ_EMPTY(&asoc->send_queue)) { 3504 /* 3505 * If the queue of send is empty then its 3506 * the next sequence number that will be 3507 * assigned so we subtract one from this to 3508 * get the one we last sent. 3509 */ 3510 tp1->rec.data.fast_retran_tsn = sending_seq; 3511 } else { 3512 /* 3513 * If there are chunks on the send queue 3514 * (unsent data that has made it from the 3515 * stream queues but not out the door, we 3516 * take the first one (which will have the 3517 * lowest TSN) and subtract one to get the 3518 * one we last sent. 3519 */ 3520 struct sctp_tmit_chunk *ttt; 3521 3522 ttt = TAILQ_FIRST(&asoc->send_queue); 3523 tp1->rec.data.fast_retran_tsn = 3524 ttt->rec.data.TSN_seq; 3525 } 3526 3527 if (tp1->do_rtt) { 3528 /* 3529 * this guy had a RTO calculation pending on 3530 * it, cancel it 3531 */ 3532 tp1->whoTo->rto_pending = 0; 3533 tp1->do_rtt = 0; 3534 } 3535 /* fix counts and things */ 3536 #ifdef SCTP_FLIGHT_LOGGING 3537 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN, 3538 tp1->whoTo->flight_size, 3539 tp1->book_size, 3540 (uintptr_t) stcb, 3541 tp1->rec.data.TSN_seq); 3542 #endif 3543 tp1->whoTo->net_ack++; 3544 if (tp1->whoTo->flight_size >= tp1->book_size) 3545 tp1->whoTo->flight_size -= tp1->book_size; 3546 else 3547 tp1->whoTo->flight_size = 0; 3548 3549 #ifdef SCTP_LOG_RWND 3550 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, 3551 asoc->peers_rwnd, tp1->send_size, sctp_peer_chunk_oh); 3552 #endif 3553 /* add back to the rwnd */ 3554 asoc->peers_rwnd += (tp1->send_size + sctp_peer_chunk_oh); 3555 3556 /* remove from the total flight */ 3557 if (asoc->total_flight >= tp1->book_size) { 3558 asoc->total_flight -= tp1->book_size; 3559 if (asoc->total_flight_count > 0) 3560 asoc->total_flight_count--; 3561 } else { 3562 asoc->total_flight = 0; 3563 asoc->total_flight_count = 0; 3564 } 3565 3566 3567 if (alt != tp1->whoTo) { 3568 /* yes, there is an alternate. */ 3569 sctp_free_remote_addr(tp1->whoTo); 3570 tp1->whoTo = alt; 3571 atomic_add_int(&alt->ref_count, 1); 3572 } 3573 } 3574 tp1 = TAILQ_NEXT(tp1, sctp_next); 3575 } /* while (tp1) */ 3576 3577 if (tot_retrans > 0) { 3578 /* 3579 * Setup the ecn nonce re-sync point. We do this since once 3580 * we go to FR something we introduce a Karn's rule scenario 3581 * and won't know the totals for the ECN bits. 3582 */ 3583 asoc->nonce_resync_tsn = sending_seq; 3584 asoc->nonce_wait_for_ecne = 0; 3585 asoc->nonce_sum_check = 0; 3586 } 3587 } 3588 3589 struct sctp_tmit_chunk * 3590 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb, 3591 struct sctp_association *asoc) 3592 { 3593 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL; 3594 struct timeval now; 3595 int now_filled = 0; 3596 3597 if (asoc->peer_supports_prsctp == 0) { 3598 return (NULL); 3599 } 3600 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3601 while (tp1) { 3602 if (tp1->sent != SCTP_FORWARD_TSN_SKIP && 3603 tp1->sent != SCTP_DATAGRAM_RESEND) { 3604 /* no chance to advance, out of here */ 3605 break; 3606 } 3607 if (!PR_SCTP_ENABLED(tp1->flags)) { 3608 /* 3609 * We can't fwd-tsn past any that are reliable aka 3610 * retransmitted until the asoc fails. 3611 */ 3612 break; 3613 } 3614 if (!now_filled) { 3615 SCTP_GETTIME_TIMEVAL(&now); 3616 now_filled = 1; 3617 } 3618 tp2 = TAILQ_NEXT(tp1, sctp_next); 3619 /* 3620 * now we got a chunk which is marked for another 3621 * retransmission to a PR-stream but has run out its chances 3622 * already maybe OR has been marked to skip now. Can we skip 3623 * it if its a resend? 3624 */ 3625 if (tp1->sent == SCTP_DATAGRAM_RESEND && 3626 (PR_SCTP_TTL_ENABLED(tp1->flags))) { 3627 /* 3628 * Now is this one marked for resend and its time is 3629 * now up? 3630 */ 3631 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3632 /* Yes so drop it */ 3633 if (tp1->data) { 3634 sctp_release_pr_sctp_chunk(stcb, tp1, 3635 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3636 &asoc->sent_queue); 3637 } 3638 } else { 3639 /* 3640 * No, we are done when hit one for resend 3641 * whos time as not expired. 3642 */ 3643 break; 3644 } 3645 } 3646 /* 3647 * Ok now if this chunk is marked to drop it we can clean up 3648 * the chunk, advance our peer ack point and we can check 3649 * the next chunk. 3650 */ 3651 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) { 3652 /* advance PeerAckPoint goes forward */ 3653 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq; 3654 a_adv = tp1; 3655 /* 3656 * we don't want to de-queue it here. Just wait for 3657 * the next peer SACK to come with a new cumTSN and 3658 * then the chunk will be droped in the normal 3659 * fashion. 3660 */ 3661 if (tp1->data) { 3662 sctp_free_bufspace(stcb, asoc, tp1, 1); 3663 /* 3664 * Maybe there should be another 3665 * notification type 3666 */ 3667 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, 3668 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3669 tp1); 3670 sctp_m_freem(tp1->data); 3671 tp1->data = NULL; 3672 if (stcb->sctp_socket) { 3673 sctp_sowwakeup(stcb->sctp_ep, 3674 stcb->sctp_socket); 3675 #ifdef SCTP_WAKE_LOGGING 3676 sctp_wakeup_log(stcb, tp1->rec.data.TSN_seq, 1, SCTP_WAKESND_FROM_FWDTSN); 3677 #endif 3678 } 3679 } 3680 } else { 3681 /* 3682 * If it is still in RESEND we can advance no 3683 * further 3684 */ 3685 break; 3686 } 3687 /* 3688 * If we hit here we just dumped tp1, move to next tsn on 3689 * sent queue. 3690 */ 3691 tp1 = tp2; 3692 } 3693 return (a_adv); 3694 } 3695 3696 #ifdef SCTP_HIGH_SPEED 3697 struct sctp_hs_raise_drop { 3698 int32_t cwnd; 3699 int32_t increase; 3700 int32_t drop_percent; 3701 }; 3702 3703 #define SCTP_HS_TABLE_SIZE 73 3704 3705 struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = { 3706 {38, 1, 50}, /* 0 */ 3707 {118, 2, 44}, /* 1 */ 3708 {221, 3, 41}, /* 2 */ 3709 {347, 4, 38}, /* 3 */ 3710 {495, 5, 37}, /* 4 */ 3711 {663, 6, 35}, /* 5 */ 3712 {851, 7, 34}, /* 6 */ 3713 {1058, 8, 33}, /* 7 */ 3714 {1284, 9, 32}, /* 8 */ 3715 {1529, 10, 31}, /* 9 */ 3716 {1793, 11, 30}, /* 10 */ 3717 {2076, 12, 29}, /* 11 */ 3718 {2378, 13, 28}, /* 12 */ 3719 {2699, 14, 28}, /* 13 */ 3720 {3039, 15, 27}, /* 14 */ 3721 {3399, 16, 27}, /* 15 */ 3722 {3778, 17, 26}, /* 16 */ 3723 {4177, 18, 26}, /* 17 */ 3724 {4596, 19, 25}, /* 18 */ 3725 {5036, 20, 25}, /* 19 */ 3726 {5497, 21, 24}, /* 20 */ 3727 {5979, 22, 24}, /* 21 */ 3728 {6483, 23, 23}, /* 22 */ 3729 {7009, 24, 23}, /* 23 */ 3730 {7558, 25, 22}, /* 24 */ 3731 {8130, 26, 22}, /* 25 */ 3732 {8726, 27, 22}, /* 26 */ 3733 {9346, 28, 21}, /* 27 */ 3734 {9991, 29, 21}, /* 28 */ 3735 {10661, 30, 21}, /* 29 */ 3736 {11358, 31, 20}, /* 30 */ 3737 {12082, 32, 20}, /* 31 */ 3738 {12834, 33, 20}, /* 32 */ 3739 {13614, 34, 19}, /* 33 */ 3740 {14424, 35, 19}, /* 34 */ 3741 {15265, 36, 19}, /* 35 */ 3742 {16137, 37, 19}, /* 36 */ 3743 {17042, 38, 18}, /* 37 */ 3744 {17981, 39, 18}, /* 38 */ 3745 {18955, 40, 18}, /* 39 */ 3746 {19965, 41, 17}, /* 40 */ 3747 {21013, 42, 17}, /* 41 */ 3748 {22101, 43, 17}, /* 42 */ 3749 {23230, 44, 17}, /* 43 */ 3750 {24402, 45, 16}, /* 44 */ 3751 {25618, 46, 16}, /* 45 */ 3752 {26881, 47, 16}, /* 46 */ 3753 {28193, 48, 16}, /* 47 */ 3754 {29557, 49, 15}, /* 48 */ 3755 {30975, 50, 15}, /* 49 */ 3756 {32450, 51, 15}, /* 50 */ 3757 {33986, 52, 15}, /* 51 */ 3758 {35586, 53, 14}, /* 52 */ 3759 {37253, 54, 14}, /* 53 */ 3760 {38992, 55, 14}, /* 54 */ 3761 {40808, 56, 14}, /* 55 */ 3762 {42707, 57, 13}, /* 56 */ 3763 {44694, 58, 13}, /* 57 */ 3764 {46776, 59, 13}, /* 58 */ 3765 {48961, 60, 13}, /* 59 */ 3766 {51258, 61, 13}, /* 60 */ 3767 {53677, 62, 12}, /* 61 */ 3768 {56230, 63, 12}, /* 62 */ 3769 {58932, 64, 12}, /* 63 */ 3770 {61799, 65, 12}, /* 64 */ 3771 {64851, 66, 11}, /* 65 */ 3772 {68113, 67, 11}, /* 66 */ 3773 {71617, 68, 11}, /* 67 */ 3774 {75401, 69, 10}, /* 68 */ 3775 {79517, 70, 10}, /* 69 */ 3776 {84035, 71, 10}, /* 70 */ 3777 {89053, 72, 10}, /* 71 */ 3778 {94717, 73, 9} /* 72 */ 3779 }; 3780 3781 static void 3782 sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net) 3783 { 3784 int cur_val, i, indx, incr; 3785 3786 cur_val = net->cwnd >> 10; 3787 indx = SCTP_HS_TABLE_SIZE - 1; 3788 3789 if (cur_val < sctp_cwnd_adjust[0].cwnd) { 3790 /* normal mode */ 3791 if (net->net_ack > net->mtu) { 3792 net->cwnd += net->mtu; 3793 #ifdef SCTP_CWND_MONITOR 3794 sctp_log_cwnd(stcb, net, net->mtu, SCTP_CWND_LOG_FROM_SS); 3795 #endif 3796 } else { 3797 net->cwnd += net->net_ack; 3798 #ifdef SCTP_CWND_MONITOR 3799 sctp_log_cwnd(stcb, net, net->net_ack, SCTP_CWND_LOG_FROM_SS); 3800 #endif 3801 } 3802 } else { 3803 for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) { 3804 if (cur_val < sctp_cwnd_adjust[i].cwnd) { 3805 indx = i; 3806 break; 3807 } 3808 } 3809 net->last_hs_used = indx; 3810 incr = ((sctp_cwnd_adjust[indx].increase) << 10); 3811 net->cwnd += incr; 3812 #ifdef SCTP_CWND_MONITOR 3813 sctp_log_cwnd(stcb, net, incr, SCTP_CWND_LOG_FROM_SS); 3814 #endif 3815 } 3816 } 3817 3818 static void 3819 sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net) 3820 { 3821 int cur_val, i, indx; 3822 3823 #ifdef SCTP_CWND_MONITOR 3824 int old_cwnd = net->cwnd; 3825 3826 #endif 3827 3828 cur_val = net->cwnd >> 10; 3829 indx = net->last_hs_used; 3830 if (cur_val < sctp_cwnd_adjust[0].cwnd) { 3831 /* normal mode */ 3832 net->ssthresh = net->cwnd / 2; 3833 if (net->ssthresh < (net->mtu * 2)) { 3834 net->ssthresh = 2 * net->mtu; 3835 } 3836 net->cwnd = net->ssthresh; 3837 } else { 3838 /* drop by the proper amount */ 3839 net->ssthresh = net->cwnd - (int)((net->cwnd / 100) * 3840 sctp_cwnd_adjust[net->last_hs_used].drop_percent); 3841 net->cwnd = net->ssthresh; 3842 /* now where are we */ 3843 indx = net->last_hs_used; 3844 cur_val = net->cwnd >> 10; 3845 /* reset where we are in the table */ 3846 if (cur_val < sctp_cwnd_adjust[0].cwnd) { 3847 /* feel out of hs */ 3848 net->last_hs_used = 0; 3849 } else { 3850 for (i = indx; i >= 1; i--) { 3851 if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) { 3852 break; 3853 } 3854 } 3855 net->last_hs_used = indx; 3856 } 3857 } 3858 #ifdef SCTP_CWND_MONITOR 3859 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR); 3860 #endif 3861 3862 } 3863 3864 #endif 3865 3866 extern int sctp_early_fr; 3867 extern int sctp_L2_abc_variable; 3868 3869 3870 static __inline void 3871 sctp_cwnd_update(struct sctp_tcb *stcb, 3872 struct sctp_association *asoc, 3873 int accum_moved, int reneged_all, int will_exit) 3874 { 3875 struct sctp_nets *net; 3876 3877 /******************************/ 3878 /* update cwnd and Early FR */ 3879 /******************************/ 3880 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3881 #ifdef JANA_CODE_WHY_THIS 3882 /* 3883 * CMT fast recovery code. Need to debug. 3884 */ 3885 if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) { 3886 if (compare_with_wrap(asoc->last_acked_seq, 3887 net->fast_recovery_tsn, MAX_TSN) || 3888 (asoc->last_acked_seq == net->fast_recovery_tsn) || 3889 compare_with_wrap(net->pseudo_cumack, net->fast_recovery_tsn, MAX_TSN) || 3890 (net->pseudo_cumack == net->fast_recovery_tsn)) { 3891 net->will_exit_fast_recovery = 1; 3892 } 3893 } 3894 #endif 3895 if (sctp_early_fr) { 3896 /* 3897 * So, first of all do we need to have a Early FR 3898 * timer running? 3899 */ 3900 if (((TAILQ_FIRST(&asoc->sent_queue)) && 3901 (net->ref_count > 1) && 3902 (net->flight_size < net->cwnd)) || 3903 (reneged_all)) { 3904 /* 3905 * yes, so in this case stop it if its 3906 * running, and then restart it. Reneging 3907 * all is a special case where we want to 3908 * run the Early FR timer and then force the 3909 * last few unacked to be sent, causing us 3910 * to illicit a sack with gaps to force out 3911 * the others. 3912 */ 3913 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 3914 SCTP_STAT_INCR(sctps_earlyfrstpidsck2); 3915 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 3916 SCTP_FROM_SCTP_INDATA + SCTP_LOC_20); 3917 } 3918 SCTP_STAT_INCR(sctps_earlyfrstrid); 3919 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net); 3920 } else { 3921 /* No, stop it if its running */ 3922 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 3923 SCTP_STAT_INCR(sctps_earlyfrstpidsck3); 3924 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 3925 SCTP_FROM_SCTP_INDATA + SCTP_LOC_21); 3926 } 3927 } 3928 } 3929 /* if nothing was acked on this destination skip it */ 3930 if (net->net_ack == 0) { 3931 #ifdef SCTP_CWND_LOGGING 3932 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK); 3933 #endif 3934 continue; 3935 } 3936 if (net->net_ack2 > 0) { 3937 /* 3938 * Karn's rule applies to clearing error count, this 3939 * is optional. 3940 */ 3941 net->error_count = 0; 3942 if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) == 3943 SCTP_ADDR_NOT_REACHABLE) { 3944 /* addr came good */ 3945 net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE; 3946 net->dest_state |= SCTP_ADDR_REACHABLE; 3947 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 3948 SCTP_RECEIVED_SACK, (void *)net); 3949 /* now was it the primary? if so restore */ 3950 if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) { 3951 sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net); 3952 } 3953 } 3954 } 3955 #ifdef JANA_CODE_WHY_THIS 3956 /* 3957 * Cannot skip for CMT. Need to come back and check these 3958 * variables for CMT. CMT fast recovery code. Need to debug. 3959 */ 3960 if (sctp_cmt_on_off == 1 && 3961 net->fast_retran_loss_recovery && 3962 net->will_exit_fast_recovery == 0) 3963 #endif 3964 if (sctp_cmt_on_off == 0 && asoc->fast_retran_loss_recovery && will_exit == 0) { 3965 /* 3966 * If we are in loss recovery we skip any 3967 * cwnd update 3968 */ 3969 goto skip_cwnd_update; 3970 } 3971 /* 3972 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has 3973 * moved. 3974 */ 3975 if (accum_moved || (sctp_cmt_on_off && net->new_pseudo_cumack)) { 3976 /* If the cumulative ack moved we can proceed */ 3977 if (net->cwnd <= net->ssthresh) { 3978 /* We are in slow start */ 3979 if (net->flight_size + net->net_ack >= 3980 net->cwnd) { 3981 #ifdef SCTP_HIGH_SPEED 3982 sctp_hs_cwnd_increase(stcb, net); 3983 #else 3984 if (net->net_ack > (net->mtu * sctp_L2_abc_variable)) { 3985 net->cwnd += (net->mtu * sctp_L2_abc_variable); 3986 #ifdef SCTP_CWND_MONITOR 3987 sctp_log_cwnd(stcb, net, net->mtu, 3988 SCTP_CWND_LOG_FROM_SS); 3989 #endif 3990 3991 } else { 3992 net->cwnd += net->net_ack; 3993 #ifdef SCTP_CWND_MONITOR 3994 sctp_log_cwnd(stcb, net, net->net_ack, 3995 SCTP_CWND_LOG_FROM_SS); 3996 #endif 3997 3998 } 3999 #endif 4000 } else { 4001 unsigned int dif; 4002 4003 dif = net->cwnd - (net->flight_size + 4004 net->net_ack); 4005 #ifdef SCTP_CWND_LOGGING 4006 sctp_log_cwnd(stcb, net, net->net_ack, 4007 SCTP_CWND_LOG_NOADV_SS); 4008 #endif 4009 } 4010 } else { 4011 /* We are in congestion avoidance */ 4012 if (net->flight_size + net->net_ack >= 4013 net->cwnd) { 4014 /* 4015 * add to pba only if we had a 4016 * cwnd's worth (or so) in flight OR 4017 * the burst limit was applied. 4018 */ 4019 net->partial_bytes_acked += 4020 net->net_ack; 4021 4022 /* 4023 * Do we need to increase (if pba is 4024 * > cwnd)? 4025 */ 4026 if (net->partial_bytes_acked >= 4027 net->cwnd) { 4028 if (net->cwnd < 4029 net->partial_bytes_acked) { 4030 net->partial_bytes_acked -= 4031 net->cwnd; 4032 } else { 4033 net->partial_bytes_acked = 4034 0; 4035 } 4036 net->cwnd += net->mtu; 4037 #ifdef SCTP_CWND_MONITOR 4038 sctp_log_cwnd(stcb, net, net->mtu, 4039 SCTP_CWND_LOG_FROM_CA); 4040 #endif 4041 } 4042 #ifdef SCTP_CWND_LOGGING 4043 else { 4044 sctp_log_cwnd(stcb, net, net->net_ack, 4045 SCTP_CWND_LOG_NOADV_CA); 4046 } 4047 #endif 4048 } else { 4049 unsigned int dif; 4050 4051 #ifdef SCTP_CWND_LOGGING 4052 sctp_log_cwnd(stcb, net, net->net_ack, 4053 SCTP_CWND_LOG_NOADV_CA); 4054 #endif 4055 dif = net->cwnd - (net->flight_size + 4056 net->net_ack); 4057 } 4058 } 4059 } else { 4060 #ifdef SCTP_CWND_LOGGING 4061 sctp_log_cwnd(stcb, net, net->mtu, 4062 SCTP_CWND_LOG_NO_CUMACK); 4063 #endif 4064 } 4065 skip_cwnd_update: 4066 /* 4067 * NOW, according to Karn's rule do we need to restore the 4068 * RTO timer back? Check our net_ack2. If not set then we 4069 * have a ambiguity.. i.e. all data ack'd was sent to more 4070 * than one place. 4071 */ 4072 if (net->net_ack2) { 4073 /* restore any doubled timers */ 4074 net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1; 4075 if (net->RTO < stcb->asoc.minrto) { 4076 net->RTO = stcb->asoc.minrto; 4077 } 4078 if (net->RTO > stcb->asoc.maxrto) { 4079 net->RTO = stcb->asoc.maxrto; 4080 } 4081 } 4082 } 4083 } 4084 4085 4086 void 4087 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, 4088 uint32_t rwnd, int nonce_sum_flag, int *abort_now) 4089 { 4090 struct sctp_nets *net; 4091 struct sctp_association *asoc; 4092 struct sctp_tmit_chunk *tp1, *tp2; 4093 int j; 4094 4095 SCTP_TCB_LOCK_ASSERT(stcb); 4096 asoc = &stcb->asoc; 4097 /* First setup for CC stuff */ 4098 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4099 net->prev_cwnd = net->cwnd; 4100 net->net_ack = 0; 4101 net->net_ack2 = 0; 4102 } 4103 if (sctp_strict_sacks) { 4104 uint32_t send_s; 4105 4106 if (TAILQ_EMPTY(&asoc->send_queue)) { 4107 send_s = asoc->sending_seq; 4108 } else { 4109 tp1 = TAILQ_FIRST(&asoc->send_queue); 4110 send_s = tp1->rec.data.TSN_seq; 4111 } 4112 if ((cumack == send_s) || 4113 compare_with_wrap(cumack, send_s, MAX_TSN)) { 4114 #ifdef INVARIANTS /* for testing only */ 4115 panic("Impossible sack 1"); 4116 #else 4117 struct mbuf *oper; 4118 4119 *abort_now = 1; 4120 /* XXX */ 4121 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4122 0, M_DONTWAIT, 1, MT_DATA); 4123 if (oper) { 4124 struct sctp_paramhdr *ph; 4125 uint32_t *ippp; 4126 4127 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 4128 sizeof(uint32_t); 4129 ph = mtod(oper, struct sctp_paramhdr *); 4130 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 4131 ph->param_length = htons(SCTP_BUF_LEN(oper)); 4132 ippp = (uint32_t *) (ph + 1); 4133 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25); 4134 } 4135 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 4136 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper); 4137 return; 4138 #endif 4139 } 4140 } 4141 asoc->this_sack_highest_gap = cumack; 4142 stcb->asoc.overall_error_count = 0; 4143 /* process the new consecutive TSN first */ 4144 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4145 while (tp1) { 4146 tp2 = TAILQ_NEXT(tp1, sctp_next); 4147 if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq, 4148 MAX_TSN) || 4149 cumack == tp1->rec.data.TSN_seq) { 4150 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 4151 /* 4152 * ECN Nonce: Add the nonce to the sender's 4153 * nonce sum 4154 */ 4155 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce; 4156 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4157 /* 4158 * If it is less than ACKED, it is 4159 * now no-longer in flight. Higher 4160 * values may occur during marking 4161 */ 4162 #ifdef SCTP_FLIGHT_LOGGING 4163 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN, 4164 tp1->whoTo->flight_size, 4165 tp1->book_size, 4166 (uintptr_t) stcb, 4167 tp1->rec.data.TSN_seq); 4168 #endif 4169 4170 if (tp1->whoTo->flight_size >= tp1->book_size) { 4171 tp1->whoTo->flight_size -= tp1->book_size; 4172 } else { 4173 tp1->whoTo->flight_size = 0; 4174 } 4175 if (asoc->total_flight >= tp1->book_size) { 4176 asoc->total_flight -= tp1->book_size; 4177 if (asoc->total_flight_count > 0) 4178 asoc->total_flight_count--; 4179 } else { 4180 asoc->total_flight = 0; 4181 asoc->total_flight_count = 0; 4182 } 4183 tp1->whoTo->net_ack += tp1->send_size; 4184 if (tp1->snd_count < 2) { 4185 /* 4186 * True non-retransmited 4187 * chunk 4188 */ 4189 tp1->whoTo->net_ack2 += 4190 tp1->send_size; 4191 4192 /* update RTO too? */ 4193 if ((tp1->do_rtt) && (tp1->whoTo->rto_pending)) { 4194 tp1->whoTo->RTO = 4195 sctp_calculate_rto(stcb, 4196 asoc, tp1->whoTo, 4197 &tp1->sent_rcv_time); 4198 tp1->whoTo->rto_pending = 0; 4199 tp1->do_rtt = 0; 4200 } 4201 } 4202 #ifdef SCTP_CWND_LOGGING 4203 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 4204 #endif 4205 } 4206 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4207 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4208 } 4209 tp1->sent = SCTP_DATAGRAM_ACKED; 4210 } 4211 } else { 4212 break; 4213 } 4214 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4215 if (tp1->data) { 4216 sctp_free_bufspace(stcb, asoc, tp1, 1); 4217 sctp_m_freem(tp1->data); 4218 } 4219 #ifdef SCTP_SACK_LOGGING 4220 sctp_log_sack(asoc->last_acked_seq, 4221 cumack, 4222 tp1->rec.data.TSN_seq, 4223 0, 4224 0, 4225 SCTP_LOG_FREE_SENT); 4226 #endif 4227 tp1->data = NULL; 4228 asoc->sent_queue_cnt--; 4229 sctp_free_remote_addr(tp1->whoTo); 4230 sctp_free_a_chunk(stcb, tp1); 4231 tp1 = tp2; 4232 } 4233 if (stcb->sctp_socket) { 4234 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4235 #ifdef SCTP_WAKE_LOGGING 4236 sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK); 4237 #endif 4238 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4239 #ifdef SCTP_WAKE_LOGGING 4240 } else { 4241 sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK); 4242 #endif 4243 } 4244 4245 if (asoc->last_acked_seq != cumack) 4246 sctp_cwnd_update(stcb, asoc, 1, 0, 0); 4247 asoc->last_acked_seq = cumack; 4248 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4249 /* nothing left in-flight */ 4250 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4251 net->flight_size = 0; 4252 net->partial_bytes_acked = 0; 4253 } 4254 asoc->total_flight = 0; 4255 asoc->total_flight_count = 0; 4256 } 4257 /* Fix up the a-p-a-p for future PR-SCTP sends */ 4258 if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) { 4259 asoc->advanced_peer_ack_point = cumack; 4260 } 4261 /* ECN Nonce updates */ 4262 if (asoc->ecn_nonce_allowed) { 4263 if (asoc->nonce_sum_check) { 4264 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) { 4265 if (asoc->nonce_wait_for_ecne == 0) { 4266 struct sctp_tmit_chunk *lchk; 4267 4268 lchk = TAILQ_FIRST(&asoc->send_queue); 4269 asoc->nonce_wait_for_ecne = 1; 4270 if (lchk) { 4271 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq; 4272 } else { 4273 asoc->nonce_wait_tsn = asoc->sending_seq; 4274 } 4275 } else { 4276 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) || 4277 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) { 4278 /* 4279 * Misbehaving peer. We need 4280 * to react to this guy 4281 */ 4282 asoc->ecn_allowed = 0; 4283 asoc->ecn_nonce_allowed = 0; 4284 } 4285 } 4286 } 4287 } else { 4288 /* See if Resynchronization Possible */ 4289 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) { 4290 asoc->nonce_sum_check = 1; 4291 /* 4292 * now we must calculate what the base is. 4293 * We do this based on two things, we know 4294 * the total's for all the segments 4295 * gap-acked in the SACK (none), We also 4296 * know the SACK's nonce sum, its in 4297 * nonce_sum_flag. So we can build a truth 4298 * table to back-calculate the new value of 4299 * asoc->nonce_sum_expect_base: 4300 * 4301 * SACK-flag-Value Seg-Sums Base 0 0 0 4302 * 1 0 1 0 1 1 1 4303 * 1 0 4304 */ 4305 asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM; 4306 } 4307 } 4308 } 4309 /* RWND update */ 4310 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 4311 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh))); 4312 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4313 /* SWS sender side engages */ 4314 asoc->peers_rwnd = 0; 4315 } 4316 /* Now assure a timer where data is queued at */ 4317 again: 4318 j = 0; 4319 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4320 if (net->flight_size) { 4321 int to_ticks; 4322 4323 if (net->RTO == 0) { 4324 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 4325 } else { 4326 to_ticks = MSEC_TO_TICKS(net->RTO); 4327 } 4328 j++; 4329 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks, 4330 sctp_timeout_handler, &net->rxt_timer); 4331 } else { 4332 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4333 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4334 stcb, net, 4335 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22); 4336 } 4337 if (sctp_early_fr) { 4338 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 4339 SCTP_STAT_INCR(sctps_earlyfrstpidsck4); 4340 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 4341 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23); 4342 } 4343 } 4344 } 4345 } 4346 if ((j == 0) && (!TAILQ_EMPTY(&asoc->sent_queue)) && (asoc->sent_queue_retran_cnt == 0)) { 4347 /* huh, this should not happen */ 4348 #ifdef INVARIANTS 4349 panic("Flight size incorrect? fixing??"); 4350 #else 4351 printf("Flight size incorrect? fixing\n"); 4352 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4353 net->flight_size = 0; 4354 } 4355 asoc->total_flight = 0; 4356 asoc->total_flight_count = 0; 4357 asoc->sent_queue_retran_cnt = 0; 4358 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4359 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4360 tp1->whoTo->flight_size += tp1->book_size; 4361 asoc->total_flight += tp1->book_size; 4362 asoc->total_flight_count++; 4363 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4364 asoc->sent_queue_retran_cnt++; 4365 } 4366 } 4367 #endif 4368 goto again; 4369 } 4370 /**********************************/ 4371 /* Now what about shutdown issues */ 4372 /**********************************/ 4373 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4374 /* nothing left on sendqueue.. consider done */ 4375 /* clean up */ 4376 if ((asoc->stream_queue_cnt == 1) && 4377 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4378 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 4379 (asoc->locked_on_sending) 4380 ) { 4381 struct sctp_stream_queue_pending *sp; 4382 4383 /* 4384 * I may be in a state where we got all across.. but 4385 * cannot write more due to a shutdown... we abort 4386 * since the user did not indicate EOR in this case. 4387 * The sp will be cleaned during free of the asoc. 4388 */ 4389 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), 4390 sctp_streamhead); 4391 if ((sp) && (sp->length == 0) && (sp->msg_is_complete == 0)) { 4392 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4393 asoc->locked_on_sending = NULL; 4394 asoc->stream_queue_cnt--; 4395 } 4396 } 4397 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4398 (asoc->stream_queue_cnt == 0)) { 4399 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4400 /* Need to abort here */ 4401 struct mbuf *oper; 4402 4403 abort_out_now: 4404 *abort_now = 1; 4405 /* XXX */ 4406 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4407 0, M_DONTWAIT, 1, MT_DATA); 4408 if (oper) { 4409 struct sctp_paramhdr *ph; 4410 uint32_t *ippp; 4411 4412 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 4413 sizeof(uint32_t); 4414 ph = mtod(oper, struct sctp_paramhdr *); 4415 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 4416 ph->param_length = htons(SCTP_BUF_LEN(oper)); 4417 ippp = (uint32_t *) (ph + 1); 4418 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24); 4419 } 4420 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; 4421 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper); 4422 } else { 4423 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 4424 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4425 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4426 } 4427 asoc->state = SCTP_STATE_SHUTDOWN_SENT; 4428 sctp_stop_timers_for_shutdown(stcb); 4429 sctp_send_shutdown(stcb, 4430 stcb->asoc.primary_destination); 4431 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4432 stcb->sctp_ep, stcb, asoc->primary_destination); 4433 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4434 stcb->sctp_ep, stcb, asoc->primary_destination); 4435 } 4436 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4437 (asoc->stream_queue_cnt == 0)) { 4438 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4439 goto abort_out_now; 4440 } 4441 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4442 asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT; 4443 sctp_send_shutdown_ack(stcb, 4444 stcb->asoc.primary_destination); 4445 4446 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4447 stcb->sctp_ep, stcb, asoc->primary_destination); 4448 } 4449 } 4450 #ifdef SCTP_SACK_RWND_LOGGING 4451 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 4452 rwnd, 4453 stcb->asoc.peers_rwnd, 4454 stcb->asoc.total_flight, 4455 stcb->asoc.total_output_queue_size); 4456 4457 #endif 4458 } 4459 4460 4461 4462 void 4463 sctp_handle_sack(struct sctp_sack_chunk *ch, struct sctp_tcb *stcb, 4464 struct sctp_nets *net_from, int *abort_now) 4465 { 4466 struct sctp_association *asoc; 4467 struct sctp_sack *sack; 4468 struct sctp_tmit_chunk *tp1, *tp2; 4469 uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, 4470 this_sack_lowest_newack; 4471 uint16_t num_seg, num_dup; 4472 uint16_t wake_him = 0; 4473 unsigned int sack_length; 4474 uint32_t send_s; 4475 long j; 4476 int accum_moved = 0; 4477 int will_exit_fast_recovery = 0; 4478 uint32_t a_rwnd; 4479 struct sctp_nets *net = NULL; 4480 int nonce_sum_flag, ecn_seg_sums = 0; 4481 uint8_t reneged_all = 0; 4482 uint8_t cmt_dac_flag; 4483 4484 /* 4485 * we take any chance we can to service our queues since we cannot 4486 * get awoken when the socket is read from :< 4487 */ 4488 /* 4489 * Now perform the actual SACK handling: 1) Verify that it is not an 4490 * old sack, if so discard. 2) If there is nothing left in the send 4491 * queue (cum-ack is equal to last acked) then you have a duplicate 4492 * too, update any rwnd change and verify no timers are running. 4493 * then return. 3) Process any new consequtive data i.e. cum-ack 4494 * moved process these first and note that it moved. 4) Process any 4495 * sack blocks. 5) Drop any acked from the queue. 6) Check for any 4496 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left, 4497 * sync up flightsizes and things, stop all timers and also check 4498 * for shutdown_pending state. If so then go ahead and send off the 4499 * shutdown. If in shutdown recv, send off the shutdown-ack and 4500 * start that timer, Ret. 9) Strike any non-acked things and do FR 4501 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp 4502 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK 4503 * if in shutdown_recv state. 4504 */ 4505 SCTP_TCB_LOCK_ASSERT(stcb); 4506 sack = &ch->sack; 4507 /* CMT DAC algo */ 4508 this_sack_lowest_newack = 0; 4509 j = 0; 4510 sack_length = ntohs(ch->ch.chunk_length); 4511 if (sack_length < sizeof(struct sctp_sack_chunk)) { 4512 #ifdef SCTP_DEBUG 4513 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 4514 printf("Bad size on sack chunk .. to small\n"); 4515 } 4516 #endif 4517 return; 4518 } 4519 /* ECN Nonce */ 4520 SCTP_STAT_INCR(sctps_slowpath_sack); 4521 nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM; 4522 cum_ack = last_tsn = ntohl(sack->cum_tsn_ack); 4523 num_seg = ntohs(sack->num_gap_ack_blks); 4524 a_rwnd = (uint32_t) ntohl(sack->a_rwnd); 4525 4526 /* CMT DAC algo */ 4527 cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC; 4528 num_dup = ntohs(sack->num_dup_tsns); 4529 4530 4531 stcb->asoc.overall_error_count = 0; 4532 asoc = &stcb->asoc; 4533 #ifdef SCTP_SACK_LOGGING 4534 sctp_log_sack(asoc->last_acked_seq, 4535 cum_ack, 4536 0, 4537 num_seg, 4538 num_dup, 4539 SCTP_LOG_NEW_SACK); 4540 #endif 4541 #if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING) 4542 if (num_dup) { 4543 int off_to_dup, iii; 4544 uint32_t *dupdata; 4545 4546 off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk); 4547 if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= sack_length) { 4548 dupdata = (uint32_t *) ((caddr_t)ch + off_to_dup); 4549 for (iii = 0; iii < num_dup; iii++) { 4550 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); 4551 dupdata++; 4552 4553 } 4554 } else { 4555 printf("Size invalid offset to dups:%d number dups:%d sack_len:%d num gaps:%d\n", 4556 off_to_dup, num_dup, sack_length, num_seg); 4557 } 4558 } 4559 #endif 4560 /* reality check */ 4561 if (TAILQ_EMPTY(&asoc->send_queue)) { 4562 send_s = asoc->sending_seq; 4563 } else { 4564 tp1 = TAILQ_FIRST(&asoc->send_queue); 4565 send_s = tp1->rec.data.TSN_seq; 4566 } 4567 4568 if (sctp_strict_sacks) { 4569 if (cum_ack == send_s || 4570 compare_with_wrap(cum_ack, send_s, MAX_TSN)) { 4571 #ifdef INVARIANTS /* for testing only */ 4572 hopeless_peer: 4573 panic("Impossible sack 1"); 4574 #else 4575 struct mbuf *oper; 4576 4577 /* 4578 * no way, we have not even sent this TSN out yet. 4579 * Peer is hopelessly messed up with us. 4580 */ 4581 hopeless_peer: 4582 *abort_now = 1; 4583 /* XXX */ 4584 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4585 0, M_DONTWAIT, 1, MT_DATA); 4586 if (oper) { 4587 struct sctp_paramhdr *ph; 4588 uint32_t *ippp; 4589 4590 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 4591 sizeof(uint32_t); 4592 ph = mtod(oper, struct sctp_paramhdr *); 4593 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 4594 ph->param_length = htons(SCTP_BUF_LEN(oper)); 4595 ippp = (uint32_t *) (ph + 1); 4596 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25); 4597 } 4598 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 4599 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper); 4600 return; 4601 #endif 4602 } 4603 } 4604 /**********************/ 4605 /* 1) check the range */ 4606 /**********************/ 4607 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) { 4608 /* acking something behind */ 4609 return; 4610 } 4611 /* update the Rwnd of the peer */ 4612 if (TAILQ_EMPTY(&asoc->sent_queue) && 4613 TAILQ_EMPTY(&asoc->send_queue) && 4614 (asoc->stream_queue_cnt == 0) 4615 ) { 4616 /* nothing left on send/sent and strmq */ 4617 #ifdef SCTP_LOG_RWND 4618 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4619 asoc->peers_rwnd, 0, 0, a_rwnd); 4620 #endif 4621 asoc->peers_rwnd = a_rwnd; 4622 if (asoc->sent_queue_retran_cnt) { 4623 asoc->sent_queue_retran_cnt = 0; 4624 } 4625 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4626 /* SWS sender side engages */ 4627 asoc->peers_rwnd = 0; 4628 } 4629 /* stop any timers */ 4630 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4631 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4632 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); 4633 if (sctp_early_fr) { 4634 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 4635 SCTP_STAT_INCR(sctps_earlyfrstpidsck1); 4636 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 4637 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); 4638 } 4639 } 4640 net->partial_bytes_acked = 0; 4641 net->flight_size = 0; 4642 } 4643 asoc->total_flight = 0; 4644 asoc->total_flight_count = 0; 4645 return; 4646 } 4647 /* 4648 * We init netAckSz and netAckSz2 to 0. These are used to track 2 4649 * things. The total byte count acked is tracked in netAckSz AND 4650 * netAck2 is used to track the total bytes acked that are un- 4651 * amibguious and were never retransmitted. We track these on a per 4652 * destination address basis. 4653 */ 4654 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4655 net->prev_cwnd = net->cwnd; 4656 net->net_ack = 0; 4657 net->net_ack2 = 0; 4658 4659 /* 4660 * CMT: Reset CUC algo variable before SACK processing 4661 */ 4662 net->new_pseudo_cumack = 0; 4663 net->will_exit_fast_recovery = 0; 4664 } 4665 /* process the new consecutive TSN first */ 4666 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4667 while (tp1) { 4668 if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq, 4669 MAX_TSN) || 4670 last_tsn == tp1->rec.data.TSN_seq) { 4671 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 4672 /* 4673 * ECN Nonce: Add the nonce to the sender's 4674 * nonce sum 4675 */ 4676 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce; 4677 accum_moved = 1; 4678 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4679 /* 4680 * If it is less than ACKED, it is 4681 * now no-longer in flight. Higher 4682 * values may occur during marking 4683 */ 4684 if ((tp1->whoTo->dest_state & 4685 SCTP_ADDR_UNCONFIRMED) && 4686 (tp1->snd_count < 2)) { 4687 /* 4688 * If there was no retran 4689 * and the address is 4690 * un-confirmed and we sent 4691 * there and are now 4692 * sacked.. its confirmed, 4693 * mark it so. 4694 */ 4695 tp1->whoTo->dest_state &= 4696 ~SCTP_ADDR_UNCONFIRMED; 4697 } 4698 #ifdef SCTP_FLIGHT_LOGGING 4699 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN, 4700 tp1->whoTo->flight_size, 4701 tp1->book_size, 4702 (uintptr_t) stcb, 4703 tp1->rec.data.TSN_seq); 4704 #endif 4705 if (tp1->whoTo->flight_size >= tp1->book_size) { 4706 tp1->whoTo->flight_size -= tp1->book_size; 4707 } else { 4708 tp1->whoTo->flight_size = 0; 4709 } 4710 if (asoc->total_flight >= tp1->book_size) { 4711 asoc->total_flight -= tp1->book_size; 4712 if (asoc->total_flight_count > 0) 4713 asoc->total_flight_count--; 4714 } else { 4715 asoc->total_flight = 0; 4716 asoc->total_flight_count = 0; 4717 } 4718 tp1->whoTo->net_ack += tp1->send_size; 4719 4720 /* CMT SFR and DAC algos */ 4721 this_sack_lowest_newack = tp1->rec.data.TSN_seq; 4722 tp1->whoTo->saw_newack = 1; 4723 4724 if (tp1->snd_count < 2) { 4725 /* 4726 * True non-retransmited 4727 * chunk 4728 */ 4729 tp1->whoTo->net_ack2 += 4730 tp1->send_size; 4731 4732 /* update RTO too? */ 4733 if (tp1->do_rtt) { 4734 tp1->whoTo->RTO = 4735 sctp_calculate_rto(stcb, 4736 asoc, tp1->whoTo, 4737 &tp1->sent_rcv_time); 4738 tp1->whoTo->rto_pending = 0; 4739 tp1->do_rtt = 0; 4740 } 4741 } 4742 /* 4743 * CMT: CUCv2 algorithm. From the 4744 * cumack'd TSNs, for each TSN being 4745 * acked for the first time, set the 4746 * following variables for the 4747 * corresp destination. 4748 * new_pseudo_cumack will trigger a 4749 * cwnd update. 4750 * find_(rtx_)pseudo_cumack will 4751 * trigger search for the next 4752 * expected (rtx-)pseudo-cumack. 4753 */ 4754 tp1->whoTo->new_pseudo_cumack = 1; 4755 tp1->whoTo->find_pseudo_cumack = 1; 4756 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4757 4758 4759 #ifdef SCTP_SACK_LOGGING 4760 sctp_log_sack(asoc->last_acked_seq, 4761 cum_ack, 4762 tp1->rec.data.TSN_seq, 4763 0, 4764 0, 4765 SCTP_LOG_TSN_ACKED); 4766 #endif 4767 #ifdef SCTP_CWND_LOGGING 4768 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 4769 #endif 4770 } 4771 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4772 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4773 #ifdef SCTP_AUDITING_ENABLED 4774 sctp_audit_log(0xB3, 4775 (asoc->sent_queue_retran_cnt & 0x000000ff)); 4776 #endif 4777 } 4778 tp1->sent = SCTP_DATAGRAM_ACKED; 4779 } 4780 } else { 4781 break; 4782 } 4783 tp1 = TAILQ_NEXT(tp1, sctp_next); 4784 } 4785 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; 4786 /* always set this up to cum-ack */ 4787 asoc->this_sack_highest_gap = last_tsn; 4788 4789 if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) { 4790 4791 /* skip corrupt segments */ 4792 goto skip_segments; 4793 } 4794 if (num_seg > 0) { 4795 4796 /* 4797 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has 4798 * to be greater than the cumack. Also reset saw_newack to 0 4799 * for all dests. 4800 */ 4801 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4802 net->saw_newack = 0; 4803 net->this_sack_highest_newack = last_tsn; 4804 } 4805 4806 /* 4807 * thisSackHighestGap will increase while handling NEW 4808 * segments this_sack_highest_newack will increase while 4809 * handling NEWLY ACKED chunks. this_sack_lowest_newack is 4810 * used for CMT DAC algo. saw_newack will also change. 4811 */ 4812 sctp_handle_segments(stcb, asoc, ch, last_tsn, 4813 &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack, 4814 num_seg, &ecn_seg_sums); 4815 4816 if (sctp_strict_sacks) { 4817 /* 4818 * validate the biggest_tsn_acked in the gap acks if 4819 * strict adherence is wanted. 4820 */ 4821 if ((biggest_tsn_acked == send_s) || 4822 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) { 4823 /* 4824 * peer is either confused or we are under 4825 * attack. We must abort. 4826 */ 4827 goto hopeless_peer; 4828 } 4829 } 4830 } 4831 skip_segments: 4832 /*******************************************/ 4833 /* cancel ALL T3-send timer if accum moved */ 4834 /*******************************************/ 4835 if (sctp_cmt_on_off) { 4836 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4837 if (net->new_pseudo_cumack) 4838 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4839 stcb, net, 4840 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27); 4841 4842 } 4843 } else { 4844 if (accum_moved) { 4845 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4846 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4847 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28); 4848 } 4849 } 4850 } 4851 /********************************************/ 4852 /* drop the acked chunks from the sendqueue */ 4853 /********************************************/ 4854 asoc->last_acked_seq = cum_ack; 4855 4856 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4857 if (tp1 == NULL) 4858 goto done_with_it; 4859 do { 4860 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack, 4861 MAX_TSN)) { 4862 break; 4863 } 4864 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 4865 /* no more sent on list */ 4866 break; 4867 } 4868 tp2 = TAILQ_NEXT(tp1, sctp_next); 4869 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4870 /* 4871 * Friendlier printf in lieu of panic now that I think its 4872 * fixed 4873 */ 4874 4875 if (tp1->pr_sctp_on) { 4876 if (asoc->pr_sctp_cnt != 0) 4877 asoc->pr_sctp_cnt--; 4878 } 4879 if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) && 4880 (asoc->total_flight > 0)) { 4881 printf("Warning flight size incorrect should be 0 is %d\n", 4882 asoc->total_flight); 4883 asoc->total_flight = 0; 4884 } 4885 if (tp1->data) { 4886 sctp_free_bufspace(stcb, asoc, tp1, 1); 4887 sctp_m_freem(tp1->data); 4888 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 4889 asoc->sent_queue_cnt_removeable--; 4890 } 4891 } 4892 #ifdef SCTP_SACK_LOGGING 4893 sctp_log_sack(asoc->last_acked_seq, 4894 cum_ack, 4895 tp1->rec.data.TSN_seq, 4896 0, 4897 0, 4898 SCTP_LOG_FREE_SENT); 4899 #endif 4900 tp1->data = NULL; 4901 asoc->sent_queue_cnt--; 4902 sctp_free_remote_addr(tp1->whoTo); 4903 4904 sctp_free_a_chunk(stcb, tp1); 4905 wake_him++; 4906 tp1 = tp2; 4907 } while (tp1 != NULL); 4908 4909 done_with_it: 4910 if ((wake_him) && (stcb->sctp_socket)) { 4911 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4912 #ifdef SCTP_WAKE_LOGGING 4913 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK); 4914 #endif 4915 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4916 #ifdef SCTP_WAKE_LOGGING 4917 } else { 4918 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK); 4919 #endif 4920 } 4921 4922 if ((sctp_cmt_on_off == 0) && asoc->fast_retran_loss_recovery && accum_moved) { 4923 if (compare_with_wrap(asoc->last_acked_seq, 4924 asoc->fast_recovery_tsn, MAX_TSN) || 4925 asoc->last_acked_seq == asoc->fast_recovery_tsn) { 4926 /* Setup so we will exit RFC2582 fast recovery */ 4927 will_exit_fast_recovery = 1; 4928 } 4929 } 4930 /* 4931 * Check for revoked fragments: 4932 * 4933 * if Previous sack - Had no frags then we can't have any revoked if 4934 * Previous sack - Had frag's then - If we now have frags aka 4935 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked 4936 * some of them. else - The peer revoked all ACKED fragments, since 4937 * we had some before and now we have NONE. 4938 */ 4939 4940 if (num_seg) 4941 sctp_check_for_revoked(asoc, cum_ack, biggest_tsn_acked); 4942 else if (asoc->saw_sack_with_frags) { 4943 int cnt_revoked = 0; 4944 4945 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4946 if (tp1 != NULL) { 4947 /* Peer revoked all dg's marked or acked */ 4948 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4949 if ((tp1->sent > SCTP_DATAGRAM_RESEND) && 4950 (tp1->sent < SCTP_FORWARD_TSN_SKIP)) { 4951 tp1->sent = SCTP_DATAGRAM_SENT; 4952 tp1->rec.data.chunk_was_revoked = 1; 4953 tp1->whoTo->flight_size += tp1->book_size; 4954 asoc->total_flight_count++; 4955 asoc->total_flight += tp1->book_size; 4956 cnt_revoked++; 4957 } 4958 } 4959 if (cnt_revoked) { 4960 reneged_all = 1; 4961 } 4962 } 4963 asoc->saw_sack_with_frags = 0; 4964 } 4965 if (num_seg) 4966 asoc->saw_sack_with_frags = 1; 4967 else 4968 asoc->saw_sack_with_frags = 0; 4969 4970 4971 sctp_cwnd_update(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery); 4972 4973 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4974 /* nothing left in-flight */ 4975 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4976 /* stop all timers */ 4977 if (sctp_early_fr) { 4978 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 4979 SCTP_STAT_INCR(sctps_earlyfrstpidsck4); 4980 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 4981 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29); 4982 } 4983 } 4984 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4985 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30); 4986 net->flight_size = 0; 4987 net->partial_bytes_acked = 0; 4988 } 4989 asoc->total_flight = 0; 4990 asoc->total_flight_count = 0; 4991 } 4992 /**********************************/ 4993 /* Now what about shutdown issues */ 4994 /**********************************/ 4995 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4996 /* nothing left on sendqueue.. consider done */ 4997 #ifdef SCTP_LOG_RWND 4998 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4999 asoc->peers_rwnd, 0, 0, a_rwnd); 5000 #endif 5001 asoc->peers_rwnd = a_rwnd; 5002 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 5003 /* SWS sender side engages */ 5004 asoc->peers_rwnd = 0; 5005 } 5006 /* clean up */ 5007 if ((asoc->stream_queue_cnt == 1) && 5008 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 5009 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 5010 (asoc->locked_on_sending) 5011 ) { 5012 struct sctp_stream_queue_pending *sp; 5013 5014 /* 5015 * I may be in a state where we got all across.. but 5016 * cannot write more due to a shutdown... we abort 5017 * since the user did not indicate EOR in this case. 5018 */ 5019 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), 5020 sctp_streamhead); 5021 if ((sp) && (sp->length == 0) && (sp->msg_is_complete == 0)) { 5022 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 5023 asoc->locked_on_sending = NULL; 5024 asoc->stream_queue_cnt--; 5025 } 5026 } 5027 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 5028 (asoc->stream_queue_cnt == 0)) { 5029 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 5030 /* Need to abort here */ 5031 struct mbuf *oper; 5032 5033 abort_out_now: 5034 *abort_now = 1; 5035 /* XXX */ 5036 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 5037 0, M_DONTWAIT, 1, MT_DATA); 5038 if (oper) { 5039 struct sctp_paramhdr *ph; 5040 uint32_t *ippp; 5041 5042 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 5043 sizeof(uint32_t); 5044 ph = mtod(oper, struct sctp_paramhdr *); 5045 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 5046 ph->param_length = htons(SCTP_BUF_LEN(oper)); 5047 ippp = (uint32_t *) (ph + 1); 5048 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31); 5049 } 5050 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31; 5051 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper); 5052 return; 5053 } else { 5054 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 5055 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 5056 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 5057 } 5058 asoc->state = SCTP_STATE_SHUTDOWN_SENT; 5059 sctp_stop_timers_for_shutdown(stcb); 5060 sctp_send_shutdown(stcb, 5061 stcb->asoc.primary_destination); 5062 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 5063 stcb->sctp_ep, stcb, asoc->primary_destination); 5064 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 5065 stcb->sctp_ep, stcb, asoc->primary_destination); 5066 } 5067 return; 5068 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 5069 (asoc->stream_queue_cnt == 0)) { 5070 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 5071 goto abort_out_now; 5072 } 5073 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 5074 asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT; 5075 sctp_send_shutdown_ack(stcb, 5076 stcb->asoc.primary_destination); 5077 5078 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 5079 stcb->sctp_ep, stcb, asoc->primary_destination); 5080 return; 5081 } 5082 } 5083 /* 5084 * Now here we are going to recycle net_ack for a different use... 5085 * HEADS UP. 5086 */ 5087 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5088 net->net_ack = 0; 5089 } 5090 5091 /* 5092 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking 5093 * to be done. Setting this_sack_lowest_newack to the cum_ack will 5094 * automatically ensure that. 5095 */ 5096 if (sctp_cmt_on_off && sctp_cmt_use_dac && (cmt_dac_flag == 0)) { 5097 this_sack_lowest_newack = cum_ack; 5098 } 5099 if (num_seg > 0) { 5100 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, 5101 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved); 5102 } 5103 /*********************************************/ 5104 /* Here we perform PR-SCTP procedures */ 5105 /* (section 4.2) */ 5106 /*********************************************/ 5107 /* C1. update advancedPeerAckPoint */ 5108 if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) { 5109 asoc->advanced_peer_ack_point = cum_ack; 5110 } 5111 /* C2. try to further move advancedPeerAckPoint ahead */ 5112 5113 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) { 5114 struct sctp_tmit_chunk *lchk; 5115 5116 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 5117 /* C3. See if we need to send a Fwd-TSN */ 5118 if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack, 5119 MAX_TSN)) { 5120 /* 5121 * ISSUE with ECN, see FWD-TSN processing for notes 5122 * on issues that will occur when the ECN NONCE 5123 * stuff is put into SCTP for cross checking. 5124 */ 5125 send_forward_tsn(stcb, asoc); 5126 5127 /* 5128 * ECN Nonce: Disable Nonce Sum check when FWD TSN 5129 * is sent and store resync tsn 5130 */ 5131 asoc->nonce_sum_check = 0; 5132 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point; 5133 if (lchk) { 5134 /* Assure a timer is up */ 5135 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5136 stcb->sctp_ep, stcb, lchk->whoTo); 5137 } 5138 } 5139 } 5140 /* 5141 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off == 1) && 5142 * (net->fast_retran_loss_recovery == 0))) 5143 */ 5144 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5145 if ((asoc->fast_retran_loss_recovery == 0) || (sctp_cmt_on_off == 1)) { 5146 /* out of a RFC2582 Fast recovery window? */ 5147 if (net->net_ack > 0) { 5148 /* 5149 * per section 7.2.3, are there any 5150 * destinations that had a fast retransmit 5151 * to them. If so what we need to do is 5152 * adjust ssthresh and cwnd. 5153 */ 5154 struct sctp_tmit_chunk *lchk; 5155 5156 #ifdef SCTP_HIGH_SPEED 5157 sctp_hs_cwnd_decrease(stcb, net); 5158 #else 5159 #ifdef SCTP_CWND_MONITOR 5160 int old_cwnd = net->cwnd; 5161 5162 #endif 5163 net->ssthresh = net->cwnd / 2; 5164 if (net->ssthresh < (net->mtu * 2)) { 5165 net->ssthresh = 2 * net->mtu; 5166 } 5167 net->cwnd = net->ssthresh; 5168 #ifdef SCTP_CWND_MONITOR 5169 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), 5170 SCTP_CWND_LOG_FROM_FR); 5171 #endif 5172 #endif 5173 5174 lchk = TAILQ_FIRST(&asoc->send_queue); 5175 5176 net->partial_bytes_acked = 0; 5177 /* Turn on fast recovery window */ 5178 asoc->fast_retran_loss_recovery = 1; 5179 if (lchk == NULL) { 5180 /* Mark end of the window */ 5181 asoc->fast_recovery_tsn = asoc->sending_seq - 1; 5182 } else { 5183 asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1; 5184 } 5185 5186 /* 5187 * CMT fast recovery -- per destination 5188 * recovery variable. 5189 */ 5190 net->fast_retran_loss_recovery = 1; 5191 5192 if (lchk == NULL) { 5193 /* Mark end of the window */ 5194 net->fast_recovery_tsn = asoc->sending_seq - 1; 5195 } else { 5196 net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1; 5197 } 5198 5199 5200 5201 /* 5202 * Disable Nonce Sum Checking and store the 5203 * resync tsn 5204 */ 5205 asoc->nonce_sum_check = 0; 5206 asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1; 5207 5208 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, 5209 stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32); 5210 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5211 stcb->sctp_ep, stcb, net); 5212 } 5213 } else if (net->net_ack > 0) { 5214 /* 5215 * Mark a peg that we WOULD have done a cwnd 5216 * reduction but RFC2582 prevented this action. 5217 */ 5218 SCTP_STAT_INCR(sctps_fastretransinrtt); 5219 } 5220 } 5221 5222 5223 /****************************************************************** 5224 * Here we do the stuff with ECN Nonce checking. 5225 * We basically check to see if the nonce sum flag was incorrect 5226 * or if resynchronization needs to be done. Also if we catch a 5227 * misbehaving receiver we give him the kick. 5228 ******************************************************************/ 5229 5230 if (asoc->ecn_nonce_allowed) { 5231 if (asoc->nonce_sum_check) { 5232 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) { 5233 if (asoc->nonce_wait_for_ecne == 0) { 5234 struct sctp_tmit_chunk *lchk; 5235 5236 lchk = TAILQ_FIRST(&asoc->send_queue); 5237 asoc->nonce_wait_for_ecne = 1; 5238 if (lchk) { 5239 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq; 5240 } else { 5241 asoc->nonce_wait_tsn = asoc->sending_seq; 5242 } 5243 } else { 5244 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) || 5245 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) { 5246 /* 5247 * Misbehaving peer. We need 5248 * to react to this guy 5249 */ 5250 asoc->ecn_allowed = 0; 5251 asoc->ecn_nonce_allowed = 0; 5252 } 5253 } 5254 } 5255 } else { 5256 /* See if Resynchronization Possible */ 5257 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) { 5258 asoc->nonce_sum_check = 1; 5259 /* 5260 * now we must calculate what the base is. 5261 * We do this based on two things, we know 5262 * the total's for all the segments 5263 * gap-acked in the SACK, its stored in 5264 * ecn_seg_sums. We also know the SACK's 5265 * nonce sum, its in nonce_sum_flag. So we 5266 * can build a truth table to back-calculate 5267 * the new value of 5268 * asoc->nonce_sum_expect_base: 5269 * 5270 * SACK-flag-Value Seg-Sums Base 0 0 0 5271 * 1 0 1 0 1 1 1 5272 * 1 0 5273 */ 5274 asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM; 5275 } 5276 } 5277 } 5278 /* Now are we exiting loss recovery ? */ 5279 if (will_exit_fast_recovery) { 5280 /* Ok, we must exit fast recovery */ 5281 asoc->fast_retran_loss_recovery = 0; 5282 } 5283 if ((asoc->sat_t3_loss_recovery) && 5284 ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn, 5285 MAX_TSN) || 5286 (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) { 5287 /* end satellite t3 loss recovery */ 5288 asoc->sat_t3_loss_recovery = 0; 5289 } 5290 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5291 if (net->will_exit_fast_recovery) { 5292 /* Ok, we must exit fast recovery */ 5293 net->fast_retran_loss_recovery = 0; 5294 } 5295 } 5296 5297 /* Adjust and set the new rwnd value */ 5298 #ifdef SCTP_LOG_RWND 5299 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 5300 asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * sctp_peer_chunk_oh), a_rwnd); 5301 #endif 5302 5303 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd, 5304 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh))); 5305 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 5306 /* SWS sender side engages */ 5307 asoc->peers_rwnd = 0; 5308 } 5309 /* 5310 * Now we must setup so we have a timer up for anyone with 5311 * outstanding data. 5312 */ 5313 again: 5314 j = 0; 5315 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5316 if (net->flight_size) { 5317 j++; 5318 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5319 stcb->sctp_ep, stcb, net); 5320 } 5321 } 5322 if ((j == 0) && (!TAILQ_EMPTY(&asoc->sent_queue)) && (asoc->sent_queue_retran_cnt == 0)) { 5323 /* huh, this should not happen */ 5324 #ifdef INVARIANTS 5325 panic("Flight size incorrect? fixing??"); 5326 #else 5327 printf("Flight size incorrect? fixing??\n"); 5328 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5329 net->flight_size = 0; 5330 } 5331 asoc->total_flight = 0; 5332 asoc->total_flight_count = 0; 5333 asoc->sent_queue_retran_cnt = 0; 5334 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5335 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5336 tp1->whoTo->flight_size += tp1->book_size; 5337 asoc->total_flight += tp1->book_size; 5338 asoc->total_flight_count++; 5339 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 5340 asoc->sent_queue_retran_cnt++; 5341 } 5342 } 5343 #endif 5344 goto again; 5345 } 5346 #ifdef SCTP_SACK_RWND_LOGGING 5347 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 5348 a_rwnd, 5349 stcb->asoc.peers_rwnd, 5350 stcb->asoc.total_flight, 5351 stcb->asoc.total_output_queue_size); 5352 5353 #endif 5354 5355 } 5356 5357 void 5358 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, 5359 struct sctp_nets *netp, int *abort_flag) 5360 { 5361 /* Copy cum-ack */ 5362 uint32_t cum_ack, a_rwnd; 5363 5364 cum_ack = ntohl(cp->cumulative_tsn_ack); 5365 /* Arrange so a_rwnd does NOT change */ 5366 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight; 5367 5368 /* Now call the express sack handling */ 5369 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag); 5370 } 5371 5372 static void 5373 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, 5374 struct sctp_stream_in *strmin) 5375 { 5376 struct sctp_queued_to_read *ctl, *nctl; 5377 struct sctp_association *asoc; 5378 int tt; 5379 5380 asoc = &stcb->asoc; 5381 tt = strmin->last_sequence_delivered; 5382 /* 5383 * First deliver anything prior to and including the stream no that 5384 * came in 5385 */ 5386 ctl = TAILQ_FIRST(&strmin->inqueue); 5387 while (ctl) { 5388 nctl = TAILQ_NEXT(ctl, next); 5389 if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) || 5390 (tt == ctl->sinfo_ssn)) { 5391 /* this is deliverable now */ 5392 TAILQ_REMOVE(&strmin->inqueue, ctl, next); 5393 /* subtract pending on streams */ 5394 asoc->size_on_all_streams -= ctl->length; 5395 sctp_ucount_decr(asoc->cnt_on_all_streams); 5396 /* deliver it to at least the delivery-q */ 5397 if (stcb->sctp_socket) { 5398 sctp_add_to_readq(stcb->sctp_ep, stcb, 5399 ctl, 5400 &stcb->sctp_socket->so_rcv, 1); 5401 } 5402 } else { 5403 /* no more delivery now. */ 5404 break; 5405 } 5406 ctl = nctl; 5407 } 5408 /* 5409 * now we must deliver things in queue the normal way if any are 5410 * now ready. 5411 */ 5412 tt = strmin->last_sequence_delivered + 1; 5413 ctl = TAILQ_FIRST(&strmin->inqueue); 5414 while (ctl) { 5415 nctl = TAILQ_NEXT(ctl, next); 5416 if (tt == ctl->sinfo_ssn) { 5417 /* this is deliverable now */ 5418 TAILQ_REMOVE(&strmin->inqueue, ctl, next); 5419 /* subtract pending on streams */ 5420 asoc->size_on_all_streams -= ctl->length; 5421 sctp_ucount_decr(asoc->cnt_on_all_streams); 5422 /* deliver it to at least the delivery-q */ 5423 strmin->last_sequence_delivered = ctl->sinfo_ssn; 5424 if (stcb->sctp_socket) { 5425 sctp_add_to_readq(stcb->sctp_ep, stcb, 5426 ctl, 5427 &stcb->sctp_socket->so_rcv, 1); 5428 } 5429 tt = strmin->last_sequence_delivered + 1; 5430 } else { 5431 break; 5432 } 5433 ctl = nctl; 5434 } 5435 } 5436 5437 void 5438 sctp_handle_forward_tsn(struct sctp_tcb *stcb, 5439 struct sctp_forward_tsn_chunk *fwd, int *abort_flag) 5440 { 5441 /* 5442 * ISSUES that MUST be fixed for ECN! When we are the sender of the 5443 * forward TSN, when the SACK comes back that acknowledges the 5444 * FWD-TSN we must reset the NONCE sum to match correctly. This will 5445 * get quite tricky since we may have sent more data interveneing 5446 * and must carefully account for what the SACK says on the nonce 5447 * and any gaps that are reported. This work will NOT be done here, 5448 * but I note it here since it is really related to PR-SCTP and 5449 * FWD-TSN's 5450 */ 5451 5452 /* The pr-sctp fwd tsn */ 5453 /* 5454 * here we will perform all the data receiver side steps for 5455 * processing FwdTSN, as required in by pr-sctp draft: 5456 * 5457 * Assume we get FwdTSN(x): 5458 * 5459 * 1) update local cumTSN to x 2) try to further advance cumTSN to x + 5460 * others we have 3) examine and update re-ordering queue on 5461 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to 5462 * report where we are. 5463 */ 5464 struct sctp_strseq *stseq; 5465 struct sctp_association *asoc; 5466 uint32_t new_cum_tsn, gap, back_out_htsn; 5467 unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size; 5468 struct sctp_stream_in *strm; 5469 struct sctp_tmit_chunk *chk, *at; 5470 5471 cumack_set_flag = 0; 5472 asoc = &stcb->asoc; 5473 cnt_gone = 0; 5474 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) { 5475 #ifdef SCTP_DEBUG 5476 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 5477 printf("Bad size too small/big fwd-tsn\n"); 5478 } 5479 #endif 5480 return; 5481 } 5482 m_size = (stcb->asoc.mapping_array_size << 3); 5483 /*************************************************************/ 5484 /* 1. Here we update local cumTSN and shift the bitmap array */ 5485 /*************************************************************/ 5486 new_cum_tsn = ntohl(fwd->new_cumulative_tsn); 5487 5488 if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) || 5489 asoc->cumulative_tsn == new_cum_tsn) { 5490 /* Already got there ... */ 5491 return; 5492 } 5493 back_out_htsn = asoc->highest_tsn_inside_map; 5494 if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map, 5495 MAX_TSN)) { 5496 asoc->highest_tsn_inside_map = new_cum_tsn; 5497 #ifdef SCTP_MAP_LOGGING 5498 sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 5499 #endif 5500 } 5501 /* 5502 * now we know the new TSN is more advanced, let's find the actual 5503 * gap 5504 */ 5505 if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn, 5506 MAX_TSN)) || 5507 (new_cum_tsn == asoc->mapping_array_base_tsn)) { 5508 gap = new_cum_tsn - asoc->mapping_array_base_tsn; 5509 } else { 5510 /* try to prevent underflow here */ 5511 gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1; 5512 } 5513 5514 if (gap > m_size || gap < 0) { 5515 asoc->highest_tsn_inside_map = back_out_htsn; 5516 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) { 5517 /* 5518 * out of range (of single byte chunks in the rwnd I 5519 * give out) too questionable. better to drop it 5520 * silently 5521 */ 5522 return; 5523 } 5524 if (asoc->highest_tsn_inside_map > 5525 asoc->mapping_array_base_tsn) { 5526 gap = asoc->highest_tsn_inside_map - 5527 asoc->mapping_array_base_tsn; 5528 } else { 5529 gap = asoc->highest_tsn_inside_map + 5530 (MAX_TSN - asoc->mapping_array_base_tsn) + 1; 5531 } 5532 cumack_set_flag = 1; 5533 } 5534 for (i = 0; i <= gap; i++) { 5535 SCTP_SET_TSN_PRESENT(asoc->mapping_array, i); 5536 } 5537 /* 5538 * Now after marking all, slide thing forward but no sack please. 5539 */ 5540 sctp_sack_check(stcb, 0, 0, abort_flag); 5541 if (*abort_flag) 5542 return; 5543 5544 if (cumack_set_flag) { 5545 /* 5546 * fwd-tsn went outside my gap array - not a common 5547 * occurance. Do the same thing we do when a cookie-echo 5548 * arrives. 5549 */ 5550 asoc->highest_tsn_inside_map = new_cum_tsn - 1; 5551 asoc->mapping_array_base_tsn = new_cum_tsn; 5552 asoc->cumulative_tsn = asoc->highest_tsn_inside_map; 5553 #ifdef SCTP_MAP_LOGGING 5554 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 5555 #endif 5556 asoc->last_echo_tsn = asoc->highest_tsn_inside_map; 5557 } 5558 /*************************************************************/ 5559 /* 2. Clear up re-assembly queue */ 5560 /*************************************************************/ 5561 5562 /* 5563 * First service it if pd-api is up, just in case we can progress it 5564 * forward 5565 */ 5566 if (asoc->fragmented_delivery_inprogress) { 5567 sctp_service_reassembly(stcb, asoc); 5568 } 5569 if (!TAILQ_EMPTY(&asoc->reasmqueue)) { 5570 /* For each one on here see if we need to toss it */ 5571 /* 5572 * For now large messages held on the reasmqueue that are 5573 * complete will be tossed too. We could in theory do more 5574 * work to spin through and stop after dumping one msg aka 5575 * seeing the start of a new msg at the head, and call the 5576 * delivery function... to see if it can be delivered... But 5577 * for now we just dump everything on the queue. 5578 */ 5579 chk = TAILQ_FIRST(&asoc->reasmqueue); 5580 while (chk) { 5581 at = TAILQ_NEXT(chk, sctp_next); 5582 if (compare_with_wrap(asoc->cumulative_tsn, 5583 chk->rec.data.TSN_seq, MAX_TSN) || 5584 asoc->cumulative_tsn == chk->rec.data.TSN_seq) { 5585 /* It needs to be tossed */ 5586 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); 5587 if (compare_with_wrap(chk->rec.data.TSN_seq, 5588 asoc->tsn_last_delivered, MAX_TSN)) { 5589 asoc->tsn_last_delivered = 5590 chk->rec.data.TSN_seq; 5591 asoc->str_of_pdapi = 5592 chk->rec.data.stream_number; 5593 asoc->ssn_of_pdapi = 5594 chk->rec.data.stream_seq; 5595 asoc->fragment_flags = 5596 chk->rec.data.rcv_flags; 5597 } 5598 asoc->size_on_reasm_queue -= chk->send_size; 5599 sctp_ucount_decr(asoc->cnt_on_reasm_queue); 5600 cnt_gone++; 5601 5602 /* Clear up any stream problem */ 5603 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != 5604 SCTP_DATA_UNORDERED && 5605 (compare_with_wrap(chk->rec.data.stream_seq, 5606 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered, 5607 MAX_SEQ))) { 5608 /* 5609 * We must dump forward this streams 5610 * sequence number if the chunk is 5611 * not unordered that is being 5612 * skipped. There is a chance that 5613 * if the peer does not include the 5614 * last fragment in its FWD-TSN we 5615 * WILL have a problem here since 5616 * you would have a partial chunk in 5617 * queue that may not be 5618 * deliverable. Also if a Partial 5619 * delivery API as started the user 5620 * may get a partial chunk. The next 5621 * read returning a new chunk... 5622 * really ugly but I see no way 5623 * around it! Maybe a notify?? 5624 */ 5625 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = 5626 chk->rec.data.stream_seq; 5627 } 5628 if (chk->data) { 5629 sctp_m_freem(chk->data); 5630 chk->data = NULL; 5631 } 5632 sctp_free_remote_addr(chk->whoTo); 5633 sctp_free_a_chunk(stcb, chk); 5634 } else { 5635 /* 5636 * Ok we have gone beyond the end of the 5637 * fwd-tsn's mark. Some checks... 5638 */ 5639 if ((asoc->fragmented_delivery_inprogress) && 5640 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) { 5641 /* 5642 * Special case PD-API is up and 5643 * what we fwd-tsn' over includes 5644 * one that had the LAST_FRAG. We no 5645 * longer need to do the PD-API. 5646 */ 5647 asoc->fragmented_delivery_inprogress = 0; 5648 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION, 5649 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL); 5650 5651 } 5652 break; 5653 } 5654 chk = at; 5655 } 5656 } 5657 if (asoc->fragmented_delivery_inprogress) { 5658 /* 5659 * Ok we removed cnt_gone chunks in the PD-API queue that 5660 * were being delivered. So now we must turn off the flag. 5661 */ 5662 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION, 5663 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL); 5664 asoc->fragmented_delivery_inprogress = 0; 5665 } 5666 /*************************************************************/ 5667 /* 3. Update the PR-stream re-ordering queues */ 5668 /*************************************************************/ 5669 stseq = (struct sctp_strseq *)((caddr_t)fwd + sizeof(*fwd)); 5670 fwd_sz -= sizeof(*fwd); 5671 { 5672 /* New method. */ 5673 int num_str, i; 5674 5675 num_str = fwd_sz / sizeof(struct sctp_strseq); 5676 for (i = 0; i < num_str; i++) { 5677 uint16_t st; 5678 unsigned char *xx; 5679 5680 /* Convert */ 5681 xx = (unsigned char *)&stseq[i]; 5682 st = ntohs(stseq[i].stream); 5683 stseq[i].stream = st; 5684 st = ntohs(stseq[i].sequence); 5685 stseq[i].sequence = st; 5686 /* now process */ 5687 if (stseq[i].stream > asoc->streamincnt) { 5688 /* 5689 * It is arguable if we should continue. 5690 * Since the peer sent bogus stream info we 5691 * may be in deep trouble.. a return may be 5692 * a better choice? 5693 */ 5694 continue; 5695 } 5696 strm = &asoc->strmin[stseq[i].stream]; 5697 if (compare_with_wrap(stseq[i].sequence, 5698 strm->last_sequence_delivered, MAX_SEQ)) { 5699 /* Update the sequence number */ 5700 strm->last_sequence_delivered = 5701 stseq[i].sequence; 5702 } 5703 /* now kick the stream the new way */ 5704 sctp_kick_prsctp_reorder_queue(stcb, strm); 5705 } 5706 } 5707 if (TAILQ_FIRST(&asoc->reasmqueue)) { 5708 /* now lets kick out and check for more fragmented delivery */ 5709 sctp_deliver_reasm_check(stcb, &stcb->asoc); 5710 } 5711 } 5712