/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>


/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a SACK into the chunk queue (if I have data to
 * send, that is, and will be sending it, for bundling).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc, calc_save;

	/*
	 * This is really set wrong with respect to a one-to-many (1-2-m)
	 * socket, since sb_cc is the count that everyone has put up. When
	 * we re-write sctp_soreceive we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return;

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
		    SCTP_MINIMAL_RWND);
		return;
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * take out what has NOT been put on the socket queue and that we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);

	if (calc == 0) {
		/* out of space */
		asoc->my_rwnd = 0;
		return;
	}
	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	calc_save = calc;

	asoc->my_rwnd = calc;
	if ((asoc->my_rwnd == 0) &&
	    (calc < stcb->asoc.my_rwnd_control_len)) {
		/*-
		 * If our rwnd == 0 && the overhead is greater than the
		 * data onqueue, we clamp the rwnd to 1. This lets us
		 * still accept inbound segments, but hopefully will shut
		 * the sender down when he finally gets the message. This
		 * hopefully will gracefully avoid discarding packets.
		 */
		asoc->my_rwnd = 1;
	}
	if (asoc->my_rwnd &&
	    (asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
		/* SWS engaged, tell peer none left */
		asoc->my_rwnd = 1;
	}
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0, calc_save = 0, result = 0;

	/*
	 * This is really set wrong with respect to a one-to-many (1-2-m)
	 * socket, since sb_cc is the count that everyone has put up. When
	 * we re-write sctp_soreceive we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
		    SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * take out what has NOT been put on the socket queue and that we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	calc_save = calc;

	result = calc;
	if ((result == 0) &&
	    (calc < stcb->asoc.my_rwnd_control_len)) {
		/*-
		 * If our rwnd == 0 && the overhead is greater than the
		 * data onqueue, we clamp the rwnd to 1. This lets us
		 * still accept inbound segments, but hopefully will shut
		 * the sender down when he finally gets the message. This
		 * hopefully will gracefully avoid discarding packets.
		 */
		result = 1;
	}
	if (asoc->my_rwnd &&
	    (asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
		/* SWS engaged, tell peer none left */
		result = 1;
	}
	return (result);
}
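
/*
 * Illustrative note (added by the editor, not part of the original source):
 * the two routines above compute the advertised rwnd as
 *   free so_rcv space - size_on_reasm_queue - size_on_all_streams
 *     - my_rwnd_control_len.
 * For example, assuming roughly 64 kB of free receive-buffer space, 12 kB
 * held on the reassembly queue, 4 kB held on the stream queues and 1 kB of
 * rwnd control overhead, about 47 kB would be advertised; the "clamp to 1"
 * branches only apply when the subtraction bottoms out at zero.
 */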

/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


/*
 * Build out our readq entry based on the incoming packet.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}

	ret = sctp_get_mbuf_for_msg(len,
	    0, M_DONTWAIT, 1, MT_DATA);

	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		memcpy(outinfo, sinfo, len);
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	SCTP_BUF_LEN(ret) = cmh->cmsg_len;
	return (ret);
}


char *
sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
    int *control_len,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	char *buf;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}
	SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
	if (buf == NULL) {
		/* No space */
		return (buf);
	}
	/* We need a CMSG header followed by the struct */
	cmh = (struct cmsghdr *)buf;
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		memcpy(outinfo, sinfo, len);
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	*control_len = len;
	return (buf);
}


/*
 * We are delivering currently from the reassembly queue. We must continue to
 * deliver until we either: 1) run out of space. 2) run out of sequential
 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;
	struct sctp_queued_to_read *control, *ctl, *ctlat;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket above is long gone or going.. */
abandon:
		asoc->fragmented_delivery_inprogress = 0;
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		while (chk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since its in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk);
			/* sa_ignore FREED_MEMORY */
			chk = TAILQ_FIRST(&asoc->reasmqueue);
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	do {
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		if (chk == NULL) {
			return;
		}
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream OR
			 * unordered
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong, either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is a EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was a EOM */
					panic("Bad chunking ??");
					return;
				}
			}
			cntDel++;
		}
		/* pull it we did it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now lets see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			ctl = TAILQ_FIRST(&strm->inqueue);
			if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
				while (ctl != NULL) {
					/* Deliver more if we can. */
					if (nxt_todel == ctl->sinfo_ssn) {
						ctlat = TAILQ_NEXT(ctl, next);
						TAILQ_REMOVE(&strm->inqueue, ctl, next);
						asoc->size_on_all_streams -= ctl->length;
						sctp_ucount_decr(asoc->cnt_on_all_streams);
						strm->last_sequence_delivered++;
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    ctl,
						    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
						ctl = ctlat;
					} else {
						break;
					}
					nxt_todel = strm->last_sequence_delivered + 1;
				}
			}
			break;
		}
		/* sa_ignore FREED_MEMORY */
		chk = TAILQ_FIRST(&asoc->reasmqueue);
	} while (chk);
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains, what to do when the socket buffer is FULL??
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone.
	 * Maybe a hybrid approach is the answer.
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
	if (compare_with_wrap(strm->last_sequence_delivered,
	    control->sinfo_ssn, MAX_SEQ) ||
	    (strm->last_sequence_delivered == control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (sizeof(uint32_t) * 3);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

		*abort_flag = 1;
		return;

	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
		control = TAILQ_FIRST(&strm->inqueue);
		while (control != NULL) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				at = TAILQ_NEXT(control, next);
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
				control = at;
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (compare_with_wrap(at->sinfo_ssn,
				    control->sinfo_ssn, MAX_SEQ)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, He sent me a duplicate str
					 * seq number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSN's have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh for now just blow
					 * away the chunk!
					 */

					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom)
						sctp_free_remote_addr(control->whoFrom);
					control->whoFrom = NULL;
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}

/*
 * Returns two things: You get the total size of the deliverable parts of the
 * first fragmented message on the reassembly queue. And you get a 1 back if
 * all of the message is ready or a 0 back if the message is still incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	while (chk) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
		chk = TAILQ_NEXT(chk, sctp_next);
	}
	return (0);
}
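
/*
 * Illustrative note (added by the editor, not part of the original source):
 * the helper above feeds the partial-delivery decision in
 * sctp_deliver_reasm_check() below.  For example, if only the first three
 * fragments of a larger message (say 3 x 1452 bytes, in consecutive TSN
 * order) sit on the reassembly queue, *t_size comes back as 4356 with a
 * return value of 0; delivery can nevertheless start once that size reaches
 * the endpoint's partial_delivery_point.
 */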

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep the first one is here and its ok to deliver
			 * but should we?
			 */
			if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
			    (tsize >= stcb->sctp_ep->partial_delivery_point))) {

				/*
				 * Yes, we setup to start reception, by
				 * backing down the TSN just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery.. but it wont know to go
		 * back and call itself again... we do that here with the
		 * goto doit_again.
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our Fragmented delivery, could be more
			 * waiting?
			 */
			goto doit_again;
		}
	}
}

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so pull it off (or as much as
 * we can). If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
	u_char last_flags;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery inprogress,
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);

				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (sizeof(uint32_t) * 3);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (sizeof(uint32_t) * 3);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
				    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq !=
				    asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(at->rec.data.TSN_seq,
		    chk->rec.data.TSN_seq, MAX_TSN)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, He sent me a duplicate str seq number */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSN's have wrapped. Maybe I should
			 * compare to TSN somehow... sigh for now just blow
			 * away the chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk);
			return;
		} else {
			last_flags = at->rec.data.rcv_flags;
			last_tsn = at->rec.data.TSN_seq;
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    prev->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    prev->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG ||
			    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do this
 * but this is doubtful. It is too bad I must worry about evil crackers sigh
 * :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(TSN_seq,
		    at->rec.data.TSN_seq, MAX_TSN)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok this guy belongs next to a guy
					 * that is NOT last, it should be a
					 * middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since its a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than new chunk but does it
			 * need to be right before it.
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, It better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}


static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
	/* Process a data chunk */
	/* struct sctp_tmit_chunk *chk; */
	struct sctp_tmit_chunk *chk;
	uint32_t tsn, gap;
	struct mbuf *dmbuf;
	int indx, the_len;
	int need_reasm_check = 0;
	uint16_t strmno, strmseq;
	struct mbuf *oper;
	struct sctp_queued_to_read *control;
	int ordered;
	uint32_t protocol_id;
	uint8_t chunk_flags;
	struct sctp_stream_reset_list *liste;

	chk = NULL;
	tsn = ntohl(ch->dp.tsn);
	chunk_flags = ch->ch.chunk_flags;
	protocol_id = ch->dp.protocol_id;
	ordered = ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0);
	if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
	}
	if (stcb == NULL) {
		return (0);
	}
	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
	if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
	    asoc->cumulative_tsn == tsn) {
		/* It is a duplicate */
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		return (0);
	}
	/* Calculate the number of TSN's between the base and this TSN */
	if (tsn >= asoc->mapping_array_base_tsn) {
		gap = tsn - asoc->mapping_array_base_tsn;
	} else {
		gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1;
	}
	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
		/* Can't hold the bit in the mapping at max array, toss it */
		return (0);
	}
	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_expand_mapping_array(asoc, gap)) {
			/* Can't expand, drop it */
			return (0);
		}
	}
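	/*
	 * Worked example (added note, not from the original source): with
	 * mapping_array_base_tsn = 0xfffffffe and tsn = 0x00000001 the wrap
	 * branch above yields gap = (MAX_TSN - 0xfffffffe) + 1 + 1 = 3, i.e.
	 * this TSN occupies the bit three positions past the base of the
	 * mapping array.
	 */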
	if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
		*high_tsn = tsn;
	}
	/* See if we have received this one already */
	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/*
	 * Check to see about the GONE flag, duplicates would cause a sack
	 * to be sent up above
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
	    ) {
		/*
		 * wait a minute, this guy is gone, there is no longer a
		 * receiver. Send peer an ABORT!
		 */
		struct mbuf *op_err;

		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
	/*
	 * Now before going further we see if there is room. If NOT then we
	 * MAY let one through only IF this TSN is the one we are waiting
	 * for on a partial delivery API.
	 */

	/* now do the tests */
	if (((asoc->cnt_on_all_streams +
	    asoc->cnt_on_reasm_queue +
	    asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) ||
	    (((int)asoc->my_rwnd) <= 0)) {
		/*
		 * When we have NO room in the rwnd we check to make sure
		 * the reader is doing its job...
		 */
		if (stcb->sctp_socket->so_rcv.sb_cc) {
			/* some to read, wake-up */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		/* now is it in the mapping array of what we have accepted? */
		if (compare_with_wrap(tsn,
		    asoc->highest_tsn_inside_map, MAX_TSN)) {

			/* Nope not in the valid range dump it */
			SCTPDBG(SCTP_DEBUG_INDATA1, "My rwnd overrun1:tsn:%lx rwnd %lu sbspace:%ld\n",
			    (u_long)tsn, (u_long)asoc->my_rwnd,
			    sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv));
			sctp_set_rwnd(stcb, asoc);
			if ((asoc->cnt_on_all_streams +
			    asoc->cnt_on_reasm_queue +
			    asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) {
				SCTP_STAT_INCR(sctps_datadropchklmt);
			} else {
				SCTP_STAT_INCR(sctps_datadroprwnd);
			}
			indx = *break_flag;
			*break_flag = 1;
			return (0);
		}
	}
	strmno = ntohs(ch->dp.stream_id);
	if (strmno >= asoc->streamincnt) {
		struct sctp_paramhdr *phdr;
		struct mbuf *mb;

		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (mb != NULL) {
			/* add some space up front so prepend will work well */
			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
			phdr = mtod(mb, struct sctp_paramhdr *);
			/*
			 * Error causes are just param's and this one has
			 * two back to back phdr, one with the error type
			 * and size, the other with the streamid and a rsvd
			 */
			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
			phdr->param_length =
			    htons(sizeof(struct sctp_paramhdr) * 2);
			phdr++;
			/* We insert the stream in the type field */
			phdr->param_type = ch->dp.stream_id;
			/* And set the length to 0 for the rsvd field */
			phdr->param_length = 0;
			sctp_queue_op_err(stcb, mb);
		}
		SCTP_STAT_INCR(sctps_badsid);
		SCTP_TCB_LOCK_ASSERT(stcb);
		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
			/* we have a new high score */
			asoc->highest_tsn_inside_map = tsn;
			if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
			}
		}
		if (tsn == (asoc->cumulative_tsn + 1)) {
			/* Update cum-ack */
			asoc->cumulative_tsn = tsn;
		}
		return (0);
	}
	/*
	 * Before we continue lets validate that we are not being fooled by
	 * an evil attacker. We can only have 4k chunks based on our TSN
	 * spread allowed by the mapping array 512 * 8 bits, so there is no
	 * way our stream sequence numbers could have wrapped. We of course
	 * only validate the FIRST fragment so the bit must be set.
	 */
	strmseq = ntohs(ch->dp.stream_sequence);
#ifdef SCTP_ASOCLOG_OF_TSNS
	SCTP_TCB_LOCK_ASSERT(stcb);
	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
		asoc->tsn_in_at = 0;
		asoc->tsn_in_wrapped = 1;
	}
	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
	asoc->tsn_in_at++;
#endif
	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
	    (TAILQ_EMPTY(&asoc->resetHead)) &&
	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
	    (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
	    strmseq, MAX_SEQ) ||
	    asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (3 * sizeof(uint32_t));
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
			ippp++;
			*ippp = tsn;
			ippp++;
			*ippp = ((strmno << 16) | strmseq);

		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
	/************************************
	 * From here down we may find ch-> invalid
	 * so its a good idea NOT to use it.
	 *************************************/

	the_len = (chk_length - sizeof(struct sctp_data_chunk));
	if (last_chunk == 0) {
		dmbuf = SCTP_M_COPYM(*m,
		    (offset + sizeof(struct sctp_data_chunk)),
		    the_len, M_DONTWAIT);
#ifdef SCTP_MBUF_LOGGING
		if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
			struct mbuf *mat;

			mat = dmbuf;
			while (mat) {
				if (SCTP_BUF_IS_EXTENDED(mat)) {
					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
				}
				mat = SCTP_BUF_NEXT(mat);
			}
		}
#endif
	} else {
		/* We can steal the last chunk */
		int l_len;

		dmbuf = *m;
		/* lop off the top part */
		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
			l_len = SCTP_BUF_LEN(dmbuf);
		} else {
			/*
			 * need to count up the size hopefully does not hit
			 * this too often :-0
			 */
			struct mbuf *lat;

			l_len = 0;
			lat = dmbuf;
			while (lat) {
				l_len += SCTP_BUF_LEN(lat);
				lat = SCTP_BUF_NEXT(lat);
			}
		}
		if (l_len > the_len) {
			/* Trim the end round bytes off too */
			m_adj(dmbuf, -(l_len - the_len));
		}
	}
	if (dmbuf == NULL) {
		SCTP_STAT_INCR(sctps_nomem);
		return (0);
	}
	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
	    asoc->fragmented_delivery_inprogress == 0 &&
	    TAILQ_EMPTY(&asoc->resetHead) &&
	    ((ordered == 0) ||
	    ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
		/* Candidate for express delivery */
		/*
		 * Its not fragmented, No PD-API is up, Nothing in the
		 * delivery queue, Its un-ordered OR ordered and the next to
		 * deliver AND nothing else is stuck on the stream queue,
		 * And there is room for it in the socket buffer. Lets just
		 * stuff it up the buffer....
		 */

		/* It would be nice to avoid this copy if we could :< */
		sctp_alloc_a_readq(stcb, control);
		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
		    protocol_id,
		    stcb->asoc.context,
		    strmno, strmseq,
		    chunk_flags,
		    dmbuf);
		if (control == NULL) {
			goto failed_express_del;
		}
		sctp_add_to_readq(stcb->sctp_ep, stcb, control, &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
			/* for ordered, bump what we delivered */
			asoc->strmin[strmno].last_sequence_delivered++;
		}
		SCTP_STAT_INCR(sctps_recvexpress);
		if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
			    SCTP_STR_LOG_FROM_EXPRS_DEL);
		}
		control = NULL;
		goto finish_express_del;
	}
failed_express_del:
	/* If we reach here this is a new chunk */
	chk = NULL;
	control = NULL;
	/* Express for fragmented delivery? */
	if ((asoc->fragmented_delivery_inprogress) &&
	    (stcb->asoc.control_pdapi) &&
	    (asoc->str_of_pdapi == strmno) &&
	    (asoc->ssn_of_pdapi == strmseq)
	    ) {
		control = stcb->asoc.control_pdapi;
		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
			/* Can't be another first? */
*/ 1798 goto failed_pdapi_express_del; 1799 } 1800 if (tsn == (control->sinfo_tsn + 1)) { 1801 /* Yep, we can add it on */ 1802 int end = 0; 1803 uint32_t cumack; 1804 1805 if (chunk_flags & SCTP_DATA_LAST_FRAG) { 1806 end = 1; 1807 } 1808 cumack = asoc->cumulative_tsn; 1809 if ((cumack + 1) == tsn) 1810 cumack = tsn; 1811 1812 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end, 1813 tsn, 1814 &stcb->sctp_socket->so_rcv)) { 1815 SCTP_PRINTF("Append fails end:%d\n", end); 1816 goto failed_pdapi_express_del; 1817 } 1818 SCTP_STAT_INCR(sctps_recvexpressm); 1819 control->sinfo_tsn = tsn; 1820 asoc->tsn_last_delivered = tsn; 1821 asoc->fragment_flags = chunk_flags; 1822 asoc->tsn_of_pdapi_last_delivered = tsn; 1823 asoc->last_flags_delivered = chunk_flags; 1824 asoc->last_strm_seq_delivered = strmseq; 1825 asoc->last_strm_no_delivered = strmno; 1826 if (end) { 1827 /* clean up the flags and such */ 1828 asoc->fragmented_delivery_inprogress = 0; 1829 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) { 1830 asoc->strmin[strmno].last_sequence_delivered++; 1831 } 1832 stcb->asoc.control_pdapi = NULL; 1833 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) { 1834 /* 1835 * There could be another message 1836 * ready 1837 */ 1838 need_reasm_check = 1; 1839 } 1840 } 1841 control = NULL; 1842 goto finish_express_del; 1843 } 1844 } 1845 failed_pdapi_express_del: 1846 control = NULL; 1847 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 1848 sctp_alloc_a_chunk(stcb, chk); 1849 if (chk == NULL) { 1850 /* No memory so we drop the chunk */ 1851 SCTP_STAT_INCR(sctps_nomem); 1852 if (last_chunk == 0) { 1853 /* we copied it, free the copy */ 1854 sctp_m_freem(dmbuf); 1855 } 1856 return (0); 1857 } 1858 chk->rec.data.TSN_seq = tsn; 1859 chk->no_fr_allowed = 0; 1860 chk->rec.data.stream_seq = strmseq; 1861 chk->rec.data.stream_number = strmno; 1862 chk->rec.data.payloadtype = protocol_id; 1863 chk->rec.data.context = stcb->asoc.context; 1864 chk->rec.data.doing_fast_retransmit = 0; 1865 chk->rec.data.rcv_flags = chunk_flags; 1866 chk->asoc = asoc; 1867 chk->send_size = the_len; 1868 chk->whoTo = net; 1869 atomic_add_int(&net->ref_count, 1); 1870 chk->data = dmbuf; 1871 } else { 1872 sctp_alloc_a_readq(stcb, control); 1873 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 1874 protocol_id, 1875 stcb->asoc.context, 1876 strmno, strmseq, 1877 chunk_flags, 1878 dmbuf); 1879 if (control == NULL) { 1880 /* No memory so we drop the chunk */ 1881 SCTP_STAT_INCR(sctps_nomem); 1882 if (last_chunk == 0) { 1883 /* we copied it, free the copy */ 1884 sctp_m_freem(dmbuf); 1885 } 1886 return (0); 1887 } 1888 control->length = the_len; 1889 } 1890 1891 /* Mark it as received */ 1892 /* Now queue it where it belongs */ 1893 if (control != NULL) { 1894 /* First a sanity check */ 1895 if (asoc->fragmented_delivery_inprogress) { 1896 /* 1897 * Ok, we have a fragmented delivery in progress if 1898 * this chunk is next to deliver OR belongs in our 1899 * view to the reassembly, the peer is evil or 1900 * broken. 
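 *
 * Worked example: with tsn_last_delivered = 1000 and a partial
 * delivery still open, a complete (non-fragmented) message that
 * shows up claiming TSN 1001, or any TSN that
 * sctp_does_tsn_belong_to_reasm() places inside the pending
 * fragment run, cannot be legitimate, and the association is
 * aborted with a protocol-violation cause below.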
1901 */ 1902 uint32_t estimate_tsn; 1903 1904 estimate_tsn = asoc->tsn_last_delivered + 1; 1905 if (TAILQ_EMPTY(&asoc->reasmqueue) && 1906 (estimate_tsn == control->sinfo_tsn)) { 1907 /* Evil/Broke peer */ 1908 sctp_m_freem(control->data); 1909 control->data = NULL; 1910 if (control->whoFrom) { 1911 sctp_free_remote_addr(control->whoFrom); 1912 control->whoFrom = NULL; 1913 } 1914 sctp_free_a_readq(stcb, control); 1915 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1916 0, M_DONTWAIT, 1, MT_DATA); 1917 if (oper) { 1918 struct sctp_paramhdr *ph; 1919 uint32_t *ippp; 1920 1921 SCTP_BUF_LEN(oper) = 1922 sizeof(struct sctp_paramhdr) + 1923 (3 * sizeof(uint32_t)); 1924 ph = mtod(oper, struct sctp_paramhdr *); 1925 ph->param_type = 1926 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1927 ph->param_length = htons(SCTP_BUF_LEN(oper)); 1928 ippp = (uint32_t *) (ph + 1); 1929 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15); 1930 ippp++; 1931 *ippp = tsn; 1932 ippp++; 1933 *ippp = ((strmno << 16) | strmseq); 1934 } 1935 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15; 1936 sctp_abort_an_association(stcb->sctp_ep, stcb, 1937 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED); 1938 1939 *abort_flag = 1; 1940 return (0); 1941 } else { 1942 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) { 1943 sctp_m_freem(control->data); 1944 control->data = NULL; 1945 if (control->whoFrom) { 1946 sctp_free_remote_addr(control->whoFrom); 1947 control->whoFrom = NULL; 1948 } 1949 sctp_free_a_readq(stcb, control); 1950 1951 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1952 0, M_DONTWAIT, 1, MT_DATA); 1953 if (oper) { 1954 struct sctp_paramhdr *ph; 1955 uint32_t *ippp; 1956 1957 SCTP_BUF_LEN(oper) = 1958 sizeof(struct sctp_paramhdr) + 1959 (3 * sizeof(uint32_t)); 1960 ph = mtod(oper, 1961 struct sctp_paramhdr *); 1962 ph->param_type = 1963 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1964 ph->param_length = 1965 htons(SCTP_BUF_LEN(oper)); 1966 ippp = (uint32_t *) (ph + 1); 1967 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16); 1968 ippp++; 1969 *ippp = tsn; 1970 ippp++; 1971 *ippp = ((strmno << 16) | strmseq); 1972 } 1973 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16; 1974 sctp_abort_an_association(stcb->sctp_ep, 1975 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED); 1976 1977 *abort_flag = 1; 1978 return (0); 1979 } 1980 } 1981 } else { 1982 /* No PDAPI running */ 1983 if (!TAILQ_EMPTY(&asoc->reasmqueue)) { 1984 /* 1985 * Reassembly queue is NOT empty validate 1986 * that this tsn does not need to be in 1987 * reasembly queue. If it does then our peer 1988 * is broken or evil. 
1989 */ 1990 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) { 1991 sctp_m_freem(control->data); 1992 control->data = NULL; 1993 if (control->whoFrom) { 1994 sctp_free_remote_addr(control->whoFrom); 1995 control->whoFrom = NULL; 1996 } 1997 sctp_free_a_readq(stcb, control); 1998 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1999 0, M_DONTWAIT, 1, MT_DATA); 2000 if (oper) { 2001 struct sctp_paramhdr *ph; 2002 uint32_t *ippp; 2003 2004 SCTP_BUF_LEN(oper) = 2005 sizeof(struct sctp_paramhdr) + 2006 (3 * sizeof(uint32_t)); 2007 ph = mtod(oper, 2008 struct sctp_paramhdr *); 2009 ph->param_type = 2010 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 2011 ph->param_length = 2012 htons(SCTP_BUF_LEN(oper)); 2013 ippp = (uint32_t *) (ph + 1); 2014 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17); 2015 ippp++; 2016 *ippp = tsn; 2017 ippp++; 2018 *ippp = ((strmno << 16) | strmseq); 2019 } 2020 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17; 2021 sctp_abort_an_association(stcb->sctp_ep, 2022 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED); 2023 2024 *abort_flag = 1; 2025 return (0); 2026 } 2027 } 2028 } 2029 /* ok, if we reach here we have passed the sanity checks */ 2030 if (chunk_flags & SCTP_DATA_UNORDERED) { 2031 /* queue directly into socket buffer */ 2032 sctp_add_to_readq(stcb->sctp_ep, stcb, 2033 control, 2034 &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED); 2035 } else { 2036 /* 2037 * Special check for when streams are resetting. We 2038 * could be more smart about this and check the 2039 * actual stream to see if it is not being reset.. 2040 * that way we would not create a HOLB when amongst 2041 * streams being reset and those not being reset. 2042 * 2043 * We take complete messages that have a stream reset 2044 * intervening (aka the TSN is after where our 2045 * cum-ack needs to be) off and put them on a 2046 * pending_reply_queue. The reassembly ones we do 2047 * not have to worry about since they are all sorted 2048 * and proceessed by TSN order. It is only the 2049 * singletons I must worry about. 2050 */ 2051 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2052 ((compare_with_wrap(tsn, liste->tsn, MAX_TSN))) 2053 ) { 2054 /* 2055 * yep its past where we need to reset... go 2056 * ahead and queue it. 2057 */ 2058 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) { 2059 /* first one on */ 2060 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2061 } else { 2062 struct sctp_queued_to_read *ctlOn; 2063 unsigned char inserted = 0; 2064 2065 ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue); 2066 while (ctlOn) { 2067 if (compare_with_wrap(control->sinfo_tsn, 2068 ctlOn->sinfo_tsn, MAX_TSN)) { 2069 ctlOn = TAILQ_NEXT(ctlOn, next); 2070 } else { 2071 /* found it */ 2072 TAILQ_INSERT_BEFORE(ctlOn, control, next); 2073 inserted = 1; 2074 break; 2075 } 2076 } 2077 if (inserted == 0) { 2078 /* 2079 * must be put at end, use 2080 * prevP (all setup from 2081 * loop) to setup nextP. 2082 */ 2083 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2084 } 2085 } 2086 } else { 2087 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag); 2088 if (*abort_flag) { 2089 return (0); 2090 } 2091 } 2092 } 2093 } else { 2094 /* Into the re-assembly queue */ 2095 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag); 2096 if (*abort_flag) { 2097 /* 2098 * the assoc is now gone and chk was put onto the 2099 * reasm queue, which has all been freed. 
2100 */ 2101 *m = NULL; 2102 return (0); 2103 } 2104 } 2105 finish_express_del: 2106 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) { 2107 /* we have a new high score */ 2108 asoc->highest_tsn_inside_map = tsn; 2109 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 2110 sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 2111 } 2112 } 2113 if (tsn == (asoc->cumulative_tsn + 1)) { 2114 /* Update cum-ack */ 2115 asoc->cumulative_tsn = tsn; 2116 } 2117 if (last_chunk) { 2118 *m = NULL; 2119 } 2120 if (ordered) { 2121 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); 2122 } else { 2123 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); 2124 } 2125 SCTP_STAT_INCR(sctps_recvdata); 2126 /* Set it present please */ 2127 if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) { 2128 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN); 2129 } 2130 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 2131 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2132 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); 2133 } 2134 SCTP_TCB_LOCK_ASSERT(stcb); 2135 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); 2136 /* check the special flag for stream resets */ 2137 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2138 ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) || 2139 (asoc->cumulative_tsn == liste->tsn)) 2140 ) { 2141 /* 2142 * we have finished working through the backlogged TSN's now 2143 * time to reset streams. 1: call reset function. 2: free 2144 * pending_reply space 3: distribute any chunks in 2145 * pending_reply_queue. 2146 */ 2147 struct sctp_queued_to_read *ctl; 2148 2149 sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams); 2150 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); 2151 SCTP_FREE(liste, SCTP_M_STRESET); 2152 /* sa_ignore FREED_MEMORY */ 2153 liste = TAILQ_FIRST(&asoc->resetHead); 2154 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2155 if (ctl && (liste == NULL)) { 2156 /* All can be removed */ 2157 while (ctl) { 2158 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 2159 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); 2160 if (*abort_flag) { 2161 return (0); 2162 } 2163 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2164 } 2165 } else if (ctl) { 2166 /* more than one in queue */ 2167 while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) { 2168 /* 2169 * if ctl->sinfo_tsn is <= liste->tsn we can 2170 * process it which is the NOT of 2171 * ctl->sinfo_tsn > liste->tsn 2172 */ 2173 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 2174 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); 2175 if (*abort_flag) { 2176 return (0); 2177 } 2178 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2179 } 2180 } 2181 /* 2182 * Now service re-assembly to pick up anything that has been 2183 * held on reassembly queue? 2184 */ 2185 sctp_deliver_reasm_check(stcb, asoc); 2186 need_reasm_check = 0; 2187 } 2188 if (need_reasm_check) { 2189 /* Another one waits ? 
*/ 2190 sctp_deliver_reasm_check(stcb, asoc); 2191 } 2192 return (1); 2193 } 2194 2195 int8_t sctp_map_lookup_tab[256] = { 2196 -1, 0, -1, 1, -1, 0, -1, 2, 2197 -1, 0, -1, 1, -1, 0, -1, 3, 2198 -1, 0, -1, 1, -1, 0, -1, 2, 2199 -1, 0, -1, 1, -1, 0, -1, 4, 2200 -1, 0, -1, 1, -1, 0, -1, 2, 2201 -1, 0, -1, 1, -1, 0, -1, 3, 2202 -1, 0, -1, 1, -1, 0, -1, 2, 2203 -1, 0, -1, 1, -1, 0, -1, 5, 2204 -1, 0, -1, 1, -1, 0, -1, 2, 2205 -1, 0, -1, 1, -1, 0, -1, 3, 2206 -1, 0, -1, 1, -1, 0, -1, 2, 2207 -1, 0, -1, 1, -1, 0, -1, 4, 2208 -1, 0, -1, 1, -1, 0, -1, 2, 2209 -1, 0, -1, 1, -1, 0, -1, 3, 2210 -1, 0, -1, 1, -1, 0, -1, 2, 2211 -1, 0, -1, 1, -1, 0, -1, 6, 2212 -1, 0, -1, 1, -1, 0, -1, 2, 2213 -1, 0, -1, 1, -1, 0, -1, 3, 2214 -1, 0, -1, 1, -1, 0, -1, 2, 2215 -1, 0, -1, 1, -1, 0, -1, 4, 2216 -1, 0, -1, 1, -1, 0, -1, 2, 2217 -1, 0, -1, 1, -1, 0, -1, 3, 2218 -1, 0, -1, 1, -1, 0, -1, 2, 2219 -1, 0, -1, 1, -1, 0, -1, 5, 2220 -1, 0, -1, 1, -1, 0, -1, 2, 2221 -1, 0, -1, 1, -1, 0, -1, 3, 2222 -1, 0, -1, 1, -1, 0, -1, 2, 2223 -1, 0, -1, 1, -1, 0, -1, 4, 2224 -1, 0, -1, 1, -1, 0, -1, 2, 2225 -1, 0, -1, 1, -1, 0, -1, 3, 2226 -1, 0, -1, 1, -1, 0, -1, 2, 2227 -1, 0, -1, 1, -1, 0, -1, 7, 2228 }; 2229 2230 2231 void 2232 sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag) 2233 { 2234 /* 2235 * Now we also need to check the mapping array in a couple of ways. 2236 * 1) Did we move the cum-ack point? 2237 */ 2238 struct sctp_association *asoc; 2239 int i, at; 2240 int last_all_ones = 0; 2241 int slide_from, slide_end, lgap, distance; 2242 uint32_t old_cumack, old_base, old_highest; 2243 unsigned char aux_array[64]; 2244 2245 2246 asoc = &stcb->asoc; 2247 at = 0; 2248 2249 old_cumack = asoc->cumulative_tsn; 2250 old_base = asoc->mapping_array_base_tsn; 2251 old_highest = asoc->highest_tsn_inside_map; 2252 if (asoc->mapping_array_size < 64) 2253 memcpy(aux_array, asoc->mapping_array, 2254 asoc->mapping_array_size); 2255 else 2256 memcpy(aux_array, asoc->mapping_array, 64); 2257 2258 /* 2259 * We could probably improve this a small bit by calculating the 2260 * offset of the current cum-ack as the starting point. 
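 *
 * The lookup table above stores, for each byte value, one less
 * than the number of contiguous 1 bits counted up from bit 0:
 * 0x01 -> 0, 0x03 -> 1, 0x07 -> 2, ... 0xff -> 7, and -1 whenever
 * bit 0 is clear.  A byte-at-a-time sketch of the same quantity,
 * written with a hypothetical helper purely for illustration:
 *
 *	static int contiguous_ones_minus_one(uint8_t b)
 *	{
 *		int n = 0;
 *
 *		while (b & 0x01) {
 *			n++;
 *			b >>= 1;
 *		}
 *		return (n - 1);
 *	}
 *
 * e.g. a leading mapping byte of 0x17 (bits 0-2 set, bit 3 clear)
 * yields 2 and leaves the cumulative TSN at
 * mapping_array_base_tsn + 2.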
2261 */ 2262 at = 0; 2263 for (i = 0; i < stcb->asoc.mapping_array_size; i++) { 2264 2265 if (asoc->mapping_array[i] == 0xff) { 2266 at += 8; 2267 last_all_ones = 1; 2268 } else { 2269 /* there is a 0 bit */ 2270 at += sctp_map_lookup_tab[asoc->mapping_array[i]]; 2271 last_all_ones = 0; 2272 break; 2273 } 2274 } 2275 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - last_all_ones); 2276 /* at is one off, since in the table a embedded -1 is present */ 2277 at++; 2278 2279 if (compare_with_wrap(asoc->cumulative_tsn, 2280 asoc->highest_tsn_inside_map, 2281 MAX_TSN)) { 2282 #ifdef INVARIANTS 2283 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map", 2284 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2285 #else 2286 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n", 2287 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2288 asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2289 #endif 2290 } 2291 if ((asoc->cumulative_tsn == asoc->highest_tsn_inside_map) && (at >= 8)) { 2292 /* The complete array was completed by a single FR */ 2293 /* higest becomes the cum-ack */ 2294 int clr; 2295 2296 asoc->cumulative_tsn = asoc->highest_tsn_inside_map; 2297 /* clear the array */ 2298 clr = (at >> 3) + 1; 2299 if (clr > asoc->mapping_array_size) { 2300 clr = asoc->mapping_array_size; 2301 } 2302 memset(asoc->mapping_array, 0, clr); 2303 /* base becomes one ahead of the cum-ack */ 2304 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; 2305 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 2306 sctp_log_map(old_base, old_cumack, old_highest, 2307 SCTP_MAP_PREPARE_SLIDE); 2308 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2309 asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED); 2310 } 2311 } else if (at >= 8) { 2312 /* we can slide the mapping array down */ 2313 /* Calculate the new byte postion we can move down */ 2314 slide_from = at >> 3; 2315 /* 2316 * now calculate the ceiling of the move using our highest 2317 * TSN value 2318 */ 2319 if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) { 2320 lgap = asoc->highest_tsn_inside_map - 2321 asoc->mapping_array_base_tsn; 2322 } else { 2323 lgap = (MAX_TSN - asoc->mapping_array_base_tsn) + 2324 asoc->highest_tsn_inside_map + 1; 2325 } 2326 slide_end = lgap >> 3; 2327 if (slide_end < slide_from) { 2328 panic("impossible slide"); 2329 } 2330 distance = (slide_end - slide_from) + 1; 2331 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 2332 sctp_log_map(old_base, old_cumack, old_highest, 2333 SCTP_MAP_PREPARE_SLIDE); 2334 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end, 2335 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM); 2336 } 2337 if (distance + slide_from > asoc->mapping_array_size || 2338 distance < 0) { 2339 /* 2340 * Here we do NOT slide forward the array so that 2341 * hopefully when more data comes in to fill it up 2342 * we will be able to slide it forward. 
Really I 2343 * don't think this should happen :-0 2344 */ 2345 2346 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 2347 sctp_log_map((uint32_t) distance, (uint32_t) slide_from, 2348 (uint32_t) asoc->mapping_array_size, 2349 SCTP_MAP_SLIDE_NONE); 2350 } 2351 } else { 2352 int ii; 2353 2354 for (ii = 0; ii < distance; ii++) { 2355 asoc->mapping_array[ii] = 2356 asoc->mapping_array[slide_from + ii]; 2357 } 2358 for (ii = distance; ii <= slide_end; ii++) { 2359 asoc->mapping_array[ii] = 0; 2360 } 2361 asoc->mapping_array_base_tsn += (slide_from << 3); 2362 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 2363 sctp_log_map(asoc->mapping_array_base_tsn, 2364 asoc->cumulative_tsn, asoc->highest_tsn_inside_map, 2365 SCTP_MAP_SLIDE_RESULT); 2366 } 2367 } 2368 } 2369 /* 2370 * Now we need to see if we need to queue a sack or just start the 2371 * timer (if allowed). 2372 */ 2373 if (ok_to_sack) { 2374 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2375 /* 2376 * Ok special case, in SHUTDOWN-SENT case. here we 2377 * maker sure SACK timer is off and instead send a 2378 * SHUTDOWN and a SACK 2379 */ 2380 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2381 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 2382 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18); 2383 } 2384 sctp_send_shutdown(stcb, stcb->asoc.primary_destination); 2385 sctp_send_sack(stcb); 2386 } else { 2387 int is_a_gap; 2388 2389 /* is there a gap now ? */ 2390 is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map, 2391 stcb->asoc.cumulative_tsn, MAX_TSN); 2392 2393 /* 2394 * CMT DAC algorithm: increase number of packets 2395 * received since last ack 2396 */ 2397 stcb->asoc.cmt_dac_pkts_rcvd++; 2398 2399 if ((stcb->asoc.send_sack == 1) || /* We need to send a 2400 * SACK */ 2401 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no 2402 * longer is one */ 2403 (stcb->asoc.numduptsns) || /* we have dup's */ 2404 (is_a_gap) || /* is still a gap */ 2405 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */ 2406 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */ 2407 ) { 2408 2409 if ((sctp_cmt_on_off) && (sctp_cmt_use_dac) && 2410 (stcb->asoc.send_sack == 0) && 2411 (stcb->asoc.numduptsns == 0) && 2412 (stcb->asoc.delayed_ack) && 2413 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) { 2414 2415 /* 2416 * CMT DAC algorithm: With CMT, 2417 * delay acks even in the face of 2418 * 2419 * reordering. Therefore, if acks that 2420 * do not have to be sent because of 2421 * the above reasons, will be 2422 * delayed. That is, acks that would 2423 * have been sent due to gap reports 2424 * will be delayed with DAC. Start 2425 * the delayed ack timer. 2426 */ 2427 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2428 stcb->sctp_ep, stcb, NULL); 2429 } else { 2430 /* 2431 * Ok we must build a SACK since the 2432 * timer is pending, we got our 2433 * first packet OR there are gaps or 2434 * duplicates. 
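 *
 * For readability, the test that brought us into this arm boils
 * down to roughly the following predicate (spelled out here only
 * as a reading aid, it is not compiled):
 *
 *	send_now = (asoc->send_sack == 1) ||
 *	    (was_a_gap && (is_a_gap == 0)) ||
 *	    (asoc->numduptsns != 0) ||
 *	    is_a_gap ||
 *	    (asoc->delayed_ack == 0) ||
 *	    (asoc->data_pkts_seen >= asoc->sack_freq);
 *
 * with the CMT DAC case above being the one exception that still
 * defers the ack behind the delayed-ack timer.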
2435 */ 2436 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 2437 sctp_send_sack(stcb); 2438 } 2439 } else { 2440 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2441 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2442 stcb->sctp_ep, stcb, NULL); 2443 } 2444 } 2445 } 2446 } 2447 } 2448 2449 void 2450 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc) 2451 { 2452 struct sctp_tmit_chunk *chk; 2453 uint32_t tsize; 2454 uint16_t nxt_todel; 2455 2456 if (asoc->fragmented_delivery_inprogress) { 2457 sctp_service_reassembly(stcb, asoc); 2458 } 2459 /* Can we proceed further, i.e. the PD-API is complete */ 2460 if (asoc->fragmented_delivery_inprogress) { 2461 /* no */ 2462 return; 2463 } 2464 /* 2465 * Now is there some other chunk I can deliver from the reassembly 2466 * queue. 2467 */ 2468 doit_again: 2469 chk = TAILQ_FIRST(&asoc->reasmqueue); 2470 if (chk == NULL) { 2471 asoc->size_on_reasm_queue = 0; 2472 asoc->cnt_on_reasm_queue = 0; 2473 return; 2474 } 2475 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1; 2476 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) && 2477 ((nxt_todel == chk->rec.data.stream_seq) || 2478 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) { 2479 /* 2480 * Yep the first one is here. We setup to start reception, 2481 * by backing down the TSN just in case we can't deliver. 2482 */ 2483 2484 /* 2485 * Before we start though either all of the message should 2486 * be here or 1/4 the socket buffer max or nothing on the 2487 * delivery queue and something can be delivered. 2488 */ 2489 if ((sctp_is_all_msg_on_reasm(asoc, &tsize) || 2490 (tsize >= stcb->sctp_ep->partial_delivery_point))) { 2491 asoc->fragmented_delivery_inprogress = 1; 2492 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1; 2493 asoc->str_of_pdapi = chk->rec.data.stream_number; 2494 asoc->ssn_of_pdapi = chk->rec.data.stream_seq; 2495 asoc->pdapi_ppid = chk->rec.data.payloadtype; 2496 asoc->fragment_flags = chk->rec.data.rcv_flags; 2497 sctp_service_reassembly(stcb, asoc); 2498 if (asoc->fragmented_delivery_inprogress == 0) { 2499 goto doit_again; 2500 } 2501 } 2502 } 2503 } 2504 2505 int 2506 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, 2507 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2508 struct sctp_nets *net, uint32_t * high_tsn) 2509 { 2510 struct sctp_data_chunk *ch, chunk_buf; 2511 struct sctp_association *asoc; 2512 int num_chunks = 0; /* number of control chunks processed */ 2513 int stop_proc = 0; 2514 int chk_length, break_flag, last_chunk; 2515 int abort_flag = 0, was_a_gap = 0; 2516 struct mbuf *m; 2517 2518 /* set the rwnd */ 2519 sctp_set_rwnd(stcb, &stcb->asoc); 2520 2521 m = *mm; 2522 SCTP_TCB_LOCK_ASSERT(stcb); 2523 asoc = &stcb->asoc; 2524 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map, 2525 stcb->asoc.cumulative_tsn, MAX_TSN)) { 2526 /* there was a gap before this data was processed */ 2527 was_a_gap = 1; 2528 } 2529 /* 2530 * setup where we got the last DATA packet from for any SACK that 2531 * may need to go out. Don't bump the net. This is done ONLY when a 2532 * chunk is assigned. 2533 */ 2534 asoc->last_data_chunk_from = net; 2535 2536 /*- 2537 * Now before we proceed we must figure out if this is a wasted 2538 * cluster... i.e. it is a small packet sent in and yet the driver 2539 * underneath allocated a full cluster for it. If so we must copy it 2540 * to a smaller mbuf and free up the cluster mbuf. This will help 2541 * with cluster starvation. 
Note for __Panda__ we don't do this 2542 * since it has clusters all the way down to 64 bytes. 2543 */ 2544 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) { 2545 /* we only handle mbufs that are singletons.. not chains */ 2546 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA); 2547 if (m) { 2548 /* ok lets see if we can copy the data up */ 2549 caddr_t *from, *to; 2550 2551 /* get the pointers and copy */ 2552 to = mtod(m, caddr_t *); 2553 from = mtod((*mm), caddr_t *); 2554 memcpy(to, from, SCTP_BUF_LEN((*mm))); 2555 /* copy the length and free up the old */ 2556 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm)); 2557 sctp_m_freem(*mm); 2558 /* sucess, back copy */ 2559 *mm = m; 2560 } else { 2561 /* We are in trouble in the mbuf world .. yikes */ 2562 m = *mm; 2563 } 2564 } 2565 /* get pointer to the first chunk header */ 2566 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset, 2567 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf); 2568 if (ch == NULL) { 2569 return (1); 2570 } 2571 /* 2572 * process all DATA chunks... 2573 */ 2574 *high_tsn = asoc->cumulative_tsn; 2575 break_flag = 0; 2576 asoc->data_pkts_seen++; 2577 while (stop_proc == 0) { 2578 /* validate chunk length */ 2579 chk_length = ntohs(ch->ch.chunk_length); 2580 if (length - *offset < chk_length) { 2581 /* all done, mutulated chunk */ 2582 stop_proc = 1; 2583 break; 2584 } 2585 if (ch->ch.chunk_type == SCTP_DATA) { 2586 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) { 2587 /* 2588 * Need to send an abort since we had a 2589 * invalid data chunk. 2590 */ 2591 struct mbuf *op_err; 2592 2593 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)), 2594 0, M_DONTWAIT, 1, MT_DATA); 2595 2596 if (op_err) { 2597 struct sctp_paramhdr *ph; 2598 uint32_t *ippp; 2599 2600 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) + 2601 (2 * sizeof(uint32_t)); 2602 ph = mtod(op_err, struct sctp_paramhdr *); 2603 ph->param_type = 2604 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 2605 ph->param_length = htons(SCTP_BUF_LEN(op_err)); 2606 ippp = (uint32_t *) (ph + 1); 2607 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19); 2608 ippp++; 2609 *ippp = asoc->cumulative_tsn; 2610 2611 } 2612 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19; 2613 sctp_abort_association(inp, stcb, m, iphlen, sh, 2614 op_err, 0); 2615 return (2); 2616 } 2617 #ifdef SCTP_AUDITING_ENABLED 2618 sctp_audit_log(0xB1, 0); 2619 #endif 2620 if (SCTP_SIZE32(chk_length) == (length - *offset)) { 2621 last_chunk = 1; 2622 } else { 2623 last_chunk = 0; 2624 } 2625 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch, 2626 chk_length, net, high_tsn, &abort_flag, &break_flag, 2627 last_chunk)) { 2628 num_chunks++; 2629 } 2630 if (abort_flag) 2631 return (2); 2632 2633 if (break_flag) { 2634 /* 2635 * Set because of out of rwnd space and no 2636 * drop rep space left. 
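 *
 * When that happens the rest of the DATA chunks in this packet are
 * simply not walked; the break_flag handling after the loop calls
 * sctp_send_packet_dropped() so the peer learns about the overrun
 * and can retransmit the dropped chunks promptly instead of
 * waiting for a timeout.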
2637 */ 2638 stop_proc = 1; 2639 break; 2640 } 2641 } else { 2642 /* not a data chunk in the data region */ 2643 switch (ch->ch.chunk_type) { 2644 case SCTP_INITIATION: 2645 case SCTP_INITIATION_ACK: 2646 case SCTP_SELECTIVE_ACK: 2647 case SCTP_HEARTBEAT_REQUEST: 2648 case SCTP_HEARTBEAT_ACK: 2649 case SCTP_ABORT_ASSOCIATION: 2650 case SCTP_SHUTDOWN: 2651 case SCTP_SHUTDOWN_ACK: 2652 case SCTP_OPERATION_ERROR: 2653 case SCTP_COOKIE_ECHO: 2654 case SCTP_COOKIE_ACK: 2655 case SCTP_ECN_ECHO: 2656 case SCTP_ECN_CWR: 2657 case SCTP_SHUTDOWN_COMPLETE: 2658 case SCTP_AUTHENTICATION: 2659 case SCTP_ASCONF_ACK: 2660 case SCTP_PACKET_DROPPED: 2661 case SCTP_STREAM_RESET: 2662 case SCTP_FORWARD_CUM_TSN: 2663 case SCTP_ASCONF: 2664 /* 2665 * Now, what do we do with KNOWN chunks that 2666 * are NOT in the right place? 2667 * 2668 * For now, I do nothing but ignore them. We 2669 * may later want to add sysctl stuff to 2670 * switch out and do either an ABORT() or 2671 * possibly process them. 2672 */ 2673 if (sctp_strict_data_order) { 2674 struct mbuf *op_err; 2675 2676 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION); 2677 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0); 2678 return (2); 2679 } 2680 break; 2681 default: 2682 /* unknown chunk type, use bit rules */ 2683 if (ch->ch.chunk_type & 0x40) { 2684 /* Add a error report to the queue */ 2685 struct mbuf *merr; 2686 struct sctp_paramhdr *phd; 2687 2688 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA); 2689 if (merr) { 2690 phd = mtod(merr, struct sctp_paramhdr *); 2691 /* 2692 * We cheat and use param 2693 * type since we did not 2694 * bother to define a error 2695 * cause struct. They are 2696 * the same basic format 2697 * with different names. 2698 */ 2699 phd->param_type = 2700 htons(SCTP_CAUSE_UNRECOG_CHUNK); 2701 phd->param_length = 2702 htons(chk_length + sizeof(*phd)); 2703 SCTP_BUF_LEN(merr) = sizeof(*phd); 2704 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, 2705 SCTP_SIZE32(chk_length), 2706 M_DONTWAIT); 2707 if (SCTP_BUF_NEXT(merr)) { 2708 sctp_queue_op_err(stcb, merr); 2709 } else { 2710 sctp_m_freem(merr); 2711 } 2712 } 2713 } 2714 if ((ch->ch.chunk_type & 0x80) == 0) { 2715 /* discard the rest of this packet */ 2716 stop_proc = 1; 2717 } /* else skip this bad chunk and 2718 * continue... */ 2719 break; 2720 }; /* switch of chunk type */ 2721 } 2722 *offset += SCTP_SIZE32(chk_length); 2723 if ((*offset >= length) || stop_proc) { 2724 /* no more data left in the mbuf chain */ 2725 stop_proc = 1; 2726 continue; 2727 } 2728 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset, 2729 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf); 2730 if (ch == NULL) { 2731 *offset = length; 2732 stop_proc = 1; 2733 break; 2734 2735 } 2736 } /* while */ 2737 if (break_flag) { 2738 /* 2739 * we need to report rwnd overrun drops. 2740 */ 2741 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0); 2742 } 2743 if (num_chunks) { 2744 /* 2745 * Did we get data, if so update the time for auto-close and 2746 * give peer credit for being alive. 
2747 */ 2748 SCTP_STAT_INCR(sctps_recvpktwithdata); 2749 if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) { 2750 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 2751 stcb->asoc.overall_error_count, 2752 0, 2753 SCTP_FROM_SCTP_INDATA, 2754 __LINE__); 2755 } 2756 stcb->asoc.overall_error_count = 0; 2757 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd); 2758 } 2759 /* now service all of the reassm queue if needed */ 2760 if (!(TAILQ_EMPTY(&asoc->reasmqueue))) 2761 sctp_service_queues(stcb, asoc); 2762 2763 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2764 /* Assure that we ack right away */ 2765 stcb->asoc.send_sack = 1; 2766 } 2767 /* Start a sack timer or QUEUE a SACK for sending */ 2768 if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) && 2769 (stcb->asoc.mapping_array[0] != 0xff)) { 2770 if ((stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) || 2771 (stcb->asoc.delayed_ack == 0) || 2772 (stcb->asoc.send_sack == 1)) { 2773 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2774 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 2775 } 2776 sctp_send_sack(stcb); 2777 } else { 2778 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2779 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2780 stcb->sctp_ep, stcb, NULL); 2781 } 2782 } 2783 } else { 2784 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag); 2785 } 2786 if (abort_flag) 2787 return (2); 2788 2789 return (0); 2790 } 2791 2792 static void 2793 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc, 2794 struct sctp_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked, 2795 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack, 2796 int num_seg, int *ecn_seg_sums) 2797 { 2798 /************************************************/ 2799 /* process fragments and update sendqueue */ 2800 /************************************************/ 2801 struct sctp_sack *sack; 2802 struct sctp_gap_ack_block *frag, block; 2803 struct sctp_tmit_chunk *tp1; 2804 int i; 2805 unsigned int j; 2806 int num_frs = 0; 2807 2808 uint16_t frag_strt, frag_end, primary_flag_set; 2809 u_long last_frag_high; 2810 2811 /* 2812 * @@@ JRI : TODO: This flag is not used anywhere .. remove? 
2813 */ 2814 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) { 2815 primary_flag_set = 1; 2816 } else { 2817 primary_flag_set = 0; 2818 } 2819 sack = &ch->sack; 2820 2821 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset, 2822 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block); 2823 *offset += sizeof(block); 2824 if (frag == NULL) { 2825 return; 2826 } 2827 tp1 = NULL; 2828 last_frag_high = 0; 2829 for (i = 0; i < num_seg; i++) { 2830 frag_strt = ntohs(frag->start); 2831 frag_end = ntohs(frag->end); 2832 /* some sanity checks on the fargment offsets */ 2833 if (frag_strt > frag_end) { 2834 /* this one is malformed, skip */ 2835 frag++; 2836 continue; 2837 } 2838 if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked, 2839 MAX_TSN)) 2840 *biggest_tsn_acked = frag_end + last_tsn; 2841 2842 /* mark acked dgs and find out the highestTSN being acked */ 2843 if (tp1 == NULL) { 2844 tp1 = TAILQ_FIRST(&asoc->sent_queue); 2845 2846 /* save the locations of the last frags */ 2847 last_frag_high = frag_end + last_tsn; 2848 } else { 2849 /* 2850 * now lets see if we need to reset the queue due to 2851 * a out-of-order SACK fragment 2852 */ 2853 if (compare_with_wrap(frag_strt + last_tsn, 2854 last_frag_high, MAX_TSN)) { 2855 /* 2856 * if the new frag starts after the last TSN 2857 * frag covered, we are ok and this one is 2858 * beyond the last one 2859 */ 2860 ; 2861 } else { 2862 /* 2863 * ok, they have reset us, so we need to 2864 * reset the queue this will cause extra 2865 * hunting but hey, they chose the 2866 * performance hit when they failed to order 2867 * there gaps.. 2868 */ 2869 tp1 = TAILQ_FIRST(&asoc->sent_queue); 2870 } 2871 last_frag_high = frag_end + last_tsn; 2872 } 2873 for (j = frag_strt + last_tsn; j <= frag_end + last_tsn; j++) { 2874 while (tp1) { 2875 if (tp1->rec.data.doing_fast_retransmit) 2876 num_frs++; 2877 2878 /* 2879 * CMT: CUCv2 algorithm. For each TSN being 2880 * processed from the sent queue, track the 2881 * next expected pseudo-cumack, or 2882 * rtx_pseudo_cumack, if required. Separate 2883 * cumack trackers for first transmissions, 2884 * and retransmissions. 2885 */ 2886 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) && 2887 (tp1->snd_count == 1)) { 2888 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq; 2889 tp1->whoTo->find_pseudo_cumack = 0; 2890 } 2891 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) && 2892 (tp1->snd_count > 1)) { 2893 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq; 2894 tp1->whoTo->find_rtx_pseudo_cumack = 0; 2895 } 2896 if (tp1->rec.data.TSN_seq == j) { 2897 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 2898 /* 2899 * must be held until 2900 * cum-ack passes 2901 */ 2902 /* 2903 * ECN Nonce: Add the nonce 2904 * value to the sender's 2905 * nonce sum 2906 */ 2907 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 2908 /*- 2909 * If it is less than RESEND, it is 2910 * now no-longer in flight. 2911 * Higher values may already be set 2912 * via previous Gap Ack Blocks... 2913 * i.e. ACKED or RESEND. 2914 */ 2915 if (compare_with_wrap(tp1->rec.data.TSN_seq, 2916 *biggest_newly_acked_tsn, MAX_TSN)) { 2917 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq; 2918 } 2919 /* 2920 * CMT: SFR algo 2921 * (and HTNA) - set 2922 * saw_newack to 1 2923 * for dest being 2924 * newly acked. 2925 * update 2926 * this_sack_highest_ 2927 * newack if 2928 * appropriate. 
2929 */ 2930 if (tp1->rec.data.chunk_was_revoked == 0) 2931 tp1->whoTo->saw_newack = 1; 2932 2933 if (compare_with_wrap(tp1->rec.data.TSN_seq, 2934 tp1->whoTo->this_sack_highest_newack, 2935 MAX_TSN)) { 2936 tp1->whoTo->this_sack_highest_newack = 2937 tp1->rec.data.TSN_seq; 2938 } 2939 /* 2940 * CMT DAC algo: 2941 * also update 2942 * this_sack_lowest_n 2943 * ewack 2944 */ 2945 if (*this_sack_lowest_newack == 0) { 2946 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) { 2947 sctp_log_sack(*this_sack_lowest_newack, 2948 last_tsn, 2949 tp1->rec.data.TSN_seq, 2950 0, 2951 0, 2952 SCTP_LOG_TSN_ACKED); 2953 } 2954 *this_sack_lowest_newack = tp1->rec.data.TSN_seq; 2955 } 2956 /* 2957 * CMT: CUCv2 2958 * algorithm. If 2959 * (rtx-)pseudo-cumac 2960 * k for corresp 2961 * dest is being 2962 * acked, then we 2963 * have a new 2964 * (rtx-)pseudo-cumac 2965 * k. Set 2966 * new_(rtx_)pseudo_c 2967 * umack to TRUE so 2968 * that the cwnd for 2969 * this dest can be 2970 * updated. Also 2971 * trigger search 2972 * for the next 2973 * expected 2974 * (rtx-)pseudo-cumac 2975 * k. Separate 2976 * pseudo_cumack 2977 * trackers for 2978 * first 2979 * transmissions and 2980 * retransmissions. 2981 */ 2982 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) { 2983 if (tp1->rec.data.chunk_was_revoked == 0) { 2984 tp1->whoTo->new_pseudo_cumack = 1; 2985 } 2986 tp1->whoTo->find_pseudo_cumack = 1; 2987 } 2988 if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) { 2989 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 2990 } 2991 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) { 2992 if (tp1->rec.data.chunk_was_revoked == 0) { 2993 tp1->whoTo->new_pseudo_cumack = 1; 2994 } 2995 tp1->whoTo->find_rtx_pseudo_cumack = 1; 2996 } 2997 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) { 2998 sctp_log_sack(*biggest_newly_acked_tsn, 2999 last_tsn, 3000 tp1->rec.data.TSN_seq, 3001 frag_strt, 3002 frag_end, 3003 SCTP_LOG_TSN_ACKED); 3004 } 3005 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) { 3006 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP, 3007 tp1->whoTo->flight_size, 3008 tp1->book_size, 3009 (uintptr_t) tp1->whoTo, 3010 tp1->rec.data.TSN_seq); 3011 } 3012 sctp_flight_size_decrease(tp1); 3013 sctp_total_flight_decrease(stcb, tp1); 3014 3015 tp1->whoTo->net_ack += tp1->send_size; 3016 if (tp1->snd_count < 2) { 3017 /* 3018 * True 3019 * non-retran 3020 * smited 3021 * chunk */ 3022 tp1->whoTo->net_ack2 += tp1->send_size; 3023 3024 /* 3025 * update RTO 3026 * too ? 
*/ 3027 if (tp1->do_rtt) { 3028 tp1->whoTo->RTO = 3029 sctp_calculate_rto(stcb, 3030 asoc, 3031 tp1->whoTo, 3032 &tp1->sent_rcv_time, 3033 sctp_align_safe_nocopy); 3034 tp1->do_rtt = 0; 3035 } 3036 } 3037 } 3038 if (tp1->sent <= SCTP_DATAGRAM_RESEND) { 3039 (*ecn_seg_sums) += tp1->rec.data.ect_nonce; 3040 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM; 3041 if (compare_with_wrap(tp1->rec.data.TSN_seq, 3042 asoc->this_sack_highest_gap, 3043 MAX_TSN)) { 3044 asoc->this_sack_highest_gap = 3045 tp1->rec.data.TSN_seq; 3046 } 3047 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3048 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 3049 #ifdef SCTP_AUDITING_ENABLED 3050 sctp_audit_log(0xB2, 3051 (asoc->sent_queue_retran_cnt & 0x000000ff)); 3052 #endif 3053 } 3054 } 3055 /* 3056 * All chunks NOT UNSENT 3057 * fall through here and are 3058 * marked 3059 */ 3060 tp1->sent = SCTP_DATAGRAM_MARKED; 3061 if (tp1->rec.data.chunk_was_revoked) { 3062 /* deflate the cwnd */ 3063 tp1->whoTo->cwnd -= tp1->book_size; 3064 tp1->rec.data.chunk_was_revoked = 0; 3065 } 3066 } 3067 break; 3068 } /* if (tp1->TSN_seq == j) */ 3069 if (compare_with_wrap(tp1->rec.data.TSN_seq, j, 3070 MAX_TSN)) 3071 break; 3072 3073 tp1 = TAILQ_NEXT(tp1, sctp_next); 3074 } /* end while (tp1) */ 3075 } /* end for (j = fragStart */ 3076 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset, 3077 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block); 3078 *offset += sizeof(block); 3079 if (frag == NULL) { 3080 break; 3081 } 3082 } 3083 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3084 if (num_frs) 3085 sctp_log_fr(*biggest_tsn_acked, 3086 *biggest_newly_acked_tsn, 3087 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS); 3088 } 3089 } 3090 3091 static void 3092 sctp_check_for_revoked(struct sctp_tcb *stcb, 3093 struct sctp_association *asoc, uint32_t cumack, 3094 u_long biggest_tsn_acked) 3095 { 3096 struct sctp_tmit_chunk *tp1; 3097 int tot_revoked = 0; 3098 3099 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3100 while (tp1) { 3101 if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack, 3102 MAX_TSN)) { 3103 /* 3104 * ok this guy is either ACK or MARKED. If it is 3105 * ACKED it has been previously acked but not this 3106 * time i.e. revoked. If it is MARKED it was ACK'ed 3107 * again. 3108 */ 3109 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked, 3110 MAX_TSN)) 3111 break; 3112 3113 3114 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 3115 /* it has been revoked */ 3116 tp1->sent = SCTP_DATAGRAM_SENT; 3117 tp1->rec.data.chunk_was_revoked = 1; 3118 /* 3119 * We must add this stuff back in to assure 3120 * timers and such get started. 3121 */ 3122 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) { 3123 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 3124 tp1->whoTo->flight_size, 3125 tp1->book_size, 3126 (uintptr_t) tp1->whoTo, 3127 tp1->rec.data.TSN_seq); 3128 } 3129 sctp_flight_size_increase(tp1); 3130 sctp_total_flight_increase(stcb, tp1); 3131 /* 3132 * We inflate the cwnd to compensate for our 3133 * artificial inflation of the flight_size. 
3134 */ 3135 tp1->whoTo->cwnd += tp1->book_size; 3136 tot_revoked++; 3137 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) { 3138 sctp_log_sack(asoc->last_acked_seq, 3139 cumack, 3140 tp1->rec.data.TSN_seq, 3141 0, 3142 0, 3143 SCTP_LOG_TSN_REVOKED); 3144 } 3145 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) { 3146 /* it has been re-acked in this SACK */ 3147 tp1->sent = SCTP_DATAGRAM_ACKED; 3148 } 3149 } 3150 if (tp1->sent == SCTP_DATAGRAM_UNSENT) 3151 break; 3152 tp1 = TAILQ_NEXT(tp1, sctp_next); 3153 } 3154 if (tot_revoked > 0) { 3155 /* 3156 * Setup the ecn nonce re-sync point. We do this since once 3157 * data is revoked we begin to retransmit things, which do 3158 * NOT have the ECN bits set. This means we are now out of 3159 * sync and must wait until we get back in sync with the 3160 * peer to check ECN bits. 3161 */ 3162 tp1 = TAILQ_FIRST(&asoc->send_queue); 3163 if (tp1 == NULL) { 3164 asoc->nonce_resync_tsn = asoc->sending_seq; 3165 } else { 3166 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq; 3167 } 3168 asoc->nonce_wait_for_ecne = 0; 3169 asoc->nonce_sum_check = 0; 3170 } 3171 } 3172 3173 static void 3174 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, 3175 u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved) 3176 { 3177 struct sctp_tmit_chunk *tp1; 3178 int strike_flag = 0; 3179 struct timeval now; 3180 int tot_retrans = 0; 3181 uint32_t sending_seq; 3182 struct sctp_nets *net; 3183 int num_dests_sacked = 0; 3184 3185 /* 3186 * select the sending_seq, this is either the next thing ready to be 3187 * sent but not transmitted, OR, the next seq we assign. 3188 */ 3189 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 3190 if (tp1 == NULL) { 3191 sending_seq = asoc->sending_seq; 3192 } else { 3193 sending_seq = tp1->rec.data.TSN_seq; 3194 } 3195 3196 /* CMT DAC algo: finding out if SACK is a mixed SACK */ 3197 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3198 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3199 if (net->saw_newack) 3200 num_dests_sacked++; 3201 } 3202 } 3203 if (stcb->asoc.peer_supports_prsctp) { 3204 (void)SCTP_GETTIME_TIMEVAL(&now); 3205 } 3206 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3207 while (tp1) { 3208 strike_flag = 0; 3209 if (tp1->no_fr_allowed) { 3210 /* this one had a timeout or something */ 3211 tp1 = TAILQ_NEXT(tp1, sctp_next); 3212 continue; 3213 } 3214 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3215 if (tp1->sent < SCTP_DATAGRAM_RESEND) 3216 sctp_log_fr(biggest_tsn_newly_acked, 3217 tp1->rec.data.TSN_seq, 3218 tp1->sent, 3219 SCTP_FR_LOG_CHECK_STRIKE); 3220 } 3221 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked, 3222 MAX_TSN) || 3223 tp1->sent == SCTP_DATAGRAM_UNSENT) { 3224 /* done */ 3225 break; 3226 } 3227 if (stcb->asoc.peer_supports_prsctp) { 3228 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3229 /* Is it expired? */ 3230 if ( 3231 (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) 3232 ) { 3233 /* Yes so drop it */ 3234 if (tp1->data != NULL) { 3235 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 3236 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3237 &asoc->sent_queue, SCTP_SO_NOT_LOCKED); 3238 } 3239 tp1 = TAILQ_NEXT(tp1, sctp_next); 3240 continue; 3241 } 3242 } 3243 if ((PR_SCTP_RTX_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3244 /* Has it been retransmitted tv_sec times? 
*/ 3245 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) { 3246 /* Yes, so drop it */ 3247 if (tp1->data != NULL) { 3248 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 3249 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3250 &asoc->sent_queue, SCTP_SO_NOT_LOCKED); 3251 } 3252 tp1 = TAILQ_NEXT(tp1, sctp_next); 3253 continue; 3254 } 3255 } 3256 } 3257 if (compare_with_wrap(tp1->rec.data.TSN_seq, 3258 asoc->this_sack_highest_gap, MAX_TSN)) { 3259 /* we are beyond the tsn in the sack */ 3260 break; 3261 } 3262 if (tp1->sent >= SCTP_DATAGRAM_RESEND) { 3263 /* either a RESEND, ACKED, or MARKED */ 3264 /* skip */ 3265 tp1 = TAILQ_NEXT(tp1, sctp_next); 3266 continue; 3267 } 3268 /* 3269 * CMT : SFR algo (covers part of DAC and HTNA as well) 3270 */ 3271 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) { 3272 /* 3273 * No new acks were receieved for data sent to this 3274 * dest. Therefore, according to the SFR algo for 3275 * CMT, no data sent to this dest can be marked for 3276 * FR using this SACK. 3277 */ 3278 tp1 = TAILQ_NEXT(tp1, sctp_next); 3279 continue; 3280 } else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq, 3281 tp1->whoTo->this_sack_highest_newack, MAX_TSN)) { 3282 /* 3283 * CMT: New acks were receieved for data sent to 3284 * this dest. But no new acks were seen for data 3285 * sent after tp1. Therefore, according to the SFR 3286 * algo for CMT, tp1 cannot be marked for FR using 3287 * this SACK. This step covers part of the DAC algo 3288 * and the HTNA algo as well. 3289 */ 3290 tp1 = TAILQ_NEXT(tp1, sctp_next); 3291 continue; 3292 } 3293 /* 3294 * Here we check to see if we were have already done a FR 3295 * and if so we see if the biggest TSN we saw in the sack is 3296 * smaller than the recovery point. If so we don't strike 3297 * the tsn... otherwise we CAN strike the TSN. 3298 */ 3299 /* 3300 * @@@ JRI: Check for CMT if (accum_moved && 3301 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 3302 * 0)) { 3303 */ 3304 if (accum_moved && asoc->fast_retran_loss_recovery) { 3305 /* 3306 * Strike the TSN if in fast-recovery and cum-ack 3307 * moved. 3308 */ 3309 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3310 sctp_log_fr(biggest_tsn_newly_acked, 3311 tp1->rec.data.TSN_seq, 3312 tp1->sent, 3313 SCTP_FR_LOG_STRIKE_CHUNK); 3314 } 3315 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3316 tp1->sent++; 3317 } 3318 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3319 /* 3320 * CMT DAC algorithm: If SACK flag is set to 3321 * 0, then lowest_newack test will not pass 3322 * because it would have been set to the 3323 * cumack earlier. If not already to be 3324 * rtx'd, If not a mixed sack and if tp1 is 3325 * not between two sacked TSNs, then mark by 3326 * one more. NOTE that we are marking by one 3327 * additional time since the SACK DAC flag 3328 * indicates that two packets have been 3329 * received after this missing TSN. 3330 */ 3331 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3332 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) { 3333 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3334 sctp_log_fr(16 + num_dests_sacked, 3335 tp1->rec.data.TSN_seq, 3336 tp1->sent, 3337 SCTP_FR_LOG_STRIKE_CHUNK); 3338 } 3339 tp1->sent++; 3340 } 3341 } 3342 } else if (tp1->rec.data.doing_fast_retransmit) { 3343 /* 3344 * For those that have done a FR we must take 3345 * special consideration if we strike. I.e the 3346 * biggest_newly_acked must be higher than the 3347 * sending_seq at the time we did the FR. 
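 *
 * Worked example: if a chunk was fast retransmitted while
 * sending_seq was 5000, its fast_retran_tsn is recorded as 5000
 * (or the lowest TSN still on the send queue), and only a SACK
 * that newly acks TSN 5000 or beyond can strike it again; gap
 * reports that only cover data sent before the retransmission say
 * nothing about whether the retransmitted copy survived.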
3348 */ 3349 if ( 3350 #ifdef SCTP_FR_TO_ALTERNATE 3351 /* 3352 * If FR's go to new networks, then we must only do 3353 * this for singly homed asoc's. However if the FR's 3354 * go to the same network (Armando's work) then its 3355 * ok to FR multiple times. 3356 */ 3357 (asoc->numnets < 2) 3358 #else 3359 (1) 3360 #endif 3361 ) { 3362 3363 if ((compare_with_wrap(biggest_tsn_newly_acked, 3364 tp1->rec.data.fast_retran_tsn, MAX_TSN)) || 3365 (biggest_tsn_newly_acked == 3366 tp1->rec.data.fast_retran_tsn)) { 3367 /* 3368 * Strike the TSN, since this ack is 3369 * beyond where things were when we 3370 * did a FR. 3371 */ 3372 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3373 sctp_log_fr(biggest_tsn_newly_acked, 3374 tp1->rec.data.TSN_seq, 3375 tp1->sent, 3376 SCTP_FR_LOG_STRIKE_CHUNK); 3377 } 3378 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3379 tp1->sent++; 3380 } 3381 strike_flag = 1; 3382 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3383 /* 3384 * CMT DAC algorithm: If 3385 * SACK flag is set to 0, 3386 * then lowest_newack test 3387 * will not pass because it 3388 * would have been set to 3389 * the cumack earlier. If 3390 * not already to be rtx'd, 3391 * If not a mixed sack and 3392 * if tp1 is not between two 3393 * sacked TSNs, then mark by 3394 * one more. NOTE that we 3395 * are marking by one 3396 * additional time since the 3397 * SACK DAC flag indicates 3398 * that two packets have 3399 * been received after this 3400 * missing TSN. 3401 */ 3402 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 3403 (num_dests_sacked == 1) && 3404 compare_with_wrap(this_sack_lowest_newack, 3405 tp1->rec.data.TSN_seq, MAX_TSN)) { 3406 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3407 sctp_log_fr(32 + num_dests_sacked, 3408 tp1->rec.data.TSN_seq, 3409 tp1->sent, 3410 SCTP_FR_LOG_STRIKE_CHUNK); 3411 } 3412 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3413 tp1->sent++; 3414 } 3415 } 3416 } 3417 } 3418 } 3419 /* 3420 * JRI: TODO: remove code for HTNA algo. CMT's SFR 3421 * algo covers HTNA. 3422 */ 3423 } else if (compare_with_wrap(tp1->rec.data.TSN_seq, 3424 biggest_tsn_newly_acked, MAX_TSN)) { 3425 /* 3426 * We don't strike these: This is the HTNA 3427 * algorithm i.e. we don't strike If our TSN is 3428 * larger than the Highest TSN Newly Acked. 3429 */ 3430 ; 3431 } else { 3432 /* Strike the TSN */ 3433 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3434 sctp_log_fr(biggest_tsn_newly_acked, 3435 tp1->rec.data.TSN_seq, 3436 tp1->sent, 3437 SCTP_FR_LOG_STRIKE_CHUNK); 3438 } 3439 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3440 tp1->sent++; 3441 } 3442 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3443 /* 3444 * CMT DAC algorithm: If SACK flag is set to 3445 * 0, then lowest_newack test will not pass 3446 * because it would have been set to the 3447 * cumack earlier. If not already to be 3448 * rtx'd, If not a mixed sack and if tp1 is 3449 * not between two sacked TSNs, then mark by 3450 * one more. NOTE that we are marking by one 3451 * additional time since the SACK DAC flag 3452 * indicates that two packets have been 3453 * received after this missing TSN. 
3454 */ 3455 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3456 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) { 3457 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3458 sctp_log_fr(48 + num_dests_sacked, 3459 tp1->rec.data.TSN_seq, 3460 tp1->sent, 3461 SCTP_FR_LOG_STRIKE_CHUNK); 3462 } 3463 tp1->sent++; 3464 } 3465 } 3466 } 3467 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3468 /* Increment the count to resend */ 3469 struct sctp_nets *alt; 3470 3471 /* printf("OK, we are now ready to FR this guy\n"); */ 3472 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3473 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count, 3474 0, SCTP_FR_MARKED); 3475 } 3476 if (strike_flag) { 3477 /* This is a subsequent FR */ 3478 SCTP_STAT_INCR(sctps_sendmultfastretrans); 3479 } 3480 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3481 if (sctp_cmt_on_off) { 3482 /* 3483 * CMT: Using RTX_SSTHRESH policy for CMT. 3484 * If CMT is being used, then pick dest with 3485 * largest ssthresh for any retransmission. 3486 */ 3487 tp1->no_fr_allowed = 1; 3488 alt = tp1->whoTo; 3489 /* sa_ignore NO_NULL_CHK */ 3490 if (sctp_cmt_on_off && sctp_cmt_pf) { 3491 /* 3492 * JRS 5/18/07 - If CMT PF is on, 3493 * use the PF version of 3494 * find_alt_net() 3495 */ 3496 alt = sctp_find_alternate_net(stcb, alt, 2); 3497 } else { 3498 /* 3499 * JRS 5/18/07 - If only CMT is on, 3500 * use the CMT version of 3501 * find_alt_net() 3502 */ 3503 /* sa_ignore NO_NULL_CHK */ 3504 alt = sctp_find_alternate_net(stcb, alt, 1); 3505 } 3506 if (alt == NULL) { 3507 alt = tp1->whoTo; 3508 } 3509 /* 3510 * CUCv2: If a different dest is picked for 3511 * the retransmission, then new 3512 * (rtx-)pseudo_cumack needs to be tracked 3513 * for orig dest. Let CUCv2 track new (rtx-) 3514 * pseudo-cumack always. 3515 */ 3516 if (tp1->whoTo) { 3517 tp1->whoTo->find_pseudo_cumack = 1; 3518 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3519 } 3520 } else {/* CMT is OFF */ 3521 3522 #ifdef SCTP_FR_TO_ALTERNATE 3523 /* Can we find an alternate? */ 3524 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); 3525 #else 3526 /* 3527 * default behavior is to NOT retransmit 3528 * FR's to an alternate. Armando Caro's 3529 * paper details why. 3530 */ 3531 alt = tp1->whoTo; 3532 #endif 3533 } 3534 3535 tp1->rec.data.doing_fast_retransmit = 1; 3536 tot_retrans++; 3537 /* mark the sending seq for possible subsequent FR's */ 3538 /* 3539 * printf("Marking TSN for FR new value %x\n", 3540 * (uint32_t)tpi->rec.data.TSN_seq); 3541 */ 3542 if (TAILQ_EMPTY(&asoc->send_queue)) { 3543 /* 3544 * If the queue of send is empty then its 3545 * the next sequence number that will be 3546 * assigned so we subtract one from this to 3547 * get the one we last sent. 3548 */ 3549 tp1->rec.data.fast_retran_tsn = sending_seq; 3550 } else { 3551 /* 3552 * If there are chunks on the send queue 3553 * (unsent data that has made it from the 3554 * stream queues but not out the door, we 3555 * take the first one (which will have the 3556 * lowest TSN) and subtract one to get the 3557 * one we last sent. 
3558 */ 3559 struct sctp_tmit_chunk *ttt; 3560 3561 ttt = TAILQ_FIRST(&asoc->send_queue); 3562 tp1->rec.data.fast_retran_tsn = 3563 ttt->rec.data.TSN_seq; 3564 } 3565 3566 if (tp1->do_rtt) { 3567 /* 3568 * this guy had a RTO calculation pending on 3569 * it, cancel it 3570 */ 3571 tp1->do_rtt = 0; 3572 } 3573 /* fix counts and things */ 3574 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) { 3575 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND, 3576 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0), 3577 tp1->book_size, 3578 (uintptr_t) tp1->whoTo, 3579 tp1->rec.data.TSN_seq); 3580 } 3581 if (tp1->whoTo) { 3582 tp1->whoTo->net_ack++; 3583 sctp_flight_size_decrease(tp1); 3584 } 3585 if (sctp_logging_level & SCTP_LOG_RWND_ENABLE) { 3586 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, 3587 asoc->peers_rwnd, tp1->send_size, sctp_peer_chunk_oh); 3588 } 3589 /* add back to the rwnd */ 3590 asoc->peers_rwnd += (tp1->send_size + sctp_peer_chunk_oh); 3591 3592 /* remove from the total flight */ 3593 sctp_total_flight_decrease(stcb, tp1); 3594 if (alt != tp1->whoTo) { 3595 /* yes, there is an alternate. */ 3596 sctp_free_remote_addr(tp1->whoTo); 3597 /* sa_ignore FREED_MEMORY */ 3598 tp1->whoTo = alt; 3599 atomic_add_int(&alt->ref_count, 1); 3600 } 3601 } 3602 tp1 = TAILQ_NEXT(tp1, sctp_next); 3603 } /* while (tp1) */ 3604 3605 if (tot_retrans > 0) { 3606 /* 3607 * Setup the ecn nonce re-sync point. We do this since once 3608 * we go to FR something we introduce a Karn's rule scenario 3609 * and won't know the totals for the ECN bits. 3610 */ 3611 asoc->nonce_resync_tsn = sending_seq; 3612 asoc->nonce_wait_for_ecne = 0; 3613 asoc->nonce_sum_check = 0; 3614 } 3615 } 3616 3617 struct sctp_tmit_chunk * 3618 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb, 3619 struct sctp_association *asoc) 3620 { 3621 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL; 3622 struct timeval now; 3623 int now_filled = 0; 3624 3625 if (asoc->peer_supports_prsctp == 0) { 3626 return (NULL); 3627 } 3628 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3629 while (tp1) { 3630 if (tp1->sent != SCTP_FORWARD_TSN_SKIP && 3631 tp1->sent != SCTP_DATAGRAM_RESEND) { 3632 /* no chance to advance, out of here */ 3633 break; 3634 } 3635 if (!PR_SCTP_ENABLED(tp1->flags)) { 3636 /* 3637 * We can't fwd-tsn past any that are reliable aka 3638 * retransmitted until the asoc fails. 3639 */ 3640 break; 3641 } 3642 if (!now_filled) { 3643 (void)SCTP_GETTIME_TIMEVAL(&now); 3644 now_filled = 1; 3645 } 3646 tp2 = TAILQ_NEXT(tp1, sctp_next); 3647 /* 3648 * now we got a chunk which is marked for another 3649 * retransmission to a PR-stream but has run out its chances 3650 * already maybe OR has been marked to skip now. Can we skip 3651 * it if its a resend? 3652 */ 3653 if (tp1->sent == SCTP_DATAGRAM_RESEND && 3654 (PR_SCTP_TTL_ENABLED(tp1->flags))) { 3655 /* 3656 * Now is this one marked for resend and its time is 3657 * now up? 3658 */ 3659 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3660 /* Yes so drop it */ 3661 if (tp1->data) { 3662 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 3663 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3664 &asoc->sent_queue, SCTP_SO_NOT_LOCKED); 3665 } 3666 } else { 3667 /* 3668 * No, we are done when hit one for resend 3669 * whos time as not expired. 3670 */ 3671 break; 3672 } 3673 } 3674 /* 3675 * Ok now if this chunk is marked to drop it we can clean up 3676 * the chunk, advance our peer ack point and we can check 3677 * the next chunk. 
3678 */ 3679 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) { 3680 /* advance PeerAckPoint goes forward */ 3681 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq; 3682 a_adv = tp1; 3683 /* 3684 * we don't want to de-queue it here. Just wait for 3685 * the next peer SACK to come with a new cumTSN and 3686 * then the chunk will be droped in the normal 3687 * fashion. 3688 */ 3689 if (tp1->data) { 3690 sctp_free_bufspace(stcb, asoc, tp1, 1); 3691 /* 3692 * Maybe there should be another 3693 * notification type 3694 */ 3695 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, 3696 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3697 tp1, SCTP_SO_NOT_LOCKED); 3698 sctp_m_freem(tp1->data); 3699 tp1->data = NULL; 3700 if (stcb->sctp_socket) { 3701 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3702 struct socket *so; 3703 3704 so = SCTP_INP_SO(stcb->sctp_ep); 3705 atomic_add_int(&stcb->asoc.refcnt, 1); 3706 SCTP_TCB_UNLOCK(stcb); 3707 SCTP_SOCKET_LOCK(so, 1); 3708 SCTP_TCB_LOCK(stcb); 3709 atomic_subtract_int(&stcb->asoc.refcnt, 1); 3710 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 3711 /* 3712 * assoc was freed while we 3713 * were unlocked 3714 */ 3715 SCTP_SOCKET_UNLOCK(so, 1); 3716 return (NULL); 3717 } 3718 #endif 3719 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 3720 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3721 SCTP_SOCKET_UNLOCK(so, 1); 3722 #endif 3723 if (sctp_logging_level & SCTP_WAKE_LOGGING_ENABLE) { 3724 sctp_wakeup_log(stcb, tp1->rec.data.TSN_seq, 1, SCTP_WAKESND_FROM_FWDTSN); 3725 } 3726 } 3727 } 3728 } else { 3729 /* 3730 * If it is still in RESEND we can advance no 3731 * further 3732 */ 3733 break; 3734 } 3735 /* 3736 * If we hit here we just dumped tp1, move to next tsn on 3737 * sent queue. 3738 */ 3739 tp1 = tp2; 3740 } 3741 return (a_adv); 3742 } 3743 3744 static void 3745 sctp_fs_audit(struct sctp_association *asoc) 3746 { 3747 struct sctp_tmit_chunk *chk; 3748 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0; 3749 3750 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 3751 if (chk->sent < SCTP_DATAGRAM_RESEND) { 3752 inflight++; 3753 } else if (chk->sent == SCTP_DATAGRAM_RESEND) { 3754 resend++; 3755 } else if (chk->sent < SCTP_DATAGRAM_ACKED) { 3756 inbetween++; 3757 } else if (chk->sent > SCTP_DATAGRAM_ACKED) { 3758 above++; 3759 } else { 3760 acked++; 3761 } 3762 } 3763 3764 if ((inflight > 0) || (inbetween > 0)) { 3765 #ifdef INVARIANTS 3766 panic("Flight size-express incorrect? \n"); 3767 #else 3768 SCTP_PRINTF("Flight size-express incorrect inflight:%d inbetween:%d\n", 3769 inflight, inbetween); 3770 #endif 3771 } 3772 } 3773 3774 3775 static void 3776 sctp_window_probe_recovery(struct sctp_tcb *stcb, 3777 struct sctp_association *asoc, 3778 struct sctp_nets *net, 3779 struct sctp_tmit_chunk *tp1) 3780 { 3781 struct sctp_tmit_chunk *chk; 3782 3783 /* First setup this one and get it moved back */ 3784 tp1->sent = SCTP_DATAGRAM_UNSENT; 3785 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) { 3786 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP, 3787 tp1->whoTo->flight_size, 3788 tp1->book_size, 3789 (uintptr_t) tp1->whoTo, 3790 tp1->rec.data.TSN_seq); 3791 } 3792 sctp_flight_size_decrease(tp1); 3793 sctp_total_flight_decrease(stcb, tp1); 3794 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 3795 TAILQ_INSERT_HEAD(&asoc->send_queue, tp1, sctp_next); 3796 asoc->sent_queue_cnt--; 3797 asoc->send_queue_cnt++; 3798 /* 3799 * Now all guys marked for RESEND on the sent_queue must be moved 3800 * back too. 
3801 */ 3802 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 3803 if (chk->sent == SCTP_DATAGRAM_RESEND) { 3804 /* Another chunk to move */ 3805 chk->sent = SCTP_DATAGRAM_UNSENT; 3806 /* It should not be in flight */ 3807 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 3808 TAILQ_INSERT_AFTER(&asoc->send_queue, tp1, chk, sctp_next); 3809 asoc->sent_queue_cnt--; 3810 asoc->send_queue_cnt++; 3811 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 3812 } 3813 } 3814 } 3815 3816 void 3817 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, 3818 uint32_t rwnd, int nonce_sum_flag, int *abort_now) 3819 { 3820 struct sctp_nets *net; 3821 struct sctp_association *asoc; 3822 struct sctp_tmit_chunk *tp1, *tp2; 3823 uint32_t old_rwnd; 3824 int win_probe_recovery = 0; 3825 int win_probe_recovered = 0; 3826 int j, done_once = 0; 3827 3828 if (sctp_logging_level & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 3829 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack, 3830 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 3831 } 3832 SCTP_TCB_LOCK_ASSERT(stcb); 3833 #ifdef SCTP_ASOCLOG_OF_TSNS 3834 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack; 3835 stcb->asoc.cumack_log_at++; 3836 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 3837 stcb->asoc.cumack_log_at = 0; 3838 } 3839 #endif 3840 asoc = &stcb->asoc; 3841 old_rwnd = asoc->peers_rwnd; 3842 if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) { 3843 /* old ack */ 3844 return; 3845 } else if (asoc->last_acked_seq == cumack) { 3846 /* Window update sack */ 3847 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 3848 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh))); 3849 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 3850 /* SWS sender side engages */ 3851 asoc->peers_rwnd = 0; 3852 } 3853 if (asoc->peers_rwnd > old_rwnd) { 3854 goto again; 3855 } 3856 return; 3857 } 3858 /* First setup for CC stuff */ 3859 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3860 net->prev_cwnd = net->cwnd; 3861 net->net_ack = 0; 3862 net->net_ack2 = 0; 3863 3864 /* 3865 * CMT: Reset CUC and Fast recovery algo variables before 3866 * SACK processing 3867 */ 3868 net->new_pseudo_cumack = 0; 3869 net->will_exit_fast_recovery = 0; 3870 } 3871 if (sctp_strict_sacks) { 3872 uint32_t send_s; 3873 3874 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 3875 tp1 = TAILQ_LAST(&asoc->sent_queue, 3876 sctpchunk_listhead); 3877 send_s = tp1->rec.data.TSN_seq + 1; 3878 } else { 3879 send_s = asoc->sending_seq; 3880 } 3881 if ((cumack == send_s) || 3882 compare_with_wrap(cumack, send_s, MAX_TSN)) { 3883 #ifndef INVARIANTS 3884 struct mbuf *oper; 3885 3886 #endif 3887 #ifdef INVARIANTS 3888 panic("Impossible sack 1"); 3889 #else 3890 *abort_now = 1; 3891 /* XXX */ 3892 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 3893 0, M_DONTWAIT, 1, MT_DATA); 3894 if (oper) { 3895 struct sctp_paramhdr *ph; 3896 uint32_t *ippp; 3897 3898 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 3899 sizeof(uint32_t); 3900 ph = mtod(oper, struct sctp_paramhdr *); 3901 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 3902 ph->param_length = htons(SCTP_BUF_LEN(oper)); 3903 ippp = (uint32_t *) (ph + 1); 3904 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25); 3905 } 3906 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 3907 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED); 3908 return; 3909 #endif 3910 } 3911 } 3912 asoc->this_sack_highest_gap = cumack; 3913 if 
(sctp_logging_level & SCTP_THRESHOLD_LOGGING) { 3914 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 3915 stcb->asoc.overall_error_count, 3916 0, 3917 SCTP_FROM_SCTP_INDATA, 3918 __LINE__); 3919 } 3920 stcb->asoc.overall_error_count = 0;
3921 if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) { 3922 /* process the new consecutive TSN first */ 3923 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3924 while (tp1) { 3925 tp2 = TAILQ_NEXT(tp1, sctp_next);
3926 if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq, 3927 MAX_TSN) || 3928 cumack == tp1->rec.data.TSN_seq) { 3929 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 3930 printf("Warning, an unsent is now acked?\n"); 3931 }
3932 /* 3933 * ECN Nonce: Add the nonce to the sender's 3934 * nonce sum 3935 */ 3936 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
3937 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 3938 /* 3939 * If it is less than ACKED, it is 3940 * now no longer in flight. Higher 3941 * values may occur during marking 3942 */
3943 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3944 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) { 3945 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 3946 tp1->whoTo->flight_size, 3947 tp1->book_size, 3948 (uintptr_t) tp1->whoTo, 3949 tp1->rec.data.TSN_seq); 3950 } 3951 sctp_flight_size_decrease(tp1); 3952 sctp_total_flight_decrease(stcb, tp1); 3953 }
3954 tp1->whoTo->net_ack += tp1->send_size;
3955 if (tp1->snd_count < 2) { 3956 /* 3957 * True non-retransmitted 3958 * chunk 3959 */ 3960 tp1->whoTo->net_ack2 += 3961 tp1->send_size; 3962 3963 /* update RTO too? */ 3964 if (tp1->do_rtt) { 3965 tp1->whoTo->RTO = 3966 sctp_calculate_rto(stcb, 3967 asoc, tp1->whoTo, 3968 &tp1->sent_rcv_time, 3969 sctp_align_safe_nocopy); 3970 tp1->do_rtt = 0; 3971 } 3972 }
3973 /* 3974 * CMT: CUCv2 algorithm. From the 3975 * cumack'd TSNs, for each TSN being 3976 * acked for the first time, set the 3977 * following variables for the 3978 * corresponding destination. 3979 * new_pseudo_cumack will trigger a 3980 * cwnd update. 3981 * find_(rtx_)pseudo_cumack will 3982 * trigger search for the next 3983 * expected (rtx-)pseudo-cumack.
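 *
 * Rough intuition, not a normative statement: each destination
 * tracks its own "pseudo-cumack", the lowest outstanding TSN
 * sent on that path.  When that TSN is newly acked, the path can
 * take a cwnd update even though the association-wide cumulative
 * ack may still be held back by a hole on a different path.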
3984 */ 3985 tp1->whoTo->new_pseudo_cumack = 1; 3986 tp1->whoTo->find_pseudo_cumack = 1; 3987 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3988 3989 if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) { 3990 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 3991 } 3992 } 3993 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3994 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 3995 } 3996 if (tp1->rec.data.chunk_was_revoked) { 3997 /* deflate the cwnd */ 3998 tp1->whoTo->cwnd -= tp1->book_size; 3999 tp1->rec.data.chunk_was_revoked = 0; 4000 } 4001 tp1->sent = SCTP_DATAGRAM_ACKED; 4002 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4003 if (tp1->data) { 4004 sctp_free_bufspace(stcb, asoc, tp1, 1); 4005 sctp_m_freem(tp1->data); 4006 } 4007 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) { 4008 sctp_log_sack(asoc->last_acked_seq, 4009 cumack, 4010 tp1->rec.data.TSN_seq, 4011 0, 4012 0, 4013 SCTP_LOG_FREE_SENT); 4014 } 4015 tp1->data = NULL; 4016 asoc->sent_queue_cnt--; 4017 sctp_free_a_chunk(stcb, tp1); 4018 tp1 = tp2; 4019 } else { 4020 break; 4021 } 4022 } 4023 4024 } 4025 if (stcb->sctp_socket) { 4026 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4027 struct socket *so; 4028 4029 #endif 4030 4031 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4032 if (sctp_logging_level & SCTP_WAKE_LOGGING_ENABLE) { 4033 sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK); 4034 } 4035 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4036 so = SCTP_INP_SO(stcb->sctp_ep); 4037 atomic_add_int(&stcb->asoc.refcnt, 1); 4038 SCTP_TCB_UNLOCK(stcb); 4039 SCTP_SOCKET_LOCK(so, 1); 4040 SCTP_TCB_LOCK(stcb); 4041 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4042 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4043 /* assoc was freed while we were unlocked */ 4044 SCTP_SOCKET_UNLOCK(so, 1); 4045 return; 4046 } 4047 #endif 4048 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4049 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4050 SCTP_SOCKET_UNLOCK(so, 1); 4051 #endif 4052 } else { 4053 if (sctp_logging_level & SCTP_WAKE_LOGGING_ENABLE) { 4054 sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK); 4055 } 4056 } 4057 4058 /* JRS - Use the congestion control given in the CC module */ 4059 if (asoc->last_acked_seq != cumack) 4060 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0); 4061 4062 asoc->last_acked_seq = cumack; 4063 4064 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4065 /* nothing left in-flight */ 4066 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4067 net->flight_size = 0; 4068 net->partial_bytes_acked = 0; 4069 } 4070 asoc->total_flight = 0; 4071 asoc->total_flight_count = 0; 4072 } 4073 /* Fix up the a-p-a-p for future PR-SCTP sends */ 4074 if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) { 4075 asoc->advanced_peer_ack_point = cumack; 4076 } 4077 /* ECN Nonce updates */ 4078 if (asoc->ecn_nonce_allowed) { 4079 if (asoc->nonce_sum_check) { 4080 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) { 4081 if (asoc->nonce_wait_for_ecne == 0) { 4082 struct sctp_tmit_chunk *lchk; 4083 4084 lchk = TAILQ_FIRST(&asoc->send_queue); 4085 asoc->nonce_wait_for_ecne = 1; 4086 if (lchk) { 4087 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq; 4088 } else { 4089 asoc->nonce_wait_tsn = asoc->sending_seq; 4090 } 4091 } else { 4092 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) || 4093 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) { 4094 /* 4095 * Misbehaving peer. 
We need 4096 * to react to this guy 4097 */ 4098 asoc->ecn_allowed = 0; 4099 asoc->ecn_nonce_allowed = 0; 4100 } 4101 } 4102 } 4103 } else { 4104 /* See if Resynchronization Possible */
4105 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) { 4106 asoc->nonce_sum_check = 1;
4107 /* 4108 * now we must calculate what the base is. 4109 * We do this based on two things: we know 4110 * the totals for all the segments 4111 * gap-acked in the SACK (none here), and we 4112 * also know the SACK's nonce sum; it is in 4113 * nonce_sum_flag. So we can build a truth 4114 * table to back-calculate the new value of 4115 * asoc->nonce_sum_expect_base: 4116 *
4117 * SACK-flag-Value   Seg-Sums   Base
4118 *        0             0        0
     *        1             0        1
     *        0             1        1
4119 *        1             1        0
4120 */
4121 asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM; 4122 } 4123 } 4124 } 4125 /* RWND update */
4126 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 4127 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
4128 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4129 /* SWS sender side engages */ 4130 asoc->peers_rwnd = 0; 4131 }
4132 if (asoc->peers_rwnd > old_rwnd) { 4133 win_probe_recovery = 1; 4134 }
4135 /* Now assure a timer where data is queued at */ 4136 again: 4137 j = 0;
4138 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4139 if (win_probe_recovery && (net->window_probe)) { 4140 net->window_probe = 0; 4141 win_probe_recovered = 1;
4142 /* 4143 * Find first chunk that was used with window probe 4144 * and clear the sent state 4145 */ 4146 /* sa_ignore FREED_MEMORY */
4147 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4148 if (tp1->window_probe) { 4149 /* move back to data send queue */ 4150 sctp_window_probe_recovery(stcb, asoc, net, tp1); 4151 break; 4152 } 4153 } 4154 }
4155 if (net->flight_size) { 4156 int to_ticks; 4157 4158 if (net->RTO == 0) { 4159 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 4160 } else { 4161 to_ticks = MSEC_TO_TICKS(net->RTO); 4162 } 4163 j++;
4164 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks, 4165 sctp_timeout_handler, &net->rxt_timer);
4166 } else { 4167 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4168 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4169 stcb, net, 4170 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22); 4171 }
4172 if (sctp_early_fr) { 4173 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 4174 SCTP_STAT_INCR(sctps_earlyfrstpidsck4); 4175 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 4176 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23); 4177 } 4178 } 4179 } 4180 }
4181 if ((j == 0) && 4182 (!TAILQ_EMPTY(&asoc->sent_queue)) && 4183 (asoc->sent_queue_retran_cnt == 0) && 4184 (win_probe_recovered == 0) && 4185 (done_once == 0)) { 4186 /* huh, this should not happen */ 4187 sctp_fs_audit(asoc);
4188 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4189 net->flight_size = 0; 4190 } 4191 asoc->total_flight = 0; 4192 asoc->total_flight_count = 0; 4193 asoc->sent_queue_retran_cnt = 0;
4194 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4195 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4196 sctp_flight_size_increase(tp1); 4197 sctp_total_flight_increase(stcb, tp1); 4198 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4199 asoc->sent_queue_retran_cnt++; 4200 } 4201 } 4202 done_once = 1; 4203 goto again; 4204 }
4205 /**********************************/ 4206 /* Now what about shutdown issues */ 4207 /**********************************/
4208 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4209 /* nothing
left on sendqueue.. consider done */ 4210 /* clean up */ 4211 if ((asoc->stream_queue_cnt == 1) && 4212 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4213 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 4214 (asoc->locked_on_sending) 4215 ) { 4216 struct sctp_stream_queue_pending *sp; 4217 4218 /* 4219 * I may be in a state where we got all across.. but 4220 * cannot write more due to a shutdown... we abort 4221 * since the user did not indicate EOR in this case. 4222 * The sp will be cleaned during free of the asoc. 4223 */ 4224 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), 4225 sctp_streamhead); 4226 if ((sp) && (sp->length == 0)) { 4227 /* Let cleanup code purge it */ 4228 if (sp->msg_is_complete) { 4229 asoc->stream_queue_cnt--; 4230 } else { 4231 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4232 asoc->locked_on_sending = NULL; 4233 asoc->stream_queue_cnt--; 4234 } 4235 } 4236 } 4237 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4238 (asoc->stream_queue_cnt == 0)) { 4239 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4240 /* Need to abort here */ 4241 struct mbuf *oper; 4242 4243 abort_out_now: 4244 *abort_now = 1; 4245 /* XXX */ 4246 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4247 0, M_DONTWAIT, 1, MT_DATA); 4248 if (oper) { 4249 struct sctp_paramhdr *ph; 4250 uint32_t *ippp; 4251 4252 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 4253 sizeof(uint32_t); 4254 ph = mtod(oper, struct sctp_paramhdr *); 4255 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 4256 ph->param_length = htons(SCTP_BUF_LEN(oper)); 4257 ippp = (uint32_t *) (ph + 1); 4258 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24); 4259 } 4260 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; 4261 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED); 4262 } else { 4263 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 4264 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4265 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4266 } 4267 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 4268 sctp_stop_timers_for_shutdown(stcb); 4269 sctp_send_shutdown(stcb, 4270 stcb->asoc.primary_destination); 4271 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4272 stcb->sctp_ep, stcb, asoc->primary_destination); 4273 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4274 stcb->sctp_ep, stcb, asoc->primary_destination); 4275 } 4276 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4277 (asoc->stream_queue_cnt == 0)) { 4278 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4279 goto abort_out_now; 4280 } 4281 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4282 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); 4283 sctp_send_shutdown_ack(stcb, 4284 stcb->asoc.primary_destination); 4285 4286 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4287 stcb->sctp_ep, stcb, asoc->primary_destination); 4288 } 4289 } 4290 if (sctp_logging_level & SCTP_SACK_RWND_LOGGING_ENABLE) { 4291 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 4292 rwnd, 4293 stcb->asoc.peers_rwnd, 4294 stcb->asoc.total_flight, 4295 stcb->asoc.total_output_queue_size); 4296 } 4297 } 4298 4299 void 4300 sctp_handle_sack(struct mbuf *m, int offset, 4301 struct sctp_sack_chunk *ch, struct sctp_tcb *stcb, 4302 struct sctp_nets *net_from, int *abort_now, int sack_len, uint32_t rwnd) 4303 { 4304 struct sctp_association *asoc; 4305 struct sctp_sack *sack; 4306 struct sctp_tmit_chunk *tp1, *tp2; 4307 uint32_t cum_ack, last_tsn, biggest_tsn_acked, 
biggest_tsn_newly_acked, 4308 this_sack_lowest_newack; 4309 uint32_t sav_cum_ack; 4310 uint16_t num_seg, num_dup; 4311 uint16_t wake_him = 0; 4312 unsigned int sack_length; 4313 uint32_t send_s = 0; 4314 long j; 4315 int accum_moved = 0; 4316 int will_exit_fast_recovery = 0; 4317 uint32_t a_rwnd, old_rwnd; 4318 int win_probe_recovery = 0; 4319 int win_probe_recovered = 0; 4320 struct sctp_nets *net = NULL; 4321 int nonce_sum_flag, ecn_seg_sums = 0; 4322 int done_once; 4323 uint8_t reneged_all = 0; 4324 uint8_t cmt_dac_flag; 4325 4326 /* 4327 * we take any chance we can to service our queues since we cannot 4328 * get awoken when the socket is read from :< 4329 */ 4330 /* 4331 * Now perform the actual SACK handling: 1) Verify that it is not an 4332 * old sack, if so discard. 2) If there is nothing left in the send 4333 * queue (cum-ack is equal to last acked) then you have a duplicate 4334 * too, update any rwnd change and verify no timers are running. 4335 * then return. 3) Process any new consequtive data i.e. cum-ack 4336 * moved process these first and note that it moved. 4) Process any 4337 * sack blocks. 5) Drop any acked from the queue. 6) Check for any 4338 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left, 4339 * sync up flightsizes and things, stop all timers and also check 4340 * for shutdown_pending state. If so then go ahead and send off the 4341 * shutdown. If in shutdown recv, send off the shutdown-ack and 4342 * start that timer, Ret. 9) Strike any non-acked things and do FR 4343 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp 4344 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK 4345 * if in shutdown_recv state. 4346 */ 4347 SCTP_TCB_LOCK_ASSERT(stcb); 4348 sack = &ch->sack; 4349 /* CMT DAC algo */ 4350 this_sack_lowest_newack = 0; 4351 j = 0; 4352 sack_length = (unsigned int)sack_len; 4353 /* ECN Nonce */ 4354 SCTP_STAT_INCR(sctps_slowpath_sack); 4355 nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM; 4356 cum_ack = last_tsn = ntohl(sack->cum_tsn_ack); 4357 #ifdef SCTP_ASOCLOG_OF_TSNS 4358 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack; 4359 stcb->asoc.cumack_log_at++; 4360 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 4361 stcb->asoc.cumack_log_at = 0; 4362 } 4363 #endif 4364 num_seg = ntohs(sack->num_gap_ack_blks); 4365 a_rwnd = rwnd; 4366 4367 if (sctp_logging_level & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 4368 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack, 4369 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 4370 } 4371 /* CMT DAC algo */ 4372 cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC; 4373 num_dup = ntohs(sack->num_dup_tsns); 4374 4375 old_rwnd = stcb->asoc.peers_rwnd; 4376 if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) { 4377 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4378 stcb->asoc.overall_error_count, 4379 0, 4380 SCTP_FROM_SCTP_INDATA, 4381 __LINE__); 4382 } 4383 stcb->asoc.overall_error_count = 0; 4384 asoc = &stcb->asoc; 4385 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) { 4386 sctp_log_sack(asoc->last_acked_seq, 4387 cum_ack, 4388 0, 4389 num_seg, 4390 num_dup, 4391 SCTP_LOG_NEW_SACK); 4392 } 4393 if ((num_dup) && (sctp_logging_level & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) { 4394 int off_to_dup, iii; 4395 uint32_t *dupdata, dblock; 4396 4397 off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk); 4398 if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= sack_length) { 4399 dupdata = (uint32_t *) sctp_m_getptr(m, 
off_to_dup, 4400 sizeof(uint32_t), (uint8_t *) & dblock); 4401 off_to_dup += sizeof(uint32_t); 4402 if (dupdata) { 4403 for (iii = 0; iii < num_dup; iii++) { 4404 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); 4405 dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup, 4406 sizeof(uint32_t), (uint8_t *) & dblock); 4407 if (dupdata == NULL) 4408 break; 4409 off_to_dup += sizeof(uint32_t); 4410 } 4411 } 4412 } else { 4413 SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d sack_len:%d num gaps:%d\n", 4414 off_to_dup, num_dup, sack_length, num_seg); 4415 } 4416 } 4417 if (sctp_strict_sacks) { 4418 /* reality check */ 4419 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 4420 tp1 = TAILQ_LAST(&asoc->sent_queue, 4421 sctpchunk_listhead); 4422 send_s = tp1->rec.data.TSN_seq + 1; 4423 } else { 4424 send_s = asoc->sending_seq; 4425 } 4426 if (cum_ack == send_s || 4427 compare_with_wrap(cum_ack, send_s, MAX_TSN)) { 4428 #ifndef INVARIANTS 4429 struct mbuf *oper; 4430 4431 #endif 4432 #ifdef INVARIANTS 4433 hopeless_peer: 4434 panic("Impossible sack 1"); 4435 #else 4436 4437 4438 /* 4439 * no way, we have not even sent this TSN out yet. 4440 * Peer is hopelessly messed up with us. 4441 */ 4442 hopeless_peer: 4443 *abort_now = 1; 4444 /* XXX */ 4445 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4446 0, M_DONTWAIT, 1, MT_DATA); 4447 if (oper) { 4448 struct sctp_paramhdr *ph; 4449 uint32_t *ippp; 4450 4451 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 4452 sizeof(uint32_t); 4453 ph = mtod(oper, struct sctp_paramhdr *); 4454 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 4455 ph->param_length = htons(SCTP_BUF_LEN(oper)); 4456 ippp = (uint32_t *) (ph + 1); 4457 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25); 4458 } 4459 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 4460 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED); 4461 return; 4462 #endif 4463 } 4464 } 4465 /**********************/ 4466 /* 1) check the range */ 4467 /**********************/ 4468 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) { 4469 /* acking something behind */ 4470 return; 4471 } 4472 sav_cum_ack = asoc->last_acked_seq; 4473 4474 /* update the Rwnd of the peer */ 4475 if (TAILQ_EMPTY(&asoc->sent_queue) && 4476 TAILQ_EMPTY(&asoc->send_queue) && 4477 (asoc->stream_queue_cnt == 0) 4478 ) { 4479 /* nothing left on send/sent and strmq */ 4480 if (sctp_logging_level & SCTP_LOG_RWND_ENABLE) { 4481 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4482 asoc->peers_rwnd, 0, 0, a_rwnd); 4483 } 4484 asoc->peers_rwnd = a_rwnd; 4485 if (asoc->sent_queue_retran_cnt) { 4486 asoc->sent_queue_retran_cnt = 0; 4487 } 4488 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4489 /* SWS sender side engages */ 4490 asoc->peers_rwnd = 0; 4491 } 4492 /* stop any timers */ 4493 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4494 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4495 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); 4496 if (sctp_early_fr) { 4497 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 4498 SCTP_STAT_INCR(sctps_earlyfrstpidsck1); 4499 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 4500 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); 4501 } 4502 } 4503 net->partial_bytes_acked = 0; 4504 net->flight_size = 0; 4505 } 4506 asoc->total_flight = 0; 4507 asoc->total_flight_count = 0; 4508 return; 4509 } 4510 /* 4511 * We init netAckSz and netAckSz2 to 0. 
These are used to track 2 4512 * things. The total byte count acked is tracked in netAckSz AND 4513 * netAck2 is used to track the total bytes acked that are un- 4514 * amibguious and were never retransmitted. We track these on a per 4515 * destination address basis. 4516 */ 4517 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4518 net->prev_cwnd = net->cwnd; 4519 net->net_ack = 0; 4520 net->net_ack2 = 0; 4521 4522 /* 4523 * CMT: Reset CUC and Fast recovery algo variables before 4524 * SACK processing 4525 */ 4526 net->new_pseudo_cumack = 0; 4527 net->will_exit_fast_recovery = 0; 4528 } 4529 /* process the new consecutive TSN first */ 4530 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4531 while (tp1) { 4532 if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq, 4533 MAX_TSN) || 4534 last_tsn == tp1->rec.data.TSN_seq) { 4535 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 4536 /* 4537 * ECN Nonce: Add the nonce to the sender's 4538 * nonce sum 4539 */ 4540 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce; 4541 accum_moved = 1; 4542 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4543 /* 4544 * If it is less than ACKED, it is 4545 * now no-longer in flight. Higher 4546 * values may occur during marking 4547 */ 4548 if ((tp1->whoTo->dest_state & 4549 SCTP_ADDR_UNCONFIRMED) && 4550 (tp1->snd_count < 2)) { 4551 /* 4552 * If there was no retran 4553 * and the address is 4554 * un-confirmed and we sent 4555 * there and are now 4556 * sacked.. its confirmed, 4557 * mark it so. 4558 */ 4559 tp1->whoTo->dest_state &= 4560 ~SCTP_ADDR_UNCONFIRMED; 4561 } 4562 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4563 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) { 4564 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4565 tp1->whoTo->flight_size, 4566 tp1->book_size, 4567 (uintptr_t) tp1->whoTo, 4568 tp1->rec.data.TSN_seq); 4569 } 4570 sctp_flight_size_decrease(tp1); 4571 sctp_total_flight_decrease(stcb, tp1); 4572 } 4573 tp1->whoTo->net_ack += tp1->send_size; 4574 4575 /* CMT SFR and DAC algos */ 4576 this_sack_lowest_newack = tp1->rec.data.TSN_seq; 4577 tp1->whoTo->saw_newack = 1; 4578 4579 if (tp1->snd_count < 2) { 4580 /* 4581 * True non-retransmited 4582 * chunk 4583 */ 4584 tp1->whoTo->net_ack2 += 4585 tp1->send_size; 4586 4587 /* update RTO too? */ 4588 if (tp1->do_rtt) { 4589 tp1->whoTo->RTO = 4590 sctp_calculate_rto(stcb, 4591 asoc, tp1->whoTo, 4592 &tp1->sent_rcv_time, 4593 sctp_align_safe_nocopy); 4594 tp1->do_rtt = 0; 4595 } 4596 } 4597 /* 4598 * CMT: CUCv2 algorithm. From the 4599 * cumack'd TSNs, for each TSN being 4600 * acked for the first time, set the 4601 * following variables for the 4602 * corresp destination. 4603 * new_pseudo_cumack will trigger a 4604 * cwnd update. 4605 * find_(rtx_)pseudo_cumack will 4606 * trigger search for the next 4607 * expected (rtx-)pseudo-cumack. 
4608 */ 4609 tp1->whoTo->new_pseudo_cumack = 1; 4610 tp1->whoTo->find_pseudo_cumack = 1; 4611 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4612 4613 4614 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) { 4615 sctp_log_sack(asoc->last_acked_seq, 4616 cum_ack, 4617 tp1->rec.data.TSN_seq, 4618 0, 4619 0, 4620 SCTP_LOG_TSN_ACKED); 4621 } 4622 if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) { 4623 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 4624 } 4625 } 4626 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4627 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4628 #ifdef SCTP_AUDITING_ENABLED 4629 sctp_audit_log(0xB3, 4630 (asoc->sent_queue_retran_cnt & 0x000000ff)); 4631 #endif 4632 } 4633 if (tp1->rec.data.chunk_was_revoked) { 4634 /* deflate the cwnd */ 4635 tp1->whoTo->cwnd -= tp1->book_size; 4636 tp1->rec.data.chunk_was_revoked = 0; 4637 } 4638 tp1->sent = SCTP_DATAGRAM_ACKED; 4639 } 4640 } else { 4641 break; 4642 } 4643 tp1 = TAILQ_NEXT(tp1, sctp_next); 4644 } 4645 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; 4646 /* always set this up to cum-ack */ 4647 asoc->this_sack_highest_gap = last_tsn; 4648 4649 /* Move offset up to point to gaps/dups */ 4650 offset += sizeof(struct sctp_sack_chunk); 4651 if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) { 4652 4653 /* skip corrupt segments */ 4654 goto skip_segments; 4655 } 4656 if (num_seg > 0) { 4657 4658 /* 4659 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has 4660 * to be greater than the cumack. Also reset saw_newack to 0 4661 * for all dests. 4662 */ 4663 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4664 net->saw_newack = 0; 4665 net->this_sack_highest_newack = last_tsn; 4666 } 4667 4668 /* 4669 * thisSackHighestGap will increase while handling NEW 4670 * segments this_sack_highest_newack will increase while 4671 * handling NEWLY ACKED chunks. this_sack_lowest_newack is 4672 * used for CMT DAC algo. saw_newack will also change. 4673 */ 4674 sctp_handle_segments(m, &offset, stcb, asoc, ch, last_tsn, 4675 &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack, 4676 num_seg, &ecn_seg_sums); 4677 4678 if (sctp_strict_sacks) { 4679 /* 4680 * validate the biggest_tsn_acked in the gap acks if 4681 * strict adherence is wanted. 4682 */ 4683 if ((biggest_tsn_acked == send_s) || 4684 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) { 4685 /* 4686 * peer is either confused or we are under 4687 * attack. We must abort. 
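 *
 * For instance (hypothetical numbers): if the highest TSN we
 * have ever assigned is 0x1000, then send_s is 0x1001, and a
 * gap-ack block claiming 0x1005 acknowledges data that was
 * never sent, so the association is torn down through the
 * hopeless_peer path.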
4688 */ 4689 goto hopeless_peer; 4690 } 4691 } 4692 } 4693 skip_segments: 4694 /*******************************************/ 4695 /* cancel ALL T3-send timer if accum moved */ 4696 /*******************************************/ 4697 if (sctp_cmt_on_off) { 4698 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4699 if (net->new_pseudo_cumack) 4700 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4701 stcb, net, 4702 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27); 4703 4704 } 4705 } else { 4706 if (accum_moved) { 4707 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4708 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4709 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28); 4710 } 4711 } 4712 } 4713 /********************************************/ 4714 /* drop the acked chunks from the sendqueue */ 4715 /********************************************/ 4716 asoc->last_acked_seq = cum_ack; 4717 4718 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4719 if (tp1 == NULL) 4720 goto done_with_it; 4721 do { 4722 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack, 4723 MAX_TSN)) { 4724 break; 4725 } 4726 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 4727 /* no more sent on list */ 4728 printf("Warning, tp1->sent == %d and its now acked?\n", 4729 tp1->sent); 4730 } 4731 tp2 = TAILQ_NEXT(tp1, sctp_next); 4732 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4733 if (tp1->pr_sctp_on) { 4734 if (asoc->pr_sctp_cnt != 0) 4735 asoc->pr_sctp_cnt--; 4736 } 4737 if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) && 4738 (asoc->total_flight > 0)) { 4739 #ifdef INVARIANTS 4740 panic("Warning flight size is postive and should be 0"); 4741 #else 4742 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n", 4743 asoc->total_flight); 4744 #endif 4745 asoc->total_flight = 0; 4746 } 4747 if (tp1->data) { 4748 sctp_free_bufspace(stcb, asoc, tp1, 1); 4749 sctp_m_freem(tp1->data); 4750 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 4751 asoc->sent_queue_cnt_removeable--; 4752 } 4753 } 4754 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) { 4755 sctp_log_sack(asoc->last_acked_seq, 4756 cum_ack, 4757 tp1->rec.data.TSN_seq, 4758 0, 4759 0, 4760 SCTP_LOG_FREE_SENT); 4761 } 4762 tp1->data = NULL; 4763 asoc->sent_queue_cnt--; 4764 sctp_free_a_chunk(stcb, tp1); 4765 wake_him++; 4766 tp1 = tp2; 4767 } while (tp1 != NULL); 4768 4769 done_with_it: 4770 if ((wake_him) && (stcb->sctp_socket)) { 4771 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4772 struct socket *so; 4773 4774 #endif 4775 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4776 if (sctp_logging_level & SCTP_WAKE_LOGGING_ENABLE) { 4777 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK); 4778 } 4779 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4780 so = SCTP_INP_SO(stcb->sctp_ep); 4781 atomic_add_int(&stcb->asoc.refcnt, 1); 4782 SCTP_TCB_UNLOCK(stcb); 4783 SCTP_SOCKET_LOCK(so, 1); 4784 SCTP_TCB_LOCK(stcb); 4785 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4786 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4787 /* assoc was freed while we were unlocked */ 4788 SCTP_SOCKET_UNLOCK(so, 1); 4789 return; 4790 } 4791 #endif 4792 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4793 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4794 SCTP_SOCKET_UNLOCK(so, 1); 4795 #endif 4796 } else { 4797 if (sctp_logging_level & SCTP_WAKE_LOGGING_ENABLE) { 4798 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK); 4799 } 4800 } 4801 4802 if (asoc->fast_retran_loss_recovery && accum_moved) { 4803 if (compare_with_wrap(asoc->last_acked_seq, 4804 
asoc->fast_recovery_tsn, MAX_TSN) || 4805 asoc->last_acked_seq == asoc->fast_recovery_tsn) { 4806 /* Setup so we will exit RFC2582 fast recovery */ 4807 will_exit_fast_recovery = 1; 4808 } 4809 } 4810 /* 4811 * Check for revoked fragments: 4812 * 4813 * if Previous sack - Had no frags then we can't have any revoked if 4814 * Previous sack - Had frag's then - If we now have frags aka 4815 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked 4816 * some of them. else - The peer revoked all ACKED fragments, since 4817 * we had some before and now we have NONE. 4818 */ 4819 4820 if (num_seg) 4821 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked); 4822 else if (asoc->saw_sack_with_frags) { 4823 int cnt_revoked = 0; 4824 4825 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4826 if (tp1 != NULL) { 4827 /* Peer revoked all dg's marked or acked */ 4828 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4829 if ((tp1->sent > SCTP_DATAGRAM_RESEND) && 4830 (tp1->sent < SCTP_FORWARD_TSN_SKIP)) { 4831 tp1->sent = SCTP_DATAGRAM_SENT; 4832 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) { 4833 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 4834 tp1->whoTo->flight_size, 4835 tp1->book_size, 4836 (uintptr_t) tp1->whoTo, 4837 tp1->rec.data.TSN_seq); 4838 } 4839 sctp_flight_size_increase(tp1); 4840 sctp_total_flight_increase(stcb, tp1); 4841 tp1->rec.data.chunk_was_revoked = 1; 4842 /* 4843 * To ensure that this increase in 4844 * flightsize, which is artificial, 4845 * does not throttle the sender, we 4846 * also increase the cwnd 4847 * artificially. 4848 */ 4849 tp1->whoTo->cwnd += tp1->book_size; 4850 cnt_revoked++; 4851 } 4852 } 4853 if (cnt_revoked) { 4854 reneged_all = 1; 4855 } 4856 } 4857 asoc->saw_sack_with_frags = 0; 4858 } 4859 if (num_seg) 4860 asoc->saw_sack_with_frags = 1; 4861 else 4862 asoc->saw_sack_with_frags = 0; 4863 4864 /* JRS - Use the congestion control given in the CC module */ 4865 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery); 4866 4867 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4868 /* nothing left in-flight */ 4869 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4870 /* stop all timers */ 4871 if (sctp_early_fr) { 4872 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 4873 SCTP_STAT_INCR(sctps_earlyfrstpidsck4); 4874 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 4875 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29); 4876 } 4877 } 4878 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4879 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30); 4880 net->flight_size = 0; 4881 net->partial_bytes_acked = 0; 4882 } 4883 asoc->total_flight = 0; 4884 asoc->total_flight_count = 0; 4885 } 4886 /**********************************/ 4887 /* Now what about shutdown issues */ 4888 /**********************************/ 4889 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4890 /* nothing left on sendqueue.. 
consider done */ 4891 if (sctp_logging_level & SCTP_LOG_RWND_ENABLE) { 4892 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4893 asoc->peers_rwnd, 0, 0, a_rwnd); 4894 } 4895 asoc->peers_rwnd = a_rwnd; 4896 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4897 /* SWS sender side engages */ 4898 asoc->peers_rwnd = 0; 4899 } 4900 /* clean up */ 4901 if ((asoc->stream_queue_cnt == 1) && 4902 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4903 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 4904 (asoc->locked_on_sending) 4905 ) { 4906 struct sctp_stream_queue_pending *sp; 4907 4908 /* 4909 * I may be in a state where we got all across.. but 4910 * cannot write more due to a shutdown... we abort 4911 * since the user did not indicate EOR in this case. 4912 */ 4913 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), 4914 sctp_streamhead); 4915 if ((sp) && (sp->length == 0)) { 4916 asoc->locked_on_sending = NULL; 4917 if (sp->msg_is_complete) { 4918 asoc->stream_queue_cnt--; 4919 } else { 4920 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4921 asoc->stream_queue_cnt--; 4922 } 4923 } 4924 } 4925 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4926 (asoc->stream_queue_cnt == 0)) { 4927 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4928 /* Need to abort here */ 4929 struct mbuf *oper; 4930 4931 abort_out_now: 4932 *abort_now = 1; 4933 /* XXX */ 4934 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4935 0, M_DONTWAIT, 1, MT_DATA); 4936 if (oper) { 4937 struct sctp_paramhdr *ph; 4938 uint32_t *ippp; 4939 4940 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 4941 sizeof(uint32_t); 4942 ph = mtod(oper, struct sctp_paramhdr *); 4943 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 4944 ph->param_length = htons(SCTP_BUF_LEN(oper)); 4945 ippp = (uint32_t *) (ph + 1); 4946 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31); 4947 } 4948 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31; 4949 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED); 4950 return; 4951 } else { 4952 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 4953 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4954 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4955 } 4956 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 4957 sctp_stop_timers_for_shutdown(stcb); 4958 sctp_send_shutdown(stcb, 4959 stcb->asoc.primary_destination); 4960 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4961 stcb->sctp_ep, stcb, asoc->primary_destination); 4962 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4963 stcb->sctp_ep, stcb, asoc->primary_destination); 4964 } 4965 return; 4966 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4967 (asoc->stream_queue_cnt == 0)) { 4968 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4969 goto abort_out_now; 4970 } 4971 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4972 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); 4973 sctp_send_shutdown_ack(stcb, 4974 stcb->asoc.primary_destination); 4975 4976 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4977 stcb->sctp_ep, stcb, asoc->primary_destination); 4978 return; 4979 } 4980 } 4981 /* 4982 * Now here we are going to recycle net_ack for a different use... 4983 * HEADS UP. 4984 */ 4985 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4986 net->net_ack = 0; 4987 } 4988 4989 /* 4990 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking 4991 * to be done. 
Setting this_sack_lowest_newack to the cum_ack will 4992 * automatically ensure that. 4993 */ 4994 if (sctp_cmt_on_off && sctp_cmt_use_dac && (cmt_dac_flag == 0)) { 4995 this_sack_lowest_newack = cum_ack; 4996 } 4997 if (num_seg > 0) { 4998 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, 4999 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved); 5000 } 5001 /*********************************************/ 5002 /* Here we perform PR-SCTP procedures */ 5003 /* (section 4.2) */ 5004 /*********************************************/ 5005 /* C1. update advancedPeerAckPoint */ 5006 if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) { 5007 asoc->advanced_peer_ack_point = cum_ack; 5008 } 5009 /* C2. try to further move advancedPeerAckPoint ahead */ 5010 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) { 5011 struct sctp_tmit_chunk *lchk; 5012 5013 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 5014 /* C3. See if we need to send a Fwd-TSN */ 5015 if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack, 5016 MAX_TSN)) { 5017 /* 5018 * ISSUE with ECN, see FWD-TSN processing for notes 5019 * on issues that will occur when the ECN NONCE 5020 * stuff is put into SCTP for cross checking. 5021 */ 5022 send_forward_tsn(stcb, asoc); 5023 5024 /* 5025 * ECN Nonce: Disable Nonce Sum check when FWD TSN 5026 * is sent and store resync tsn 5027 */ 5028 asoc->nonce_sum_check = 0; 5029 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point; 5030 if (lchk) { 5031 /* Assure a timer is up */ 5032 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5033 stcb->sctp_ep, stcb, lchk->whoTo); 5034 } 5035 } 5036 } 5037 /* JRS - Use the congestion control given in the CC module */ 5038 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc); 5039 5040 /****************************************************************** 5041 * Here we do the stuff with ECN Nonce checking. 5042 * We basically check to see if the nonce sum flag was incorrect 5043 * or if resynchronization needs to be done. Also if we catch a 5044 * misbehaving receiver we give him the kick. 5045 ******************************************************************/ 5046 5047 if (asoc->ecn_nonce_allowed) { 5048 if (asoc->nonce_sum_check) { 5049 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) { 5050 if (asoc->nonce_wait_for_ecne == 0) { 5051 struct sctp_tmit_chunk *lchk; 5052 5053 lchk = TAILQ_FIRST(&asoc->send_queue); 5054 asoc->nonce_wait_for_ecne = 1; 5055 if (lchk) { 5056 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq; 5057 } else { 5058 asoc->nonce_wait_tsn = asoc->sending_seq; 5059 } 5060 } else { 5061 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) || 5062 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) { 5063 /* 5064 * Misbehaving peer. We need 5065 * to react to this guy 5066 */ 5067 asoc->ecn_allowed = 0; 5068 asoc->ecn_nonce_allowed = 0; 5069 } 5070 } 5071 } 5072 } else { 5073 /* See if Resynchronization Possible */ 5074 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) { 5075 asoc->nonce_sum_check = 1; 5076 /* 5077 * now we must calculate what the base is. 5078 * We do this based on two things, we know 5079 * the total's for all the segments 5080 * gap-acked in the SACK, its stored in 5081 * ecn_seg_sums. We also know the SACK's 5082 * nonce sum, its in nonce_sum_flag. 
So we 5083 * can build a truth table to back-calculate 5084 * the new value of 5085 * asoc->nonce_sum_expect_base: 5086 *
5087 * SACK-flag-Value   Seg-Sums   Base
5088 *        0             0        0
     *        1             0        1
     *        0             1        1
5089 *        1             1        0
5090 */
5091 asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM; 5092 } 5093 } 5094 }
5095 /* Now are we exiting loss recovery? */ 5096 if (will_exit_fast_recovery) { 5097 /* Ok, we must exit fast recovery */ 5098 asoc->fast_retran_loss_recovery = 0; 5099 }
5100 if ((asoc->sat_t3_loss_recovery) && 5101 ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn, 5102 MAX_TSN) || 5103 (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) { 5104 /* end satellite t3 loss recovery */ 5105 asoc->sat_t3_loss_recovery = 0; 5106 }
5107 /* 5108 * CMT Fast recovery 5109 */ 5110 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5111 if (net->will_exit_fast_recovery) { 5112 /* Ok, we must exit fast recovery */ 5113 net->fast_retran_loss_recovery = 0; 5114 } 5115 } 5116
5117 /* Adjust and set the new rwnd value */ 5118 if (sctp_logging_level & SCTP_LOG_RWND_ENABLE) { 5119 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 5120 asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * sctp_peer_chunk_oh), a_rwnd); 5121 }
5122 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd, 5123 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
5124 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 5125 /* SWS sender side engages */ 5126 asoc->peers_rwnd = 0; 5127 }
5128 if (asoc->peers_rwnd > old_rwnd) { 5129 win_probe_recovery = 1; 5130 }
5131 /* 5132 * Now we must setup so we have a timer up for anyone with 5133 * outstanding data. 5134 */ 5135 done_once = 0; 5136 again: 5137 j = 0;
5138 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5139 if (win_probe_recovery && (net->window_probe)) { 5140 net->window_probe = 0; 5141 win_probe_recovered = 1;
5142 /*- 5143 * Find first chunk that was used with 5144 * window probe and clear the event. Put 5145 * it back into the send queue as if it had 5146 * not been sent.
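 *
 * Informal sketch: a chunk that went out as a probe while
 * peers_rwnd was clamped to zero is pulled off the sent_queue,
 * marked SCTP_DATAGRAM_UNSENT again and re-queued at the head
 * of the send_queue, so it is transmitted normally now that the
 * peer has reopened its window.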
5147 */ 5148 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5149 if (tp1->window_probe) { 5150 sctp_window_probe_recovery(stcb, asoc, net, tp1); 5151 break; 5152 } 5153 } 5154 } 5155 if (net->flight_size) { 5156 j++; 5157 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5158 stcb->sctp_ep, stcb, net); 5159 } else { 5160 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5161 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 5162 stcb, net, 5163 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22); 5164 } 5165 if (sctp_early_fr) { 5166 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 5167 SCTP_STAT_INCR(sctps_earlyfrstpidsck4); 5168 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 5169 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23); 5170 } 5171 } 5172 } 5173 } 5174 if ((j == 0) && 5175 (!TAILQ_EMPTY(&asoc->sent_queue)) && 5176 (asoc->sent_queue_retran_cnt == 0) && 5177 (win_probe_recovered == 0) && 5178 (done_once == 0)) { 5179 /* huh, this should not happen */ 5180 sctp_fs_audit(asoc); 5181 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5182 net->flight_size = 0; 5183 } 5184 asoc->total_flight = 0; 5185 asoc->total_flight_count = 0; 5186 asoc->sent_queue_retran_cnt = 0; 5187 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5188 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5189 sctp_flight_size_increase(tp1); 5190 sctp_total_flight_increase(stcb, tp1); 5191 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 5192 asoc->sent_queue_retran_cnt++; 5193 } 5194 } 5195 done_once = 1; 5196 goto again; 5197 } 5198 if (sctp_logging_level & SCTP_SACK_RWND_LOGGING_ENABLE) { 5199 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 5200 a_rwnd, 5201 stcb->asoc.peers_rwnd, 5202 stcb->asoc.total_flight, 5203 stcb->asoc.total_output_queue_size); 5204 } 5205 } 5206 5207 void 5208 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, 5209 struct sctp_nets *netp, int *abort_flag) 5210 { 5211 /* Copy cum-ack */ 5212 uint32_t cum_ack, a_rwnd; 5213 5214 cum_ack = ntohl(cp->cumulative_tsn_ack); 5215 /* Arrange so a_rwnd does NOT change */ 5216 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight; 5217 5218 /* Now call the express sack handling */ 5219 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag); 5220 } 5221 5222 static void 5223 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, 5224 struct sctp_stream_in *strmin) 5225 { 5226 struct sctp_queued_to_read *ctl, *nctl; 5227 struct sctp_association *asoc; 5228 int tt; 5229 5230 asoc = &stcb->asoc; 5231 tt = strmin->last_sequence_delivered; 5232 /* 5233 * First deliver anything prior to and including the stream no that 5234 * came in 5235 */ 5236 ctl = TAILQ_FIRST(&strmin->inqueue); 5237 while (ctl) { 5238 nctl = TAILQ_NEXT(ctl, next); 5239 if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) || 5240 (tt == ctl->sinfo_ssn)) { 5241 /* this is deliverable now */ 5242 TAILQ_REMOVE(&strmin->inqueue, ctl, next); 5243 /* subtract pending on streams */ 5244 asoc->size_on_all_streams -= ctl->length; 5245 sctp_ucount_decr(asoc->cnt_on_all_streams); 5246 /* deliver it to at least the delivery-q */ 5247 if (stcb->sctp_socket) { 5248 sctp_add_to_readq(stcb->sctp_ep, stcb, 5249 ctl, 5250 &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED); 5251 } 5252 } else { 5253 /* no more delivery now. */ 5254 break; 5255 } 5256 ctl = nctl; 5257 } 5258 /* 5259 * now we must deliver things in queue the normal way if any are 5260 * now ready. 
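 *
 * Small example (invented SSNs): if last_sequence_delivered is 7
 * and the stream's inqueue holds SSNs 8, 9 and 11, then 8 and 9
 * are handed to the read queue here and 11 keeps waiting until
 * SSN 10 arrives or is skipped by a later FORWARD-TSN.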
5261 */ 5262 tt = strmin->last_sequence_delivered + 1; 5263 ctl = TAILQ_FIRST(&strmin->inqueue); 5264 while (ctl) { 5265 nctl = TAILQ_NEXT(ctl, next); 5266 if (tt == ctl->sinfo_ssn) { 5267 /* this is deliverable now */ 5268 TAILQ_REMOVE(&strmin->inqueue, ctl, next); 5269 /* subtract pending on streams */ 5270 asoc->size_on_all_streams -= ctl->length; 5271 sctp_ucount_decr(asoc->cnt_on_all_streams); 5272 /* deliver it to at least the delivery-q */ 5273 strmin->last_sequence_delivered = ctl->sinfo_ssn; 5274 if (stcb->sctp_socket) { 5275 sctp_add_to_readq(stcb->sctp_ep, stcb, 5276 ctl, 5277 &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED); 5278 } 5279 tt = strmin->last_sequence_delivered + 1; 5280 } else { 5281 break; 5282 } 5283 ctl = nctl; 5284 } 5285 } 5286 5287 void 5288 sctp_handle_forward_tsn(struct sctp_tcb *stcb, 5289 struct sctp_forward_tsn_chunk *fwd, int *abort_flag, struct mbuf *m, int offset) 5290 { 5291 /* 5292 * ISSUES that MUST be fixed for ECN! When we are the sender of the 5293 * forward TSN, when the SACK comes back that acknowledges the 5294 * FWD-TSN we must reset the NONCE sum to match correctly. This will 5295 * get quite tricky since we may have sent more data interveneing 5296 * and must carefully account for what the SACK says on the nonce 5297 * and any gaps that are reported. This work will NOT be done here, 5298 * but I note it here since it is really related to PR-SCTP and 5299 * FWD-TSN's 5300 */ 5301 5302 /* The pr-sctp fwd tsn */ 5303 /* 5304 * here we will perform all the data receiver side steps for 5305 * processing FwdTSN, as required in by pr-sctp draft: 5306 * 5307 * Assume we get FwdTSN(x): 5308 * 5309 * 1) update local cumTSN to x 2) try to further advance cumTSN to x + 5310 * others we have 3) examine and update re-ordering queue on 5311 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to 5312 * report where we are. 5313 */ 5314 struct sctp_association *asoc; 5315 uint32_t new_cum_tsn, gap; 5316 unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size; 5317 struct sctp_stream_in *strm; 5318 struct sctp_tmit_chunk *chk, *at; 5319 5320 cumack_set_flag = 0; 5321 asoc = &stcb->asoc; 5322 cnt_gone = 0; 5323 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) { 5324 SCTPDBG(SCTP_DEBUG_INDATA1, 5325 "Bad size too small/big fwd-tsn\n"); 5326 return; 5327 } 5328 m_size = (stcb->asoc.mapping_array_size << 3); 5329 /*************************************************************/ 5330 /* 1. Here we update local cumTSN and shift the bitmap array */ 5331 /*************************************************************/ 5332 new_cum_tsn = ntohl(fwd->new_cumulative_tsn); 5333 5334 if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) || 5335 asoc->cumulative_tsn == new_cum_tsn) { 5336 /* Already got there ... 
*/ 5337 return; 5338 } 5339 if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map, 5340 MAX_TSN)) { 5341 asoc->highest_tsn_inside_map = new_cum_tsn; 5342 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 5343 sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 5344 } 5345 } 5346 /* 5347 * now we know the new TSN is more advanced, let's find the actual 5348 * gap 5349 */ 5350 if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn, 5351 MAX_TSN)) || 5352 (new_cum_tsn == asoc->mapping_array_base_tsn)) { 5353 gap = new_cum_tsn - asoc->mapping_array_base_tsn; 5354 } else { 5355 /* try to prevent underflow here */ 5356 gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1; 5357 } 5358 5359 if (gap >= m_size) { 5360 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 5361 sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 5362 } 5363 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) { 5364 struct mbuf *oper; 5365 5366 /* 5367 * out of range (of single byte chunks in the rwnd I 5368 * give out). This must be an attacker. 5369 */ 5370 *abort_flag = 1; 5371 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 5372 0, M_DONTWAIT, 1, MT_DATA); 5373 if (oper) { 5374 struct sctp_paramhdr *ph; 5375 uint32_t *ippp; 5376 5377 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 5378 (sizeof(uint32_t) * 3); 5379 ph = mtod(oper, struct sctp_paramhdr *); 5380 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 5381 ph->param_length = htons(SCTP_BUF_LEN(oper)); 5382 ippp = (uint32_t *) (ph + 1); 5383 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33); 5384 ippp++; 5385 *ippp = asoc->highest_tsn_inside_map; 5386 ippp++; 5387 *ippp = new_cum_tsn; 5388 } 5389 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33; 5390 sctp_abort_an_association(stcb->sctp_ep, stcb, 5391 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED); 5392 return; 5393 } 5394 SCTP_STAT_INCR(sctps_fwdtsn_map_over); 5395 slide_out: 5396 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); 5397 cumack_set_flag = 1; 5398 asoc->mapping_array_base_tsn = new_cum_tsn + 1; 5399 asoc->cumulative_tsn = asoc->highest_tsn_inside_map = new_cum_tsn; 5400 5401 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 5402 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 5403 } 5404 asoc->last_echo_tsn = asoc->highest_tsn_inside_map; 5405 } else { 5406 SCTP_TCB_LOCK_ASSERT(stcb); 5407 if ((compare_with_wrap(((uint32_t) asoc->cumulative_tsn + gap), asoc->highest_tsn_inside_map, MAX_TSN)) || 5408 (((uint32_t) asoc->cumulative_tsn + gap) == asoc->highest_tsn_inside_map)) { 5409 goto slide_out; 5410 } else { 5411 for (i = 0; i <= gap; i++) { 5412 SCTP_SET_TSN_PRESENT(asoc->mapping_array, i); 5413 } 5414 } 5415 /* 5416 * Now after marking all, slide thing forward but no sack 5417 * please. 5418 */ 5419 sctp_sack_check(stcb, 0, 0, abort_flag); 5420 if (*abort_flag) 5421 return; 5422 } 5423 5424 /*************************************************************/ 5425 /* 2. 
	/*************************************************************/
	/* 2. Clean up the re-assembly queue                         */
	/*************************************************************/
	/*
	 * First service it if pd-api is up, just in case we can progress it
	 * forward.
	 */
	if (asoc->fragmented_delivery_inprogress) {
		sctp_service_reassembly(stcb, asoc);
	}
	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* For each one on here see if we need to toss it */
		/*
		 * For now, large messages held on the reasmqueue that are
		 * complete will be tossed too. We could in theory do more
		 * work to spin through and stop after dumping one msg, aka
		 * seeing the start of a new msg at the head, and call the
		 * delivery function... to see if it can be delivered... But
		 * for now we just dump everything on the queue.
		 */
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		while (chk) {
			at = TAILQ_NEXT(chk, sctp_next);
			if (compare_with_wrap(asoc->cumulative_tsn,
			    chk->rec.data.TSN_seq, MAX_TSN) ||
			    asoc->cumulative_tsn == chk->rec.data.TSN_seq) {
				/* It needs to be tossed */
				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
				if (compare_with_wrap(chk->rec.data.TSN_seq,
				    asoc->tsn_last_delivered, MAX_TSN)) {
					asoc->tsn_last_delivered =
					    chk->rec.data.TSN_seq;
					asoc->str_of_pdapi =
					    chk->rec.data.stream_number;
					asoc->ssn_of_pdapi =
					    chk->rec.data.stream_seq;
					asoc->fragment_flags =
					    chk->rec.data.rcv_flags;
				}
				asoc->size_on_reasm_queue -= chk->send_size;
				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
				cnt_gone++;

				/* Clear up any stream problem */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
				    SCTP_DATA_UNORDERED &&
				    (compare_with_wrap(chk->rec.data.stream_seq,
				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
				    MAX_SEQ))) {
					/*
					 * We must advance this stream's
					 * sequence number if the chunk
					 * being skipped is not unordered.
					 * There is a chance that if the
					 * peer does not include the last
					 * fragment in its FWD-TSN we WILL
					 * have a problem here, since you
					 * would have a partial chunk in
					 * queue that may not be
					 * deliverable. Also, if a partial
					 * delivery API has started, the
					 * user may get a partial chunk with
					 * the next read returning a new
					 * chunk... really ugly, but I see
					 * no way around it! Maybe a
					 * notify??
					 */
					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
					    chk->rec.data.stream_seq;
				}
				if (chk->data) {
					sctp_m_freem(chk->data);
					chk->data = NULL;
				}
				sctp_free_a_chunk(stcb, chk);
			} else {
				/*
				 * Ok, we have gone beyond the end of the
				 * fwd-tsn's mark. Some checks...
				 */
				if ((asoc->fragmented_delivery_inprogress) &&
				    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
					uint32_t str_seq;

					/*
					 * Special case: the PD-API is up
					 * and what we fwd-tsn'd over
					 * includes one that had the
					 * LAST_FRAG. We no longer need to
					 * do the PD-API.
					 */
					asoc->fragmented_delivery_inprogress = 0;

					str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi;
					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
					    stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq, SCTP_SO_NOT_LOCKED);

				}
				break;
			}
			chk = at;
		}
	}
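	/*
	 * Illustrative note on the notification below: the aborted partial
	 * delivery is identified by packing the stream number into the
	 * upper 16 bits of str_seq and the stream sequence number into the
	 * lower 16 bits, e.g. stream 3, SSN 7 gives str_seq = 0x00030007
	 * before it is handed to sctp_ulp_notify().
	 */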
	if (asoc->fragmented_delivery_inprogress) {
		/*
		 * Ok, we removed cnt_gone chunks from the PD-API queue that
		 * were being delivered, so now we must turn off the flag.
		 */
		uint32_t str_seq;

		str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi;
		sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
		    stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq, SCTP_SO_NOT_LOCKED);
		asoc->fragmented_delivery_inprogress = 0;
	}
	/*************************************************************/
	/* 3. Update the PR-stream re-ordering queues                */
	/*************************************************************/
	fwd_sz -= sizeof(*fwd);
	if (m && fwd_sz) {
		/* New method. */
		unsigned int num_str;
		struct sctp_strseq *stseq, strseqbuf;

		offset += sizeof(*fwd);

		num_str = fwd_sz / sizeof(struct sctp_strseq);
		for (i = 0; i < num_str; i++) {
			uint16_t st;

			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
			    sizeof(struct sctp_strseq),
			    (uint8_t *) & strseqbuf);
			offset += sizeof(struct sctp_strseq);
			if (stseq == NULL) {
				break;
			}
			/* Convert */
			st = ntohs(stseq->stream);
			stseq->stream = st;
			st = ntohs(stseq->sequence);
			stseq->sequence = st;
			/* now process */
			if (stseq->stream >= asoc->streamincnt) {
				/* screwed up streams, stop! */
				break;
			}
			strm = &asoc->strmin[stseq->stream];
			if (compare_with_wrap(stseq->sequence,
			    strm->last_sequence_delivered, MAX_SEQ)) {
				/* Update the sequence number */
				strm->last_sequence_delivered =
				    stseq->sequence;
			}
			/* now kick the stream the new way */
			sctp_kick_prsctp_reorder_queue(stcb, strm);
		}
	}
	if (TAILQ_FIRST(&asoc->reasmqueue)) {
		/* now let's kick out and check for more fragmented delivery */
		sctp_deliver_reasm_check(stcb, &stcb->asoc);
	}
}
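/*
 * For reference: the FORWARD TSN chunk handled above follows the layout
 * defined in RFC 3758, section 3.2.  The routine reads the chunk header and
 * the New Cumulative TSN up front, and step 3 then walks the trailing
 * stream/sequence pairs with sctp_m_getptr(), one struct sctp_strseq at a
 * time:
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |   Type = 192  |  Flags = 0x00 |        Length = Variable      |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      New Cumulative TSN                       |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |         Stream-1              |       Stream Sequence-1       |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * \                                                               \
 * /                                                               /
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |         Stream-N              |       Stream Sequence-N       |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */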