/*-
 * Copyright (c) 2001-2007, Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>


/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it) for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */

__inline void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    uint32_t calc, calc_w_oh;

    /*
     * This is really set wrong with respect to a 1-to-many socket, since
     * sb_cc is the count that everyone has put up. When we rewrite
     * sctp_soreceive we will fix this so that ONLY this association's
     * data is taken into account.
     */
    if (stcb->sctp_socket == NULL)
        return;

    if (stcb->asoc.sb_cc == 0 &&
        asoc->size_on_reasm_queue == 0 &&
        asoc->size_on_all_streams == 0) {
        /* Full rwnd granted */
        asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
            SCTP_MINIMAL_RWND);
        return;
    }
    /* get actual space */
    calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

    /*
     * take out what has NOT been put on the socket queue and that we
     * still hold for putting up.
     */
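    /*
     * For illustration only (hypothetical numbers): with 64000 bytes of
     * space left in so_rcv, 9000 bytes held on the reassembly queue and
     * 3000 bytes held on the stream queues, the two subtractions below
     * leave an advertised rwnd of 64000 - 9000 - 3000 = 52000 bytes.
     */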
    calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
    calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);

    if (calc == 0) {
        /* out of space */
        asoc->my_rwnd = 0;
        return;
    }
    /* what is the overhead of all these rwnds? */
    calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
    asoc->my_rwnd = calc;
    if (calc_w_oh == 0) {
        /*
         * If our overhead is greater than the advertised rwnd, we
         * clamp the rwnd to 1. This lets us still accept inbound
         * segments, but hopefully will shut the sender down when he
         * finally gets the message.
         */
        asoc->my_rwnd = 1;
    } else {
        /* SWS threshold */
        if (asoc->my_rwnd &&
            (asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
            /* SWS engaged, tell peer none left */
            asoc->my_rwnd = 1;
        }
    }
}

/* Calculate what the rwnd would be */

__inline uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    uint32_t calc = 0, calc_w_oh;

    /*
     * This is really set wrong with respect to a 1-to-many socket, since
     * sb_cc is the count that everyone has put up. When we rewrite
     * sctp_soreceive we will fix this so that ONLY this association's
     * data is taken into account.
     */
    if (stcb->sctp_socket == NULL)
        return (calc);

    if (stcb->asoc.sb_cc == 0 &&
        asoc->size_on_reasm_queue == 0 &&
        asoc->size_on_all_streams == 0) {
        /* Full rwnd granted */
        calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
            SCTP_MINIMAL_RWND);
        return (calc);
    }
    /* get actual space */
    calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

    /*
     * take out what has NOT been put on the socket queue and that we
     * still hold for putting up.
     */
    calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
    calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);

    if (calc == 0) {
        /* out of space */
        return (calc);
    }
    /* what is the overhead of all these rwnds? */
    calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
    if (calc_w_oh == 0) {
        /*
         * If our overhead is greater than the advertised rwnd, we
         * clamp the rwnd to 1. This lets us still accept inbound
         * segments, but hopefully will shut the sender down when he
         * finally gets the message.
         */
        calc = 1;
    } else {
        /* SWS threshold */
        if (calc &&
            (calc < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
            /* SWS engaged, tell peer none left */
            calc = 1;
        }
    }
    return (calc);
}
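
/*
 * A minimal sketch of the wrap-aware comparison used throughout this file:
 * compare_with_wrap(a, b, M) is true when "a" is logically newer than "b"
 * in a sequence space that wraps at M (RFC 1982 style serial arithmetic).
 * For example, with 16-bit stream sequence numbers:
 *
 *	compare_with_wrap(5, 3, MAX_SEQ)     -> true  (5 is newer)
 *	compare_with_wrap(3, 5, MAX_SEQ)     -> false
 *	compare_with_wrap(2, 65534, MAX_SEQ) -> true  (2 wrapped past 65534)
 */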

/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
    struct sctp_queued_to_read *read_queue_e = NULL;

    sctp_alloc_a_readq(stcb, read_queue_e);
    if (read_queue_e == NULL) {
        goto failed_build;
    }
    read_queue_e->sinfo_stream = stream_no;
    read_queue_e->sinfo_ssn = stream_seq;
    read_queue_e->sinfo_flags = (flags << 8);
    read_queue_e->sinfo_ppid = ppid;
    read_queue_e->sinfo_context = stcb->asoc.context;
    read_queue_e->sinfo_timetolive = 0;
    read_queue_e->sinfo_tsn = tsn;
    read_queue_e->sinfo_cumtsn = tsn;
    read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
    read_queue_e->whoFrom = net;
    read_queue_e->length = 0;
    atomic_add_int(&net->ref_count, 1);
    read_queue_e->data = dm;
    read_queue_e->spec_flags = 0;
    read_queue_e->tail_mbuf = NULL;
    read_queue_e->stcb = stcb;
    read_queue_e->port_from = stcb->rport;
    read_queue_e->do_not_ref_stcb = 0;
    read_queue_e->end_added = 0;
    read_queue_e->some_taken = 0;
    read_queue_e->pdapi_aborted = 0;
failed_build:
    return (read_queue_e);
}


/*
 * Build out our readq entry based on the incoming chunk.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
    struct sctp_queued_to_read *read_queue_e = NULL;

    sctp_alloc_a_readq(stcb, read_queue_e);
    if (read_queue_e == NULL) {
        goto failed_build;
    }
    read_queue_e->sinfo_stream = chk->rec.data.stream_number;
    read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
    read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
    read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
    read_queue_e->sinfo_context = stcb->asoc.context;
    read_queue_e->sinfo_timetolive = 0;
    read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
    read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
    read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
    read_queue_e->whoFrom = chk->whoTo;
    read_queue_e->length = 0;
    atomic_add_int(&chk->whoTo->ref_count, 1);
    read_queue_e->data = chk->data;
    read_queue_e->tail_mbuf = NULL;
    read_queue_e->stcb = stcb;
    read_queue_e->port_from = stcb->rport;
    read_queue_e->spec_flags = 0;
    read_queue_e->do_not_ref_stcb = 0;
    read_queue_e->end_added = 0;
    read_queue_e->some_taken = 0;
    read_queue_e->pdapi_aborted = 0;
failed_build:
    return (read_queue_e);
}


struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
    struct sctp_sndrcvinfo *sinfo)
{
    struct sctp_sndrcvinfo *outinfo;
    struct cmsghdr *cmh;
    struct mbuf *ret;
    int len;
    int use_extended = 0;

    if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
        /* user does not want the sndrcv ctl */
        return (NULL);
    }
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
        use_extended = 1;
        len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
    } else {
        len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
    }

    ret = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
    if (ret == NULL) {
        /* No space */
        return (ret);
    }
    /* We need a CMSG header followed by the struct */
    cmh = mtod(ret, struct cmsghdr *);
    outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
    cmh->cmsg_level = IPPROTO_SCTP;
    if (use_extended) {
        cmh->cmsg_type = SCTP_EXTRCV;
        cmh->cmsg_len = len;
        memcpy(outinfo, sinfo, len);
    } else {
        cmh->cmsg_type = SCTP_SNDRCV;
        cmh->cmsg_len = len;
        *outinfo = *sinfo;
    }
    SCTP_BUF_LEN(ret) = cmh->cmsg_len;
    return (ret);
}


/*
 * We are delivering currently from the reassembly queue. We must continue
 * to deliver until we either 1) run out of space, 2) run out of sequential
 * TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    struct sctp_tmit_chunk *chk;
    uint16_t nxt_todel;
    uint16_t stream_no;
    int end = 0;
    int cntDel;
    struct sctp_queued_to_read *control, *ctl, *ctlat;

    cntDel = stream_no = 0;
    if (stcb &&
        ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
        (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
        /* socket above is long gone */
        asoc->fragmented_delivery_inprogress = 0;
        chk = TAILQ_FIRST(&asoc->reasmqueue);
        while (chk) {
            TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
            asoc->size_on_reasm_queue -= chk->send_size;
            sctp_ucount_decr(asoc->cnt_on_reasm_queue);
            /*
             * Lose the data pointer, since it's in the socket
             * buffer
             */
            if (chk->data) {
                sctp_m_freem(chk->data);
                chk->data = NULL;
            }
            /* Now free the address and data */
            sctp_free_remote_addr(chk->whoTo);
            sctp_free_a_chunk(stcb, chk);
            chk = TAILQ_FIRST(&asoc->reasmqueue);
        }
        return;
    }
    SCTP_TCB_LOCK_ASSERT(stcb);
    do {
        chk = TAILQ_FIRST(&asoc->reasmqueue);
        if (chk == NULL) {
            return;
        }
        if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
            /* Can't deliver more :< */
            return;
        }
        stream_no = chk->rec.data.stream_number;
        nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
        if (nxt_todel != chk->rec.data.stream_seq &&
            (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
            /*
             * Not the next sequence to deliver in its stream
             * and not marked unordered, so we must wait.
             */
            return;
        }
        if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

            control = sctp_build_readq_entry_chk(stcb, chk);
            if (control == NULL) {
                /* out of memory? */
                return;
            }
            /* save it off for our future deliveries */
            stcb->asoc.control_pdapi = control;
            if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
                end = 1;
            else
                end = 0;
            sctp_add_to_readq(stcb->sctp_ep,
                stcb, control, &stcb->sctp_socket->so_rcv, end);
            cntDel++;
        } else {
            if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
                end = 1;
            else
                end = 0;
            if (sctp_append_to_readq(stcb->sctp_ep, stcb,
                stcb->asoc.control_pdapi,
                chk->data, end, chk->rec.data.TSN_seq,
                &stcb->sctp_socket->so_rcv)) {
                /*
                 * something is very wrong: either
                 * control_pdapi is NULL, or the tail_mbuf
                 * is corrupt, or there is an EOM already on
                 * the mbuf chain.
                 */
                if (stcb->asoc.control_pdapi == NULL) {
                    panic("This should not happen control_pdapi NULL?");
                }
                if (stcb->asoc.control_pdapi->tail_mbuf == NULL) {
                    panic("This should not happen, tail_mbuf not being maintained?");
                }
                /* if we did not panic, it was an EOM */
                panic("Bad chunking ??");
            }
            cntDel++;
        }
        /* pull it off, we delivered it */
        TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
        if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
            asoc->fragmented_delivery_inprogress = 0;
            if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
                asoc->strmin[stream_no].last_sequence_delivered++;
            }
            if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
                SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
            }
        } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
            /*
             * turn the flag back on since we just delivered
             * yet another one.
             */
            asoc->fragmented_delivery_inprogress = 1;
        }
        asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
        asoc->last_flags_delivered = chk->rec.data.rcv_flags;
        asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
        asoc->last_strm_no_delivered = chk->rec.data.stream_number;

        asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
        asoc->size_on_reasm_queue -= chk->send_size;
        sctp_ucount_decr(asoc->cnt_on_reasm_queue);
        /* free up the chk */
        chk->data = NULL;
        sctp_free_remote_addr(chk->whoTo);
        sctp_free_a_chunk(stcb, chk);

        if (asoc->fragmented_delivery_inprogress == 0) {
            /*
             * Now let's see if we can deliver the next one on
             * the stream
             */
            uint16_t nxt_todel;
            struct sctp_stream_in *strm;

            strm = &asoc->strmin[stream_no];
            nxt_todel = strm->last_sequence_delivered + 1;
            ctl = TAILQ_FIRST(&strm->inqueue);
            if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
                while (ctl != NULL) {
                    /* Deliver more if we can. */
                    if (nxt_todel == ctl->sinfo_ssn) {
                        ctlat = TAILQ_NEXT(ctl, next);
                        TAILQ_REMOVE(&strm->inqueue, ctl, next);
                        asoc->size_on_all_streams -= ctl->length;
                        sctp_ucount_decr(asoc->cnt_on_all_streams);
                        strm->last_sequence_delivered++;
                        sctp_add_to_readq(stcb->sctp_ep, stcb,
                            ctl,
                            &stcb->sctp_socket->so_rcv, 1);
                        ctl = ctlat;
                    } else {
                        break;
                    }
                    nxt_todel = strm->last_sequence_delivered + 1;
                }
            }
            break;
        }
        chk = TAILQ_FIRST(&asoc->reasmqueue);
    } while (chk);
}
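
/*
 * A worked example of the delivery loop above (hypothetical numbers): with
 * tsn_last_delivered = 100 and the reassembly queue holding TSNs 101
 * (FIRST), 102 (MIDDLE) and 103 (LAST) of one message, iteration 1 builds
 * the readq entry and delivers 101, iterations 2 and 3 append 102 and 103,
 * the LAST flag ends the PD-API and bumps the stream's
 * last_sequence_delivered, and any ordered singletons that became
 * deliverable are then drained. A gap (say 105 queued instead of 103)
 * stops the loop at the "Can't deliver more" check.
 */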
398 */ 399 if (stcb->asoc.control_pdapi == NULL) { 400 panic("This should not happen control_pdapi NULL?"); 401 } 402 if (stcb->asoc.control_pdapi->tail_mbuf == NULL) { 403 panic("This should not happen, tail_mbuf not being maintained?"); 404 } 405 /* if we did not panic, it was a EOM */ 406 panic("Bad chunking ??"); 407 } 408 cntDel++; 409 } 410 /* pull it we did it */ 411 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); 412 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 413 asoc->fragmented_delivery_inprogress = 0; 414 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) { 415 asoc->strmin[stream_no].last_sequence_delivered++; 416 } 417 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) { 418 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); 419 } 420 } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 421 /* 422 * turn the flag back on since we just delivered 423 * yet another one. 424 */ 425 asoc->fragmented_delivery_inprogress = 1; 426 } 427 asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq; 428 asoc->last_flags_delivered = chk->rec.data.rcv_flags; 429 asoc->last_strm_seq_delivered = chk->rec.data.stream_seq; 430 asoc->last_strm_no_delivered = chk->rec.data.stream_number; 431 432 asoc->tsn_last_delivered = chk->rec.data.TSN_seq; 433 asoc->size_on_reasm_queue -= chk->send_size; 434 sctp_ucount_decr(asoc->cnt_on_reasm_queue); 435 /* free up the chk */ 436 chk->data = NULL; 437 sctp_free_remote_addr(chk->whoTo); 438 sctp_free_a_chunk(stcb, chk); 439 440 if (asoc->fragmented_delivery_inprogress == 0) { 441 /* 442 * Now lets see if we can deliver the next one on 443 * the stream 444 */ 445 uint16_t nxt_todel; 446 struct sctp_stream_in *strm; 447 448 strm = &asoc->strmin[stream_no]; 449 nxt_todel = strm->last_sequence_delivered + 1; 450 ctl = TAILQ_FIRST(&strm->inqueue); 451 if (ctl && (nxt_todel == ctl->sinfo_ssn)) { 452 while (ctl != NULL) { 453 /* Deliver more if we can. */ 454 if (nxt_todel == ctl->sinfo_ssn) { 455 ctlat = TAILQ_NEXT(ctl, next); 456 TAILQ_REMOVE(&strm->inqueue, ctl, next); 457 asoc->size_on_all_streams -= ctl->length; 458 sctp_ucount_decr(asoc->cnt_on_all_streams); 459 strm->last_sequence_delivered++; 460 sctp_add_to_readq(stcb->sctp_ep, stcb, 461 ctl, 462 &stcb->sctp_socket->so_rcv, 1); 463 ctl = ctlat; 464 } else { 465 break; 466 } 467 nxt_todel = strm->last_sequence_delivered + 1; 468 } 469 } 470 break; 471 } 472 chk = TAILQ_FIRST(&asoc->reasmqueue); 473 } while (chk); 474 } 475 476 /* 477 * Queue the chunk either right into the socket buffer if it is the next one 478 * to go OR put it in the correct place in the delivery queue. If we do 479 * append to the so_buf, keep doing so until we are out of order. One big 480 * question still remains, what to do when the socket buffer is FULL?? 481 */ 482 static void 483 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc, 484 struct sctp_queued_to_read *control, int *abort_flag) 485 { 486 /* 487 * FIX-ME maybe? What happens when the ssn wraps? If we are getting 488 * all the data in one stream this could happen quite rapidly. One 489 * could use the TSN to keep track of things, but this scheme breaks 490 * down in the other type of stream useage that could occur. Send a 491 * single msg to stream 0, send 4Billion messages to stream 1, now 492 * send a message to stream 0. You have a situation where the TSN 493 * has wrapped but not in the stream. Is this worth worrying about 494 * or should we just change our queue sort at the bottom to be by 495 * TSN. 
496 * 497 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2 498 * with TSN 1? If the peer is doing some sort of funky TSN/SSN 499 * assignment this could happen... and I don't see how this would be 500 * a violation. So for now I am undecided an will leave the sort by 501 * SSN alone. Maybe a hybred approach is the answer 502 * 503 */ 504 struct sctp_stream_in *strm; 505 struct sctp_queued_to_read *at; 506 int queue_needed; 507 uint16_t nxt_todel; 508 struct mbuf *oper; 509 510 queue_needed = 1; 511 asoc->size_on_all_streams += control->length; 512 sctp_ucount_incr(asoc->cnt_on_all_streams); 513 strm = &asoc->strmin[control->sinfo_stream]; 514 nxt_todel = strm->last_sequence_delivered + 1; 515 #ifdef SCTP_STR_LOGGING 516 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD); 517 #endif 518 #ifdef SCTP_DEBUG 519 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 520 printf("queue to stream called for ssn:%u lastdel:%u nxt:%u\n", 521 (uint32_t) control->sinfo_stream, 522 (uint32_t) strm->last_sequence_delivered, (uint32_t) nxt_todel); 523 } 524 #endif 525 if (compare_with_wrap(strm->last_sequence_delivered, 526 control->sinfo_ssn, MAX_SEQ) || 527 (strm->last_sequence_delivered == control->sinfo_ssn)) { 528 /* The incoming sseq is behind where we last delivered? */ 529 #ifdef SCTP_DEBUG 530 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 531 printf("Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n", 532 control->sinfo_ssn, 533 strm->last_sequence_delivered); 534 } 535 #endif 536 /* 537 * throw it in the stream so it gets cleaned up in 538 * association destruction 539 */ 540 TAILQ_INSERT_HEAD(&strm->inqueue, control, next); 541 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 542 0, M_DONTWAIT, 1, MT_DATA); 543 if (oper) { 544 struct sctp_paramhdr *ph; 545 uint32_t *ippp; 546 547 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 548 (sizeof(uint32_t) * 3); 549 ph = mtod(oper, struct sctp_paramhdr *); 550 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 551 ph->param_length = htons(SCTP_BUF_LEN(oper)); 552 ippp = (uint32_t *) (ph + 1); 553 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1); 554 ippp++; 555 *ippp = control->sinfo_tsn; 556 ippp++; 557 *ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn); 558 } 559 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1; 560 sctp_abort_an_association(stcb->sctp_ep, stcb, 561 SCTP_PEER_FAULTY, oper); 562 563 *abort_flag = 1; 564 return; 565 566 } 567 if (nxt_todel == control->sinfo_ssn) { 568 /* can be delivered right away? */ 569 #ifdef SCTP_STR_LOGGING 570 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL); 571 #endif 572 queue_needed = 0; 573 asoc->size_on_all_streams -= control->length; 574 sctp_ucount_decr(asoc->cnt_on_all_streams); 575 strm->last_sequence_delivered++; 576 sctp_add_to_readq(stcb->sctp_ep, stcb, 577 control, 578 &stcb->sctp_socket->so_rcv, 1); 579 control = TAILQ_FIRST(&strm->inqueue); 580 while (control != NULL) { 581 /* all delivered */ 582 nxt_todel = strm->last_sequence_delivered + 1; 583 if (nxt_todel == control->sinfo_ssn) { 584 at = TAILQ_NEXT(control, next); 585 TAILQ_REMOVE(&strm->inqueue, control, next); 586 asoc->size_on_all_streams -= control->length; 587 sctp_ucount_decr(asoc->cnt_on_all_streams); 588 strm->last_sequence_delivered++; 589 /* 590 * We ignore the return of deliver_data here 591 * since we always can hold the chunk on the 592 * d-queue. 
                /*
                 * We ignore the return of deliver_data here
                 * since we always can hold the chunk on the
                 * d-queue. And we have a finite number that
                 * can be delivered from the strq.
                 */
#ifdef SCTP_STR_LOGGING
                sctp_log_strm_del(control, NULL,
                    SCTP_STR_LOG_FROM_IMMED_DEL);
#endif
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, 1);
                control = at;
                continue;
            }
            break;
        }
    }
    if (queue_needed) {
        /*
         * Ok, we did not deliver this guy, find the correct place
         * to put it on the queue.
         */
        if (TAILQ_EMPTY(&strm->inqueue)) {
            /* Empty queue */
#ifdef SCTP_STR_LOGGING
            sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
#endif
            TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
        } else {
            TAILQ_FOREACH(at, &strm->inqueue, next) {
                if (compare_with_wrap(at->sinfo_ssn,
                    control->sinfo_ssn, MAX_SEQ)) {
                    /*
                     * one in queue is bigger than the
                     * new one, insert before this one
                     */
#ifdef SCTP_STR_LOGGING
                    sctp_log_strm_del(control, at,
                        SCTP_STR_LOG_FROM_INSERT_MD);
#endif
                    TAILQ_INSERT_BEFORE(at, control, next);
                    break;
                } else if (at->sinfo_ssn == control->sinfo_ssn) {
                    /*
                     * Gak, he sent me a duplicate stream
                     * sequence number. I guess I will just
                     * free this new guy; should we abort
                     * too? FIX ME MAYBE? Or it COULD be
                     * that the SSNs have wrapped. Maybe I
                     * should compare to TSN somehow...
                     * sigh, for now just blow away the
                     * chunk!
                     */
                    if (control->data)
                        sctp_m_freem(control->data);
                    control->data = NULL;
                    asoc->size_on_all_streams -= control->length;
                    sctp_ucount_decr(asoc->cnt_on_all_streams);
                    sctp_free_remote_addr(control->whoFrom);
                    sctp_free_a_readq(stcb, control);
                    return;
                } else {
                    if (TAILQ_NEXT(at, next) == NULL) {
                        /*
                         * We are at the end, insert
                         * it after this one
                         */
#ifdef SCTP_STR_LOGGING
                        sctp_log_strm_del(control, at,
                            SCTP_STR_LOG_FROM_INSERT_TL);
#endif
                        TAILQ_INSERT_AFTER(&strm->inqueue,
                            at, control, next);
                        break;
                    }
                }
            }
        }
    }
}

/*
 * Returns two things: the total size of the deliverable parts of the first
 * fragmented message on the reassembly queue, and a 1 if all of the message
 * is ready or a 0 if the message is still incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
{
    struct sctp_tmit_chunk *chk;
    uint32_t tsn;

    *t_size = 0;
    chk = TAILQ_FIRST(&asoc->reasmqueue);
    if (chk == NULL) {
        /* nothing on the queue */
        return (0);
    }
    if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
        /* Not a first on the queue */
        return (0);
    }
    tsn = chk->rec.data.TSN_seq;
    while (chk) {
        if (tsn != chk->rec.data.TSN_seq) {
            return (0);
        }
        *t_size += chk->send_size;
        if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
            return (1);
        }
        tsn++;
        chk = TAILQ_NEXT(chk, sctp_next);
    }
    return (0);
}
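
/*
 * Example of how the routine above interacts with the partial delivery
 * point used in the check below (hypothetical numbers, with
 * partial_delivery_point = 4096): a queue holding TSN 10 (FIRST, 1200
 * bytes) and TSN 11 (MIDDLE, 1200 bytes) yields t_size = 2400 and
 * "incomplete", so nothing starts; once the queued fragments total more
 * than 4096 bytes, delivery starts even though the LAST fragment has not
 * arrived; and a complete 500 byte fragmented message is delivered
 * because the routine above returned 1.
 */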
static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    struct sctp_tmit_chunk *chk;
    uint16_t nxt_todel;
    uint32_t tsize;

doit_again:
    chk = TAILQ_FIRST(&asoc->reasmqueue);
    if (chk == NULL) {
        /* Huh? */
        asoc->size_on_reasm_queue = 0;
        asoc->cnt_on_reasm_queue = 0;
        return;
    }
    if (asoc->fragmented_delivery_inprogress == 0) {
        nxt_todel =
            asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
        if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
            (nxt_todel == chk->rec.data.stream_seq ||
            (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
            /*
             * Yep, the first one is here and it's ok to
             * deliver, but should we?
             */
            if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
                (tsize > stcb->sctp_ep->partial_delivery_point))) {

                /*
                 * Yes, we set up to start reception by
                 * backing down the TSN just in case we
                 * can't deliver.
                 */
                asoc->fragmented_delivery_inprogress = 1;
                asoc->tsn_last_delivered =
                    chk->rec.data.TSN_seq - 1;
                asoc->str_of_pdapi =
                    chk->rec.data.stream_number;
                asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
                asoc->pdapi_ppid = chk->rec.data.payloadtype;
                asoc->fragment_flags = chk->rec.data.rcv_flags;
                sctp_service_reassembly(stcb, asoc);
            }
        }
    } else {
        /*
         * Service reassembly will deliver stream data queued at
         * the end of fragmented delivery... but it won't know to
         * go back and call itself again... we do that here with
         * the goto doit_again.
         */
        sctp_service_reassembly(stcb, asoc);
        if (asoc->fragmented_delivery_inprogress == 0) {
            /*
             * finished our fragmented delivery, could there be
             * more waiting?
             */
            goto doit_again;
        }
    }
}

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
    struct mbuf *oper;
    uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
    u_char last_flags;
    struct sctp_tmit_chunk *at, *prev, *next;

    prev = next = NULL;
    cum_ackp1 = asoc->tsn_last_delivered + 1;
    if (TAILQ_EMPTY(&asoc->reasmqueue)) {
        /* This is the first one on the queue */
        TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
        /*
         * we do not check for delivery of anything when only one
         * fragment is here
         */
        asoc->size_on_reasm_queue = chk->send_size;
        sctp_ucount_incr(asoc->cnt_on_reasm_queue);
        if (chk->rec.data.TSN_seq == cum_ackp1) {
            if (asoc->fragmented_delivery_inprogress == 0 &&
                (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
                SCTP_DATA_FIRST_FRAG) {
                /*
                 * An empty queue, no delivery in progress;
                 * we hit the next one and it does NOT have
                 * a FIRST fragment mark.
                 */
808 */ 809 #ifdef SCTP_DEBUG 810 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 811 printf("Gak, Evil plot, its not first, no fragmented delivery in progress\n"); 812 } 813 #endif 814 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 815 0, M_DONTWAIT, 1, MT_DATA); 816 817 if (oper) { 818 struct sctp_paramhdr *ph; 819 uint32_t *ippp; 820 821 SCTP_BUF_LEN(oper) = 822 sizeof(struct sctp_paramhdr) + 823 (sizeof(uint32_t) * 3); 824 ph = mtod(oper, struct sctp_paramhdr *); 825 ph->param_type = 826 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 827 ph->param_length = htons(SCTP_BUF_LEN(oper)); 828 ippp = (uint32_t *) (ph + 1); 829 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2); 830 ippp++; 831 *ippp = chk->rec.data.TSN_seq; 832 ippp++; 833 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 834 835 } 836 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2; 837 sctp_abort_an_association(stcb->sctp_ep, stcb, 838 SCTP_PEER_FAULTY, oper); 839 *abort_flag = 1; 840 } else if (asoc->fragmented_delivery_inprogress && 841 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) { 842 /* 843 * We are doing a partial delivery and the 844 * NEXT chunk MUST be either the LAST or 845 * MIDDLE fragment NOT a FIRST 846 */ 847 #ifdef SCTP_DEBUG 848 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 849 printf("Gak, Evil plot, it IS a first and fragmented delivery in progress\n"); 850 } 851 #endif 852 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 853 0, M_DONTWAIT, 1, MT_DATA); 854 if (oper) { 855 struct sctp_paramhdr *ph; 856 uint32_t *ippp; 857 858 SCTP_BUF_LEN(oper) = 859 sizeof(struct sctp_paramhdr) + 860 (3 * sizeof(uint32_t)); 861 ph = mtod(oper, struct sctp_paramhdr *); 862 ph->param_type = 863 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 864 ph->param_length = htons(SCTP_BUF_LEN(oper)); 865 ippp = (uint32_t *) (ph + 1); 866 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3); 867 ippp++; 868 *ippp = chk->rec.data.TSN_seq; 869 ippp++; 870 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 871 } 872 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3; 873 sctp_abort_an_association(stcb->sctp_ep, stcb, 874 SCTP_PEER_FAULTY, oper); 875 *abort_flag = 1; 876 } else if (asoc->fragmented_delivery_inprogress) { 877 /* 878 * Here we are ok with a MIDDLE or LAST 879 * piece 880 */ 881 if (chk->rec.data.stream_number != 882 asoc->str_of_pdapi) { 883 /* Got to be the right STR No */ 884 #ifdef SCTP_DEBUG 885 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 886 printf("Gak, Evil plot, it IS not same stream number %d vs %d\n", 887 chk->rec.data.stream_number, 888 asoc->str_of_pdapi); 889 } 890 #endif 891 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 892 0, M_DONTWAIT, 1, MT_DATA); 893 if (oper) { 894 struct sctp_paramhdr *ph; 895 uint32_t *ippp; 896 897 SCTP_BUF_LEN(oper) = 898 sizeof(struct sctp_paramhdr) + 899 (sizeof(uint32_t) * 3); 900 ph = mtod(oper, 901 struct sctp_paramhdr *); 902 ph->param_type = 903 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 904 ph->param_length = 905 htons(SCTP_BUF_LEN(oper)); 906 ippp = (uint32_t *) (ph + 1); 907 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4); 908 ippp++; 909 *ippp = chk->rec.data.TSN_seq; 910 ippp++; 911 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 912 } 913 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4; 914 sctp_abort_an_association(stcb->sctp_ep, 915 stcb, 
                        SCTP_PEER_FAULTY, oper);
                    *abort_flag = 1;
                } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
                    SCTP_DATA_UNORDERED &&
                    chk->rec.data.stream_seq !=
                    asoc->ssn_of_pdapi) {
                    /* Got to be the right STR Seq */
#ifdef SCTP_DEBUG
                    if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                        printf("Gak, Evil plot, it IS not same stream seq %d vs %d\n",
                            chk->rec.data.stream_seq,
                            asoc->ssn_of_pdapi);
                    }
#endif
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
                    *abort_flag = 1;
                }
            }
        }
        return;
    }
    /* Find its place */
    TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
        if (compare_with_wrap(at->rec.data.TSN_seq,
            chk->rec.data.TSN_seq, MAX_TSN)) {
            /*
             * one in queue is bigger than the new one, insert
             * before this one
             */
            /* A check */
            asoc->size_on_reasm_queue += chk->send_size;
            sctp_ucount_incr(asoc->cnt_on_reasm_queue);
            next = at;
            TAILQ_INSERT_BEFORE(at, chk, sctp_next);
            break;
        } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
            /*
             * Gak, he sent me a duplicate TSN. I guess I will
             * just free this new guy; should we abort too?
             * FIX ME MAYBE? For now just blow away the chunk!
             */
            if (chk->data) {
                sctp_m_freem(chk->data);
                chk->data = NULL;
            }
            sctp_free_remote_addr(chk->whoTo);
            sctp_free_a_chunk(stcb, chk);
            return;
        } else {
            last_flags = at->rec.data.rcv_flags;
            last_tsn = at->rec.data.TSN_seq;
            prev = at;
            if (TAILQ_NEXT(at, sctp_next) == NULL) {
                /*
                 * We are at the end, insert it after this
                 * one
                 */
                /* check it first */
                asoc->size_on_reasm_queue += chk->send_size;
                sctp_ucount_incr(asoc->cnt_on_reasm_queue);
                TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
                break;
            }
        }
    }
    /* Now the audits */
    if (prev) {
        prev_tsn = chk->rec.data.TSN_seq - 1;
        if (prev_tsn == prev->rec.data.TSN_seq) {
            /*
             * Ok, the one I am dropping onto the end is the
             * NEXT. A bit of validation here.
             */
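            /*
             * The prev/next audits here and below reduce to these
             * adjacency rules for consecutive TSNs on the
             * reassembly queue:
             *
             *	prev FIRST or MIDDLE -> chk must be MIDDLE or
             *	    LAST, same stream number, and (if ordered)
             *	    same stream sequence
             *	prev LAST            -> chk must be a FIRST
             *
             * Anything else is an impossible fragment train and
             * the association is aborted with a protocol
             * violation.
             */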
1015 */ 1016 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 1017 SCTP_DATA_FIRST_FRAG || 1018 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 1019 SCTP_DATA_MIDDLE_FRAG) { 1020 /* 1021 * Insert chk MUST be a MIDDLE or LAST 1022 * fragment 1023 */ 1024 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 1025 SCTP_DATA_FIRST_FRAG) { 1026 #ifdef SCTP_DEBUG 1027 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1028 printf("Prev check - It can be a midlle or last but not a first\n"); 1029 printf("Gak, Evil plot, it's a FIRST!\n"); 1030 } 1031 #endif 1032 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1033 0, M_DONTWAIT, 1, MT_DATA); 1034 if (oper) { 1035 struct sctp_paramhdr *ph; 1036 uint32_t *ippp; 1037 1038 SCTP_BUF_LEN(oper) = 1039 sizeof(struct sctp_paramhdr) + 1040 (3 * sizeof(uint32_t)); 1041 ph = mtod(oper, 1042 struct sctp_paramhdr *); 1043 ph->param_type = 1044 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1045 ph->param_length = 1046 htons(SCTP_BUF_LEN(oper)); 1047 ippp = (uint32_t *) (ph + 1); 1048 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6); 1049 ippp++; 1050 *ippp = chk->rec.data.TSN_seq; 1051 ippp++; 1052 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1053 1054 } 1055 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6; 1056 sctp_abort_an_association(stcb->sctp_ep, 1057 stcb, SCTP_PEER_FAULTY, oper); 1058 *abort_flag = 1; 1059 return; 1060 } 1061 if (chk->rec.data.stream_number != 1062 prev->rec.data.stream_number) { 1063 /* 1064 * Huh, need the correct STR here, 1065 * they must be the same. 1066 */ 1067 #ifdef SCTP_DEBUG 1068 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1069 printf("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n", 1070 chk->rec.data.stream_number, 1071 prev->rec.data.stream_number); 1072 } 1073 #endif 1074 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1075 0, M_DONTWAIT, 1, MT_DATA); 1076 if (oper) { 1077 struct sctp_paramhdr *ph; 1078 uint32_t *ippp; 1079 1080 SCTP_BUF_LEN(oper) = 1081 sizeof(struct sctp_paramhdr) + 1082 (3 * sizeof(uint32_t)); 1083 ph = mtod(oper, 1084 struct sctp_paramhdr *); 1085 ph->param_type = 1086 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1087 ph->param_length = 1088 htons(SCTP_BUF_LEN(oper)); 1089 ippp = (uint32_t *) (ph + 1); 1090 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7); 1091 ippp++; 1092 *ippp = chk->rec.data.TSN_seq; 1093 ippp++; 1094 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1095 } 1096 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7; 1097 sctp_abort_an_association(stcb->sctp_ep, 1098 stcb, SCTP_PEER_FAULTY, oper); 1099 1100 *abort_flag = 1; 1101 return; 1102 } 1103 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 && 1104 chk->rec.data.stream_seq != 1105 prev->rec.data.stream_seq) { 1106 /* 1107 * Huh, need the correct STR here, 1108 * they must be the same. 
1109 */ 1110 #ifdef SCTP_DEBUG 1111 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1112 printf("Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n", 1113 chk->rec.data.stream_seq, 1114 prev->rec.data.stream_seq); 1115 } 1116 #endif 1117 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1118 0, M_DONTWAIT, 1, MT_DATA); 1119 if (oper) { 1120 struct sctp_paramhdr *ph; 1121 uint32_t *ippp; 1122 1123 SCTP_BUF_LEN(oper) = 1124 sizeof(struct sctp_paramhdr) + 1125 (3 * sizeof(uint32_t)); 1126 ph = mtod(oper, 1127 struct sctp_paramhdr *); 1128 ph->param_type = 1129 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1130 ph->param_length = 1131 htons(SCTP_BUF_LEN(oper)); 1132 ippp = (uint32_t *) (ph + 1); 1133 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8); 1134 ippp++; 1135 *ippp = chk->rec.data.TSN_seq; 1136 ippp++; 1137 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1138 } 1139 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8; 1140 sctp_abort_an_association(stcb->sctp_ep, 1141 stcb, SCTP_PEER_FAULTY, oper); 1142 1143 *abort_flag = 1; 1144 return; 1145 } 1146 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 1147 SCTP_DATA_LAST_FRAG) { 1148 /* Insert chk MUST be a FIRST */ 1149 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) != 1150 SCTP_DATA_FIRST_FRAG) { 1151 #ifdef SCTP_DEBUG 1152 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1153 printf("Prev check - Gak, evil plot, its not FIRST and it must be!\n"); 1154 } 1155 #endif 1156 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1157 0, M_DONTWAIT, 1, MT_DATA); 1158 if (oper) { 1159 struct sctp_paramhdr *ph; 1160 uint32_t *ippp; 1161 1162 SCTP_BUF_LEN(oper) = 1163 sizeof(struct sctp_paramhdr) + 1164 (3 * sizeof(uint32_t)); 1165 ph = mtod(oper, 1166 struct sctp_paramhdr *); 1167 ph->param_type = 1168 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1169 ph->param_length = 1170 htons(SCTP_BUF_LEN(oper)); 1171 ippp = (uint32_t *) (ph + 1); 1172 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9); 1173 ippp++; 1174 *ippp = chk->rec.data.TSN_seq; 1175 ippp++; 1176 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1177 1178 } 1179 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9; 1180 sctp_abort_an_association(stcb->sctp_ep, 1181 stcb, SCTP_PEER_FAULTY, oper); 1182 1183 *abort_flag = 1; 1184 return; 1185 } 1186 } 1187 } 1188 } 1189 if (next) { 1190 post_tsn = chk->rec.data.TSN_seq + 1; 1191 if (post_tsn == next->rec.data.TSN_seq) { 1192 /* 1193 * Ok the one I am inserting ahead of is my NEXT 1194 * one. A bit of valdiation here. 
1195 */ 1196 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 1197 /* Insert chk MUST be a last fragment */ 1198 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) 1199 != SCTP_DATA_LAST_FRAG) { 1200 #ifdef SCTP_DEBUG 1201 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1202 printf("Next chk - Next is FIRST, we must be LAST\n"); 1203 printf("Gak, Evil plot, its not a last!\n"); 1204 } 1205 #endif 1206 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1207 0, M_DONTWAIT, 1, MT_DATA); 1208 if (oper) { 1209 struct sctp_paramhdr *ph; 1210 uint32_t *ippp; 1211 1212 SCTP_BUF_LEN(oper) = 1213 sizeof(struct sctp_paramhdr) + 1214 (3 * sizeof(uint32_t)); 1215 ph = mtod(oper, 1216 struct sctp_paramhdr *); 1217 ph->param_type = 1218 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1219 ph->param_length = 1220 htons(SCTP_BUF_LEN(oper)); 1221 ippp = (uint32_t *) (ph + 1); 1222 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10); 1223 ippp++; 1224 *ippp = chk->rec.data.TSN_seq; 1225 ippp++; 1226 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1227 } 1228 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10; 1229 sctp_abort_an_association(stcb->sctp_ep, 1230 stcb, SCTP_PEER_FAULTY, oper); 1231 1232 *abort_flag = 1; 1233 return; 1234 } 1235 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 1236 SCTP_DATA_MIDDLE_FRAG || 1237 (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 1238 SCTP_DATA_LAST_FRAG) { 1239 /* 1240 * Insert chk CAN be MIDDLE or FIRST NOT 1241 * LAST 1242 */ 1243 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 1244 SCTP_DATA_LAST_FRAG) { 1245 #ifdef SCTP_DEBUG 1246 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1247 printf("Next chk - Next is a MIDDLE/LAST\n"); 1248 printf("Gak, Evil plot, new prev chunk is a LAST\n"); 1249 } 1250 #endif 1251 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1252 0, M_DONTWAIT, 1, MT_DATA); 1253 if (oper) { 1254 struct sctp_paramhdr *ph; 1255 uint32_t *ippp; 1256 1257 SCTP_BUF_LEN(oper) = 1258 sizeof(struct sctp_paramhdr) + 1259 (3 * sizeof(uint32_t)); 1260 ph = mtod(oper, 1261 struct sctp_paramhdr *); 1262 ph->param_type = 1263 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1264 ph->param_length = 1265 htons(SCTP_BUF_LEN(oper)); 1266 ippp = (uint32_t *) (ph + 1); 1267 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11); 1268 ippp++; 1269 *ippp = chk->rec.data.TSN_seq; 1270 ippp++; 1271 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1272 1273 } 1274 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11; 1275 sctp_abort_an_association(stcb->sctp_ep, 1276 stcb, SCTP_PEER_FAULTY, oper); 1277 1278 *abort_flag = 1; 1279 return; 1280 } 1281 if (chk->rec.data.stream_number != 1282 next->rec.data.stream_number) { 1283 /* 1284 * Huh, need the correct STR here, 1285 * they must be the same. 
1286 */ 1287 #ifdef SCTP_DEBUG 1288 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1289 printf("Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n", 1290 chk->rec.data.stream_number, 1291 next->rec.data.stream_number); 1292 } 1293 #endif 1294 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1295 0, M_DONTWAIT, 1, MT_DATA); 1296 if (oper) { 1297 struct sctp_paramhdr *ph; 1298 uint32_t *ippp; 1299 1300 SCTP_BUF_LEN(oper) = 1301 sizeof(struct sctp_paramhdr) + 1302 (3 * sizeof(uint32_t)); 1303 ph = mtod(oper, 1304 struct sctp_paramhdr *); 1305 ph->param_type = 1306 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1307 ph->param_length = 1308 htons(SCTP_BUF_LEN(oper)); 1309 ippp = (uint32_t *) (ph + 1); 1310 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12); 1311 ippp++; 1312 *ippp = chk->rec.data.TSN_seq; 1313 ippp++; 1314 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1315 1316 } 1317 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12; 1318 sctp_abort_an_association(stcb->sctp_ep, 1319 stcb, SCTP_PEER_FAULTY, oper); 1320 1321 *abort_flag = 1; 1322 return; 1323 } 1324 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 && 1325 chk->rec.data.stream_seq != 1326 next->rec.data.stream_seq) { 1327 /* 1328 * Huh, need the correct STR here, 1329 * they must be the same. 1330 */ 1331 #ifdef SCTP_DEBUG 1332 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1333 printf("Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n", 1334 chk->rec.data.stream_seq, 1335 next->rec.data.stream_seq); 1336 } 1337 #endif 1338 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1339 0, M_DONTWAIT, 1, MT_DATA); 1340 if (oper) { 1341 struct sctp_paramhdr *ph; 1342 uint32_t *ippp; 1343 1344 SCTP_BUF_LEN(oper) = 1345 sizeof(struct sctp_paramhdr) + 1346 (3 * sizeof(uint32_t)); 1347 ph = mtod(oper, 1348 struct sctp_paramhdr *); 1349 ph->param_type = 1350 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1351 ph->param_length = 1352 htons(SCTP_BUF_LEN(oper)); 1353 ippp = (uint32_t *) (ph + 1); 1354 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13); 1355 ippp++; 1356 *ippp = chk->rec.data.TSN_seq; 1357 ippp++; 1358 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1359 } 1360 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13; 1361 sctp_abort_an_association(stcb->sctp_ep, 1362 stcb, SCTP_PEER_FAULTY, oper); 1363 1364 *abort_flag = 1; 1365 return; 1366 1367 } 1368 } 1369 } 1370 } 1371 /* Do we need to do some delivery? check */ 1372 sctp_deliver_reasm_check(stcb, asoc); 1373 } 1374 1375 /* 1376 * This is an unfortunate routine. It checks to make sure a evil guy is not 1377 * stuffing us full of bad packet fragments. A broken peer could also do this 1378 * but this is doubtful. It is to bad I must worry about evil crackers sigh 1379 * :< more cycles. 1380 */ 1381 static int 1382 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc, 1383 uint32_t TSN_seq) 1384 { 1385 struct sctp_tmit_chunk *at; 1386 uint32_t tsn_est; 1387 1388 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) { 1389 if (compare_with_wrap(TSN_seq, 1390 at->rec.data.TSN_seq, MAX_TSN)) { 1391 /* is it one bigger? */ 1392 tsn_est = at->rec.data.TSN_seq + 1; 1393 if (tsn_est == TSN_seq) { 1394 /* yep. 
                if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
                    SCTP_DATA_LAST_FRAG) {
                    /*
                     * Ok, this guy belongs next to a guy
                     * that is NOT last; it should be a
                     * middle/last, not a complete
                     * chunk.
                     */
                    return (1);
                } else {
                    /*
                     * This guy is ok since it's a LAST
                     * and the new chunk is a fully
                     * self-contained one.
                     */
                    return (0);
                }
            }
        } else if (TSN_seq == at->rec.data.TSN_seq) {
            /* Software error since I have a dup? */
            return (1);
        } else {
            /*
             * Ok, 'at' is larger than the new chunk, but does
             * the new chunk need to be right before it?
             */
            tsn_est = TSN_seq + 1;
            if (tsn_est == at->rec.data.TSN_seq) {
                /* Yep, it better be a first */
                if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
                    SCTP_DATA_FIRST_FRAG) {
                    return (1);
                } else {
                    return (0);
                }
            }
        }
    }
    return (0);
}


static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
    /* Process a data chunk */
    struct sctp_tmit_chunk *chk;
    uint32_t tsn, gap;
    struct mbuf *dmbuf;
    int indx, the_len;
    int need_reasm_check = 0;
    uint16_t strmno, strmseq;
    struct mbuf *oper;
    struct sctp_queued_to_read *control;
    int ordered;
    uint32_t protocol_id;
    uint8_t chunk_flags;

    chk = NULL;
    tsn = ntohl(ch->dp.tsn);
    chunk_flags = ch->ch.chunk_flags;
    protocol_id = ch->dp.protocol_id;
    ordered = ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0);
#ifdef SCTP_MAP_LOGGING
    sctp_log_map(0, tsn, asoc->cumulative_tsn, SCTP_MAP_PREPARE_SLIDE);
#endif
    if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
        asoc->cumulative_tsn == tsn) {
        /* It is a duplicate */
        SCTP_STAT_INCR(sctps_recvdupdata);
        if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
            /* Record a dup for the next outbound sack */
            asoc->dup_tsns[asoc->numduptsns] = tsn;
            asoc->numduptsns++;
        }
        return (0);
    }
    /* Calculate the number of TSNs between the base and this TSN */
    if (tsn >= asoc->mapping_array_base_tsn) {
        gap = tsn - asoc->mapping_array_base_tsn;
    } else {
        gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1;
    }
    if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
        /* Can't hold the bit in the mapping at max array, toss it */
        return (0);
    }
    if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
        if (sctp_expand_mapping_array(asoc)) {
            /* Can't expand, drop it */
            return (0);
        }
    }
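    /*
     * Worked example of the gap just computed (hypothetical values):
     * with mapping_array_base_tsn = 0xfffffff0 and tsn = 0x10 the else
     * branch applies, so
     *
     *	gap = (MAX_TSN - 0xfffffff0) + 0x10 + 1 = 32
     *
     * i.e. the TSN sits 32 slots past the base even though the TSN
     * space wrapped in between.
     */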
    if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
        *high_tsn = tsn;
    }
    /* See if we have received this one already */
    if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
        SCTP_STAT_INCR(sctps_recvdupdata);
        if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
            /* Record a dup for the next outbound sack */
            asoc->dup_tsns[asoc->numduptsns] = tsn;
            asoc->numduptsns++;
        }
        asoc->send_sack = 1;
        return (0);
    }
    /*
     * Check to see about the GONE flag; duplicates would cause a sack
     * to be sent up above
     */
    if (stcb && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
        (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
        (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
        ) {
        /*
         * wait a minute, this guy is gone; there is no longer a
         * receiver. Send peer an ABORT!
         */
        struct mbuf *op_err;

        op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
        sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err);
        *abort_flag = 1;
        return (0);
    }
    /*
     * Now before going further we see if there is room. If NOT then we
     * MAY let one through only IF this TSN is the one we are waiting
     * for on a partial delivery API.
     */

    /* now do the tests */
    if (((asoc->cnt_on_all_streams +
        asoc->cnt_on_reasm_queue +
        asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) ||
        (((int)asoc->my_rwnd) <= 0)) {
        /*
         * When we have NO room in the rwnd we check to make sure
         * the reader is doing its job...
         */
        if (stcb->sctp_socket->so_rcv.sb_cc) {
            /* some to read, wake-up */
            sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
        }
        /* now is it in the mapping array of what we have accepted? */
        if (compare_with_wrap(tsn,
            asoc->highest_tsn_inside_map, MAX_TSN)) {

            /* Nope, not in the valid range; dump it */
#ifdef SCTP_DEBUG
            if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                printf("My rwnd overrun1:tsn:%lx rwnd %lu sbspace:%ld\n",
                    (u_long)tsn, (u_long)asoc->my_rwnd,
                    sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv));
            }
#endif
            sctp_set_rwnd(stcb, asoc);
            if ((asoc->cnt_on_all_streams +
                asoc->cnt_on_reasm_queue +
                asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) {
                SCTP_STAT_INCR(sctps_datadropchklmt);
            } else {
                SCTP_STAT_INCR(sctps_datadroprwnd);
            }
            indx = *break_flag;
            *break_flag = 1;
            return (0);
        }
    }
    strmno = ntohs(ch->dp.stream_id);
    if (strmno >= asoc->streamincnt) {
        struct sctp_paramhdr *phdr;
        struct mbuf *mb;

        mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
            0, M_DONTWAIT, 1, MT_DATA);
        if (mb != NULL) {
            /* add some space up front so prepend will work well */
            SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
            phdr = mtod(mb, struct sctp_paramhdr *);
            /*
             * Error causes are just params, and this one has
             * two back-to-back phdrs: one with the error type
             * and size, the other with the stream id and a
             * rsvd field.
             */
            SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
            phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
            phdr->param_length =
                htons(sizeof(struct sctp_paramhdr) * 2);
            phdr++;
            /* We insert the stream in the type field */
            phdr->param_type = ch->dp.stream_id;
            /* And set the length to 0 for the rsvd field */
            phdr->param_length = 0;
            sctp_queue_op_err(stcb, mb);
        }
        SCTP_STAT_INCR(sctps_badsid);
        return (0);
    }
    /*
     * Before we continue, let's validate that we are not being fooled
     * by an evil attacker. We can only have 4k chunks based on our TSN
     * spread allowed by the mapping array, 512 * 8 bits, so there is no
     * way our stream sequence numbers could have wrapped. We of course
     * only validate the FIRST fragment, so the bit must be set.
     */
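    /*
     * Spelling out the arithmetic in the comment above: the mapping
     * array covers at most 512 * 8 = 4096 TSNs, so fewer than 4096
     * messages per stream can be outstanding here, far fewer than the
     * 65536 values a 16-bit stream sequence number can take before it
     * wraps.
     */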
1606 */ 1607 strmseq = ntohs(ch->dp.stream_sequence); 1608 1609 #ifdef SCTP_ASOCLOG_OF_TSNS 1610 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn; 1611 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno; 1612 asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq; 1613 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length; 1614 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags; 1615 asoc->tsn_in_at++; 1616 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) { 1617 asoc->tsn_in_at = 0; 1618 asoc->tsn_in_wrapped = 1; 1619 } 1620 #endif 1621 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) && 1622 (chunk_flags & SCTP_DATA_UNORDERED) == 0 && 1623 (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered, 1624 strmseq, MAX_SEQ) || 1625 asoc->strmin[strmno].last_sequence_delivered == strmseq)) { 1626 /* The incoming sseq is behind where we last delivered? */ 1627 #ifdef SCTP_DEBUG 1628 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1629 printf("EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n", 1630 strmseq, 1631 asoc->strmin[strmno].last_sequence_delivered); 1632 } 1633 #endif 1634 /* 1635 * throw it in the stream so it gets cleaned up in 1636 * association destruction 1637 */ 1638 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1639 0, M_DONTWAIT, 1, MT_DATA); 1640 if (oper) { 1641 struct sctp_paramhdr *ph; 1642 uint32_t *ippp; 1643 1644 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 1645 (3 * sizeof(uint32_t)); 1646 ph = mtod(oper, struct sctp_paramhdr *); 1647 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1648 ph->param_length = htons(SCTP_BUF_LEN(oper)); 1649 ippp = (uint32_t *) (ph + 1); 1650 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14); 1651 ippp++; 1652 *ippp = tsn; 1653 ippp++; 1654 *ippp = ((strmno << 16) | strmseq); 1655 1656 } 1657 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14; 1658 sctp_abort_an_association(stcb->sctp_ep, stcb, 1659 SCTP_PEER_FAULTY, oper); 1660 *abort_flag = 1; 1661 return (0); 1662 } 1663 /************************************ 1664 * From here down we may find ch-> invalid 1665 * so its a good idea NOT to use it. 
1666 *************************************/ 1667 1668 the_len = (chk_length - sizeof(struct sctp_data_chunk)); 1669 if (last_chunk == 0) { 1670 dmbuf = SCTP_M_COPYM(*m, 1671 (offset + sizeof(struct sctp_data_chunk)), 1672 the_len, M_DONTWAIT); 1673 #ifdef SCTP_MBUF_LOGGING 1674 { 1675 struct mbuf *mat; 1676 1677 mat = dmbuf; 1678 while (mat) { 1679 if (SCTP_BUF_IS_EXTENDED(mat)) { 1680 sctp_log_mb(mat, SCTP_MBUF_ICOPY); 1681 } 1682 mat = SCTP_BUF_NEXT(mat); 1683 } 1684 } 1685 #endif 1686 } else { 1687 /* We can steal the last chunk */ 1688 int l_len; 1689 1690 dmbuf = *m; 1691 /* lop off the top part */ 1692 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk))); 1693 if (SCTP_BUF_NEXT(dmbuf) == NULL) { 1694 l_len = SCTP_BUF_LEN(dmbuf); 1695 } else { 1696 /* 1697 * need to count up the size hopefully does not hit 1698 * this to often :-0 1699 */ 1700 struct mbuf *lat; 1701 1702 l_len = 0; 1703 lat = dmbuf; 1704 while (lat) { 1705 l_len += SCTP_BUF_LEN(lat); 1706 lat = SCTP_BUF_NEXT(lat); 1707 } 1708 } 1709 if (l_len > the_len) { 1710 /* Trim the end round bytes off too */ 1711 m_adj(dmbuf, -(l_len - the_len)); 1712 } 1713 } 1714 if (dmbuf == NULL) { 1715 SCTP_STAT_INCR(sctps_nomem); 1716 return (0); 1717 } 1718 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG && 1719 asoc->fragmented_delivery_inprogress == 0 && 1720 TAILQ_EMPTY(&asoc->resetHead) && 1721 ((ordered == 0) || 1722 ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq && 1723 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) { 1724 /* Candidate for express delivery */ 1725 /* 1726 * Its not fragmented, No PD-API is up, Nothing in the 1727 * delivery queue, Its un-ordered OR ordered and the next to 1728 * deliver AND nothing else is stuck on the stream queue, 1729 * And there is room for it in the socket buffer. Lets just 1730 * stuff it up the buffer.... 1731 */ 1732 1733 /* It would be nice to avoid this copy if we could :< */ 1734 sctp_alloc_a_readq(stcb, control); 1735 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 1736 protocol_id, 1737 stcb->asoc.context, 1738 strmno, strmseq, 1739 chunk_flags, 1740 dmbuf); 1741 if (control == NULL) { 1742 goto failed_express_del; 1743 } 1744 sctp_add_to_readq(stcb->sctp_ep, stcb, control, &stcb->sctp_socket->so_rcv, 1); 1745 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) { 1746 /* for ordered, bump what we delivered */ 1747 asoc->strmin[strmno].last_sequence_delivered++; 1748 } 1749 SCTP_STAT_INCR(sctps_recvexpress); 1750 #ifdef SCTP_STR_LOGGING 1751 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, 1752 SCTP_STR_LOG_FROM_EXPRS_DEL); 1753 #endif 1754 control = NULL; 1755 goto finish_express_del; 1756 } 1757 failed_express_del: 1758 /* If we reach here this is a new chunk */ 1759 chk = NULL; 1760 control = NULL; 1761 /* Express for fragmented delivery? */ 1762 if ((asoc->fragmented_delivery_inprogress) && 1763 (stcb->asoc.control_pdapi) && 1764 (asoc->str_of_pdapi == strmno) && 1765 (asoc->ssn_of_pdapi == strmseq) 1766 ) { 1767 control = stcb->asoc.control_pdapi; 1768 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) { 1769 /* Can't be another first? 
failed_express_del:
    /* If we reach here this is a new chunk */
    chk = NULL;
    control = NULL;
    /* Express for fragmented delivery? */
    if ((asoc->fragmented_delivery_inprogress) &&
        (stcb->asoc.control_pdapi) &&
        (asoc->str_of_pdapi == strmno) &&
        (asoc->ssn_of_pdapi == strmseq)
        ) {
        control = stcb->asoc.control_pdapi;
        if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
            /* Can't be another first? */
            goto failed_pdapi_express_del;
        }
        if (tsn == (control->sinfo_tsn + 1)) {
            /* Yep, we can add it on */
            int end = 0;
            uint32_t cumack;

            if (chunk_flags & SCTP_DATA_LAST_FRAG) {
                end = 1;
            }
            cumack = asoc->cumulative_tsn;
            if ((cumack + 1) == tsn)
                cumack = tsn;

            if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
                tsn,
                &stcb->sctp_socket->so_rcv)) {
                printf("Append fails end:%d\n", end);
                goto failed_pdapi_express_del;
            }
            SCTP_STAT_INCR(sctps_recvexpressm);
            control->sinfo_tsn = tsn;
            asoc->tsn_last_delivered = tsn;
            asoc->fragment_flags = chunk_flags;
            asoc->tsn_of_pdapi_last_delivered = tsn;
            asoc->last_flags_delivered = chunk_flags;
            asoc->last_strm_seq_delivered = strmseq;
            asoc->last_strm_no_delivered = strmno;
            if (end) {
                /* clean up the flags and such */
                asoc->fragmented_delivery_inprogress = 0;
                if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
                    asoc->strmin[strmno].last_sequence_delivered++;
                }
                stcb->asoc.control_pdapi = NULL;
                if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
                    /*
                     * There could be another message
                     * ready
                     */
                    need_reasm_check = 1;
                }
            }
            control = NULL;
            goto finish_express_del;
        }
    }
failed_pdapi_express_del:
    control = NULL;
    if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
        sctp_alloc_a_chunk(stcb, chk);
        if (chk == NULL) {
            /* No memory so we drop the chunk */
            SCTP_STAT_INCR(sctps_nomem);
            if (last_chunk == 0) {
                /* we copied it, free the copy */
                sctp_m_freem(dmbuf);
            }
            return (0);
        }
        chk->rec.data.TSN_seq = tsn;
        chk->no_fr_allowed = 0;
        chk->rec.data.stream_seq = strmseq;
        chk->rec.data.stream_number = strmno;
        chk->rec.data.payloadtype = protocol_id;
        chk->rec.data.context = stcb->asoc.context;
        chk->rec.data.doing_fast_retransmit = 0;
        chk->rec.data.rcv_flags = chunk_flags;
        chk->asoc = asoc;
        chk->send_size = the_len;
        chk->whoTo = net;
        atomic_add_int(&net->ref_count, 1);
        chk->data = dmbuf;
    } else {
        sctp_alloc_a_readq(stcb, control);
        sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
            protocol_id,
            stcb->asoc.context,
            strmno, strmseq,
            chunk_flags,
            dmbuf);
        if (control == NULL) {
            /* No memory so we drop the chunk */
            SCTP_STAT_INCR(sctps_nomem);
            if (last_chunk == 0) {
                /* we copied it, free the copy */
                sctp_m_freem(dmbuf);
            }
            return (0);
        }
        control->length = the_len;
    }

    /* Mark it as received */
    /* Now queue it where it belongs */
    if (control != NULL) {
        /* First a sanity check */
        if (asoc->fragmented_delivery_inprogress) {
            /*
             * Ok, we have a fragmented delivery in progress;
             * if this chunk is next to deliver OR belongs in
             * our view to the reassembly, the peer is evil or
             * broken.
             */
1873 */ 1874 uint32_t estimate_tsn; 1875 1876 estimate_tsn = asoc->tsn_last_delivered + 1; 1877 if (TAILQ_EMPTY(&asoc->reasmqueue) && 1878 (estimate_tsn == control->sinfo_tsn)) { 1879 /* Evil/Broke peer */ 1880 sctp_m_freem(control->data); 1881 control->data = NULL; 1882 sctp_free_remote_addr(control->whoFrom); 1883 sctp_free_a_readq(stcb, control); 1884 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1885 0, M_DONTWAIT, 1, MT_DATA); 1886 if (oper) { 1887 struct sctp_paramhdr *ph; 1888 uint32_t *ippp; 1889 1890 SCTP_BUF_LEN(oper) = 1891 sizeof(struct sctp_paramhdr) + 1892 (3 * sizeof(uint32_t)); 1893 ph = mtod(oper, struct sctp_paramhdr *); 1894 ph->param_type = 1895 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1896 ph->param_length = htons(SCTP_BUF_LEN(oper)); 1897 ippp = (uint32_t *) (ph + 1); 1898 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15); 1899 ippp++; 1900 *ippp = tsn; 1901 ippp++; 1902 *ippp = ((strmno << 16) | strmseq); 1903 } 1904 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15; 1905 sctp_abort_an_association(stcb->sctp_ep, stcb, 1906 SCTP_PEER_FAULTY, oper); 1907 1908 *abort_flag = 1; 1909 return (0); 1910 } else { 1911 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) { 1912 sctp_m_freem(control->data); 1913 control->data = NULL; 1914 sctp_free_remote_addr(control->whoFrom); 1915 sctp_free_a_readq(stcb, control); 1916 1917 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1918 0, M_DONTWAIT, 1, MT_DATA); 1919 if (oper) { 1920 struct sctp_paramhdr *ph; 1921 uint32_t *ippp; 1922 1923 SCTP_BUF_LEN(oper) = 1924 sizeof(struct sctp_paramhdr) + 1925 (3 * sizeof(uint32_t)); 1926 ph = mtod(oper, 1927 struct sctp_paramhdr *); 1928 ph->param_type = 1929 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1930 ph->param_length = 1931 htons(SCTP_BUF_LEN(oper)); 1932 ippp = (uint32_t *) (ph + 1); 1933 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16); 1934 ippp++; 1935 *ippp = tsn; 1936 ippp++; 1937 *ippp = ((strmno << 16) | strmseq); 1938 } 1939 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16; 1940 sctp_abort_an_association(stcb->sctp_ep, 1941 stcb, SCTP_PEER_FAULTY, oper); 1942 1943 *abort_flag = 1; 1944 return (0); 1945 } 1946 } 1947 } else { 1948 /* No PDAPI running */ 1949 if (!TAILQ_EMPTY(&asoc->reasmqueue)) { 1950 /* 1951 * Reassembly queue is NOT empty validate 1952 * that this tsn does not need to be in 1953 * reasembly queue. If it does then our peer 1954 * is broken or evil. 
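			 * For example, if the reassembly queue holds
			 * fragments with TSNs 105-107, a complete chunk
			 * claiming TSN 106 overlaps that range and can only
			 * come from a broken peer.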
			 */
			if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
				sctp_m_freem(control->data);
				control->data = NULL;
				sctp_free_remote_addr(control->whoFrom);
				sctp_free_a_readq(stcb, control);
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper,
					    struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length =
					    htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
					ippp++;
					*ippp = tsn;
					ippp++;
					*ippp = ((strmno << 16) | strmseq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
				sctp_abort_an_association(stcb->sctp_ep,
				    stcb, SCTP_PEER_FAULTY, oper);

				*abort_flag = 1;
				return (0);
			}
		}
	}
	/* ok, if we reach here we have passed the sanity checks */
	if (chunk_flags & SCTP_DATA_UNORDERED) {
		/* queue directly into socket buffer */
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1);
	} else {
		/*
		 * Special check for when streams are resetting. We
		 * could be smarter about this and check the actual
		 * stream to see if it is not being reset.. that way
		 * we would not create a HOLB when amongst streams
		 * being reset and those not being reset.
		 *
		 * We take complete messages that have a stream reset
		 * intervening (aka the TSN is after where our
		 * cum-ack needs to be) off and put them on a
		 * pending_reply_queue. The reassembly ones we do
		 * not have to worry about since they are all sorted
		 * and processed by TSN order. It is only the
		 * singletons I must worry about.
		 */
		struct sctp_stream_reset_list *liste;

		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
		    ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)) ||
		    (tsn == liste->tsn))
		    ) {
			/*
			 * yep it's past where we need to reset... go
			 * ahead and queue it.
			 */
			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
				/* first one on */
				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
			} else {
				struct sctp_queued_to_read *ctlOn;
				unsigned char inserted = 0;

				ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
				while (ctlOn) {
					if (compare_with_wrap(control->sinfo_tsn,
					    ctlOn->sinfo_tsn, MAX_TSN)) {
						ctlOn = TAILQ_NEXT(ctlOn, next);
					} else {
						/* found it */
						TAILQ_INSERT_BEFORE(ctlOn, control, next);
						inserted = 1;
						break;
					}
				}
				if (inserted == 0) {
					/*
					 * nothing on the queue has a
					 * larger TSN, so it belongs
					 * at the tail.
					 */
					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
				}
			}
		} else {
			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
			if (*abort_flag) {
				return (0);
			}
		}
	}
} else {
	/* Into the re-assembly queue */
	sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
	if (*abort_flag) {
		/*
		 * the assoc is now gone and chk was put onto the
		 * reasm queue, which has all been freed.
2066 */ 2067 *m = NULL; 2068 return (0); 2069 } 2070 } 2071 finish_express_del: 2072 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) { 2073 /* we have a new high score */ 2074 asoc->highest_tsn_inside_map = tsn; 2075 #ifdef SCTP_MAP_LOGGING 2076 sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 2077 #endif 2078 } 2079 if (tsn == (asoc->cumulative_tsn + 1)) { 2080 /* Update cum-ack */ 2081 asoc->cumulative_tsn = tsn; 2082 } 2083 if (last_chunk) { 2084 *m = NULL; 2085 } 2086 if (ordered) { 2087 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); 2088 } else { 2089 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); 2090 } 2091 SCTP_STAT_INCR(sctps_recvdata); 2092 /* Set it present please */ 2093 #ifdef SCTP_STR_LOGGING 2094 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN); 2095 #endif 2096 #ifdef SCTP_MAP_LOGGING 2097 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2098 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); 2099 #endif 2100 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); 2101 if (need_reasm_check) { 2102 /* Another one waits ? */ 2103 sctp_deliver_reasm_check(stcb, asoc); 2104 } 2105 return (1); 2106 } 2107 2108 int8_t sctp_map_lookup_tab[256] = { 2109 -1, 0, -1, 1, -1, 0, -1, 2, 2110 -1, 0, -1, 1, -1, 0, -1, 3, 2111 -1, 0, -1, 1, -1, 0, -1, 2, 2112 -1, 0, -1, 1, -1, 0, -1, 4, 2113 -1, 0, -1, 1, -1, 0, -1, 2, 2114 -1, 0, -1, 1, -1, 0, -1, 3, 2115 -1, 0, -1, 1, -1, 0, -1, 2, 2116 -1, 0, -1, 1, -1, 0, -1, 5, 2117 -1, 0, -1, 1, -1, 0, -1, 2, 2118 -1, 0, -1, 1, -1, 0, -1, 3, 2119 -1, 0, -1, 1, -1, 0, -1, 2, 2120 -1, 0, -1, 1, -1, 0, -1, 4, 2121 -1, 0, -1, 1, -1, 0, -1, 2, 2122 -1, 0, -1, 1, -1, 0, -1, 3, 2123 -1, 0, -1, 1, -1, 0, -1, 2, 2124 -1, 0, -1, 1, -1, 0, -1, 6, 2125 -1, 0, -1, 1, -1, 0, -1, 2, 2126 -1, 0, -1, 1, -1, 0, -1, 3, 2127 -1, 0, -1, 1, -1, 0, -1, 2, 2128 -1, 0, -1, 1, -1, 0, -1, 4, 2129 -1, 0, -1, 1, -1, 0, -1, 2, 2130 -1, 0, -1, 1, -1, 0, -1, 3, 2131 -1, 0, -1, 1, -1, 0, -1, 2, 2132 -1, 0, -1, 1, -1, 0, -1, 5, 2133 -1, 0, -1, 1, -1, 0, -1, 2, 2134 -1, 0, -1, 1, -1, 0, -1, 3, 2135 -1, 0, -1, 1, -1, 0, -1, 2, 2136 -1, 0, -1, 1, -1, 0, -1, 4, 2137 -1, 0, -1, 1, -1, 0, -1, 2, 2138 -1, 0, -1, 1, -1, 0, -1, 3, 2139 -1, 0, -1, 1, -1, 0, -1, 2, 2140 -1, 0, -1, 1, -1, 0, -1, 7, 2141 }; 2142 2143 2144 void 2145 sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag) 2146 { 2147 /* 2148 * Now we also need to check the mapping array in a couple of ways. 2149 * 1) Did we move the cum-ack point? 2150 */ 2151 struct sctp_association *asoc; 2152 int i, at; 2153 int all_ones, last_all_ones = 0; 2154 int slide_from, slide_end, lgap, distance; 2155 2156 #ifdef SCTP_MAP_LOGGING 2157 uint32_t old_cumack, old_base, old_highest; 2158 unsigned char aux_array[64]; 2159 2160 #endif 2161 struct sctp_stream_reset_list *liste; 2162 2163 asoc = &stcb->asoc; 2164 at = 0; 2165 2166 #ifdef SCTP_MAP_LOGGING 2167 old_cumack = asoc->cumulative_tsn; 2168 old_base = asoc->mapping_array_base_tsn; 2169 old_highest = asoc->highest_tsn_inside_map; 2170 if (asoc->mapping_array_size < 64) 2171 memcpy(aux_array, asoc->mapping_array, 2172 asoc->mapping_array_size); 2173 else 2174 memcpy(aux_array, asoc->mapping_array, 64); 2175 #endif 2176 2177 /* 2178 * We could probably improve this a small bit by calculating the 2179 * offset of the current cum-ack as the starting point. 
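	 *
	 * Worked example of the scan below: if mapping_array[0] == 0xff
	 * and mapping_array[1] == 0x0b (bits 0, 1 and 3 set), the first
	 * byte adds 8 to 'at' and sctp_map_lookup_tab[0x0b] == 1 accounts
	 * for the two-bit run at the bottom of the second byte, so the
	 * cum-ack becomes mapping_array_base_tsn + 9; base + 10 is the
	 * first hole.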
	 */
	all_ones = 1;
	at = 0;
	for (i = 0; i < stcb->asoc.mapping_array_size; i++) {
		if (asoc->mapping_array[i] == 0xff) {
			at += 8;
			last_all_ones = 1;
		} else {
			/* there is a 0 bit */
			all_ones = 0;
			at += sctp_map_lookup_tab[asoc->mapping_array[i]];
			last_all_ones = 0;
			break;
		}
	}
	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - last_all_ones);
	/* at is one off, since in the table an embedded -1 is present */
	at++;

	if (compare_with_wrap(asoc->cumulative_tsn,
	    asoc->highest_tsn_inside_map,
	    MAX_TSN)) {
#ifdef INVARIANTS
		panic("huh, cumack greater than high-tsn in map");
#else
		printf("huh, cumack greater than high-tsn in map - should panic?\n");
		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
#endif
	}
	if (all_ones ||
	    (asoc->cumulative_tsn == asoc->highest_tsn_inside_map && at >= 8)) {
		/* The complete array was covered by a single FR */
		/* highest becomes the cum-ack */
		int clr;

		asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
		/* clear the array */
		if (all_ones)
			clr = asoc->mapping_array_size;
		else {
			clr = (at >> 3) + 1;
			/*
			 * this should be the allones case but just
			 * in case :>
			 */
			if (clr > asoc->mapping_array_size)
				clr = asoc->mapping_array_size;
		}
		memset(asoc->mapping_array, 0, clr);
		/* base becomes one ahead of the cum-ack */
		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
#ifdef SCTP_MAP_LOGGING
		sctp_log_map(old_base, old_cumack, old_highest,
		    SCTP_MAP_PREPARE_SLIDE);
		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
		    asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED);
#endif
	} else if (at >= 8) {
		/* we can slide the mapping array down */
		/* Calculate the new byte position we can move down */
		slide_from = at >> 3;
		/*
		 * now calculate the ceiling of the move using our
		 * highest TSN value
		 */
		if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
			lgap = asoc->highest_tsn_inside_map -
			    asoc->mapping_array_base_tsn;
		} else {
			lgap = (MAX_TSN - asoc->mapping_array_base_tsn) +
			    asoc->highest_tsn_inside_map + 1;
		}
		slide_end = lgap >> 3;
		if (slide_end < slide_from) {
			panic("impossible slide");
		}
		distance = (slide_end - slide_from) + 1;
#ifdef SCTP_MAP_LOGGING
		sctp_log_map(old_base, old_cumack, old_highest,
		    SCTP_MAP_PREPARE_SLIDE);
		sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
		    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
#endif
		if (distance + slide_from > asoc->mapping_array_size ||
		    distance < 0) {
			/*
			 * Here we do NOT slide forward the array so
			 * that hopefully when more data comes in to
			 * fill it up we will be able to slide it forward.
Really I 2269 * don't think this should happen :-0 2270 */ 2271 2272 #ifdef SCTP_MAP_LOGGING 2273 sctp_log_map((uint32_t) distance, (uint32_t) slide_from, 2274 (uint32_t) asoc->mapping_array_size, 2275 SCTP_MAP_SLIDE_NONE); 2276 #endif 2277 } else { 2278 int ii; 2279 2280 for (ii = 0; ii < distance; ii++) { 2281 asoc->mapping_array[ii] = 2282 asoc->mapping_array[slide_from + ii]; 2283 } 2284 for (ii = distance; ii <= slide_end; ii++) { 2285 asoc->mapping_array[ii] = 0; 2286 } 2287 asoc->mapping_array_base_tsn += (slide_from << 3); 2288 #ifdef SCTP_MAP_LOGGING 2289 sctp_log_map(asoc->mapping_array_base_tsn, 2290 asoc->cumulative_tsn, asoc->highest_tsn_inside_map, 2291 SCTP_MAP_SLIDE_RESULT); 2292 #endif 2293 } 2294 } 2295 /* check the special flag for stream resets */ 2296 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2297 ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) || 2298 (asoc->cumulative_tsn == liste->tsn)) 2299 ) { 2300 /* 2301 * we have finished working through the backlogged TSN's now 2302 * time to reset streams. 1: call reset function. 2: free 2303 * pending_reply space 3: distribute any chunks in 2304 * pending_reply_queue. 2305 */ 2306 struct sctp_queued_to_read *ctl; 2307 2308 sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams); 2309 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); 2310 SCTP_FREE(liste); 2311 liste = TAILQ_FIRST(&asoc->resetHead); 2312 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2313 if (ctl && (liste == NULL)) { 2314 /* All can be removed */ 2315 while (ctl) { 2316 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 2317 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); 2318 if (*abort_flag) { 2319 return; 2320 } 2321 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2322 } 2323 } else if (ctl) { 2324 /* more than one in queue */ 2325 while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) { 2326 /* 2327 * if ctl->sinfo_tsn is <= liste->tsn we can 2328 * process it which is the NOT of 2329 * ctl->sinfo_tsn > liste->tsn 2330 */ 2331 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 2332 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); 2333 if (*abort_flag) { 2334 return; 2335 } 2336 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2337 } 2338 } 2339 /* 2340 * Now service re-assembly to pick up anything that has been 2341 * held on reassembly queue? 2342 */ 2343 sctp_deliver_reasm_check(stcb, asoc); 2344 } 2345 /* 2346 * Now we need to see if we need to queue a sack or just start the 2347 * timer (if allowed). 2348 */ 2349 if (ok_to_sack) { 2350 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2351 /* 2352 * Ok special case, in SHUTDOWN-SENT case. here we 2353 * maker sure SACK timer is off and instead send a 2354 * SHUTDOWN and a SACK 2355 */ 2356 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2357 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 2358 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18); 2359 } 2360 sctp_send_shutdown(stcb, stcb->asoc.primary_destination); 2361 sctp_send_sack(stcb); 2362 } else { 2363 int is_a_gap; 2364 2365 /* is there a gap now ? 
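			 * A gap exists whenever highest_tsn_inside_map is
			 * beyond cumulative_tsn: e.g. with a cum-ack of 100
			 * and a highest of 103, TSN 101 must be missing or
			 * the cum-ack would have advanced past it.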
			 */
			is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
			    stcb->asoc.cumulative_tsn, MAX_TSN);

			/*
			 * CMT DAC algorithm: increase number of packets
			 * received since last ack
			 */
			stcb->asoc.cmt_dac_pkts_rcvd++;

			if ((stcb->asoc.send_sack == 1) ||	/* We need to send a SACK */
			    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no longer is one */
			    (stcb->asoc.numduptsns) ||	/* we have dup's */
			    (is_a_gap) ||	/* is still a gap */
			    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
			    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
			    ) {

				if ((sctp_cmt_on_off) && (sctp_cmt_use_dac) &&
				    (stcb->asoc.send_sack == 0) &&
				    (stcb->asoc.numduptsns == 0) &&
				    (stcb->asoc.delayed_ack) &&
				    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {

					/*
					 * CMT DAC algorithm: With CMT,
					 * acks are delayed even in the
					 * face of reordering. Acks that
					 * do not have to be sent for one
					 * of the reasons above are
					 * therefore delayed; that is, acks
					 * that would have been sent due to
					 * gap reports will be delayed with
					 * DAC. Start the delayed ack timer.
					 */
					sctp_timer_start(SCTP_TIMER_TYPE_RECV,
					    stcb->sctp_ep, stcb, NULL);
				} else {
					/*
					 * Ok we must send a SACK now: one
					 * was explicitly requested, or
					 * there are gaps or duplicates.
					 */
					SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
					sctp_send_sack(stcb);
				}
			} else {
				if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
					sctp_timer_start(SCTP_TIMER_TYPE_RECV,
					    stcb->sctp_ep, stcb, NULL);
				}
			}
		}
	}
}

void
sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsize;
	uint16_t nxt_todel;

	if (asoc->fragmented_delivery_inprogress) {
		sctp_service_reassembly(stcb, asoc);
	}
	/* Can we proceed further, i.e., is the PD-API complete? */
	if (asoc->fragmented_delivery_inprogress) {
		/* no */
		return;
	}
	/*
	 * Now is there some other chunk I can deliver from the reassembly
	 * queue?
	 */
doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
	    ((nxt_todel == chk->rec.data.stream_seq) ||
	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
		/*
		 * Yep the first one is here. We set up to start reception
		 * by backing down the TSN, just in case we can't deliver.
		 */

		/*
		 * Before we start though, either all of the message should
		 * be here, or 1/4 of the socket buffer max, or nothing is
		 * on the delivery queue and something can be delivered.
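		 * The test below is against the endpoint's
		 * partial_delivery_point: once at least that many bytes of
		 * the message are queued, starting a PD-API delivery is
		 * worthwhile even though the tail has not arrived yet.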
2464 */ 2465 if ((sctp_is_all_msg_on_reasm(asoc, &tsize) || 2466 (tsize > stcb->sctp_ep->partial_delivery_point))) { 2467 asoc->fragmented_delivery_inprogress = 1; 2468 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1; 2469 asoc->str_of_pdapi = chk->rec.data.stream_number; 2470 asoc->ssn_of_pdapi = chk->rec.data.stream_seq; 2471 asoc->pdapi_ppid = chk->rec.data.payloadtype; 2472 asoc->fragment_flags = chk->rec.data.rcv_flags; 2473 sctp_service_reassembly(stcb, asoc); 2474 if (asoc->fragmented_delivery_inprogress == 0) { 2475 goto doit_again; 2476 } 2477 } 2478 } 2479 } 2480 2481 int 2482 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, 2483 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2484 struct sctp_nets *net, uint32_t * high_tsn) 2485 { 2486 struct sctp_data_chunk *ch, chunk_buf; 2487 struct sctp_association *asoc; 2488 int num_chunks = 0; /* number of control chunks processed */ 2489 int stop_proc = 0; 2490 int chk_length, break_flag, last_chunk; 2491 int abort_flag = 0, was_a_gap = 0; 2492 struct mbuf *m; 2493 2494 /* set the rwnd */ 2495 sctp_set_rwnd(stcb, &stcb->asoc); 2496 2497 m = *mm; 2498 SCTP_TCB_LOCK_ASSERT(stcb); 2499 asoc = &stcb->asoc; 2500 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 2501 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 2502 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 2503 /* 2504 * wait a minute, this guy is gone, there is no longer a 2505 * receiver. Send peer an ABORT! 2506 */ 2507 struct mbuf *op_err; 2508 2509 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 2510 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err); 2511 return (2); 2512 } 2513 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map, 2514 stcb->asoc.cumulative_tsn, MAX_TSN)) { 2515 /* there was a gap before this data was processed */ 2516 was_a_gap = 1; 2517 } 2518 /* 2519 * setup where we got the last DATA packet from for any SACK that 2520 * may need to go out. Don't bump the net. This is done ONLY when a 2521 * chunk is assigned. 2522 */ 2523 asoc->last_data_chunk_from = net; 2524 2525 /* 2526 * Now before we proceed we must figure out if this is a wasted 2527 * cluster... i.e. it is a small packet sent in and yet the driver 2528 * underneath allocated a full cluster for it. If so we must copy it 2529 * to a smaller mbuf and free up the cluster mbuf. This will help 2530 * with cluster starvation. 2531 */ 2532 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) { 2533 /* we only handle mbufs that are singletons.. not chains */ 2534 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA); 2535 if (m) { 2536 /* ok lets see if we can copy the data up */ 2537 caddr_t *from, *to; 2538 2539 /* get the pointers and copy */ 2540 to = mtod(m, caddr_t *); 2541 from = mtod((*mm), caddr_t *); 2542 memcpy(to, from, SCTP_BUF_LEN((*mm))); 2543 /* copy the length and free up the old */ 2544 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm)); 2545 sctp_m_freem(*mm); 2546 /* sucess, back copy */ 2547 *mm = m; 2548 } else { 2549 /* We are in trouble in the mbuf world .. yikes */ 2550 m = *mm; 2551 } 2552 } 2553 /* get pointer to the first chunk header */ 2554 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset, 2555 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf); 2556 if (ch == NULL) { 2557 return (1); 2558 } 2559 /* 2560 * process all DATA chunks... 
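	 *
	 * Each pass of the loop below advances *offset by
	 * SCTP_SIZE32(chk_length), i.e. the chunk length rounded up to a
	 * 4-byte boundary (a 37-byte chunk occupies 40 bytes on the wire),
	 * which is how SCTP pads chunks.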
	 */
	*high_tsn = asoc->cumulative_tsn;
	break_flag = 0;
	asoc->data_pkts_seen++;
	while (stop_proc == 0) {
		/* validate chunk length */
		chk_length = ntohs(ch->ch.chunk_length);
		if (length - *offset < chk_length) {
			/* all done, mutilated chunk */
			stop_proc = 1;
			break;
		}
		if (ch->ch.chunk_type == SCTP_DATA) {
			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
				/*
				 * Need to send an abort since we had an
				 * invalid data chunk.
				 */
				struct mbuf *op_err;

				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);

				if (op_err) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
					    (2 * sizeof(uint32_t));
					ph = mtod(op_err, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(op_err));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
					ippp++;
					*ippp = asoc->cumulative_tsn;

				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
				sctp_abort_association(inp, stcb, m, iphlen, sh,
				    op_err);
				return (2);
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_audit_log(0xB1, 0);
#endif
			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
				last_chunk = 1;
			} else {
				last_chunk = 0;
			}
			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
			    chk_length, net, high_tsn, &abort_flag, &break_flag,
			    last_chunk)) {
				num_chunks++;
			}
			if (abort_flag)
				return (2);

			if (break_flag) {
				/*
				 * Set because we ran out of rwnd space
				 * and have no drop-report space left.
				 */
				stop_proc = 1;
				break;
			}
		} else {
			/* not a data chunk in the data region */
			switch (ch->ch.chunk_type) {
			case SCTP_INITIATION:
			case SCTP_INITIATION_ACK:
			case SCTP_SELECTIVE_ACK:
			case SCTP_HEARTBEAT_REQUEST:
			case SCTP_HEARTBEAT_ACK:
			case SCTP_ABORT_ASSOCIATION:
			case SCTP_SHUTDOWN:
			case SCTP_SHUTDOWN_ACK:
			case SCTP_OPERATION_ERROR:
			case SCTP_COOKIE_ECHO:
			case SCTP_COOKIE_ACK:
			case SCTP_ECN_ECHO:
			case SCTP_ECN_CWR:
			case SCTP_SHUTDOWN_COMPLETE:
			case SCTP_AUTHENTICATION:
			case SCTP_ASCONF_ACK:
			case SCTP_PACKET_DROPPED:
			case SCTP_STREAM_RESET:
			case SCTP_FORWARD_CUM_TSN:
			case SCTP_ASCONF:
				/*
				 * Now, what do we do with KNOWN chunks that
				 * are NOT in the right place?
				 *
				 * For now, I do nothing but ignore them. We
				 * may later want to add sysctl stuff to
				 * switch out and do either an ABORT() or
				 * possibly process them.
				 */
				if (sctp_strict_data_order) {
					struct mbuf *op_err;

					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
					sctp_abort_association(inp, stcb, m, iphlen, sh, op_err);
					return (2);
				}
				break;
			default:
				/* unknown chunk type, use bit rules */
				if (ch->ch.chunk_type & 0x40) {
					/* Add an error report to the queue */
					struct mbuf *mm;
					struct sctp_paramhdr *phd;

					mm = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
					if (mm) {
						phd = mtod(mm, struct sctp_paramhdr *);
						/*
						 * We cheat and use the
						 * param type since we did
						 * not bother to define an
						 * error cause struct.
They are 2684 * the same basic format 2685 * with different names. 2686 */ 2687 phd->param_type = 2688 htons(SCTP_CAUSE_UNRECOG_CHUNK); 2689 phd->param_length = 2690 htons(chk_length + sizeof(*phd)); 2691 SCTP_BUF_LEN(mm) = sizeof(*phd); 2692 SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, 2693 SCTP_SIZE32(chk_length), 2694 M_DONTWAIT); 2695 if (SCTP_BUF_NEXT(mm)) { 2696 sctp_queue_op_err(stcb, mm); 2697 } else { 2698 sctp_m_freem(mm); 2699 } 2700 } 2701 } 2702 if ((ch->ch.chunk_type & 0x80) == 0) { 2703 /* discard the rest of this packet */ 2704 stop_proc = 1; 2705 } /* else skip this bad chunk and 2706 * continue... */ 2707 break; 2708 }; /* switch of chunk type */ 2709 } 2710 *offset += SCTP_SIZE32(chk_length); 2711 if ((*offset >= length) || stop_proc) { 2712 /* no more data left in the mbuf chain */ 2713 stop_proc = 1; 2714 continue; 2715 } 2716 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset, 2717 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf); 2718 if (ch == NULL) { 2719 *offset = length; 2720 stop_proc = 1; 2721 break; 2722 2723 } 2724 } /* while */ 2725 if (break_flag) { 2726 /* 2727 * we need to report rwnd overrun drops. 2728 */ 2729 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0); 2730 } 2731 if (num_chunks) { 2732 /* 2733 * Did we get data, if so update the time for auto-close and 2734 * give peer credit for being alive. 2735 */ 2736 SCTP_STAT_INCR(sctps_recvpktwithdata); 2737 stcb->asoc.overall_error_count = 0; 2738 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd); 2739 } 2740 /* now service all of the reassm queue if needed */ 2741 if (!(TAILQ_EMPTY(&asoc->reasmqueue))) 2742 sctp_service_queues(stcb, asoc); 2743 2744 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2745 /* Assure that we ack right away */ 2746 stcb->asoc.send_sack = 1; 2747 } 2748 /* Start a sack timer or QUEUE a SACK for sending */ 2749 if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) && 2750 (stcb->asoc.mapping_array[0] != 0xff)) { 2751 if ((stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) || 2752 (stcb->asoc.delayed_ack == 0) || 2753 (stcb->asoc.send_sack == 1)) { 2754 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2755 SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 2756 } 2757 sctp_send_sack(stcb); 2758 } else { 2759 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2760 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2761 stcb->sctp_ep, stcb, NULL); 2762 } 2763 } 2764 } else { 2765 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag); 2766 } 2767 if (abort_flag) 2768 return (2); 2769 2770 return (0); 2771 } 2772 2773 static void 2774 sctp_handle_segments(struct sctp_tcb *stcb, struct sctp_association *asoc, 2775 struct sctp_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked, 2776 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack, 2777 int num_seg, int *ecn_seg_sums) 2778 { 2779 /************************************************/ 2780 /* process fragments and update sendqueue */ 2781 /************************************************/ 2782 struct sctp_sack *sack; 2783 struct sctp_gap_ack_block *frag; 2784 struct sctp_tmit_chunk *tp1; 2785 int i; 2786 unsigned int j; 2787 2788 #ifdef SCTP_FR_LOGGING 2789 int num_frs = 0; 2790 2791 #endif 2792 uint16_t frag_strt, frag_end, primary_flag_set; 2793 u_long last_frag_high; 2794 2795 /* 2796 * @@@ JRI : TODO: This flag is not used anywhere .. remove? 
	 */
	if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
		primary_flag_set = 1;
	} else {
		primary_flag_set = 0;
	}

	sack = &ch->sack;
	frag = (struct sctp_gap_ack_block *)((caddr_t)sack +
	    sizeof(struct sctp_sack));
	tp1 = NULL;
	last_frag_high = 0;
	for (i = 0; i < num_seg; i++) {
		frag_strt = ntohs(frag->start);
		frag_end = ntohs(frag->end);
		/* some sanity checks on the fragment offsets */
		if (frag_strt > frag_end) {
			/* this one is malformed, skip */
			frag++;
			continue;
		}
		if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
		    MAX_TSN))
			*biggest_tsn_acked = frag_end + last_tsn;

		/* mark acked dgs and find out the highest TSN being acked */
		if (tp1 == NULL) {
			tp1 = TAILQ_FIRST(&asoc->sent_queue);

			/* save the locations of the last frags */
			last_frag_high = frag_end + last_tsn;
		} else {
			/*
			 * now lets see if we need to reset the queue due to
			 * an out-of-order SACK fragment
			 */
			if (compare_with_wrap(frag_strt + last_tsn,
			    last_frag_high, MAX_TSN)) {
				/*
				 * if the new frag starts after the last TSN
				 * frag covered, we are ok and this one is
				 * beyond the last one
				 */
				;
			} else {
				/*
				 * ok, they have reset us, so we need to
				 * reset the queue; this will cause extra
				 * hunting, but hey, they chose the
				 * performance hit when they failed to order
				 * their gaps..
				 */
				tp1 = TAILQ_FIRST(&asoc->sent_queue);
			}
			last_frag_high = frag_end + last_tsn;
		}
		for (j = frag_strt + last_tsn; j <= frag_end + last_tsn; j++) {
			while (tp1) {
#ifdef SCTP_FR_LOGGING
				if (tp1->rec.data.doing_fast_retransmit)
					num_frs++;
#endif

				/*
				 * CMT: CUCv2 algorithm. For each TSN being
				 * processed from the sent queue, track the
				 * next expected pseudo-cumack, or
				 * rtx_pseudo_cumack, if required. Separate
				 * cumack trackers for first transmissions,
				 * and retransmissions.
				 */
				if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
				    (tp1->snd_count == 1)) {
					tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
					tp1->whoTo->find_pseudo_cumack = 0;
				}
				if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
				    (tp1->snd_count > 1)) {
					tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
					tp1->whoTo->find_rtx_pseudo_cumack = 0;
				}
				if (tp1->rec.data.TSN_seq == j) {
					if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
						/*
						 * must be held until
						 * cum-ack passes
						 */
						/*
						 * ECN Nonce: Add the nonce
						 * value to the sender's
						 * nonce sum
						 */
						if (tp1->sent < SCTP_DATAGRAM_RESEND) {
							/*-
							 * If it is less than RESEND, it is
							 * now no-longer in flight.
							 * Higher values may already be set
							 * via previous Gap Ack Blocks...
							 * i.e. ACKED or RESEND.
							 */
							if (compare_with_wrap(tp1->rec.data.TSN_seq,
							    *biggest_newly_acked_tsn, MAX_TSN)) {
								*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
							}
							/*
							 * CMT: SFR algo (and HTNA) - set
							 * saw_newack to 1 for the dest
							 * being newly acked. Update
							 * this_sack_highest_newack if
							 * appropriate.
							 */
							if (tp1->rec.data.chunk_was_revoked == 0)
								tp1->whoTo->saw_newack = 1;

							if (compare_with_wrap(tp1->rec.data.TSN_seq,
							    tp1->whoTo->this_sack_highest_newack,
							    MAX_TSN)) {
								tp1->whoTo->this_sack_highest_newack =
								    tp1->rec.data.TSN_seq;
							}
							/*
							 * CMT DAC algo: also update
							 * this_sack_lowest_newack
							 */
							if (*this_sack_lowest_newack == 0) {
#ifdef SCTP_SACK_LOGGING
								sctp_log_sack(*this_sack_lowest_newack,
								    last_tsn,
								    tp1->rec.data.TSN_seq,
								    0,
								    0,
								    SCTP_LOG_TSN_ACKED);
#endif
								*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
							}
							/*
							 * CMT: CUCv2 algorithm. If the
							 * (rtx-)pseudo-cumack for the
							 * corresponding dest is being
							 * acked, then we have a new
							 * (rtx-)pseudo-cumack. Set
							 * new_(rtx_)pseudo_cumack to TRUE
							 * so that the cwnd for this dest
							 * can be updated. Also trigger the
							 * search for the next expected
							 * (rtx-)pseudo-cumack. Separate
							 * pseudo_cumack trackers are kept
							 * for first transmissions and
							 * retransmissions.
							 */
							if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
								if (tp1->rec.data.chunk_was_revoked == 0) {
									tp1->whoTo->new_pseudo_cumack = 1;
								}
								tp1->whoTo->find_pseudo_cumack = 1;
							}
#ifdef SCTP_CWND_LOGGING
							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
#endif
							if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
								if (tp1->rec.data.chunk_was_revoked == 0) {
									tp1->whoTo->new_pseudo_cumack = 1;
								}
								tp1->whoTo->find_rtx_pseudo_cumack = 1;
							}
#ifdef SCTP_SACK_LOGGING
							sctp_log_sack(*biggest_newly_acked_tsn,
							    last_tsn,
							    tp1->rec.data.TSN_seq,
							    frag_strt,
							    frag_end,
							    SCTP_LOG_TSN_ACKED);
#endif
#ifdef SCTP_FLIGHT_LOGGING
							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
							    tp1->whoTo->flight_size,
							    tp1->book_size,
							    (uintptr_t) tp1->whoTo,
							    tp1->rec.data.TSN_seq);
#endif
							sctp_flight_size_decrease(tp1);
							sctp_total_flight_decrease(stcb, tp1);

							tp1->whoTo->net_ack += tp1->send_size;
							if (tp1->snd_count < 2) {
								/*
								 * True non-retransmitted
								 * chunk
								 */
								tp1->whoTo->net_ack2 += tp1->send_size;

								/*
								 * update the RTO too?
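								 * Per Karn's rule we only
								 * take an RTT sample from
								 * chunks sent exactly once
								 * (snd_count < 2); a
								 * retransmitted chunk would
								 * give an ambiguous
								 * measurement.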
*/ 3009 if (tp1->do_rtt) { 3010 tp1->whoTo->RTO = 3011 sctp_calculate_rto(stcb, 3012 asoc, 3013 tp1->whoTo, 3014 &tp1->sent_rcv_time); 3015 tp1->do_rtt = 0; 3016 } 3017 } 3018 } 3019 if (tp1->sent <= SCTP_DATAGRAM_RESEND) { 3020 (*ecn_seg_sums) += tp1->rec.data.ect_nonce; 3021 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM; 3022 if (compare_with_wrap(tp1->rec.data.TSN_seq, 3023 asoc->this_sack_highest_gap, 3024 MAX_TSN)) { 3025 asoc->this_sack_highest_gap = 3026 tp1->rec.data.TSN_seq; 3027 } 3028 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3029 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 3030 #ifdef SCTP_AUDITING_ENABLED 3031 sctp_audit_log(0xB2, 3032 (asoc->sent_queue_retran_cnt & 0x000000ff)); 3033 #endif 3034 } 3035 } 3036 /* 3037 * All chunks NOT UNSENT 3038 * fall through here and are 3039 * marked 3040 */ 3041 tp1->sent = SCTP_DATAGRAM_MARKED; 3042 if (tp1->rec.data.chunk_was_revoked) { 3043 /* deflate the cwnd */ 3044 tp1->whoTo->cwnd -= tp1->book_size; 3045 tp1->rec.data.chunk_was_revoked = 0; 3046 } 3047 } 3048 break; 3049 } /* if (tp1->TSN_seq == j) */ 3050 if (compare_with_wrap(tp1->rec.data.TSN_seq, j, 3051 MAX_TSN)) 3052 break; 3053 3054 tp1 = TAILQ_NEXT(tp1, sctp_next); 3055 } /* end while (tp1) */ 3056 } /* end for (j = fragStart */ 3057 frag++; /* next one */ 3058 } 3059 #ifdef SCTP_FR_LOGGING 3060 /* 3061 * if (num_frs) sctp_log_fr(*biggest_tsn_acked, 3062 * *biggest_newly_acked_tsn, last_tsn, SCTP_FR_LOG_BIGGEST_TSNS); 3063 */ 3064 #endif 3065 } 3066 3067 static void 3068 sctp_check_for_revoked(struct sctp_tcb *stcb, 3069 struct sctp_association *asoc, uint32_t cumack, 3070 u_long biggest_tsn_acked) 3071 { 3072 struct sctp_tmit_chunk *tp1; 3073 int tot_revoked = 0; 3074 3075 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3076 while (tp1) { 3077 if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack, 3078 MAX_TSN)) { 3079 /* 3080 * ok this guy is either ACK or MARKED. If it is 3081 * ACKED it has been previously acked but not this 3082 * time i.e. revoked. If it is MARKED it was ACK'ed 3083 * again. 3084 */ 3085 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 3086 /* it has been revoked */ 3087 tp1->sent = SCTP_DATAGRAM_SENT; 3088 tp1->rec.data.chunk_was_revoked = 1; 3089 /* 3090 * We must add this stuff back in to assure 3091 * timers and such get started. 3092 */ 3093 #ifdef SCTP_FLIGHT_LOGGING 3094 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 3095 tp1->whoTo->flight_size, 3096 tp1->book_size, 3097 (uintptr_t) tp1->whoTo, 3098 tp1->rec.data.TSN_seq); 3099 #endif 3100 sctp_flight_size_increase(tp1); 3101 sctp_total_flight_increase(stcb, tp1); 3102 /* 3103 * We inflate the cwnd to compensate for our 3104 * artificial inflation of the flight_size. 3105 */ 3106 tp1->whoTo->cwnd += tp1->book_size; 3107 tot_revoked++; 3108 #ifdef SCTP_SACK_LOGGING 3109 sctp_log_sack(asoc->last_acked_seq, 3110 cumack, 3111 tp1->rec.data.TSN_seq, 3112 0, 3113 0, 3114 SCTP_LOG_TSN_REVOKED); 3115 #endif 3116 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) { 3117 /* it has been re-acked in this SACK */ 3118 tp1->sent = SCTP_DATAGRAM_ACKED; 3119 } 3120 } 3121 if (tp1->sent == SCTP_DATAGRAM_UNSENT) 3122 break; 3123 tp1 = TAILQ_NEXT(tp1, sctp_next); 3124 } 3125 if (tot_revoked > 0) { 3126 /* 3127 * Setup the ecn nonce re-sync point. We do this since once 3128 * data is revoked we begin to retransmit things, which do 3129 * NOT have the ECN bits set. This means we are now out of 3130 * sync and must wait until we get back in sync with the 3131 * peer to check ECN bits. 
3132 */ 3133 tp1 = TAILQ_FIRST(&asoc->send_queue); 3134 if (tp1 == NULL) { 3135 asoc->nonce_resync_tsn = asoc->sending_seq; 3136 } else { 3137 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq; 3138 } 3139 asoc->nonce_wait_for_ecne = 0; 3140 asoc->nonce_sum_check = 0; 3141 } 3142 } 3143 3144 static void 3145 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, 3146 u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved) 3147 { 3148 struct sctp_tmit_chunk *tp1; 3149 int strike_flag = 0; 3150 struct timeval now; 3151 int tot_retrans = 0; 3152 uint32_t sending_seq; 3153 struct sctp_nets *net; 3154 int num_dests_sacked = 0; 3155 3156 /* 3157 * select the sending_seq, this is either the next thing ready to be 3158 * sent but not transmitted, OR, the next seq we assign. 3159 */ 3160 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 3161 if (tp1 == NULL) { 3162 sending_seq = asoc->sending_seq; 3163 } else { 3164 sending_seq = tp1->rec.data.TSN_seq; 3165 } 3166 3167 /* CMT DAC algo: finding out if SACK is a mixed SACK */ 3168 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3169 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3170 if (net->saw_newack) 3171 num_dests_sacked++; 3172 } 3173 } 3174 if (stcb->asoc.peer_supports_prsctp) { 3175 SCTP_GETTIME_TIMEVAL(&now); 3176 } 3177 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3178 while (tp1) { 3179 strike_flag = 0; 3180 if (tp1->no_fr_allowed) { 3181 /* this one had a timeout or something */ 3182 tp1 = TAILQ_NEXT(tp1, sctp_next); 3183 continue; 3184 } 3185 #ifdef SCTP_FR_LOGGING 3186 if (tp1->sent < SCTP_DATAGRAM_RESEND) 3187 sctp_log_fr(biggest_tsn_newly_acked, 3188 tp1->rec.data.TSN_seq, 3189 tp1->sent, 3190 SCTP_FR_LOG_CHECK_STRIKE); 3191 #endif 3192 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked, 3193 MAX_TSN) || 3194 tp1->sent == SCTP_DATAGRAM_UNSENT) { 3195 /* done */ 3196 break; 3197 } 3198 if (stcb->asoc.peer_supports_prsctp) { 3199 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3200 /* Is it expired? */ 3201 if ( 3202 (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) 3203 ) { 3204 /* Yes so drop it */ 3205 if (tp1->data != NULL) { 3206 sctp_release_pr_sctp_chunk(stcb, tp1, 3207 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3208 &asoc->sent_queue); 3209 } 3210 tp1 = TAILQ_NEXT(tp1, sctp_next); 3211 continue; 3212 } 3213 } 3214 if ((PR_SCTP_RTX_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3215 /* Has it been retransmitted tv_sec times? */ 3216 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) { 3217 /* Yes, so drop it */ 3218 if (tp1->data != NULL) { 3219 sctp_release_pr_sctp_chunk(stcb, tp1, 3220 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3221 &asoc->sent_queue); 3222 } 3223 tp1 = TAILQ_NEXT(tp1, sctp_next); 3224 continue; 3225 } 3226 } 3227 } 3228 if (compare_with_wrap(tp1->rec.data.TSN_seq, 3229 asoc->this_sack_highest_gap, MAX_TSN)) { 3230 /* we are beyond the tsn in the sack */ 3231 break; 3232 } 3233 if (tp1->sent >= SCTP_DATAGRAM_RESEND) { 3234 /* either a RESEND, ACKED, or MARKED */ 3235 /* skip */ 3236 tp1 = TAILQ_NEXT(tp1, sctp_next); 3237 continue; 3238 } 3239 /* 3240 * CMT : SFR algo (covers part of DAC and HTNA as well) 3241 */ 3242 if (tp1->whoTo->saw_newack == 0) { 3243 /* 3244 * No new acks were receieved for data sent to this 3245 * dest. Therefore, according to the SFR algo for 3246 * CMT, no data sent to this dest can be marked for 3247 * FR using this SACK. 
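			 * For example, with destinations A and B, if this
			 * SACK newly acks only TSNs sent to B, chunks still
			 * outstanding on A are skipped here rather than
			 * struck.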
			 */
			tp1 = TAILQ_NEXT(tp1, sctp_next);
			continue;
		} else if (compare_with_wrap(tp1->rec.data.TSN_seq,
		    tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
			/*
			 * CMT: New acks were received for data sent to
			 * this dest. But no new acks were seen for data
			 * sent after tp1. Therefore, according to the SFR
			 * algo for CMT, tp1 cannot be marked for FR using
			 * this SACK. This step covers part of the DAC algo
			 * and the HTNA algo as well.
			 */
			tp1 = TAILQ_NEXT(tp1, sctp_next);
			continue;
		}
		/*
		 * Here we check to see whether we have already done a FR
		 * and, if so, whether the biggest TSN we saw in the sack
		 * is smaller than the recovery point. If so we don't
		 * strike the tsn... otherwise we CAN strike the TSN.
		 */
		/*
		 * @@@ JRI: Check for CMT if (accum_moved &&
		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
		 * 0)) {
		 */
		if (accum_moved && asoc->fast_retran_loss_recovery) {
			/*
			 * Strike the TSN if in fast-recovery and the
			 * cum-ack moved.
			 */
#ifdef SCTP_FR_LOGGING
			sctp_log_fr(biggest_tsn_newly_acked,
			    tp1->rec.data.TSN_seq,
			    tp1->sent,
			    SCTP_FR_LOG_STRIKE_CHUNK);
#endif
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				tp1->sent++;
			}
			if (sctp_cmt_on_off && sctp_cmt_use_dac) {
				/*
				 * CMT DAC algorithm: If the SACK flag is
				 * set to 0, then the lowest_newack test
				 * will not pass because it would have been
				 * set to the cumack earlier. If the chunk
				 * is not already marked for retransmission,
				 * this is not a mixed SACK, and tp1 does
				 * not lie between two sacked TSNs, then
				 * mark it one more time. NOTE that we are
				 * marking it one additional time since the
				 * SACK DAC flag indicates that two packets
				 * have been received after this missing
				 * TSN.
				 */
				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
#ifdef SCTP_FR_LOGGING
					sctp_log_fr(16 + num_dests_sacked,
					    tp1->rec.data.TSN_seq,
					    tp1->sent,
					    SCTP_FR_LOG_STRIKE_CHUNK);
#endif
					tp1->sent++;
				}
			}
		} else if (tp1->rec.data.doing_fast_retransmit) {
			/*
			 * For those that have done a FR we must take
			 * special consideration if we strike. I.e the
			 * biggest_newly_acked must be higher than the
			 * sending_seq at the time we did the FR.
			 */
			if (
#ifdef SCTP_FR_TO_ALTERNATE
			/*
			 * If FR's go to new networks, then we must only do
			 * this for singly homed asoc's. However if the FR's
			 * go to the same network (Armando's work) then it's
			 * ok to FR multiple times.
			 */
			    (asoc->numnets < 2)
#else
			    (1)
#endif
			    ) {

				if ((compare_with_wrap(biggest_tsn_newly_acked,
				    tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
				    (biggest_tsn_newly_acked ==
				    tp1->rec.data.fast_retran_tsn)) {
					/*
					 * Strike the TSN, since this ack is
					 * beyond where things were when we
					 * did a FR.
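					 * E.g. if fast_retran_tsn was 120
					 * when we last did a FR, a SACK
					 * newly acking TSN 125 shows the
					 * peer has seen traffic sent after
					 * that FR, so another strike is
					 * justified.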
3342 */ 3343 #ifdef SCTP_FR_LOGGING 3344 sctp_log_fr(biggest_tsn_newly_acked, 3345 tp1->rec.data.TSN_seq, 3346 tp1->sent, 3347 SCTP_FR_LOG_STRIKE_CHUNK); 3348 #endif 3349 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3350 tp1->sent++; 3351 } 3352 strike_flag = 1; 3353 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3354 /* 3355 * CMT DAC algorithm: If 3356 * SACK flag is set to 0, 3357 * then lowest_newack test 3358 * will not pass because it 3359 * would have been set to 3360 * the cumack earlier. If 3361 * not already to be rtx'd, 3362 * If not a mixed sack and 3363 * if tp1 is not between two 3364 * sacked TSNs, then mark by 3365 * one more. NOTE that we 3366 * are marking by one 3367 * additional time since the 3368 * SACK DAC flag indicates 3369 * that two packets have 3370 * been received after this 3371 * missing TSN. 3372 */ 3373 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 3374 (num_dests_sacked == 1) && 3375 compare_with_wrap(this_sack_lowest_newack, 3376 tp1->rec.data.TSN_seq, MAX_TSN)) { 3377 #ifdef SCTP_FR_LOGGING 3378 sctp_log_fr(32 + num_dests_sacked, 3379 tp1->rec.data.TSN_seq, 3380 tp1->sent, 3381 SCTP_FR_LOG_STRIKE_CHUNK); 3382 #endif 3383 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3384 tp1->sent++; 3385 3386 } 3387 } 3388 } 3389 } 3390 } 3391 /* 3392 * JRI: TODO: remove code for HTNA algo. CMT's SFR 3393 * algo covers HTNA. 3394 */ 3395 } else if (compare_with_wrap(tp1->rec.data.TSN_seq, 3396 biggest_tsn_newly_acked, MAX_TSN)) { 3397 /* 3398 * We don't strike these: This is the HTNA 3399 * algorithm i.e. we don't strike If our TSN is 3400 * larger than the Highest TSN Newly Acked. 3401 */ 3402 ; 3403 } else { 3404 /* Strike the TSN */ 3405 #ifdef SCTP_FR_LOGGING 3406 sctp_log_fr(biggest_tsn_newly_acked, 3407 tp1->rec.data.TSN_seq, 3408 tp1->sent, 3409 SCTP_FR_LOG_STRIKE_CHUNK); 3410 #endif 3411 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3412 tp1->sent++; 3413 } 3414 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3415 /* 3416 * CMT DAC algorithm: If SACK flag is set to 3417 * 0, then lowest_newack test will not pass 3418 * because it would have been set to the 3419 * cumack earlier. If not already to be 3420 * rtx'd, If not a mixed sack and if tp1 is 3421 * not between two sacked TSNs, then mark by 3422 * one more. NOTE that we are marking by one 3423 * additional time since the SACK DAC flag 3424 * indicates that two packets have been 3425 * received after this missing TSN. 3426 */ 3427 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3428 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) { 3429 #ifdef SCTP_FR_LOGGING 3430 sctp_log_fr(48 + num_dests_sacked, 3431 tp1->rec.data.TSN_seq, 3432 tp1->sent, 3433 SCTP_FR_LOG_STRIKE_CHUNK); 3434 #endif 3435 tp1->sent++; 3436 } 3437 } 3438 } 3439 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3440 /* Increment the count to resend */ 3441 struct sctp_nets *alt; 3442 3443 /* printf("OK, we are now ready to FR this guy\n"); */ 3444 #ifdef SCTP_FR_LOGGING 3445 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count, 3446 0, SCTP_FR_MARKED); 3447 #endif 3448 if (strike_flag) { 3449 /* This is a subsequent FR */ 3450 SCTP_STAT_INCR(sctps_sendmultfastretrans); 3451 } 3452 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3453 if (sctp_cmt_on_off) { 3454 /* 3455 * CMT: Using RTX_SSTHRESH policy for CMT. 3456 * If CMT is being used, then pick dest with 3457 * largest ssthresh for any retransmission. 
				 */
				tp1->no_fr_allowed = 1;
				alt = tp1->whoTo;
				alt = sctp_find_alternate_net(stcb, alt, 1);
				/*
				 * CUCv2: If a different dest is picked for
				 * the retransmission, then a new
				 * (rtx-)pseudo-cumack needs to be tracked
				 * for the orig dest. Let CUCv2 track the
				 * new (rtx-)pseudo-cumack always.
				 */
				tp1->whoTo->find_pseudo_cumack = 1;
				tp1->whoTo->find_rtx_pseudo_cumack = 1;


			} else {	/* CMT is OFF */

#ifdef SCTP_FR_TO_ALTERNATE
				/* Can we find an alternate? */
				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
#else
				/*
				 * default behavior is to NOT retransmit
				 * FR's to an alternate. Armando Caro's
				 * paper details why.
				 */
				alt = tp1->whoTo;
#endif
			}

			tp1->rec.data.doing_fast_retransmit = 1;
			tot_retrans++;
			/* mark the sending seq for possible subsequent FR's */
			/*
			 * printf("Marking TSN for FR new value %x\n",
			 * (uint32_t)tp1->rec.data.TSN_seq);
			 */
			if (TAILQ_EMPTY(&asoc->send_queue)) {
				/*
				 * If the send queue is empty then
				 * sending_seq is the next sequence number
				 * that will be assigned, so we use it to
				 * mark where this FR happened.
				 */
				tp1->rec.data.fast_retran_tsn = sending_seq;
			} else {
				/*
				 * If there are chunks on the send queue
				 * (unsent data that has made it from the
				 * stream queues but not out the door), we
				 * take the first one (which will have the
				 * lowest TSN) as the marker for this FR.
				 */
				struct sctp_tmit_chunk *ttt;

				ttt = TAILQ_FIRST(&asoc->send_queue);
				tp1->rec.data.fast_retran_tsn =
				    ttt->rec.data.TSN_seq;
			}

			if (tp1->do_rtt) {
				/*
				 * this guy had an RTO calculation pending
				 * on it, cancel it
				 */
				tp1->do_rtt = 0;
			}
			/* fix counts and things */
#ifdef SCTP_FLIGHT_LOGGING
			sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
			    tp1->whoTo->flight_size,
			    tp1->book_size,
			    (uintptr_t) tp1->whoTo,
			    tp1->rec.data.TSN_seq);
#endif
			tp1->whoTo->net_ack++;
			sctp_flight_size_decrease(tp1);

#ifdef SCTP_LOG_RWND
			sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
			    asoc->peers_rwnd, tp1->send_size, sctp_peer_chunk_oh);
#endif
			/* add back to the rwnd */
			asoc->peers_rwnd += (tp1->send_size + sctp_peer_chunk_oh);

			/* remove from the total flight */
			sctp_total_flight_decrease(stcb, tp1);
			if (alt != tp1->whoTo) {
				/* yes, there is an alternate. */
				sctp_free_remote_addr(tp1->whoTo);
				tp1->whoTo = alt;
				atomic_add_int(&alt->ref_count, 1);
			}
		}
		tp1 = TAILQ_NEXT(tp1, sctp_next);
	}			/* while (tp1) */

	if (tot_retrans > 0) {
		/*
		 * Setup the ECN nonce re-sync point. We do this since once
		 * we fast-retransmit something we introduce a Karn's rule
		 * scenario and won't know the totals for the ECN bits.
		 */
		asoc->nonce_resync_tsn = sending_seq;
		asoc->nonce_wait_for_ecne = 0;
		asoc->nonce_sum_check = 0;
	}
}

struct sctp_tmit_chunk *
sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
	struct timeval now;
	int now_filled = 0;

	if (asoc->peer_supports_prsctp == 0) {
		return (NULL);
	}
	tp1 = TAILQ_FIRST(&asoc->sent_queue);
	while (tp1) {
		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
		    tp1->sent != SCTP_DATAGRAM_RESEND) {
			/* no chance to advance, out of here */
			break;
		}
		if (!PR_SCTP_ENABLED(tp1->flags)) {
			/*
			 * We can't fwd-tsn past any that are reliable, aka
			 * retransmitted until the asoc fails.
			 */
			break;
		}
		if (!now_filled) {
			SCTP_GETTIME_TIMEVAL(&now);
			now_filled = 1;
		}
		tp2 = TAILQ_NEXT(tp1, sctp_next);
		/*
		 * Now we have a chunk which is marked for another
		 * retransmission to a PR-stream but which may have run out
		 * of its chances already, OR has been marked to be skipped
		 * now. Can we skip it if it's a resend?
		 */
		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
			/*
			 * Now is this one marked for resend and is its
			 * time now up?
			 */
			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
				/* Yes so drop it */
				if (tp1->data) {
					sctp_release_pr_sctp_chunk(stcb, tp1,
					    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
					    &asoc->sent_queue);
				}
			} else {
				/*
				 * No, we are done when we hit one marked
				 * for resend whose time has not expired.
				 */
				break;
			}
		}
		/*
		 * Ok now if this chunk is marked to drop it we can clean
		 * up the chunk, advance our peer ack point and we can
		 * check the next chunk.
		 */
		if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
			/* the advanced peer ack point goes forward */
			asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
			a_adv = tp1;
			/*
			 * we don't want to de-queue it here. Just wait for
			 * the next peer SACK to come with a new cumTSN and
			 * then the chunk will be dropped in the normal
			 * fashion.
			 */
			if (tp1->data) {
				sctp_free_bufspace(stcb, asoc, tp1, 1);
				/*
				 * Maybe there should be another
				 * notification type
				 */
				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
				    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
				    tp1);
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
				if (stcb->sctp_socket) {
					sctp_sowwakeup(stcb->sctp_ep,
					    stcb->sctp_socket);
#ifdef SCTP_WAKE_LOGGING
					sctp_wakeup_log(stcb, tp1->rec.data.TSN_seq, 1, SCTP_WAKESND_FROM_FWDTSN);
#endif
				}
			}
		} else {
			/*
			 * If it is still in RESEND we can advance no
			 * further
			 */
			break;
		}
		/*
		 * If we hit here we just dumped tp1; move to the next TSN
		 * on the sent queue.
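		 * The caller uses the returned a_adv to build a
		 * FORWARD-TSN chunk advertising advanced_peer_ack_point;
		 * e.g. with a cum-ack of 100 and TSNs 101-103 abandoned,
		 * the peer is told it may skip ahead to 103.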
3669 */ 3670 tp1 = tp2; 3671 } 3672 return (a_adv); 3673 } 3674 3675 #ifdef SCTP_HIGH_SPEED 3676 struct sctp_hs_raise_drop { 3677 int32_t cwnd; 3678 int32_t increase; 3679 int32_t drop_percent; 3680 }; 3681 3682 #define SCTP_HS_TABLE_SIZE 73 3683 3684 struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = { 3685 {38, 1, 50}, /* 0 */ 3686 {118, 2, 44}, /* 1 */ 3687 {221, 3, 41}, /* 2 */ 3688 {347, 4, 38}, /* 3 */ 3689 {495, 5, 37}, /* 4 */ 3690 {663, 6, 35}, /* 5 */ 3691 {851, 7, 34}, /* 6 */ 3692 {1058, 8, 33}, /* 7 */ 3693 {1284, 9, 32}, /* 8 */ 3694 {1529, 10, 31}, /* 9 */ 3695 {1793, 11, 30}, /* 10 */ 3696 {2076, 12, 29}, /* 11 */ 3697 {2378, 13, 28}, /* 12 */ 3698 {2699, 14, 28}, /* 13 */ 3699 {3039, 15, 27}, /* 14 */ 3700 {3399, 16, 27}, /* 15 */ 3701 {3778, 17, 26}, /* 16 */ 3702 {4177, 18, 26}, /* 17 */ 3703 {4596, 19, 25}, /* 18 */ 3704 {5036, 20, 25}, /* 19 */ 3705 {5497, 21, 24}, /* 20 */ 3706 {5979, 22, 24}, /* 21 */ 3707 {6483, 23, 23}, /* 22 */ 3708 {7009, 24, 23}, /* 23 */ 3709 {7558, 25, 22}, /* 24 */ 3710 {8130, 26, 22}, /* 25 */ 3711 {8726, 27, 22}, /* 26 */ 3712 {9346, 28, 21}, /* 27 */ 3713 {9991, 29, 21}, /* 28 */ 3714 {10661, 30, 21}, /* 29 */ 3715 {11358, 31, 20}, /* 30 */ 3716 {12082, 32, 20}, /* 31 */ 3717 {12834, 33, 20}, /* 32 */ 3718 {13614, 34, 19}, /* 33 */ 3719 {14424, 35, 19}, /* 34 */ 3720 {15265, 36, 19}, /* 35 */ 3721 {16137, 37, 19}, /* 36 */ 3722 {17042, 38, 18}, /* 37 */ 3723 {17981, 39, 18}, /* 38 */ 3724 {18955, 40, 18}, /* 39 */ 3725 {19965, 41, 17}, /* 40 */ 3726 {21013, 42, 17}, /* 41 */ 3727 {22101, 43, 17}, /* 42 */ 3728 {23230, 44, 17}, /* 43 */ 3729 {24402, 45, 16}, /* 44 */ 3730 {25618, 46, 16}, /* 45 */ 3731 {26881, 47, 16}, /* 46 */ 3732 {28193, 48, 16}, /* 47 */ 3733 {29557, 49, 15}, /* 48 */ 3734 {30975, 50, 15}, /* 49 */ 3735 {32450, 51, 15}, /* 50 */ 3736 {33986, 52, 15}, /* 51 */ 3737 {35586, 53, 14}, /* 52 */ 3738 {37253, 54, 14}, /* 53 */ 3739 {38992, 55, 14}, /* 54 */ 3740 {40808, 56, 14}, /* 55 */ 3741 {42707, 57, 13}, /* 56 */ 3742 {44694, 58, 13}, /* 57 */ 3743 {46776, 59, 13}, /* 58 */ 3744 {48961, 60, 13}, /* 59 */ 3745 {51258, 61, 13}, /* 60 */ 3746 {53677, 62, 12}, /* 61 */ 3747 {56230, 63, 12}, /* 62 */ 3748 {58932, 64, 12}, /* 63 */ 3749 {61799, 65, 12}, /* 64 */ 3750 {64851, 66, 11}, /* 65 */ 3751 {68113, 67, 11}, /* 66 */ 3752 {71617, 68, 11}, /* 67 */ 3753 {75401, 69, 10}, /* 68 */ 3754 {79517, 70, 10}, /* 69 */ 3755 {84035, 71, 10}, /* 70 */ 3756 {89053, 72, 10}, /* 71 */ 3757 {94717, 73, 9} /* 72 */ 3758 }; 3759 3760 static void 3761 sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net) 3762 { 3763 int cur_val, i, indx, incr; 3764 3765 cur_val = net->cwnd >> 10; 3766 indx = SCTP_HS_TABLE_SIZE - 1; 3767 3768 if (cur_val < sctp_cwnd_adjust[0].cwnd) { 3769 /* normal mode */ 3770 if (net->net_ack > net->mtu) { 3771 net->cwnd += net->mtu; 3772 #ifdef SCTP_CWND_MONITOR 3773 sctp_log_cwnd(stcb, net, net->mtu, SCTP_CWND_LOG_FROM_SS); 3774 #endif 3775 } else { 3776 net->cwnd += net->net_ack; 3777 #ifdef SCTP_CWND_MONITOR 3778 sctp_log_cwnd(stcb, net, net->net_ack, SCTP_CWND_LOG_FROM_SS); 3779 #endif 3780 } 3781 } else { 3782 for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) { 3783 if (cur_val < sctp_cwnd_adjust[i].cwnd) { 3784 indx = i; 3785 break; 3786 } 3787 } 3788 net->last_hs_used = indx; 3789 incr = ((sctp_cwnd_adjust[indx].increase) << 10); 3790 net->cwnd += incr; 3791 #ifdef SCTP_CWND_MONITOR 3792 sctp_log_cwnd(stcb, net, incr, SCTP_CWND_LOG_FROM_SS); 3793 #endif 3794 } 3795 } 3796 3797 
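/*
 * Worked example of the HighSpeed table above: with cwnd = 2 MB,
 * cur_val = cwnd >> 10 = 2048, which first falls below the cwnd column at
 * entry 11, {2076, 12, 29}. Slow-start increases then add 12 << 10 bytes
 * per step, and on a loss sctp_hs_cwnd_decrease() below cuts cwnd by only
 * about 29% instead of the standard halving, so large windows recover
 * their rate faster.
 */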
static void
sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	int cur_val, i, indx;

#ifdef SCTP_CWND_MONITOR
	int old_cwnd = net->cwnd;

#endif

	cur_val = net->cwnd >> 10;
	indx = net->last_hs_used;
	if (cur_val < sctp_cwnd_adjust[0].cwnd) {
		/* normal mode */
		net->ssthresh = net->cwnd / 2;
		if (net->ssthresh < (net->mtu * 2)) {
			net->ssthresh = 2 * net->mtu;
		}
		net->cwnd = net->ssthresh;
	} else {
		/* drop by the proper amount */
		net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
		    sctp_cwnd_adjust[net->last_hs_used].drop_percent);
		net->cwnd = net->ssthresh;
		/* now where are we */
		indx = net->last_hs_used;
		cur_val = net->cwnd >> 10;
		/* reset where we are in the table */
		if (cur_val < sctp_cwnd_adjust[0].cwnd) {
			/* fell out of hs */
			net->last_hs_used = 0;
		} else {
			for (i = indx; i >= 1; i--) {
				if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
					break;
				}
			}
			net->last_hs_used = indx;
		}
	}
#ifdef SCTP_CWND_MONITOR
	sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR);
#endif

}

#endif


static __inline void
sctp_cwnd_update(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all, int will_exit)
{
	struct sctp_nets *net;

	/******************************/
	/* update cwnd and Early FR   */
	/******************************/
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {

#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code. Need to debug.
		 */
		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
			if (compare_with_wrap(asoc->last_acked_seq,
			    net->fast_recovery_tsn, MAX_TSN) ||
			    (asoc->last_acked_seq == net->fast_recovery_tsn) ||
			    compare_with_wrap(net->pseudo_cumack, net->fast_recovery_tsn, MAX_TSN) ||
			    (net->pseudo_cumack == net->fast_recovery_tsn)) {
				net->will_exit_fast_recovery = 1;
			}
		}
#endif
		if (sctp_early_fr) {
			/*
			 * So, first of all, do we need to have an Early FR
			 * timer running?
			 */
			if (((TAILQ_FIRST(&asoc->sent_queue)) &&
			    (net->ref_count > 1) &&
			    (net->flight_size < net->cwnd)) ||
			    (reneged_all)) {
				/*
				 * Yes, so in this case stop it if it's
				 * running, and then restart it. Reneging
				 * all is a special case where we want to
				 * run the Early FR timer and then force the
				 * last few unacked chunks to be sent,
				 * causing us to elicit a SACK with gaps to
				 * force out the others.
				 */
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck2);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
				}
				SCTP_STAT_INCR(sctps_earlyfrstrid);
				sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
			} else {
				/* No, stop it if it's running */
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck3);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_21);
				}
			}
		}
		/* if nothing was acked on this destination skip it */
		if (net->net_ack == 0) {
#ifdef SCTP_CWND_LOGGING
			sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
#endif
			continue;
		}
		if (net->net_ack2 > 0) {
			/*
			 * Karn's rule applies to clearing error count, this
			 * is optional.
			 */
			net->error_count = 0;
			if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
			    SCTP_ADDR_NOT_REACHABLE) {
				/* addr came good */
				net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
				net->dest_state |= SCTP_ADDR_REACHABLE;
				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
				    SCTP_RECEIVED_SACK, (void *)net);
				/* now was it the primary? if so restore */
				if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
					sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
				}
			}
		}
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code
		 */
		/*
		 * if (sctp_cmt_on_off == 1 &&
		 * net->fast_retran_loss_recovery &&
		 * net->will_exit_fast_recovery == 0) { // @@@ Do something
		 * } else if (sctp_cmt_on_off == 0 &&
		 * asoc->fast_retran_loss_recovery && will_exit == 0) {
		 */
#endif

		if (asoc->fast_retran_loss_recovery && will_exit == 0 && sctp_cmt_on_off == 0) {
			/*
			 * If we are in loss recovery we skip any cwnd
			 * update
			 */
			goto skip_cwnd_update;
		}
		/*
		 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
		 * moved.
		 */
		if (accum_moved || (sctp_cmt_on_off && net->new_pseudo_cumack)) {
			/* If the cumulative ack moved we can proceed */
			if (net->cwnd <= net->ssthresh) {
				/* We are in slow start */
				if (net->flight_size + net->net_ack >=
				    net->cwnd) {
#ifdef SCTP_HIGH_SPEED
					sctp_hs_cwnd_increase(stcb, net);
#else
					if (net->net_ack > (net->mtu * sctp_L2_abc_variable)) {
						net->cwnd += (net->mtu * sctp_L2_abc_variable);
#ifdef SCTP_CWND_MONITOR
						sctp_log_cwnd(stcb, net, net->mtu,
						    SCTP_CWND_LOG_FROM_SS);
#endif

					} else {
						net->cwnd += net->net_ack;
#ifdef SCTP_CWND_MONITOR
						sctp_log_cwnd(stcb, net, net->net_ack,
						    SCTP_CWND_LOG_FROM_SS);
#endif

					}
#endif
				} else {
					unsigned int dif;

					dif = net->cwnd - (net->flight_size +
					    net->net_ack);
#ifdef SCTP_CWND_LOGGING
					sctp_log_cwnd(stcb, net, net->net_ack,
					    SCTP_CWND_LOG_NOADV_SS);
#endif
				}
			} else {
				/* We are in congestion avoidance */
				if (net->flight_size + net->net_ack >=
				    net->cwnd) {
					/*
					 * add to pba only if we had a
					 * cwnd's worth (or so) in flight OR
					 * the burst limit was applied.
					 */
					net->partial_bytes_acked +=
					    net->net_ack;

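					/*
					 * Editor's note -- worked numbers
					 * for the two growth modes here
					 * (sketch only). Slow start above:
					 * with mtu = 1500 and the ABC limit
					 * sctp_L2_abc_variable = 1, a SACK
					 * newly acking 4380 bytes adds only
					 * mtu * L = 1500 to cwnd, while one
					 * acking 800 bytes adds the 800
					 * directly. Congestion avoidance
					 * below: net_ack accumulates into
					 * partial_bytes_acked and cwnd grows
					 * by one MTU only once pba reaches a
					 * full cwnd's worth of acked data.
					 */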
					/*
					 * Do we need to increase (if pba is
					 * > cwnd)?
					 */
					if (net->partial_bytes_acked >=
					    net->cwnd) {
						if (net->cwnd <
						    net->partial_bytes_acked) {
							net->partial_bytes_acked -=
							    net->cwnd;
						} else {
							net->partial_bytes_acked =
							    0;
						}
						net->cwnd += net->mtu;
#ifdef SCTP_CWND_MONITOR
						sctp_log_cwnd(stcb, net, net->mtu,
						    SCTP_CWND_LOG_FROM_CA);
#endif
					}
#ifdef SCTP_CWND_LOGGING
					else {
						sctp_log_cwnd(stcb, net, net->net_ack,
						    SCTP_CWND_LOG_NOADV_CA);
					}
#endif
				} else {
					unsigned int dif;

#ifdef SCTP_CWND_LOGGING
					sctp_log_cwnd(stcb, net, net->net_ack,
					    SCTP_CWND_LOG_NOADV_CA);
#endif
					dif = net->cwnd - (net->flight_size +
					    net->net_ack);
				}
			}
		} else {
#ifdef SCTP_CWND_LOGGING
			sctp_log_cwnd(stcb, net, net->mtu,
			    SCTP_CWND_LOG_NO_CUMACK);
#endif
		}
skip_cwnd_update:
		/*
		 * NOW, according to Karn's rule, do we need to restore the
		 * RTO timer back? Check our net_ack2. If not set then we
		 * have an ambiguity, i.e. all data ack'd was sent to more
		 * than one place.
		 */
		if (net->net_ack2) {
			/* restore any doubled timers */
			net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
			if (net->RTO < stcb->asoc.minrto) {
				net->RTO = stcb->asoc.minrto;
			}
			if (net->RTO > stcb->asoc.maxrto) {
				net->RTO = stcb->asoc.maxrto;
			}
		}
	}
}

static void
sctp_fs_audit(struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;

	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->sent < SCTP_DATAGRAM_RESEND) {
			inflight++;
		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend++;
		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
			inbetween++;
		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
			above++;
		} else {
			acked++;
		}
	}

	if ((inflight > 0) || (inbetween > 0)) {
#ifdef INVARIANTS
		panic("Flight size-express incorrect? \n");
#else
		printf("Flight size-express incorrect inflight:%d inbetween:%d\n",
		    inflight, inbetween);
#endif
	}
}


static void
sctp_window_probe_recovery(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct sctp_tmit_chunk *tp1)
{
	struct sctp_tmit_chunk *chk;

	/* First setup this one and get it moved back */
	tp1->sent = SCTP_DATAGRAM_UNSENT;
	tp1->window_probe = 0;
#ifdef SCTP_FLIGHT_LOGGING
	sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
	    tp1->whoTo->flight_size,
	    tp1->book_size,
	    (uintptr_t) tp1->whoTo,
	    tp1->rec.data.TSN_seq);
#endif
	sctp_flight_size_decrease(tp1);
	sctp_total_flight_decrease(stcb, tp1);
	TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
	TAILQ_INSERT_HEAD(&asoc->send_queue, tp1, sctp_next);
	asoc->sent_queue_cnt--;
	asoc->send_queue_cnt++;
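	/*
	 * Editor's note -- net effect of this routine (a sketch of the
	 * intent, not a normative statement): a window-probe chunk was
	 * still being counted in flight while peers_rwnd sat at 0. Once the
	 * window reopens, the probe (and, below, anything still marked
	 * RESEND) is rewound:
	 *
	 *	sent = SCTP_DATAGRAM_UNSENT
	 *	flight_size / total_flight decreased by book_size
	 *	chunk moved from sent_queue to the head of send_queue
	 *
	 * so the chunks go out again as ordinary first transmissions rather
	 * than as retransmissions.
	 */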
	/*
	 * Now all chunks marked for RESEND on the sent_queue must be moved
	 * back too.
	 */
	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			/* Another chunk to move */
			chk->sent = SCTP_DATAGRAM_UNSENT;
			chk->window_probe = 0;
			/* It should not be in flight */
			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
			TAILQ_INSERT_AFTER(&asoc->send_queue, tp1, chk, sctp_next);
			asoc->sent_queue_cnt--;
			asoc->send_queue_cnt++;
			sctp_ucount_decr(asoc->sent_queue_retran_cnt);
		}
	}
}


void
sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
    uint32_t rwnd, int nonce_sum_flag, int *abort_now)
{
	struct sctp_nets *net;
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *tp1, *tp2;
	uint32_t old_rwnd;
	int win_probe_recovery = 0;
	int win_probe_recovered = 0;
	int j, done_once;

	SCTP_TCB_LOCK_ASSERT(stcb);
	asoc = &stcb->asoc;
	if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
		/* old ack */
		return;
	}
	/* First setup for CC stuff */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		net->prev_cwnd = net->cwnd;
		net->net_ack = 0;
		net->net_ack2 = 0;

		/*
		 * CMT: Reset CUC and Fast recovery algo variables before
		 * SACK processing
		 */
		net->new_pseudo_cumack = 0;
		net->will_exit_fast_recovery = 0;
	}
	if (sctp_strict_sacks) {
		uint32_t send_s;

		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
			tp1 = TAILQ_LAST(&asoc->sent_queue,
			    sctpchunk_listhead);
			send_s = tp1->rec.data.TSN_seq + 1;
		} else {
			send_s = asoc->sending_seq;
		}
		if ((cumack == send_s) ||
		    compare_with_wrap(cumack, send_s, MAX_TSN)) {
#ifndef INVARIANTS
			struct mbuf *oper;

#endif
#ifdef INVARIANTS
			panic("Impossible sack 1");
#else
			*abort_now = 1;
			/* XXX */
			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
			    0, M_DONTWAIT, 1, MT_DATA);
			if (oper) {
				struct sctp_paramhdr *ph;
				uint32_t *ippp;

				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
				    sizeof(uint32_t);
				ph = mtod(oper, struct sctp_paramhdr *);
				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
				ph->param_length = htons(SCTP_BUF_LEN(oper));
				ippp = (uint32_t *) (ph + 1);
				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
			}
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper);
			return;
#endif
		}
	}
	old_rwnd = asoc->peers_rwnd;
	asoc->this_sack_highest_gap = cumack;
	stcb->asoc.overall_error_count = 0;
	if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
		/* process the new consecutive TSN first */
		tp1 = TAILQ_FIRST(&asoc->sent_queue);
		while (tp1) {
			tp2 = TAILQ_NEXT(tp1, sctp_next);
			if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
			    MAX_TSN) ||
			    cumack == tp1->rec.data.TSN_seq) {
				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
					/*
					 * ECN Nonce: Add the nonce to the
					 * sender's nonce sum
					 */
					asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
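					/*
					 * Editor's note -- the nonce sum in
					 * one line (illustrative sketch
					 * only): each DATA chunk carries a
					 * one-bit nonce, and the peer's
					 * SACK echoes the 1-bit sum of the
					 * nonces at or below the cum-ack.
					 * E.g. nonces 1, 0, 1 for three
					 * acked TSNs give
					 *
					 *	sum = (1 + 0 + 1) & 1 = 0
					 *
					 * so nonce_sum_expect_base only
					 * matters modulo the low bit, which
					 * is exactly how it is masked with
					 * SCTP_SACK_NONCE_SUM when checked
					 * further down.
					 */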
					if (tp1->sent < SCTP_DATAGRAM_ACKED) {
						/*
						 * If it is less than ACKED,
						 * it is now no longer in
						 * flight. Higher values may
						 * occur during marking.
						 */
						if (tp1->sent < SCTP_DATAGRAM_RESEND) {
#ifdef SCTP_FLIGHT_LOGGING
							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
							    tp1->whoTo->flight_size,
							    tp1->book_size,
							    (uintptr_t) tp1->whoTo,
							    tp1->rec.data.TSN_seq);
#endif

							sctp_flight_size_decrease(tp1);
							sctp_total_flight_decrease(stcb, tp1);
						}
						tp1->whoTo->net_ack += tp1->send_size;
						if (tp1->snd_count < 2) {
							/*
							 * True
							 * non-retransmitted
							 * chunk
							 */
							tp1->whoTo->net_ack2 +=
							    tp1->send_size;

							/* update RTO too? */
							if (tp1->do_rtt) {
								tp1->whoTo->RTO =
								    sctp_calculate_rto(stcb,
								    asoc, tp1->whoTo,
								    &tp1->sent_rcv_time);
								tp1->do_rtt = 0;
							}
						}
						/*
						 * CMT: CUCv2 algorithm.
						 * From the cumack'd TSNs,
						 * for each TSN being acked
						 * for the first time, set
						 * the following variables
						 * for the corresponding
						 * destination.
						 * new_pseudo_cumack will
						 * trigger a cwnd update.
						 * find_(rtx_)pseudo_cumack
						 * will trigger search for
						 * the next expected
						 * (rtx-)pseudo-cumack.
						 */
						tp1->whoTo->new_pseudo_cumack = 1;
						tp1->whoTo->find_pseudo_cumack = 1;
						tp1->whoTo->find_rtx_pseudo_cumack = 1;

#ifdef SCTP_CWND_LOGGING
						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
#endif
					}
					if (tp1->sent == SCTP_DATAGRAM_RESEND) {
						sctp_ucount_decr(asoc->sent_queue_retran_cnt);
					}
					if (tp1->rec.data.chunk_was_revoked) {
						/* deflate the cwnd */
						tp1->whoTo->cwnd -= tp1->book_size;
						tp1->rec.data.chunk_was_revoked = 0;
					}
					tp1->sent = SCTP_DATAGRAM_ACKED;
				}
			} else {
				break;
			}
			TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
			if (tp1->data) {
				sctp_free_bufspace(stcb, asoc, tp1, 1);
				sctp_m_freem(tp1->data);
			}
#ifdef SCTP_SACK_LOGGING
			sctp_log_sack(asoc->last_acked_seq,
			    cumack,
			    tp1->rec.data.TSN_seq,
			    0,
			    0,
			    SCTP_LOG_FREE_SENT);
#endif
			tp1->data = NULL;
			asoc->sent_queue_cnt--;
			sctp_free_remote_addr(tp1->whoTo);
			sctp_free_a_chunk(stcb, tp1);
			tp1 = tp2;
		}
	}
	if (stcb->sctp_socket) {
		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
#ifdef SCTP_WAKE_LOGGING
		sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
#endif
		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
#ifdef SCTP_WAKE_LOGGING
	} else {
		sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
#endif
	}


	if (asoc->last_acked_seq != cumack)
		sctp_cwnd_update(stcb, asoc, 1, 0, 0);

	asoc->last_acked_seq = cumack;

	if (TAILQ_EMPTY(&asoc->sent_queue)) {
		/* nothing left in-flight */
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			net->flight_size = 0;
			net->partial_bytes_acked = 0;
		}
		asoc->total_flight = 0;
		asoc->total_flight_count = 0;
	}
	/* Fix up the a-p-a-p for future PR-SCTP sends */
	if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
		asoc->advanced_peer_ack_point = cumack;
	}
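	/*
	 * Editor's note -- compare_with_wrap(a, b, MAX_TSN), used all
	 * through this file, is a wrap-safe "a is strictly newer than b"
	 * test over the 32-bit TSN space (a sketch of the intended
	 * serial-number semantics, not a normative definition):
	 *
	 *	compare_with_wrap(10, 5, MAX_TSN)         -> true
	 *	compare_with_wrap(5, 0xfffffffa, MAX_TSN) -> true  (wrapped)
	 *	compare_with_wrap(5, 5, MAX_TSN)          -> false (equal)
	 *
	 * which is why callers that also want "or equal" pair it with an
	 * explicit == check, as several tests in this function do.
	 */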
	/* ECN Nonce updates */
	if (asoc->ecn_nonce_allowed) {
		if (asoc->nonce_sum_check) {
			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
				if (asoc->nonce_wait_for_ecne == 0) {
					struct sctp_tmit_chunk *lchk;

					lchk = TAILQ_FIRST(&asoc->send_queue);
					asoc->nonce_wait_for_ecne = 1;
					if (lchk) {
						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
					} else {
						asoc->nonce_wait_tsn = asoc->sending_seq;
					}
				} else {
					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
						/*
						 * Misbehaving peer. We need
						 * to react to this guy
						 */
						asoc->ecn_allowed = 0;
						asoc->ecn_nonce_allowed = 0;
					}
				}
			}
		} else {
			/* See if Resynchronization is Possible */
			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
				asoc->nonce_sum_check = 1;
				/*
				 * Now we must calculate what the base is.
				 * We do this based on two things: we know
				 * the totals for all the segments gap-acked
				 * in the SACK (none here), and we know the
				 * SACK's nonce sum, it's in nonce_sum_flag.
				 * So we can build a truth table to
				 * back-calculate the new value of
				 * asoc->nonce_sum_expect_base:
				 *
				 * SACK-flag-Value  Seg-Sums  Base
				 *        0            0        0
				 *        1            0        1
				 *        0            1        1
				 *        1            1        0
				 */
				asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
			}
		}
	}
	/* RWND update */
	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
		/* SWS sender side engages */
		asoc->peers_rwnd = 0;
	}
	if (asoc->peers_rwnd > old_rwnd) {
		win_probe_recovery = 1;
	}
	/* Now assure a timer where data is queued at */
	done_once = 0;
again:
	j = 0;
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (win_probe_recovery && (net->window_probe)) {
			net->window_probe = 0;
			win_probe_recovered = 1;
			/*
			 * Find the first chunk that was used with a window
			 * probe and clear the sent flag.
			 */
			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
				if (tp1->window_probe) {
					/* move back to data send queue */
					sctp_window_probe_recovery(stcb, asoc, net, tp1);
					break;
				}
			}
		}
		if (net->flight_size) {
			int to_ticks;

			if (net->RTO == 0) {
				to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
			} else {
				to_ticks = MSEC_TO_TICKS(net->RTO);
			}
			j++;
			SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
			    sctp_timeout_handler, &net->rxt_timer);
		} else {
			if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, net,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
			}
			if (sctp_early_fr) {
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
				}
			}
		}
	}
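	/*
	 * Editor's note -- worked example for the rwnd update earlier in
	 * this function (sketch only; the value 256 for the per-chunk
	 * overhead allowance sctp_peer_chunk_oh is just an assumed sysctl
	 * setting). The peer's advertised window is deflated by what is
	 * still in flight plus that allowance. With rwnd = 65535,
	 * total_flight = 12000 and sent_queue_cnt = 10:
	 *
	 *	peers_rwnd = 65535 - (12000 + 10 * 256) = 50975
	 *
	 * If the result falls below the sender-side SWS threshold
	 * (sctp_sws_sender) it is clamped to 0, so we stop sending instead
	 * of dribbling out tiny chunks.
	 */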
	if ((j == 0) &&
	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
	    (asoc->sent_queue_retran_cnt == 0) &&
	    (win_probe_recovered == 0) &&
	    (done_once == 0)) {
		/* huh, this should not happen */
		sctp_fs_audit(asoc);
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			net->flight_size = 0;
		}
		asoc->total_flight = 0;
		asoc->total_flight_count = 0;
		asoc->sent_queue_retran_cnt = 0;
		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				sctp_flight_size_increase(tp1);
				sctp_total_flight_increase(stcb, tp1);
			} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
				asoc->sent_queue_retran_cnt++;
			}
		}
		done_once = 1;
		goto again;
	}
	/**********************************/
	/* Now what about shutdown issues */
	/**********************************/
	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
		/* nothing left on sendqueue.. consider done */
		/* clean up */
		if ((asoc->stream_queue_cnt == 1) &&
		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
		    (asoc->locked_on_sending)
		    ) {
			struct sctp_stream_queue_pending *sp;

			/*
			 * I may be in a state where we got all across.. but
			 * cannot write more due to a shutdown... we abort
			 * since the user did not indicate EOR in this case.
			 * The sp will be cleaned during free of the asoc.
			 */
			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
			    sctp_streamhead);
			if ((sp) && (sp->length == 0) && (sp->msg_is_complete == 0)) {
				asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
				asoc->locked_on_sending = NULL;
				asoc->stream_queue_cnt--;
			}
		}
		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
		    (asoc->stream_queue_cnt == 0)) {
			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
				/* Need to abort here */
				struct mbuf *oper;

abort_out_now:
				*abort_now = 1;
				/* XXX */
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
					    sizeof(uint32_t);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper);
			} else {
				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
				}
				asoc->state = SCTP_STATE_SHUTDOWN_SENT;
				sctp_stop_timers_for_shutdown(stcb);
				sctp_send_shutdown(stcb,
				    stcb->asoc.primary_destination);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
				    stcb->sctp_ep, stcb, asoc->primary_destination);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
				    stcb->sctp_ep, stcb, asoc->primary_destination);
			}
		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (asoc->stream_queue_cnt == 0)) {
			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
				goto abort_out_now;
			}
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
			asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;
			sctp_send_shutdown_ack(stcb,
			    stcb->asoc.primary_destination);

			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
			    stcb->sctp_ep, stcb, asoc->primary_destination);
		}
	}
#ifdef SCTP_SACK_RWND_LOGGING
	sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
	    rwnd,
	    stcb->asoc.peers_rwnd,
	    stcb->asoc.total_flight,
	    stcb->asoc.total_output_queue_size);

#endif
}



void
sctp_handle_sack(struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
    struct sctp_nets *net_from, int *abort_now)
{
	struct sctp_association *asoc;
	struct sctp_sack *sack;
	struct sctp_tmit_chunk *tp1, *tp2;
	uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
	    this_sack_lowest_newack;
	uint32_t sav_cum_ack;
	uint16_t num_seg, num_dup;
	uint16_t wake_him = 0;
	unsigned int sack_length;
	uint32_t send_s = 0;
	long j;
	int accum_moved = 0;
	int will_exit_fast_recovery = 0;
	uint32_t a_rwnd, old_rwnd;
	int win_probe_recovery = 0;
	int win_probe_recovered = 0;
	struct sctp_nets *net = NULL;
	int nonce_sum_flag, ecn_seg_sums = 0;
	int done_once;
	uint8_t reneged_all = 0;
	uint8_t cmt_dac_flag;

	/*
	 * we take any chance we can to service our queues since we cannot
	 * get awoken when the socket is read from :<
	 */
	/*
	 * Now perform the actual SACK handling:
	 *  1) Verify that it is not an old sack; if so, discard.
	 *  2) If there is nothing left in the send queue (cum-ack is equal
	 *     to last acked) then you have a duplicate too; update any rwnd
	 *     change, verify no timers are running, then return.
	 *  3) Process any new consecutive data, i.e. cum-ack moved; process
	 *     these first and note that it moved.
	 *  4) Process any sack blocks.
	 *  5) Drop any acked chunks from the queue.
	 *  6) Check for any revoked blocks and mark them.
	 *  7) Update the cwnd.
	 *  8) Nothing left: sync up flightsizes and things, stop all timers
	 *     and also check for shutdown_pending state. If so then go
	 *     ahead and send off the shutdown. If in shutdown recv, send
	 *     off the shutdown-ack and start that timer; return.
	 *  9) Strike any non-acked things and do FR procedure if needed,
	 *     being sure to set the FR flag.
	 * 10) Do pr-sctp procedures.
	 * 11) Apply any FR penalties.
	 * 12) Assure we will SACK if in shutdown_recv state.
	 */
	SCTP_TCB_LOCK_ASSERT(stcb);
	sack = &ch->sack;
	/* CMT DAC algo */
	this_sack_lowest_newack = 0;
	j = 0;
	sack_length = ntohs(ch->ch.chunk_length);
	if (sack_length < sizeof(struct sctp_sack_chunk)) {
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("Bad size on sack chunk .. too small\n");
		}
#endif
		return;
	}
	/* ECN Nonce */
	SCTP_STAT_INCR(sctps_slowpath_sack);
	nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
	cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
	num_seg = ntohs(sack->num_gap_ack_blks);
	a_rwnd = (uint32_t) ntohl(sack->a_rwnd);

	/* CMT DAC algo */
	cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
	num_dup = ntohs(sack->num_dup_tsns);

	old_rwnd = stcb->asoc.peers_rwnd;
	stcb->asoc.overall_error_count = 0;
	asoc = &stcb->asoc;
#ifdef SCTP_SACK_LOGGING
	sctp_log_sack(asoc->last_acked_seq,
	    cum_ack,
	    0,
	    num_seg,
	    num_dup,
	    SCTP_LOG_NEW_SACK);
#endif
#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
	if (num_dup) {
		int off_to_dup, iii;
		uint32_t *dupdata;

		off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk);
		if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= sack_length) {
			dupdata = (uint32_t *) ((caddr_t)ch + off_to_dup);
			for (iii = 0; iii < num_dup; iii++) {
				sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
				dupdata++;
			}
		} else {
			printf("Size invalid offset to dups:%d number dups:%d sack_len:%d num gaps:%d\n",
			    off_to_dup, num_dup, sack_length, num_seg);
		}
	}
#endif
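	/*
	 * Editor's note -- the on-wire SACK layout behind the offset math
	 * above (sizes follow from the structures in sctp_header.h; a
	 * sketch, not normative):
	 *
	 *	[ sctp_sack_chunk : 16 bytes ]
	 *	[ num_seg x sctp_gap_ack_block : 4 bytes each ]
	 *	[ num_dup x uint32_t duplicate TSNs ]
	 *
	 * so with num_seg = 3, off_to_dup = 3 * 4 + 16 = 28, and a SACK
	 * carrying 2 dup TSNs must be at least 28 + 8 = 36 bytes long --
	 * exactly the bound the logging branch checks before walking
	 * dupdata.
	 */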
	if (sctp_strict_sacks) {
		/* reality check */
		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
			tp1 = TAILQ_LAST(&asoc->sent_queue,
			    sctpchunk_listhead);
			send_s = tp1->rec.data.TSN_seq + 1;
		} else {
			send_s = asoc->sending_seq;
		}
		if (cum_ack == send_s ||
		    compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
#ifndef INVARIANTS
			struct mbuf *oper;

#endif
#ifdef INVARIANTS
hopeless_peer:
			panic("Impossible sack 1");
#else


			/*
			 * No way -- we have not even sent this TSN out yet.
			 * Peer is hopelessly messed up with us.
			 */
hopeless_peer:
			*abort_now = 1;
			/* XXX */
			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
			    0, M_DONTWAIT, 1, MT_DATA);
			if (oper) {
				struct sctp_paramhdr *ph;
				uint32_t *ippp;

				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
				    sizeof(uint32_t);
				ph = mtod(oper, struct sctp_paramhdr *);
				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
				ph->param_length = htons(SCTP_BUF_LEN(oper));
				ippp = (uint32_t *) (ph + 1);
				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
			}
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper);
			return;
#endif
		}
	}
	/**********************/
	/* 1) check the range */
	/**********************/
	if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
		/* acking something behind */
		return;
	}
	sav_cum_ack = asoc->last_acked_seq;

	/* update the Rwnd of the peer */
	if (TAILQ_EMPTY(&asoc->sent_queue) &&
	    TAILQ_EMPTY(&asoc->send_queue) &&
	    (asoc->stream_queue_cnt == 0)
	    ) {
		/* nothing left on send/sent and strmq */
#ifdef SCTP_LOG_RWND
		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
		    asoc->peers_rwnd, 0, 0, a_rwnd);
#endif
		asoc->peers_rwnd = a_rwnd;
		if (asoc->sent_queue_retran_cnt) {
			asoc->sent_queue_retran_cnt = 0;
		}
		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
			/* SWS sender side engages */
			asoc->peers_rwnd = 0;
		}
		/* stop any timers */
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
			if (sctp_early_fr) {
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
				}
			}
			net->partial_bytes_acked = 0;
			net->flight_size = 0;
		}
		asoc->total_flight = 0;
		asoc->total_flight_count = 0;
		return;
	}
	/*
	 * We init netAckSz and netAckSz2 to 0. These are used to track two
	 * things. The total byte count acked is tracked in netAckSz, AND
	 * netAck2 is used to track the total bytes acked that are
	 * unambiguous and were never retransmitted. We track these on a
	 * per destination address basis.
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		net->prev_cwnd = net->cwnd;
		net->net_ack = 0;
		net->net_ack2 = 0;

		/*
		 * CMT: Reset CUC and Fast recovery algo variables before
		 * SACK processing
		 */
		net->new_pseudo_cumack = 0;
		net->will_exit_fast_recovery = 0;
	}
	/* process the new consecutive TSN first */
	tp1 = TAILQ_FIRST(&asoc->sent_queue);
	while (tp1) {
		if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
		    MAX_TSN) ||
		    last_tsn == tp1->rec.data.TSN_seq) {
			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
				/*
				 * ECN Nonce: Add the nonce to the sender's
				 * nonce sum
				 */
				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
				accum_moved = 1;
				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
					/*
					 * If it is less than ACKED, it is
					 * now no longer in flight.
					 * Higher values may occur during
					 * marking.
					 */
					if ((tp1->whoTo->dest_state &
					    SCTP_ADDR_UNCONFIRMED) &&
					    (tp1->snd_count < 2)) {
						/*
						 * If there was no retran
						 * and the address is
						 * un-confirmed and we sent
						 * there and are now
						 * sacked.. it's confirmed,
						 * mark it so.
						 */
						tp1->whoTo->dest_state &=
						    ~SCTP_ADDR_UNCONFIRMED;
					}
					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
#ifdef SCTP_FLIGHT_LOGGING
						sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
						    tp1->whoTo->flight_size,
						    tp1->book_size,
						    (uintptr_t) tp1->whoTo,
						    tp1->rec.data.TSN_seq);
#endif
						sctp_flight_size_decrease(tp1);
						sctp_total_flight_decrease(stcb, tp1);
					}
					tp1->whoTo->net_ack += tp1->send_size;

					/* CMT SFR and DAC algos */
					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
					tp1->whoTo->saw_newack = 1;

					if (tp1->snd_count < 2) {
						/*
						 * True non-retransmitted
						 * chunk
						 */
						tp1->whoTo->net_ack2 +=
						    tp1->send_size;

						/* update RTO too? */
						if (tp1->do_rtt) {
							tp1->whoTo->RTO =
							    sctp_calculate_rto(stcb,
							    asoc, tp1->whoTo,
							    &tp1->sent_rcv_time);
							tp1->do_rtt = 0;
						}
					}
					/*
					 * CMT: CUCv2 algorithm. From the
					 * cumack'd TSNs, for each TSN being
					 * acked for the first time, set the
					 * following variables for the
					 * corresponding destination.
					 * new_pseudo_cumack will trigger a
					 * cwnd update.
					 * find_(rtx_)pseudo_cumack will
					 * trigger search for the next
					 * expected (rtx-)pseudo-cumack.
					 */
					tp1->whoTo->new_pseudo_cumack = 1;
					tp1->whoTo->find_pseudo_cumack = 1;
					tp1->whoTo->find_rtx_pseudo_cumack = 1;


#ifdef SCTP_SACK_LOGGING
					sctp_log_sack(asoc->last_acked_seq,
					    cum_ack,
					    tp1->rec.data.TSN_seq,
					    0,
					    0,
					    SCTP_LOG_TSN_ACKED);
#endif
#ifdef SCTP_CWND_LOGGING
					sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
#endif
				}
				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
#ifdef SCTP_AUDITING_ENABLED
					sctp_audit_log(0xB3,
					    (asoc->sent_queue_retran_cnt & 0x000000ff));
#endif
				}
				if (tp1->rec.data.chunk_was_revoked) {
					/* deflate the cwnd */
					tp1->whoTo->cwnd -= tp1->book_size;
					tp1->rec.data.chunk_was_revoked = 0;
				}
				tp1->sent = SCTP_DATAGRAM_ACKED;
			}
		} else {
			break;
		}
		tp1 = TAILQ_NEXT(tp1, sctp_next);
	}
	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
	/* always set this up to cum-ack */
	asoc->this_sack_highest_gap = last_tsn;

	if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) {

		/* skip corrupt segments */
		goto skip_segments;
	}
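	/*
	 * Editor's note -- what the guard just above rejects (worked
	 * numbers, sketch only). The gap-ack count comes straight off the
	 * wire, so a malformed or hostile SACK can claim more blocks than
	 * the chunk actually carries. E.g. a 56-byte SACK claiming
	 * num_seg = 100 would need
	 *
	 *	100 * 4 + 16 = 416 bytes > 56
	 *
	 * so the gap blocks are ignored (skip_segments) rather than read
	 * past the end of the chunk.
	 */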
	if (num_seg > 0) {

		/*
		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
		 * to be greater than the cumack. Also reset saw_newack to 0
		 * for all dests.
		 */
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			net->saw_newack = 0;
			net->this_sack_highest_newack = last_tsn;
		}

		/*
		 * thisSackHighestGap will increase while handling NEW
		 * segments; this_sack_highest_newack will increase while
		 * handling NEWLY ACKED chunks; this_sack_lowest_newack is
		 * used for the CMT DAC algo; saw_newack will also change.
		 */
		sctp_handle_segments(stcb, asoc, ch, last_tsn,
		    &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
		    num_seg, &ecn_seg_sums);

		if (sctp_strict_sacks) {
			/*
			 * validate the biggest_tsn_acked in the gap acks if
			 * strict adherence is wanted.
			 */
			if ((biggest_tsn_acked == send_s) ||
			    (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
				/*
				 * peer is either confused or we are under
				 * attack. We must abort.
				 */
				goto hopeless_peer;
			}
		}
	}
skip_segments:
	/*******************************************/
	/* cancel ALL T3-send timer if accum moved */
	/*******************************************/
	if (sctp_cmt_on_off) {
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			if (net->new_pseudo_cumack)
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, net,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);

		}
	} else {
		if (accum_moved) {
			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
			}
		}
	}
	/********************************************/
	/* drop the acked chunks from the sendqueue */
	/********************************************/
	asoc->last_acked_seq = cum_ack;

	tp1 = TAILQ_FIRST(&asoc->sent_queue);
	if (tp1 == NULL)
		goto done_with_it;
	do {
		if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
		    MAX_TSN)) {
			break;
		}
		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
			/* no more sent on list */
			break;
		}
		tp2 = TAILQ_NEXT(tp1, sctp_next);
		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
		/*
		 * Friendlier printf in lieu of panic now that I think it's
		 * fixed
		 */

		if (tp1->pr_sctp_on) {
			if (asoc->pr_sctp_cnt != 0)
				asoc->pr_sctp_cnt--;
		}
		if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
		    (asoc->total_flight > 0)) {
#ifdef INVARIANTS
			panic("Warning flight size is positive and should be 0");
#else

			printf("Warning flight size incorrect should be 0 is %d\n",
			    asoc->total_flight);
#endif
			asoc->total_flight = 0;
		}
		if (tp1->data) {
			sctp_free_bufspace(stcb, asoc, tp1, 1);
			sctp_m_freem(tp1->data);
			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
				asoc->sent_queue_cnt_removeable--;
			}
		}
#ifdef SCTP_SACK_LOGGING
		sctp_log_sack(asoc->last_acked_seq,
		    cum_ack,
		    tp1->rec.data.TSN_seq,
		    0,
		    0,
		    SCTP_LOG_FREE_SENT);
#endif
		tp1->data = NULL;
		asoc->sent_queue_cnt--;
		sctp_free_remote_addr(tp1->whoTo);

		sctp_free_a_chunk(stcb, tp1);
		wake_him++;
		tp1 = tp2;
	} while (tp1 != NULL);

done_with_it:
	if ((wake_him) && (stcb->sctp_socket)) {
		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
#ifdef SCTP_WAKE_LOGGING
		sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
#endif
		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
#ifdef SCTP_WAKE_LOGGING
	} else {
		sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
#endif
	}

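	/*
	 * Editor's note on the #ifdef construct just above (and its twin in
	 * sctp_express_handle_sack()): with SCTP_WAKE_LOGGING defined the
	 * preprocessor output is
	 *
	 *	if ((wake_him) && (stcb->sctp_socket)) {
	 *		SOCKBUF_LOCK(...); ... sctp_sowwakeup_locked(...);
	 *	} else {
	 *		sctp_wakeup_log(..., SCTP_NOWAKE_FROM_SACK);
	 *	}
	 *
	 * while without it the "} else {" disappears and the closing brace
	 * simply ends the if. The braces balance either way; the idiom is
	 * intentional, if fragile.
	 */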
	if (asoc->fast_retran_loss_recovery && accum_moved) {
		if (compare_with_wrap(asoc->last_acked_seq,
		    asoc->fast_recovery_tsn, MAX_TSN) ||
		    asoc->last_acked_seq == asoc->fast_recovery_tsn) {
			/* Setup so we will exit RFC2582 fast recovery */
			will_exit_fast_recovery = 1;
		}
	}
	/*
	 * Check for revoked fragments:
	 *
	 * If the previous sack had no frags, then we can't have any revoked.
	 * If the previous sack had frags, then:
	 *   - if we now have frags (aka num_seg > 0) call
	 *     sctp_check_for_revoked() to tell if the peer revoked some of
	 *     them;
	 *   - else the peer revoked all ACKED fragments, since we had some
	 *     before and now we have NONE.
	 */

	if (num_seg)
		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
	else if (asoc->saw_sack_with_frags) {
		int cnt_revoked = 0;

		tp1 = TAILQ_FIRST(&asoc->sent_queue);
		if (tp1 != NULL) {
			/* Peer revoked all dg's marked or acked */
			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
				if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
				    (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
					tp1->sent = SCTP_DATAGRAM_SENT;
#ifdef SCTP_FLIGHT_LOGGING
					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
					    tp1->whoTo->flight_size,
					    tp1->book_size,
					    (uintptr_t) tp1->whoTo,
					    tp1->rec.data.TSN_seq);
#endif
					sctp_flight_size_increase(tp1);
					sctp_total_flight_increase(stcb, tp1);
					tp1->rec.data.chunk_was_revoked = 1;
					/*
					 * To ensure that this increase in
					 * flightsize, which is artificial,
					 * does not throttle the sender, we
					 * also increase the cwnd
					 * artificially.
					 */
					tp1->whoTo->cwnd += tp1->book_size;
					cnt_revoked++;
				}
			}
			if (cnt_revoked) {
				reneged_all = 1;
			}
		}
		asoc->saw_sack_with_frags = 0;
	}
	if (num_seg)
		asoc->saw_sack_with_frags = 1;
	else
		asoc->saw_sack_with_frags = 0;


	sctp_cwnd_update(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);

	if (TAILQ_EMPTY(&asoc->sent_queue)) {
		/* nothing left in-flight */
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			/* stop all timers */
			if (sctp_early_fr) {
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
				}
			}
			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
			net->flight_size = 0;
			net->partial_bytes_acked = 0;
		}
		asoc->total_flight = 0;
		asoc->total_flight_count = 0;
	}
	/**********************************/
	/* Now what about shutdown issues */
	/**********************************/
	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
		/* nothing left on sendqueue.. consider done */
#ifdef SCTP_LOG_RWND
		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
		    asoc->peers_rwnd, 0, 0, a_rwnd);
#endif
		asoc->peers_rwnd = a_rwnd;
		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
			/* SWS sender side engages */
			asoc->peers_rwnd = 0;
		}
		/* clean up */
		if ((asoc->stream_queue_cnt == 1) &&
		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
		    (asoc->locked_on_sending)
		    ) {
			struct sctp_stream_queue_pending *sp;

			/*
			 * I may be in a state where we got all across.. but
			 * cannot write more due to a shutdown... we abort
			 * since the user did not indicate EOR in this case.
			 */
			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
			    sctp_streamhead);
			if ((sp) && (sp->length == 0) && (sp->msg_is_complete == 0)) {
				asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
				asoc->locked_on_sending = NULL;
				asoc->stream_queue_cnt--;
			}
		}
		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
		    (asoc->stream_queue_cnt == 0)) {
			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
				/* Need to abort here */
				struct mbuf *oper;

abort_out_now:
				*abort_now = 1;
				/* XXX */
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
					    sizeof(uint32_t);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper);
				return;
			} else {
				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
				}
				asoc->state = SCTP_STATE_SHUTDOWN_SENT;
				sctp_stop_timers_for_shutdown(stcb);
				sctp_send_shutdown(stcb,
				    stcb->asoc.primary_destination);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
				    stcb->sctp_ep, stcb, asoc->primary_destination);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
				    stcb->sctp_ep, stcb, asoc->primary_destination);
			}
			return;
		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (asoc->stream_queue_cnt == 0)) {
			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
				goto abort_out_now;
			}
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
			asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;
			sctp_send_shutdown_ack(stcb,
			    stcb->asoc.primary_destination);

			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
			    stcb->sctp_ep, stcb, asoc->primary_destination);
			return;
		}
	}
	/*
	 * Now here we are going to recycle net_ack for a different use...
	 * HEADS UP.
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		net->net_ack = 0;
	}

	/*
	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
	 * automatically ensure that.
	 */
	if (sctp_cmt_on_off && sctp_cmt_use_dac && (cmt_dac_flag == 0)) {
		this_sack_lowest_newack = cum_ack;
	}
	if (num_seg > 0) {
		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
	}
	/*********************************************/
	/* Here we perform PR-SCTP procedures        */
	/* (section 4.2)                             */
	/*********************************************/
	/* C1. update advancedPeerAckPoint */
	if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
		asoc->advanced_peer_ack_point = cum_ack;
	}
	/* C2. try to further move advancedPeerAckPoint ahead */

	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
		struct sctp_tmit_chunk *lchk;

		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
		/* C3. See if we need to send a Fwd-TSN */
		if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
		    MAX_TSN)) {
			/*
			 * ISSUE with ECN, see FWD-TSN processing for notes
			 * on issues that will occur when the ECN NONCE
			 * stuff is put into SCTP for cross checking.
			 */
			send_forward_tsn(stcb, asoc);

			/*
			 * ECN Nonce: Disable Nonce Sum check when FWD TSN
			 * is sent and store resync tsn
			 */
			asoc->nonce_sum_check = 0;
			asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
			if (lchk) {
				/* Assure a timer is up */
				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, lchk->whoTo);
			}
		}
	}
	/*
	 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off == 1) &&
	 * (net->fast_retran_loss_recovery == 0)))
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if ((asoc->fast_retran_loss_recovery == 0) || (sctp_cmt_on_off == 1)) {
			/* out of an RFC2582 Fast recovery window? */
			if (net->net_ack > 0) {
				/*
				 * Per section 7.2.3, are there any
				 * destinations that had a fast retransmit
				 * to them? If so, what we need to do is
				 * adjust ssthresh and cwnd.
				 */
				struct sctp_tmit_chunk *lchk;

#ifdef SCTP_HIGH_SPEED
				sctp_hs_cwnd_decrease(stcb, net);
#else
#ifdef SCTP_CWND_MONITOR
				int old_cwnd = net->cwnd;

#endif
				net->ssthresh = net->cwnd / 2;
				if (net->ssthresh < (net->mtu * 2)) {
					net->ssthresh = 2 * net->mtu;
				}
				net->cwnd = net->ssthresh;
#ifdef SCTP_CWND_MONITOR
				sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
				    SCTP_CWND_LOG_FROM_FR);
#endif
#endif

				lchk = TAILQ_FIRST(&asoc->send_queue);

				net->partial_bytes_acked = 0;
				/* Turn on fast recovery window */
				asoc->fast_retran_loss_recovery = 1;
				if (lchk == NULL) {
					/* Mark end of the window */
					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
				}

				/*
				 * CMT fast recovery -- per destination
				 * recovery variable.
				 */
				net->fast_retran_loss_recovery = 1;

				if (lchk == NULL) {
					/* Mark end of the window */
					net->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
				}



				/*
				 * Disable Nonce Sum Checking and store the
				 * resync tsn
				 */
				asoc->nonce_sum_check = 0;
				asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1;

				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net);
			}
		} else if (net->net_ack > 0) {
			/*
			 * Mark a peg that we WOULD have done a cwnd
			 * reduction but RFC2582 prevented this action.
			 */
			SCTP_STAT_INCR(sctps_fastretransinrtt);
		}
	}

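	/*
	 * Editor's note -- worked numbers for the fast-retransmit back-off
	 * above (non-SCTP_HIGH_SPEED branch; sketch only). With
	 * cwnd = 20000 and mtu = 1500:
	 *
	 *	ssthresh = 20000 / 2 = 10000	(>= 2 * 1500, so kept)
	 *	cwnd     = 10000
	 *
	 * With cwnd = 4000 the half (2000) falls below the 2 * MTU floor,
	 * so ssthresh and cwnd are pinned at 3000. The recovery window is
	 * then marked to end just below the next TSN to be sent, so only
	 * one reduction is taken per window, RFC 2582 style.
	 */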
	/******************************************************************
	 * Here we do the stuff with ECN Nonce checking.
	 * We basically check to see if the nonce sum flag was incorrect
	 * or if resynchronization needs to be done. Also if we catch a
	 * misbehaving receiver we give him the kick.
	 ******************************************************************/

	if (asoc->ecn_nonce_allowed) {
		if (asoc->nonce_sum_check) {
			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
				if (asoc->nonce_wait_for_ecne == 0) {
					struct sctp_tmit_chunk *lchk;

					lchk = TAILQ_FIRST(&asoc->send_queue);
					asoc->nonce_wait_for_ecne = 1;
					if (lchk) {
						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
					} else {
						asoc->nonce_wait_tsn = asoc->sending_seq;
					}
				} else {
					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
						/*
						 * Misbehaving peer. We need
						 * to react to this guy
						 */
						asoc->ecn_allowed = 0;
						asoc->ecn_nonce_allowed = 0;
					}
				}
			}
		} else {
			/* See if Resynchronization is Possible */
			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
				asoc->nonce_sum_check = 1;
				/*
				 * Now we must calculate what the base is.
				 * We do this based on two things: we know
				 * the totals for all the segments gap-acked
				 * in the SACK, stored in ecn_seg_sums. We
				 * also know the SACK's nonce sum, it's in
				 * nonce_sum_flag. So we can build a truth
				 * table to back-calculate the new value of
				 * asoc->nonce_sum_expect_base:
				 *
				 * SACK-flag-Value  Seg-Sums  Base
				 *        0            0        0
				 *        1            0        1
				 *        0            1        1
				 *        1            1        0
				 */
				asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
			}
		}
	}
	/* Now are we exiting loss recovery ? */
	if (will_exit_fast_recovery) {
		/* Ok, we must exit fast recovery */
		asoc->fast_retran_loss_recovery = 0;
	}
	if ((asoc->sat_t3_loss_recovery) &&
	    ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
	    MAX_TSN) ||
	    (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
		/* end satellite t3 loss recovery */
		asoc->sat_t3_loss_recovery = 0;
	}
	/*
	 * CMT Fast recovery
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (net->will_exit_fast_recovery) {
			/* Ok, we must exit fast recovery */
			net->fast_retran_loss_recovery = 0;
		}
	}

	/* Adjust and set the new rwnd value */
#ifdef SCTP_LOG_RWND
	sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
	    asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * sctp_peer_chunk_oh), a_rwnd);
#endif

	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
		/* SWS sender side engages */
		asoc->peers_rwnd = 0;
	}
	if (asoc->peers_rwnd > old_rwnd) {
		win_probe_recovery = 1;
	}
	/*
	 * Now we must setup so we have a timer up for anyone with
	 * outstanding data.
	 */
	done_once = 0;
again:
	j = 0;
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (win_probe_recovery && (net->window_probe)) {
			net->window_probe = 0;
			win_probe_recovered = 1;
			/*-
			 * Find the first chunk that was used with a
			 * window probe and clear the event. Put
			 * it back into the send queue as if it has
			 * not been sent.
			 */
			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
				if (tp1->window_probe) {
					sctp_window_probe_recovery(stcb, asoc, net, tp1);
					break;
				}
			}
		}
		if (net->flight_size) {
			j++;
			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
			    stcb->sctp_ep, stcb, net);
		} else {
			if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, net,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
			}
			if (sctp_early_fr) {
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
				}
			}
		}
	}
	if ((j == 0) &&
	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
	    (asoc->sent_queue_retran_cnt == 0) &&
	    (win_probe_recovered == 0) &&
	    (done_once == 0)) {
		/* huh, this should not happen */
		sctp_fs_audit(asoc);
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			net->flight_size = 0;
		}
		asoc->total_flight = 0;
		asoc->total_flight_count = 0;
		asoc->sent_queue_retran_cnt = 0;
		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				sctp_flight_size_increase(tp1);
				sctp_total_flight_increase(stcb, tp1);
			} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
				asoc->sent_queue_retran_cnt++;
			}
		}
		done_once = 1;
		goto again;
	}
#ifdef SCTP_SACK_RWND_LOGGING
	sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
	    a_rwnd,
	    stcb->asoc.peers_rwnd,
	    stcb->asoc.total_flight,
	    stcb->asoc.total_output_queue_size);

#endif

}

void
sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
    struct sctp_nets *netp, int *abort_flag)
{
	/* Copy cum-ack */
	uint32_t cum_ack, a_rwnd;

	cum_ack = ntohl(cp->cumulative_tsn_ack);
	/* Arrange so a_rwnd does NOT change */
	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;

	/* Now call the express sack handling */
	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
}

static void
sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
    struct sctp_stream_in *strmin)
{
	struct sctp_queued_to_read *ctl, *nctl;
	struct sctp_association *asoc;
	int tt;

	asoc = &stcb->asoc;
	tt = strmin->last_sequence_delivered;
	/*
	 * First deliver anything prior to and including the stream number
	 * that came in.
	 */
	ctl = TAILQ_FIRST(&strmin->inqueue);
	while (ctl) {
		nctl = TAILQ_NEXT(ctl, next);
		if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
		    (tt == ctl->sinfo_ssn)) {
			/* this is deliverable now */
			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
			/* subtract pending on streams */
			asoc->size_on_all_streams -= ctl->length;
			sctp_ucount_decr(asoc->cnt_on_all_streams);
			/* deliver it to at least the delivery-q */
			if (stcb->sctp_socket) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    ctl,
				    &stcb->sctp_socket->so_rcv, 1);
			}
		} else {
			/* no more delivery now. */
			break;
		}
		ctl = nctl;
	}
	/*
	 * Now we must deliver things in queue the normal way if any are
	 * now ready.
	 */
	tt = strmin->last_sequence_delivered + 1;
	ctl = TAILQ_FIRST(&strmin->inqueue);
	while (ctl) {
		nctl = TAILQ_NEXT(ctl, next);
		if (tt == ctl->sinfo_ssn) {
			/* this is deliverable now */
			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
			/* subtract pending on streams */
			asoc->size_on_all_streams -= ctl->length;
			sctp_ucount_decr(asoc->cnt_on_all_streams);
			/* deliver it to at least the delivery-q */
			strmin->last_sequence_delivered = ctl->sinfo_ssn;
			if (stcb->sctp_socket) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    ctl,
				    &stcb->sctp_socket->so_rcv, 1);
			}
			tt = strmin->last_sequence_delivered + 1;
		} else {
			break;
		}
		ctl = nctl;
	}
}

void
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
    struct sctp_forward_tsn_chunk *fwd, int *abort_flag)
{
	/*
	 * ISSUES that MUST be fixed for ECN! When we are the sender of the
	 * forward TSN, when the SACK comes back that acknowledges the
	 * FWD-TSN we must reset the NONCE sum to match correctly. This will
	 * get quite tricky since we may have sent more data intervening
	 * and must carefully account for what the SACK says on the nonce
	 * and any gaps that are reported. This work will NOT be done here,
	 * but I note it here since it is really related to PR-SCTP and
	 * FWD-TSN's.
	 */

	/* The pr-sctp fwd tsn */
	/*
	 * Here we will perform all the data receiver side steps for
	 * processing FwdTSN, as required by the pr-sctp draft:
	 *
	 * Assume we get FwdTSN(x):
	 *
	 * 1) update local cumTSN to x
	 * 2) try to further advance cumTSN to x + others we have
	 * 3) examine and update re-ordering queue on pr-in-streams
	 * 4) clean up re-assembly queue
	 * 5) Send a sack to report where we are.
	 */
	struct sctp_strseq *stseq;
	struct sctp_association *asoc;
	uint32_t new_cum_tsn, gap, back_out_htsn;
	unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size;
	struct sctp_stream_in *strm;
	struct sctp_tmit_chunk *chk, *at;

	cumack_set_flag = 0;
	asoc = &stcb->asoc;
	cnt_gone = 0;
	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("Bad size too small/big fwd-tsn\n");
		}
#endif
		return;
	}
	m_size = (stcb->asoc.mapping_array_size << 3);
	/*************************************************************/
	/* 1. Here we update local cumTSN and shift the bitmap array */
	/*************************************************************/
	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
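	/*
	 * Editor's note -- worked example of the gap computation below
	 * (sketch only). The mapping array tracks TSNs relative to
	 * mapping_array_base_tsn, so:
	 *
	 *	base_tsn = 1000, new_cum_tsn = 1005  ->  gap = 5
	 *
	 * and across the 32-bit wrap:
	 *
	 *	base_tsn = 0xfffffffe, new_cum_tsn = 3
	 *	gap = 3 + (MAX_TSN - 0xfffffffe) + 1 = 5
	 *
	 * Every bit from 0 through gap is then marked present, exactly as
	 * if DATA chunks for those TSNs had arrived.
	 */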
	back_out_htsn = asoc->highest_tsn_inside_map;
	if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
	    MAX_TSN)) {
		asoc->highest_tsn_inside_map = new_cum_tsn;
#ifdef SCTP_MAP_LOGGING
		sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
#endif
	}
	/*
	 * now we know the new TSN is more advanced, let's find the actual
	 * gap
	 */
	if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn,
	    MAX_TSN)) ||
	    (new_cum_tsn == asoc->mapping_array_base_tsn)) {
		gap = new_cum_tsn - asoc->mapping_array_base_tsn;
	} else {
		/* try to prevent underflow here */
		gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
	}

	/*
	 * Note: gap is unsigned, so a "gap < 0" test would always be
	 * false; the size check alone is sufficient.
	 */
	if (gap > m_size) {
		asoc->highest_tsn_inside_map = back_out_htsn;
		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
			/*
			 * out of range (in terms of the single-byte chunks
			 * within the rwnd we give out); too questionable,
			 * better to drop it silently
			 */
			return;
		}
		if (asoc->highest_tsn_inside_map >
		    asoc->mapping_array_base_tsn) {
			gap = asoc->highest_tsn_inside_map -
			    asoc->mapping_array_base_tsn;
		} else {
			gap = asoc->highest_tsn_inside_map +
			    (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
		}
		cumack_set_flag = 1;
	}
	for (i = 0; i <= gap; i++) {
		SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
	}
	/*
	 * Now that all of them are marked, slide the map forward, but send
	 * no sack.
	 */
	sctp_sack_check(stcb, 0, 0, abort_flag);
	if (*abort_flag)
		return;

	if (cumack_set_flag) {
		/*
		 * The fwd-tsn went outside my gap array - not a common
		 * occurrence. Do the same thing we do when a cookie-echo
		 * arrives.
		 */
		asoc->highest_tsn_inside_map = new_cum_tsn - 1;
		asoc->mapping_array_base_tsn = new_cum_tsn;
		asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
#ifdef SCTP_MAP_LOGGING
		sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
#endif
		asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
	}
	/*************************************************************/
	/* 2. Clear up the re-assembly queue                         */
	/*************************************************************/

	/*
	 * First service it if the pd-api is up, just in case we can
	 * progress it forward.
	 */
	if (asoc->fragmented_delivery_inprogress) {
		sctp_service_reassembly(stcb, asoc);
	}
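	/*
	 * Illustrative sketch (compiled out; example_set_tsn_present is a
	 * hypothetical stand-in, not the real macro): the marking loop
	 * above treats the mapping array as a bitmap indexed by TSN offset
	 * from mapping_array_base_tsn, which is why m_size is the array
	 * size shifted left by 3 (bytes to bits). A minimal per-bit set
	 * could look like:
	 */
#if 0
	static void
	example_set_tsn_present(uint8_t *map, uint32_t gap)
	{
		/* byte index is gap / 8, bit index is gap % 8 */
		map[gap >> 3] |= (uint8_t)(1 << (gap & 0x07));
	}
#endif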
	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* For each one on here see if we need to toss it */
		/*
		 * For now, large messages held on the reasmqueue that are
		 * complete will be tossed too. We could in theory do more
		 * work to spin through and stop after dumping one msg, aka
		 * seeing the start of a new msg at the head, and call the
		 * delivery function... to see if it can be delivered... But
		 * for now we just dump everything on the queue.
		 */
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		while (chk) {
			at = TAILQ_NEXT(chk, sctp_next);
			if (compare_with_wrap(asoc->cumulative_tsn,
			    chk->rec.data.TSN_seq, MAX_TSN) ||
			    asoc->cumulative_tsn == chk->rec.data.TSN_seq) {
				/* It needs to be tossed */
				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
				if (compare_with_wrap(chk->rec.data.TSN_seq,
				    asoc->tsn_last_delivered, MAX_TSN)) {
					asoc->tsn_last_delivered =
					    chk->rec.data.TSN_seq;
					asoc->str_of_pdapi =
					    chk->rec.data.stream_number;
					asoc->ssn_of_pdapi =
					    chk->rec.data.stream_seq;
					asoc->fragment_flags =
					    chk->rec.data.rcv_flags;
				}
				asoc->size_on_reasm_queue -= chk->send_size;
				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
				cnt_gone++;

				/* Clear up any stream problem */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
				    SCTP_DATA_UNORDERED &&
				    (compare_with_wrap(chk->rec.data.stream_seq,
				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
				    MAX_SEQ))) {
					/*
					 * We must advance this stream's
					 * sequence number if the chunk
					 * being skipped is not unordered.
					 * There is a chance that if the
					 * peer does not include the last
					 * fragment in its FWD-TSN, we WILL
					 * have a problem here, since we
					 * would have a partial chunk in
					 * queue that may not be
					 * deliverable. Also, if a partial
					 * delivery API has started, the
					 * user may get a partial chunk
					 * with the next read returning a
					 * new chunk... really ugly, but I
					 * see no way around it! Maybe a
					 * notify??
					 */
					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
					    chk->rec.data.stream_seq;
				}
				if (chk->data) {
					sctp_m_freem(chk->data);
					chk->data = NULL;
				}
				sctp_free_remote_addr(chk->whoTo);
				sctp_free_a_chunk(stcb, chk);
			} else {
				/*
				 * Ok, we have gone beyond the end of the
				 * fwd-tsn's mark. Some checks...
				 */
				if ((asoc->fragmented_delivery_inprogress) &&
				    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
					uint32_t str_seq;

					/*
					 * Special case: the PD-API is up
					 * and what we fwd-tsn'd over
					 * includes the chunk that had the
					 * LAST_FRAG. We no longer need to
					 * do the PD-API.
					 */
					asoc->fragmented_delivery_inprogress = 0;

					str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi;
					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
					    stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq);

				}
				break;
			}
			chk = at;
		}
	}
	if (asoc->fragmented_delivery_inprogress) {
		/*
		 * Ok, we removed cnt_gone chunks in the PD-API queue that
		 * were being delivered, so now we must turn off the flag.
		 */
		uint32_t str_seq;

		str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi;
		sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
		    stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq);
		asoc->fragmented_delivery_inprogress = 0;
	}
	/*************************************************************/
	/* 3. Update the PR-stream re-ordering queues                */
	/*************************************************************/
	stseq = (struct sctp_strseq *)((caddr_t)fwd + sizeof(*fwd));
	fwd_sz -= sizeof(*fwd);
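	/*
	 * Illustrative sketch (compiled out; ss, n, and k are hypothetical
	 * locals): the block below walks the variable-length tail of the
	 * FWD-TSN chunk as an array of 16-bit (stream, sequence) pairs in
	 * network byte order. Stand-alone, the parse amounts to:
	 */
#if 0
	struct sctp_strseq *ss = (struct sctp_strseq *)(fwd + 1);
	unsigned int n = fwd_sz / sizeof(struct sctp_strseq);
	unsigned int k;

	for (k = 0; k < n; k++) {
		uint16_t stream = ntohs(ss[k].stream);
		uint16_t seq = ntohs(ss[k].sequence);

		/* bounds-check stream, then advance its re-order queue */
	}
#endif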
	{
		/* New method. */
		int num_str, i;

		num_str = fwd_sz / sizeof(struct sctp_strseq);
		for (i = 0; i < num_str; i++) {
			uint16_t st;

			/* Convert to host byte order in place */
			st = ntohs(stseq[i].stream);
			stseq[i].stream = st;
			st = ntohs(stseq[i].sequence);
			stseq[i].sequence = st;
			/* now process */
			if (stseq[i].stream >= asoc->streamincnt) {
				/*
				 * Streams are numbered from 0, so ">=" is
				 * the correct bounds check here. It is
				 * arguable whether we should continue:
				 * since the peer sent bogus stream info,
				 * we may be in deep trouble... a return
				 * may be a better choice?
				 */
				continue;
			}
			strm = &asoc->strmin[stseq[i].stream];
			if (compare_with_wrap(stseq[i].sequence,
			    strm->last_sequence_delivered, MAX_SEQ)) {
				/* Update the sequence number */
				strm->last_sequence_delivered =
				    stseq[i].sequence;
			}
			/* now kick the stream the new way */
			sctp_kick_prsctp_reorder_queue(stcb, strm);
		}
	}
	if (TAILQ_FIRST(&asoc->reasmqueue)) {
		/* now lets kick out and check for more fragmented delivery */
		sctp_deliver_reasm_check(stcb, &stcb->asoc);
	}
}
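/*
 * Illustrative sketch (compiled out; example_pack_strseq and
 * example_unpack_strseq are hypothetical helpers): the PD-API aborted
 * notifications in sctp_handle_forward_tsn() pack the pending stream
 * number and stream sequence into a single 32-bit value before handing it
 * to sctp_ulp_notify(). The packing and its inverse:
 */
#if 0
static uint32_t
example_pack_strseq(uint16_t stream, uint16_t ssn)
{
	return (((uint32_t)stream << 16) | ssn);
}

static void
example_unpack_strseq(uint32_t val, uint16_t *stream, uint16_t *ssn)
{
	*stream = (uint16_t)(val >> 16);
	*ssn = (uint16_t)(val & 0xffff);
}
#endif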