/*-
 * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ipsec.h"
#include "opt_inet6.h"
#include "opt_inet.h"
#include "opt_sctp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/route.h>

#include <sys/limits.h>
#include <machine/cpu.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet6/ip6_var.h>
#endif /* INET6 */
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netkey/key.h>
#endif /* IPSEC */

#ifdef SCTP_DEBUG
extern uint32_t sctp_debug_on;
#endif

/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it), for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */

extern int sctp_strict_sacks;

__inline void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    uint32_t calc, calc_w_oh;

    /*
     * This is really set wrong with respect to a one-to-many socket,
     * since sb_cc is the count that everyone has put up. When we
     * rewrite sctp_soreceive we will fix this so that ONLY this
     * association's data is taken into account.
     */
    if (stcb->sctp_socket == NULL)
        return;

    if (stcb->asoc.sb_cc == 0 &&
        asoc->size_on_reasm_queue == 0 &&
        asoc->size_on_all_streams == 0) {
        /* Full rwnd granted */
        asoc->my_rwnd = max(stcb->sctp_socket->so_rcv.sb_hiwat,
            SCTP_MINIMAL_RWND);
        return;
    }
    /* get actual space */
    calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

    /*
     * take out what has NOT been put on the socket queue and which we
     * still hold for putting up.
     */
    calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
    calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);

    if (calc == 0) {
        /* out of space */
        asoc->my_rwnd = 0;
        return;
    }
    /* what is the overhead of all these rwnd's */
    calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
    asoc->my_rwnd = calc;
    if (calc_w_oh == 0) {
        /*
         * If our overhead is greater than the advertised rwnd, we
         * clamp the rwnd to 1. This lets us still accept inbound
         * segments, but hopefully will shut the sender down when he
         * finally gets the message.
         */
        asoc->my_rwnd = 1;
    } else {
        /* SWS threshold */
        if (asoc->my_rwnd &&
            (asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
            /* SWS engaged, tell peer none left */
            asoc->my_rwnd = 1;
        }
    }
}
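
/*
 * Illustrative example of the SWS clamp above (numbers are hypothetical,
 * not from the original sources): with 3000 bytes of buffer space free
 * but an sctp_sws_receiver threshold of 4096, advertising the 3000 bytes
 * would invite the peer to dribble data in tiny segments (classic silly
 * window syndrome). Advertising a 1-byte window instead holds the sender
 * off until a usefully large window opens up.
 */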

/* Calculate what the rwnd would be */
__inline uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    uint32_t calc = 0, calc_w_oh;

    /*
     * This is really set wrong with respect to a one-to-many socket,
     * since sb_cc is the count that everyone has put up. When we
     * rewrite sctp_soreceive we will fix this so that ONLY this
     * association's data is taken into account.
     */
    if (stcb->sctp_socket == NULL)
        return (calc);

    if (stcb->asoc.sb_cc == 0 &&
        asoc->size_on_reasm_queue == 0 &&
        asoc->size_on_all_streams == 0) {
        /* Full rwnd granted */
        calc = max(stcb->sctp_socket->so_rcv.sb_hiwat,
            SCTP_MINIMAL_RWND);
        return (calc);
    }
    /* get actual space */
    calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

    /*
     * take out what has NOT been put on the socket queue and which we
     * still hold for putting up.
     */
    calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
    calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);

    if (calc == 0) {
        /* out of space */
        return (calc);
    }
    /* what is the overhead of all these rwnd's */
    calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
    if (calc_w_oh == 0) {
        /*
         * If our overhead is greater than the advertised rwnd, we
         * clamp the rwnd to 1. This lets us still accept inbound
         * segments, but hopefully will shut the sender down when he
         * finally gets the message.
         */
        calc = 1;
    } else {
        /* SWS threshold */
        if (calc &&
            (calc < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
            /* SWS engaged, tell peer none left */
            calc = 1;
        }
    }
    return (calc);
}
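
/*
 * Worked example of the calculation above (all numbers hypothetical):
 * suppose sctp_sbspace() reports 10000 bytes free in so_rcv, with 2000
 * bytes held on the reassembly queue and 1000 bytes on the stream
 * queues. The candidate rwnd is 10000 - 2000 - 1000 = 7000. If
 * my_rwnd_control_len were to consume all 7000, the advertised window
 * collapses to 1; likewise, if 7000 fell below the receiver SWS
 * threshold, 1 would be advertised instead.
 */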

/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
    struct sctp_queued_to_read *read_queue_e = NULL;

    sctp_alloc_a_readq(stcb, read_queue_e);
    if (read_queue_e == NULL) {
        goto failed_build;
    }
    read_queue_e->sinfo_stream = stream_no;
    read_queue_e->sinfo_ssn = stream_seq;
    read_queue_e->sinfo_flags = (flags << 8);
    read_queue_e->sinfo_ppid = ppid;
    read_queue_e->sinfo_context = stcb->asoc.context;
    read_queue_e->sinfo_timetolive = 0;
    read_queue_e->sinfo_tsn = tsn;
    read_queue_e->sinfo_cumtsn = tsn;
    read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
    read_queue_e->whoFrom = net;
    read_queue_e->length = 0;
    atomic_add_int(&net->ref_count, 1);
    read_queue_e->data = dm;
    read_queue_e->tail_mbuf = NULL;
    read_queue_e->stcb = stcb;
    read_queue_e->port_from = stcb->rport;
    read_queue_e->do_not_ref_stcb = 0;
    read_queue_e->end_added = 0;
    read_queue_e->pdapi_aborted = 0;
failed_build:
    return (read_queue_e);
}

/*
 * Build out our readq entry based on the incoming chunk.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
    struct sctp_queued_to_read *read_queue_e = NULL;

    sctp_alloc_a_readq(stcb, read_queue_e);
    if (read_queue_e == NULL) {
        goto failed_build;
    }
    read_queue_e->sinfo_stream = chk->rec.data.stream_number;
    read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
    read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
    read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
    read_queue_e->sinfo_context = stcb->asoc.context;
    read_queue_e->sinfo_timetolive = 0;
    read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
    read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
    read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
    read_queue_e->whoFrom = chk->whoTo;
    read_queue_e->length = 0;
    atomic_add_int(&chk->whoTo->ref_count, 1);
    read_queue_e->data = chk->data;
    read_queue_e->tail_mbuf = NULL;
    read_queue_e->stcb = stcb;
    read_queue_e->port_from = stcb->rport;
    read_queue_e->do_not_ref_stcb = 0;
    read_queue_e->end_added = 0;
    read_queue_e->pdapi_aborted = 0;
failed_build:
    return (read_queue_e);
}

struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
    struct sctp_sndrcvinfo *sinfo)
{
    struct sctp_sndrcvinfo *outinfo;
    struct cmsghdr *cmh;
    struct mbuf *ret;
    int len;
    int use_extended = 0;

    if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
        /* user does not want the sndrcv ctl */
        return (NULL);
    }
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
        use_extended = 1;
        len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
    } else {
        len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
    }

    ret = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
    if (ret == NULL) {
        /* No space */
        return (ret);
    }
    /* We need a CMSG header followed by the struct */
    cmh = mtod(ret, struct cmsghdr *);
    outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
    cmh->cmsg_level = IPPROTO_SCTP;
    if (use_extended) {
        cmh->cmsg_type = SCTP_EXTRCV;
        cmh->cmsg_len = len;
        /* copy only the struct itself, not the cmsg header length */
        memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
    } else {
        cmh->cmsg_type = SCTP_SNDRCV;
        cmh->cmsg_len = len;
        *outinfo = *sinfo;
    }
    ret->m_len = cmh->cmsg_len;
    ret->m_pkthdr.len = ret->m_len;
    return (ret);
}
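
/*
 * The mbuf built above surfaces to the application as ancillary data on
 * recvmsg(). A minimal user-space sketch of consuming it (assuming the
 * SCTP_PCB_FLAGS_RECVDATAIOEVNT feature is enabled and msg is a filled-in
 * struct msghdr with a control buffer):
 *
 *	struct cmsghdr *cmh;
 *
 *	for (cmh = CMSG_FIRSTHDR(&msg); cmh != NULL;
 *	    cmh = CMSG_NXTHDR(&msg, cmh)) {
 *		if (cmh->cmsg_level == IPPROTO_SCTP &&
 *		    cmh->cmsg_type == SCTP_SNDRCV) {
 *			struct sctp_sndrcvinfo *si;
 *
 *			si = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
 *			... si->sinfo_stream, si->sinfo_ssn, si->sinfo_tsn
 *			    are now available ...
 *		}
 *	}
 */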

/*
 * We are delivering currently from the reassembly queue. We must continue
 * to deliver until we either: 1) run out of space, 2) run out of
 * sequential TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    struct sctp_tmit_chunk *chk;
    struct mbuf *m;
    uint16_t nxt_todel;
    uint16_t stream_no;
    int end = 0;
    int cntDel;
    struct sctp_queued_to_read *control, *ctl, *ctlat;

    cntDel = stream_no = 0;
    if (stcb && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
        (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
        ) {
        /* socket above is long gone */
        asoc->fragmented_delivery_inprogress = 0;
        chk = TAILQ_FIRST(&asoc->reasmqueue);
        while (chk) {
            TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
            asoc->size_on_reasm_queue -= chk->send_size;
            sctp_ucount_decr(asoc->cnt_on_reasm_queue);
            /*
             * Lose the data pointer, since it's in the socket
             * buffer
             */
            if (chk->data) {
                sctp_m_freem(chk->data);
                chk->data = NULL;
            }
            /* Now free the address and data */
            sctp_free_remote_addr(chk->whoTo);
            sctp_free_a_chunk(stcb, chk);
            chk = TAILQ_FIRST(&asoc->reasmqueue);
        }
        return;
    }
    SCTP_TCB_LOCK_ASSERT(stcb);
    do {
        chk = TAILQ_FIRST(&asoc->reasmqueue);
        if (chk == NULL) {
            return;
        }
        if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
            /* Can't deliver more :< */
            return;
        }
        stream_no = chk->rec.data.stream_number;
        nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
        if (nxt_todel != chk->rec.data.stream_seq &&
            (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
            /*
             * Not the next sequence to deliver in its stream OR
             * unordered
             */
            return;
        }
        if ((chk->data->m_flags & M_PKTHDR) == 0) {
            m = sctp_get_mbuf_for_msg(1, 1, M_DONTWAIT, 1, MT_DATA);
            if (m == NULL) {
                /* no room! */
                return;
            }
            m->m_pkthdr.len = chk->send_size;
            m->m_len = 0;
            m->m_next = chk->data;
            chk->data = m;
        }
        if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
            if (chk->data->m_next == NULL) {
                /* hopefully we hit here most of the time */
                chk->data->m_flags |= M_EOR;
            } else {
                /*
                 * Add the flag to the LAST mbuf in the
                 * chain
                 */
                m = chk->data;
                while (m->m_next != NULL) {
                    m = m->m_next;
                }
                m->m_flags |= M_EOR;
            }
        }
        if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
            control = sctp_build_readq_entry_chk(stcb, chk);
            if (control == NULL) {
                /* out of memory? */
                return;
            }
            /* save it off for our future deliveries */
            stcb->asoc.control_pdapi = control;
            if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
                end = 1;
            else
                end = 0;
            sctp_add_to_readq(stcb->sctp_ep,
                stcb, control, &stcb->sctp_socket->so_rcv, end);
            cntDel++;
        } else {
            if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
                end = 1;
            else
                end = 0;
            if (sctp_append_to_readq(stcb->sctp_ep, stcb,
                stcb->asoc.control_pdapi,
                chk->data, end, chk->rec.data.TSN_seq,
                &stcb->sctp_socket->so_rcv)) {
                /*
                 * something is very wrong: either
                 * control_pdapi is NULL, or the tail_mbuf
                 * is corrupt, or there is an EOM already on
                 * the mbuf chain.
                 */
                if (stcb->asoc.control_pdapi == NULL) {
                    panic("This should not happen control_pdapi NULL?");
                }
                if (stcb->asoc.control_pdapi->tail_mbuf == NULL) {
                    panic("This should not happen, tail_mbuf not being maintained?");
                }
                /* if we did not panic, it was an EOM */
                panic("Bad chunking ??");
            }
            cntDel++;
        }
        /* pull it off, we delivered it */
        TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
        if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
            asoc->fragmented_delivery_inprogress = 0;
            if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
                asoc->strmin[stream_no].last_sequence_delivered++;
            }
            if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
                SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
            }
        } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
            /*
             * turn the flag back on since we just delivered
             * yet another one.
             */
            asoc->fragmented_delivery_inprogress = 1;
        }
        asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
        asoc->last_flags_delivered = chk->rec.data.rcv_flags;
        asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
        asoc->last_strm_no_delivered = chk->rec.data.stream_number;

        asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
        asoc->size_on_reasm_queue -= chk->send_size;
        sctp_ucount_decr(asoc->cnt_on_reasm_queue);
        /* free up the chk */
        chk->data = NULL;
        sctp_free_remote_addr(chk->whoTo);
        sctp_free_a_chunk(stcb, chk);

        if (asoc->fragmented_delivery_inprogress == 0) {
            /*
             * Now let's see if we can deliver the next one on
             * the stream
             */
            uint16_t nxt_todel;
            struct sctp_stream_in *strm;

            strm = &asoc->strmin[stream_no];
            nxt_todel = strm->last_sequence_delivered + 1;
            ctl = TAILQ_FIRST(&strm->inqueue);
            if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
                while (ctl != NULL) {
                    /* Deliver more if we can. */
                    if (nxt_todel == ctl->sinfo_ssn) {
                        ctlat = TAILQ_NEXT(ctl, next);
                        TAILQ_REMOVE(&strm->inqueue, ctl, next);
                        asoc->size_on_all_streams -= ctl->length;
                        sctp_ucount_decr(asoc->cnt_on_all_streams);
                        strm->last_sequence_delivered++;
                        sctp_add_to_readq(stcb->sctp_ep, stcb,
                            ctl,
                            &stcb->sctp_socket->so_rcv, 1);
                        ctl = ctlat;
                    } else {
                        break;
                    }
                    nxt_todel = strm->last_sequence_delivered + 1;
                }
            }
            return;
        }
        chk = TAILQ_FIRST(&asoc->reasmqueue);
    } while (chk);
}
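
/*
 * Illustrative walk of the loop above (hypothetical TSNs): with
 * tsn_last_delivered == 99 and the reassembly queue holding TSN 100
 * (FIRST), 101 (MIDDLE) and 103 (LAST), TSNs 100 and 101 are pushed to
 * the read queue and the loop then stops at the gap (102 is missing).
 * fragmented_delivery_inprogress stays set, so delivery resumes from
 * TSN 102 once it arrives.
 */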

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go, OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains: what do we do when the socket buffer is FULL?
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
    /*
     * FIX-ME maybe? What happens when the ssn wraps? If we are getting
     * all the data in one stream this could happen quite rapidly. One
     * could use the TSN to keep track of things, but this scheme breaks
     * down in the other type of stream usage that could occur. Send a
     * single msg to stream 0, send 4 billion messages to stream 1, now
     * send a message to stream 0. You have a situation where the TSN
     * has wrapped but not in the stream. Is this worth worrying about,
     * or should we just change our queue sort at the bottom to be by
     * TSN?
     *
     * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
     * with TSN 1? If the peer is doing some sort of funky TSN/SSN
     * assignment this could happen... and I don't see how this would be
     * a violation. So for now I am undecided and will leave the sort by
     * SSN alone. Maybe a hybrid approach is the answer.
     */
    struct sctp_stream_in *strm;
    struct sctp_queued_to_read *at;
    int queue_needed;
    uint16_t nxt_todel;
    struct mbuf *oper;

    queue_needed = 1;
    asoc->size_on_all_streams += control->length;
    sctp_ucount_incr(asoc->cnt_on_all_streams);
    strm = &asoc->strmin[control->sinfo_stream];
    nxt_todel = strm->last_sequence_delivered + 1;
#ifdef SCTP_STR_LOGGING
    sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
#endif
#ifdef SCTP_DEBUG
    if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
        printf("queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
            (uint32_t) control->sinfo_stream,
            (uint32_t) strm->last_sequence_delivered, (uint32_t) nxt_todel);
    }
#endif
    if (compare_with_wrap(strm->last_sequence_delivered,
        control->sinfo_ssn, MAX_SEQ) ||
        (strm->last_sequence_delivered == control->sinfo_ssn)) {
        /* The incoming sseq is behind where we last delivered? */
#ifdef SCTP_DEBUG
        if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
            printf("Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
                control->sinfo_ssn,
                strm->last_sequence_delivered);
        }
#endif
        /*
         * throw it in the stream so it gets cleaned up in
         * association destruction
         */
        TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
        oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
            0, M_DONTWAIT, 1, MT_DATA);
        if (oper) {
            struct sctp_paramhdr *ph;
            uint32_t *ippp;

            oper->m_len = sizeof(struct sctp_paramhdr) +
                (sizeof(uint32_t) * 3);
            ph = mtod(oper, struct sctp_paramhdr *);
            ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
            ph->param_length = htons(oper->m_len);
            ippp = (uint32_t *) (ph + 1);
            *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
            ippp++;
            *ippp = control->sinfo_tsn;
            ippp++;
            *ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
        }
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
        sctp_abort_an_association(stcb->sctp_ep, stcb,
            SCTP_PEER_FAULTY, oper);
        *abort_flag = 1;
        return;
    }
    if (nxt_todel == control->sinfo_ssn) {
        /* can be delivered right away? */
#ifdef SCTP_STR_LOGGING
        sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
#endif
        queue_needed = 0;
        asoc->size_on_all_streams -= control->length;
        sctp_ucount_decr(asoc->cnt_on_all_streams);
        strm->last_sequence_delivered++;
        sctp_add_to_readq(stcb->sctp_ep, stcb,
            control,
            &stcb->sctp_socket->so_rcv, 1);
        control = TAILQ_FIRST(&strm->inqueue);
        while (control != NULL) {
            /* all delivered */
            nxt_todel = strm->last_sequence_delivered + 1;
            if (nxt_todel == control->sinfo_ssn) {
                at = TAILQ_NEXT(control, next);
                TAILQ_REMOVE(&strm->inqueue, control, next);
                asoc->size_on_all_streams -= control->length;
                sctp_ucount_decr(asoc->cnt_on_all_streams);
                strm->last_sequence_delivered++;
                /*
                 * We ignore the return of deliver_data here
                 * since we always can hold the chunk on the
                 * d-queue. And we have a finite number that
                 * can be delivered from the strq.
                 */
#ifdef SCTP_STR_LOGGING
                sctp_log_strm_del(control, NULL,
                    SCTP_STR_LOG_FROM_IMMED_DEL);
#endif
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, 1);
                control = at;
                continue;
            }
            break;
        }
    }
    if (queue_needed) {
        /*
         * Ok, we did not deliver this guy, find the correct place
         * to put it on the queue.
         */
        if (TAILQ_EMPTY(&strm->inqueue)) {
            /* Empty queue */
#ifdef SCTP_STR_LOGGING
            sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
#endif
            TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
        } else {
            TAILQ_FOREACH(at, &strm->inqueue, next) {
                if (compare_with_wrap(at->sinfo_ssn,
                    control->sinfo_ssn, MAX_SEQ)) {
                    /*
                     * one in queue is bigger than the
                     * new one, insert before this one
                     */
#ifdef SCTP_STR_LOGGING
                    sctp_log_strm_del(control, at,
                        SCTP_STR_LOG_FROM_INSERT_MD);
#endif
                    TAILQ_INSERT_BEFORE(at, control, next);
                    break;
                } else if (at->sinfo_ssn == control->sinfo_ssn) {
                    /*
                     * Gak, he sent me a duplicate str
                     * seq number
                     */
                    /*
                     * foo bar, I guess I will just free
                     * this new guy, should we abort
                     * too? FIX ME MAYBE? Or it COULD be
                     * that the SSNs have wrapped.
                     * Maybe I should compare to TSN
                     * somehow... sigh, for now just blow
                     * away the chunk!
                     */
                    if (control->data)
                        sctp_m_freem(control->data);
                    control->data = NULL;
                    asoc->size_on_all_streams -= control->length;
                    sctp_ucount_decr(asoc->cnt_on_all_streams);
                    sctp_free_remote_addr(control->whoFrom);
                    sctp_free_a_readq(stcb, control);
                    return;
                } else {
                    if (TAILQ_NEXT(at, next) == NULL) {
                        /*
                         * We are at the end, insert
                         * it after this one
                         */
#ifdef SCTP_STR_LOGGING
                        sctp_log_strm_del(control, at,
                            SCTP_STR_LOG_FROM_INSERT_TL);
#endif
                        TAILQ_INSERT_AFTER(&strm->inqueue,
                            at, control, next);
                        break;
                    }
                }
            }
        }
    }
}
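
/*
 * Note on the ordering test used above (illustrative): compare_with_wrap()
 * is a serial-number comparison, so with 16-bit SSNs a
 * last_sequence_delivered of 65535 treats an incoming SSN of 0 as
 * "ahead", not "behind". The abort path at the top of the function
 * therefore only fires for a genuinely duplicate or stale SSN, e.g.
 * SSN 5 arriving after SSN 5 has already been handed to the socket.
 */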
And you get a 1 back if 752 * all of the message is ready or a 0 back if the message is still incomplete 753 */ 754 static int 755 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size) 756 { 757 struct sctp_tmit_chunk *chk; 758 uint32_t tsn; 759 760 *t_size = 0; 761 chk = TAILQ_FIRST(&asoc->reasmqueue); 762 if (chk == NULL) { 763 /* nothing on the queue */ 764 return (0); 765 } 766 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) { 767 /* Not a first on the queue */ 768 return (0); 769 } 770 tsn = chk->rec.data.TSN_seq; 771 while (chk) { 772 if (tsn != chk->rec.data.TSN_seq) { 773 return (0); 774 } 775 *t_size += chk->send_size; 776 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 777 return (1); 778 } 779 tsn++; 780 chk = TAILQ_NEXT(chk, sctp_next); 781 } 782 return (0); 783 } 784 785 static void 786 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc) 787 { 788 struct sctp_tmit_chunk *chk; 789 uint16_t nxt_todel; 790 uint32_t tsize; 791 792 chk = TAILQ_FIRST(&asoc->reasmqueue); 793 if (chk == NULL) { 794 /* Huh? */ 795 asoc->size_on_reasm_queue = 0; 796 asoc->cnt_on_reasm_queue = 0; 797 return; 798 } 799 if (asoc->fragmented_delivery_inprogress == 0) { 800 nxt_todel = 801 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1; 802 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) && 803 (nxt_todel == chk->rec.data.stream_seq || 804 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) { 805 /* 806 * Yep the first one is here and its ok to deliver 807 * but should we? 808 */ 809 if ((sctp_is_all_msg_on_reasm(asoc, &tsize) || 810 (tsize > stcb->sctp_ep->partial_delivery_point))) { 811 812 /* 813 * Yes, we setup to start reception, by 814 * backing down the TSN just in case we 815 * can't deliver. If we 816 */ 817 asoc->fragmented_delivery_inprogress = 1; 818 asoc->tsn_last_delivered = 819 chk->rec.data.TSN_seq - 1; 820 asoc->str_of_pdapi = 821 chk->rec.data.stream_number; 822 asoc->ssn_of_pdapi = chk->rec.data.stream_seq; 823 asoc->pdapi_ppid = chk->rec.data.payloadtype; 824 asoc->fragment_flags = chk->rec.data.rcv_flags; 825 sctp_service_reassembly(stcb, asoc); 826 } 827 } 828 } else { 829 sctp_service_reassembly(stcb, asoc); 830 } 831 } 832 833 /* 834 * Dump onto the re-assembly queue, in its proper place. After dumping on the 835 * queue, see if anthing can be delivered. If so pull it off (or as much as 836 * we can. If we run out of space then we must dump what we can and set the 837 * appropriate flag to say we queued what we could. 

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so, pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
    struct mbuf *oper;
    uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
    u_char last_flags;
    struct sctp_tmit_chunk *at, *prev, *next;

    prev = next = NULL;
    cum_ackp1 = asoc->tsn_last_delivered + 1;
    if (TAILQ_EMPTY(&asoc->reasmqueue)) {
        /* This is the first one on the queue */
        TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
        /*
         * we do not check for delivery of anything when only one
         * fragment is here
         */
        asoc->size_on_reasm_queue = chk->send_size;
        sctp_ucount_incr(asoc->cnt_on_reasm_queue);
        if (chk->rec.data.TSN_seq == cum_ackp1) {
            if (asoc->fragmented_delivery_inprogress == 0 &&
                (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
                SCTP_DATA_FIRST_FRAG) {
                /*
                 * An empty queue, no delivery in progress;
                 * we hit the next one and it does NOT have
                 * a FIRST fragment mark.
                 */
#ifdef SCTP_DEBUG
                if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                    printf("Gak, Evil plot, its not first, no fragmented delivery in progress\n");
                }
#endif
                oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
                    0, M_DONTWAIT, 1, MT_DATA);
                if (oper) {
                    struct sctp_paramhdr *ph;
                    uint32_t *ippp;

                    oper->m_len =
                        sizeof(struct sctp_paramhdr) +
                        (sizeof(uint32_t) * 3);
                    ph = mtod(oper, struct sctp_paramhdr *);
                    ph->param_type =
                        htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                    ph->param_length = htons(oper->m_len);
                    ippp = (uint32_t *) (ph + 1);
                    *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
                    ippp++;
                    *ippp = chk->rec.data.TSN_seq;
                    ippp++;
                    *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                }
                stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
                sctp_abort_an_association(stcb->sctp_ep, stcb,
                    SCTP_PEER_FAULTY, oper);
                *abort_flag = 1;
            } else if (asoc->fragmented_delivery_inprogress &&
                (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
                /*
                 * We are doing a partial delivery and the
                 * NEXT chunk MUST be either the LAST or a
                 * MIDDLE fragment, NOT a FIRST
                 */
#ifdef SCTP_DEBUG
                if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                    printf("Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
                }
#endif
                oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
                    0, M_DONTWAIT, 1, MT_DATA);
                if (oper) {
                    struct sctp_paramhdr *ph;
                    uint32_t *ippp;

                    oper->m_len =
                        sizeof(struct sctp_paramhdr) +
                        (3 * sizeof(uint32_t));
                    ph = mtod(oper, struct sctp_paramhdr *);
                    ph->param_type =
                        htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                    ph->param_length = htons(oper->m_len);
                    ippp = (uint32_t *) (ph + 1);
                    *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
                    ippp++;
                    *ippp = chk->rec.data.TSN_seq;
                    ippp++;
                    *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                }
                stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
                sctp_abort_an_association(stcb->sctp_ep, stcb,
                    SCTP_PEER_FAULTY, oper);
                *abort_flag = 1;
            } else if (asoc->fragmented_delivery_inprogress) {
                /*
                 * Here we are ok with a MIDDLE or LAST
                 * piece
                 */
                if (chk->rec.data.stream_number !=
                    asoc->str_of_pdapi) {
                    /* Got to be the right STR No */
#ifdef SCTP_DEBUG
                    if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                        printf("Gak, Evil plot, it IS not same stream number %d vs %d\n",
                            chk->rec.data.stream_number,
                            asoc->str_of_pdapi);
                    }
#endif
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        oper->m_len =
                            sizeof(struct sctp_paramhdr) +
                            (sizeof(uint32_t) * 3);
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(oper->m_len);
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
                    *abort_flag = 1;
                } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
                    SCTP_DATA_UNORDERED &&
                    chk->rec.data.stream_seq !=
                    asoc->ssn_of_pdapi) {
                    /* Got to be the right STR Seq */
#ifdef SCTP_DEBUG
                    if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                        printf("Gak, Evil plot, it IS not same stream seq %d vs %d\n",
                            chk->rec.data.stream_seq,
                            asoc->ssn_of_pdapi);
                    }
#endif
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        oper->m_len =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(oper->m_len);
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
                    *abort_flag = 1;
                }
            }
        }
        return;
    }
    /* Find its place */
    TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
        if (compare_with_wrap(at->rec.data.TSN_seq,
            chk->rec.data.TSN_seq, MAX_TSN)) {
            /*
             * one in queue is bigger than the new one, insert
             * before this one
             */
            /* A check */
            asoc->size_on_reasm_queue += chk->send_size;
            sctp_ucount_incr(asoc->cnt_on_reasm_queue);
            next = at;
            TAILQ_INSERT_BEFORE(at, chk, sctp_next);
            break;
        } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
            /* Gak, he sent me a duplicate str seq number */
            /*
             * foo bar, I guess I will just free this new guy,
             * should we abort too? FIX ME MAYBE? Or it COULD be
             * that the SSNs have wrapped. Maybe I should
             * compare to TSN somehow... sigh, for now just blow
             * away the chunk!
             */
            if (chk->data) {
                sctp_m_freem(chk->data);
                chk->data = NULL;
            }
            sctp_free_remote_addr(chk->whoTo);
            sctp_free_a_chunk(stcb, chk);
            return;
        } else {
            last_flags = at->rec.data.rcv_flags;
            last_tsn = at->rec.data.TSN_seq;
            prev = at;
            if (TAILQ_NEXT(at, sctp_next) == NULL) {
                /*
                 * We are at the end, insert it after this
                 * one
                 */
                /* check it first */
                asoc->size_on_reasm_queue += chk->send_size;
                sctp_ucount_incr(asoc->cnt_on_reasm_queue);
                TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
                break;
            }
        }
    }
    /* Now the audits */
    if (prev) {
        prev_tsn = chk->rec.data.TSN_seq - 1;
        if (prev_tsn == prev->rec.data.TSN_seq) {
            /*
             * Ok, the one I am dropping onto the end is the
             * NEXT. A bit of validation here.
             */
            if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_FIRST_FRAG ||
                (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_MIDDLE_FRAG) {
                /*
                 * Insert chk MUST be a MIDDLE or LAST
                 * fragment
                 */
                if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                    SCTP_DATA_FIRST_FRAG) {
#ifdef SCTP_DEBUG
                    if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                        printf("Prev check - It can be a middle or last but not a first\n");
                        printf("Gak, Evil plot, it's a FIRST!\n");
                    }
#endif
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        oper->m_len =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(oper->m_len);
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
                    *abort_flag = 1;
                    return;
                }
                if (chk->rec.data.stream_number !=
                    prev->rec.data.stream_number) {
                    /*
                     * Huh, need the correct STR here,
                     * they must be the same.
                     */
#ifdef SCTP_DEBUG
                    if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                        printf("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
                            chk->rec.data.stream_number,
                            prev->rec.data.stream_number);
                    }
#endif
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        oper->m_len =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(oper->m_len);
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
                    *abort_flag = 1;
                    return;
                }
                if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
                    chk->rec.data.stream_seq !=
                    prev->rec.data.stream_seq) {
                    /*
                     * Huh, need the correct STR here,
                     * they must be the same.
                     */
#ifdef SCTP_DEBUG
                    if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                        printf("Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
                            chk->rec.data.stream_seq,
                            prev->rec.data.stream_seq);
                    }
#endif
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        oper->m_len =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(oper->m_len);
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
                    *abort_flag = 1;
                    return;
                }
            } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_LAST_FRAG) {
                /* Insert chk MUST be a FIRST */
                if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
                    SCTP_DATA_FIRST_FRAG) {
#ifdef SCTP_DEBUG
                    if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                        printf("Prev check - Gak, evil plot, its not FIRST and it must be!\n");
                    }
#endif
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        oper->m_len =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(oper->m_len);
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
                    *abort_flag = 1;
                    return;
                }
            }
        }
    }
    if (next) {
        post_tsn = chk->rec.data.TSN_seq + 1;
        if (post_tsn == next->rec.data.TSN_seq) {
            /*
             * Ok, the one I am inserting ahead of is my NEXT
             * one. A bit of validation here.
             */
            if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
                /* Insert chk MUST be a last fragment */
                if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
                    != SCTP_DATA_LAST_FRAG) {
#ifdef SCTP_DEBUG
                    if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                        printf("Next chk - Next is FIRST, we must be LAST\n");
                        printf("Gak, Evil plot, its not a last!\n");
                    }
#endif
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        oper->m_len =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(oper->m_len);
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
                    *abort_flag = 1;
                    return;
                }
            } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_MIDDLE_FRAG ||
                (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_LAST_FRAG) {
                /*
                 * Insert chk CAN be MIDDLE or FIRST, NOT
                 * LAST
                 */
                if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                    SCTP_DATA_LAST_FRAG) {
#ifdef SCTP_DEBUG
                    if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                        printf("Next chk - Next is a MIDDLE/LAST\n");
                        printf("Gak, Evil plot, new prev chunk is a LAST\n");
                    }
#endif
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        oper->m_len =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(oper->m_len);
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
                    *abort_flag = 1;
                    return;
                }
                if (chk->rec.data.stream_number !=
                    next->rec.data.stream_number) {
                    /*
                     * Huh, need the correct STR here,
                     * they must be the same.
                     */
#ifdef SCTP_DEBUG
                    if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                        printf("Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
                            chk->rec.data.stream_number,
                            next->rec.data.stream_number);
                    }
#endif
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        oper->m_len =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(oper->m_len);
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
                    *abort_flag = 1;
                    return;
                }
                if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
                    chk->rec.data.stream_seq !=
                    next->rec.data.stream_seq) {
                    /*
                     * Huh, need the correct STR here,
                     * they must be the same.
                     */
#ifdef SCTP_DEBUG
                    if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                        printf("Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
                            chk->rec.data.stream_seq,
                            next->rec.data.stream_seq);
                    }
#endif
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        oper->m_len =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(oper->m_len);
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
                    *abort_flag = 1;
                    return;
                }
            }
        }
    }
    /* Do we need to do some delivery? check */
    sctp_deliver_reasm_check(stcb, asoc);
}
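
/*
 * Summary of the neighbor audits above, using the DATA fragment bits
 * (B,E) = (FIRST,LAST):
 *
 *	predecessor FIRST/MIDDLE -> new chunk must be MIDDLE or LAST,
 *	    on the same stream and (if ordered) the same stream seq;
 *	predecessor LAST         -> new chunk must be a FIRST;
 *	successor FIRST          -> new chunk must be a LAST;
 *	successor MIDDLE/LAST    -> new chunk must not be a LAST, and
 *	    must match the successor's stream and (if ordered) seq.
 *
 * Any violation is reported as a protocol violation and the
 * association is aborted.
 */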
1461 */ 1462 return (1); 1463 } else { 1464 /* 1465 * This guy is ok since its a LAST 1466 * and the new chunk is a fully 1467 * self- contained one. 1468 */ 1469 return (0); 1470 } 1471 } 1472 } else if (TSN_seq == at->rec.data.TSN_seq) { 1473 /* Software error since I have a dup? */ 1474 return (1); 1475 } else { 1476 /* 1477 * Ok, 'at' is larger than new chunk but does it 1478 * need to be right before it. 1479 */ 1480 tsn_est = TSN_seq + 1; 1481 if (tsn_est == at->rec.data.TSN_seq) { 1482 /* Yep, It better be a first */ 1483 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) != 1484 SCTP_DATA_FIRST_FRAG) { 1485 return (1); 1486 } else { 1487 return (0); 1488 } 1489 } 1490 } 1491 } 1492 return (0); 1493 } 1494 1495 1496 extern unsigned int sctp_max_chunks_on_queue; 1497 static int 1498 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc, 1499 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length, 1500 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag, 1501 int *break_flag, int last_chunk) 1502 { 1503 /* Process a data chunk */ 1504 /* struct sctp_tmit_chunk *chk; */ 1505 struct sctp_tmit_chunk *chk; 1506 uint32_t tsn, gap; 1507 struct mbuf *dmbuf; 1508 int indx, the_len; 1509 uint16_t strmno, strmseq; 1510 struct mbuf *oper; 1511 struct sctp_queued_to_read *control; 1512 1513 chk = NULL; 1514 tsn = ntohl(ch->dp.tsn); 1515 #ifdef SCTP_MAP_LOGGING 1516 sctp_log_map(0, tsn, asoc->cumulative_tsn, SCTP_MAP_PREPARE_SLIDE); 1517 #endif 1518 if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) || 1519 asoc->cumulative_tsn == tsn) { 1520 /* It is a duplicate */ 1521 SCTP_STAT_INCR(sctps_recvdupdata); 1522 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { 1523 /* Record a dup for the next outbound sack */ 1524 asoc->dup_tsns[asoc->numduptsns] = tsn; 1525 asoc->numduptsns++; 1526 } 1527 return (0); 1528 } 1529 /* Calculate the number of TSN's between the base and this TSN */ 1530 if (tsn >= asoc->mapping_array_base_tsn) { 1531 gap = tsn - asoc->mapping_array_base_tsn; 1532 } else { 1533 gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1; 1534 } 1535 if (gap >= (SCTP_MAPPING_ARRAY << 3)) { 1536 /* Can't hold the bit in the mapping at max array, toss it */ 1537 return (0); 1538 } 1539 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) { 1540 if (sctp_expand_mapping_array(asoc)) { 1541 /* Can't expand, drop it */ 1542 return (0); 1543 } 1544 } 1545 if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) { 1546 *high_tsn = tsn; 1547 } 1548 /* See if we have received this one already */ 1549 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) { 1550 SCTP_STAT_INCR(sctps_recvdupdata); 1551 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { 1552 /* Record a dup for the next outbound sack */ 1553 asoc->dup_tsns[asoc->numduptsns] = tsn; 1554 asoc->numduptsns++; 1555 } 1556 if (!callout_pending(&asoc->dack_timer.timer)) { 1557 /* 1558 * By starting the timer we assure that we WILL sack 1559 * at the end of the packet when sctp_sack_check 1560 * gets called. 
1561 */ 1562 sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, 1563 stcb, NULL); 1564 } 1565 return (0); 1566 } 1567 /* 1568 * Check to see about the GONE flag, duplicates would cause a sack 1569 * to be sent up above 1570 */ 1571 if (stcb && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 1572 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 1573 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) 1574 ) { 1575 /* 1576 * wait a minute, this guy is gone, there is no longer a 1577 * receiver. Send peer an ABORT! 1578 */ 1579 struct mbuf *op_err; 1580 1581 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 1582 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err); 1583 *abort_flag = 1; 1584 return (0); 1585 } 1586 /* 1587 * Now before going further we see if there is room. If NOT then we 1588 * MAY let one through only IF this TSN is the one we are waiting 1589 * for on a partial delivery API. 1590 */ 1591 1592 /* now do the tests */ 1593 if (((asoc->cnt_on_all_streams + 1594 asoc->cnt_on_reasm_queue + 1595 asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) || 1596 (((int)asoc->my_rwnd) <= 0)) { 1597 /* 1598 * When we have NO room in the rwnd we check to make sure 1599 * the reader is doing its job... 1600 */ 1601 if (stcb->sctp_socket->so_rcv.sb_cc) { 1602 /* some to read, wake-up */ 1603 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 1604 } 1605 /* now is it in the mapping array of what we have accepted? */ 1606 if (compare_with_wrap(tsn, 1607 asoc->highest_tsn_inside_map, MAX_TSN)) { 1608 1609 /* Nope not in the valid range dump it */ 1610 #ifdef SCTP_DEBUG 1611 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1612 printf("My rwnd overrun1:tsn:%lx rwnd %lu sbspace:%ld\n", 1613 (u_long)tsn, (u_long)asoc->my_rwnd, 1614 sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)); 1615 1616 } 1617 #endif 1618 sctp_set_rwnd(stcb, asoc); 1619 if ((asoc->cnt_on_all_streams + 1620 asoc->cnt_on_reasm_queue + 1621 asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) { 1622 SCTP_STAT_INCR(sctps_datadropchklmt); 1623 } else { 1624 SCTP_STAT_INCR(sctps_datadroprwnd); 1625 } 1626 indx = *break_flag; 1627 *break_flag = 1; 1628 return (0); 1629 } 1630 } 1631 strmno = ntohs(ch->dp.stream_id); 1632 if (strmno >= asoc->streamincnt) { 1633 struct sctp_paramhdr *phdr; 1634 struct mbuf *mb; 1635 1636 mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2), 1637 1, M_DONTWAIT, 1, MT_DATA); 1638 if (mb != NULL) { 1639 /* add some space up front so prepend will work well */ 1640 mb->m_data += sizeof(struct sctp_chunkhdr); 1641 phdr = mtod(mb, struct sctp_paramhdr *); 1642 /* 1643 * Error causes are just param's and this one has 1644 * two back to back phdr, one with the error type 1645 * and size, the other with the streamid and a rsvd 1646 */ 1647 mb->m_pkthdr.len = mb->m_len = 1648 (sizeof(struct sctp_paramhdr) * 2); 1649 phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM); 1650 phdr->param_length = 1651 htons(sizeof(struct sctp_paramhdr) * 2); 1652 phdr++; 1653 /* We insert the stream in the type field */ 1654 phdr->param_type = ch->dp.stream_id; 1655 /* And set the length to 0 for the rsvd field */ 1656 phdr->param_length = 0; 1657 sctp_queue_op_err(stcb, mb); 1658 } 1659 SCTP_STAT_INCR(sctps_badsid); 1660 return (0); 1661 } 1662 /* 1663 * Before we continue lets validate that we are not being fooled by 1664 * an evil attacker. 
    /*
     * Before we continue, let's validate that we are not being fooled by
     * an evil attacker. We can only have 4k chunks based on our TSN
     * spread allowed by the mapping array (512 * 8 bits), so there is no
     * way our stream sequence numbers could have wrapped. We of course
     * only validate the FIRST fragment, so the bit must be set.
     */
    strmseq = ntohs(ch->dp.stream_sequence);
    if ((ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG) &&
        (ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
        (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
        strmseq, MAX_SEQ) ||
        asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
        /* The incoming sseq is behind where we last delivered? */
#ifdef SCTP_DEBUG
        if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
            printf("EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
                strmseq,
                asoc->strmin[strmno].last_sequence_delivered);
        }
#endif
        /*
         * throw it in the stream so it gets cleaned up in
         * association destruction
         */
        oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
            0, M_DONTWAIT, 1, MT_DATA);
        if (oper) {
            struct sctp_paramhdr *ph;
            uint32_t *ippp;

            oper->m_len = sizeof(struct sctp_paramhdr) +
                (3 * sizeof(uint32_t));
            ph = mtod(oper, struct sctp_paramhdr *);
            ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
            ph->param_length = htons(oper->m_len);
            ippp = (uint32_t *) (ph + 1);
            *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
            ippp++;
            *ippp = tsn;
            ippp++;
            *ippp = ((strmno << 16) | strmseq);
        }
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
        sctp_abort_an_association(stcb->sctp_ep, stcb,
            SCTP_PEER_FAULTY, oper);
        *abort_flag = 1;
        return (0);
    }
    the_len = (chk_length - sizeof(struct sctp_data_chunk));
    if (last_chunk == 0) {
        dmbuf = sctp_m_copym(*m,
            (offset + sizeof(struct sctp_data_chunk)),
            the_len, M_DONTWAIT);
#ifdef SCTP_MBUF_LOGGING
        {
            struct mbuf *mat;

            mat = dmbuf;
            while (mat) {
                if (mat->m_flags & M_EXT) {
                    sctp_log_mb(mat, SCTP_MBUF_ICOPY);
                }
                mat = mat->m_next;
            }
        }
#endif
    } else {
        /* We can steal the last chunk */
        dmbuf = *m;
        /* lop off the top part */
        m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
        if (dmbuf->m_pkthdr.len > the_len) {
            /* Trim the end, round bytes off too */
            m_adj(dmbuf, -(dmbuf->m_pkthdr.len - the_len));
        }
    }
    if (dmbuf == NULL) {
        SCTP_STAT_INCR(sctps_nomem);
        return (0);
    }
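    /*
     * What follows is the express-delivery fast path: a chunk that is
     * unfragmented, not blocked by an in-progress partial delivery or a
     * pending stream reset, and either unordered or exactly the next
     * in-order SSN with nothing queued ahead of it, goes straight onto
     * the socket's read queue, bypassing the stream and reassembly
     * queues.
     */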
1757 */ 1758 1759 /* It would be nice to avoid this copy if we could :< */ 1760 sctp_alloc_a_readq(stcb, control); 1761 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 1762 ch->dp.protocol_id, 1763 stcb->asoc.context, 1764 strmno, strmseq, 1765 ch->ch.chunk_flags, 1766 dmbuf); 1767 if (control == NULL) { 1768 goto failed_express_del; 1769 } 1770 sctp_add_to_readq(stcb->sctp_ep, stcb, control, &stcb->sctp_socket->so_rcv, 1); 1771 if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) { 1772 /* for ordered, bump what we delivered */ 1773 asoc->strmin[strmno].last_sequence_delivered++; 1774 } 1775 SCTP_STAT_INCR(sctps_recvexpress); 1776 #ifdef SCTP_STR_LOGGING 1777 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, 1778 SCTP_STR_LOG_FROM_EXPRS_DEL); 1779 #endif 1780 control = NULL; 1781 goto finish_express_del; 1782 } 1783 failed_express_del: 1784 /* If we reach here this is a new chunk */ 1785 chk = NULL; 1786 control = NULL; 1787 /* Express for fragmented delivery? */ 1788 if ((asoc->fragmented_delivery_inprogress) && 1789 (stcb->asoc.control_pdapi) && 1790 (asoc->str_of_pdapi == strmno) && 1791 (asoc->ssn_of_pdapi == strmseq) 1792 ) { 1793 control = stcb->asoc.control_pdapi; 1794 if ((ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) { 1795 /* Can't be another first? */ 1796 goto failed_pdapi_express_del; 1797 } 1798 if (tsn == (control->sinfo_tsn + 1)) { 1799 /* Yep, we can add it on */ 1800 int end = 0; 1801 uint32_t cumack; 1802 1803 if (ch->ch.chunk_flags & SCTP_DATA_LAST_FRAG) { 1804 end = 1; 1805 } 1806 cumack = asoc->cumulative_tsn; 1807 if ((cumack + 1) == tsn) 1808 cumack = tsn; 1809 1810 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end, 1811 tsn, 1812 &stcb->sctp_socket->so_rcv)) { 1813 printf("Append fails end:%d\n", end); 1814 goto failed_pdapi_express_del; 1815 } 1816 SCTP_STAT_INCR(sctps_recvexpressm); 1817 control->sinfo_tsn = tsn; 1818 asoc->tsn_last_delivered = tsn; 1819 asoc->fragment_flags = ch->ch.chunk_flags; 1820 asoc->tsn_of_pdapi_last_delivered = tsn; 1821 asoc->last_flags_delivered = ch->ch.chunk_flags; 1822 asoc->last_strm_seq_delivered = strmseq; 1823 asoc->last_strm_no_delivered = strmno; 1824 1825 if (end) { 1826 /* clean up the flags and such */ 1827 asoc->fragmented_delivery_inprogress = 0; 1828 if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) { 1829 asoc->strmin[strmno].last_sequence_delivered++; 1830 } 1831 stcb->asoc.control_pdapi = NULL; 1832 } 1833 control = NULL; 1834 goto finish_express_del; 1835 } 1836 } 1837 failed_pdapi_express_del: 1838 control = NULL; 1839 if ((ch->ch.chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 1840 sctp_alloc_a_chunk(stcb, chk); 1841 if (chk == NULL) { 1842 /* No memory so we drop the chunk */ 1843 SCTP_STAT_INCR(sctps_nomem); 1844 if (last_chunk == 0) { 1845 /* we copied it, free the copy */ 1846 sctp_m_freem(dmbuf); 1847 } 1848 return (0); 1849 } 1850 chk->rec.data.TSN_seq = tsn; 1851 chk->no_fr_allowed = 0; 1852 chk->rec.data.stream_seq = strmseq; 1853 chk->rec.data.stream_number = strmno; 1854 chk->rec.data.payloadtype = ch->dp.protocol_id; 1855 chk->rec.data.context = stcb->asoc.context; 1856 chk->rec.data.doing_fast_retransmit = 0; 1857 chk->rec.data.rcv_flags = ch->ch.chunk_flags; 1858 chk->asoc = asoc; 1859 chk->send_size = the_len; 1860 chk->whoTo = net; 1861 atomic_add_int(&net->ref_count, 1); 1862 chk->data = dmbuf; 1863 } else { 1864 sctp_alloc_a_readq(stcb, control); 1865 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 1866 
ch->dp.protocol_id, 1867 stcb->asoc.context, 1868 strmno, strmseq, 1869 ch->ch.chunk_flags, 1870 dmbuf); 1871 if (control == NULL) { 1872 /* No memory so we drop the chunk */ 1873 SCTP_STAT_INCR(sctps_nomem); 1874 if (last_chunk == 0) { 1875 /* we copied it, free the copy */ 1876 sctp_m_freem(dmbuf); 1877 } 1878 return (0); 1879 } 1880 control->length = the_len; 1881 } 1882 1883 /* Mark it as received */ 1884 /* Now queue it where it belongs */ 1885 if (control != NULL) { 1886 /* First a sanity check */ 1887 if (asoc->fragmented_delivery_inprogress) { 1888 /* 1889 * Ok, we have a fragmented delivery in progress if 1890 * this chunk is next to deliver OR belongs in our 1891 * view to the reassembly, the peer is evil or 1892 * broken. 1893 */ 1894 uint32_t estimate_tsn; 1895 1896 estimate_tsn = asoc->tsn_last_delivered + 1; 1897 if (TAILQ_EMPTY(&asoc->reasmqueue) && 1898 (estimate_tsn == control->sinfo_tsn)) { 1899 /* Evil/Broke peer */ 1900 sctp_m_freem(control->data); 1901 control->data = NULL; 1902 sctp_free_remote_addr(control->whoFrom); 1903 sctp_free_a_readq(stcb, control); 1904 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1905 0, M_DONTWAIT, 1, MT_DATA); 1906 if (oper) { 1907 struct sctp_paramhdr *ph; 1908 uint32_t *ippp; 1909 1910 oper->m_len = 1911 sizeof(struct sctp_paramhdr) + 1912 (3 * sizeof(uint32_t)); 1913 ph = mtod(oper, struct sctp_paramhdr *); 1914 ph->param_type = 1915 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1916 ph->param_length = htons(oper->m_len); 1917 ippp = (uint32_t *) (ph + 1); 1918 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15); 1919 ippp++; 1920 *ippp = tsn; 1921 ippp++; 1922 *ippp = ((strmno << 16) | strmseq); 1923 } 1924 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15; 1925 sctp_abort_an_association(stcb->sctp_ep, stcb, 1926 SCTP_PEER_FAULTY, oper); 1927 1928 *abort_flag = 1; 1929 return (0); 1930 } else { 1931 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) { 1932 sctp_m_freem(control->data); 1933 control->data = NULL; 1934 sctp_free_remote_addr(control->whoFrom); 1935 sctp_free_a_readq(stcb, control); 1936 1937 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1938 0, M_DONTWAIT, 1, MT_DATA); 1939 if (oper) { 1940 struct sctp_paramhdr *ph; 1941 uint32_t *ippp; 1942 1943 oper->m_len = 1944 sizeof(struct sctp_paramhdr) + 1945 (3 * sizeof(uint32_t)); 1946 ph = mtod(oper, 1947 struct sctp_paramhdr *); 1948 ph->param_type = 1949 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1950 ph->param_length = 1951 htons(oper->m_len); 1952 ippp = (uint32_t *) (ph + 1); 1953 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16); 1954 ippp++; 1955 *ippp = tsn; 1956 ippp++; 1957 *ippp = ((strmno << 16) | strmseq); 1958 } 1959 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16; 1960 sctp_abort_an_association(stcb->sctp_ep, 1961 stcb, SCTP_PEER_FAULTY, oper); 1962 1963 *abort_flag = 1; 1964 return (0); 1965 } 1966 } 1967 } else { 1968 /* No PDAPI running */ 1969 if (!TAILQ_EMPTY(&asoc->reasmqueue)) { 1970 /* 1971 * Reassembly queue is NOT empty validate 1972 * that this tsn does not need to be in 1973 * reasembly queue. If it does then our peer 1974 * is broken or evil. 
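* (A complete chunk whose TSN the reassembly queue already spans can
* only mean overlapping or duplicate fragments, hence the abort below.)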
1975 */
1976 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1977 sctp_m_freem(control->data);
1978 control->data = NULL;
1979 sctp_free_remote_addr(control->whoFrom);
1980 sctp_free_a_readq(stcb, control);
1981 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
1982 0, M_DONTWAIT, 1, MT_DATA);
1983 if (oper) {
1984 struct sctp_paramhdr *ph;
1985 uint32_t *ippp;
1986
1987 oper->m_len =
1988 sizeof(struct sctp_paramhdr) +
1989 (3 * sizeof(uint32_t));
1990 ph = mtod(oper,
1991 struct sctp_paramhdr *);
1992 ph->param_type =
1993 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1994 ph->param_length =
1995 htons(oper->m_len);
1996 ippp = (uint32_t *) (ph + 1);
1997 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
1998 ippp++;
1999 *ippp = tsn;
2000 ippp++;
2001 *ippp = ((strmno << 16) | strmseq);
2002 }
2003 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2004 sctp_abort_an_association(stcb->sctp_ep,
2005 stcb, SCTP_PEER_FAULTY, oper);
2006
2007 *abort_flag = 1;
2008 return (0);
2009 }
2010 }
2011 }
2012 /* ok, if we reach here we have passed the sanity checks */
2013 if (ch->ch.chunk_flags & SCTP_DATA_UNORDERED) {
2014 /* queue directly into socket buffer */
2015 sctp_add_to_readq(stcb->sctp_ep, stcb,
2016 control,
2017 &stcb->sctp_socket->so_rcv, 1);
2018 } else {
2019 /*
2020 * Special check for when streams are resetting. We
2021 * could be more smart about this and check the
2022 * actual stream to see if it is not being reset..
2023 * that way we would not create a HOLB when amongst
2024 * streams being reset and those not being reset.
2025 *
2026 * We take complete messages that have a stream reset
2027 * intervening (aka the TSN is after where our
2028 * cum-ack needs to be) off and put them on a
2029 * pending_reply_queue. The reassembly ones we do
2030 * not have to worry about since they are all sorted
2031 * and processed by TSN order. It is only the
2032 * singletons I must worry about.
2033 */
2034 struct sctp_stream_reset_list *liste;
2035
2036 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2037 ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)) ||
2038 (tsn == ntohl(liste->tsn)))
2039 ) {
2040 /*
2041 * yep its past where we need to reset... go
2042 * ahead and queue it.
2043 */
2044 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2045 /* first one on */
2046 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2047 } else {
2048 struct sctp_queued_to_read *ctlOn;
2049 unsigned char inserted = 0;
2050
2051 ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
2052 while (ctlOn) {
2053 if (compare_with_wrap(control->sinfo_tsn,
2054 ctlOn->sinfo_tsn, MAX_TSN)) {
2055 ctlOn = TAILQ_NEXT(ctlOn, next);
2056 } else {
2057 /* found it */
2058 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2059 inserted = 1;
2060 break;
2061 }
2062 }
2063 if (inserted == 0) {
2064 /*
2065 * control has the highest
2066 * TSN of the lot, so it
2067 * goes at the tail.
2068 */
2069 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2070 }
2071 }
2072 } else {
2073 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2074 if (*abort_flag) {
2075 return (0);
2076 }
2077 }
2078 }
2079 } else {
2080 /* Into the re-assembly queue */
2081 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2082 if (*abort_flag) {
2083 /*
2084 * the assoc is now gone and chk was put onto the
2085 * reasm queue, which has all been freed.
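* Neither chk nor the association may be touched again; just clear
* the caller's mbuf pointer and return.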
2086 */ 2087 *m = NULL; 2088 return (0); 2089 } 2090 } 2091 finish_express_del: 2092 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) { 2093 /* we have a new high score */ 2094 asoc->highest_tsn_inside_map = tsn; 2095 #ifdef SCTP_MAP_LOGGING 2096 sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 2097 #endif 2098 } 2099 if (tsn == (asoc->cumulative_tsn + 1)) { 2100 /* Update cum-ack */ 2101 asoc->cumulative_tsn = tsn; 2102 } 2103 if (last_chunk) { 2104 *m = NULL; 2105 } 2106 if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) { 2107 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); 2108 } else { 2109 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); 2110 } 2111 SCTP_STAT_INCR(sctps_recvdata); 2112 /* Set it present please */ 2113 #ifdef SCTP_STR_LOGGING 2114 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN); 2115 #endif 2116 #ifdef SCTP_MAP_LOGGING 2117 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2118 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); 2119 #endif 2120 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); 2121 return (1); 2122 } 2123 2124 int8_t sctp_map_lookup_tab[256] = { 2125 -1, 0, -1, 1, -1, 0, -1, 2, 2126 -1, 0, -1, 1, -1, 0, -1, 3, 2127 -1, 0, -1, 1, -1, 0, -1, 2, 2128 -1, 0, -1, 1, -1, 0, -1, 4, 2129 -1, 0, -1, 1, -1, 0, -1, 2, 2130 -1, 0, -1, 1, -1, 0, -1, 3, 2131 -1, 0, -1, 1, -1, 0, -1, 2, 2132 -1, 0, -1, 1, -1, 0, -1, 5, 2133 -1, 0, -1, 1, -1, 0, -1, 2, 2134 -1, 0, -1, 1, -1, 0, -1, 3, 2135 -1, 0, -1, 1, -1, 0, -1, 2, 2136 -1, 0, -1, 1, -1, 0, -1, 4, 2137 -1, 0, -1, 1, -1, 0, -1, 2, 2138 -1, 0, -1, 1, -1, 0, -1, 3, 2139 -1, 0, -1, 1, -1, 0, -1, 2, 2140 -1, 0, -1, 1, -1, 0, -1, 6, 2141 -1, 0, -1, 1, -1, 0, -1, 2, 2142 -1, 0, -1, 1, -1, 0, -1, 3, 2143 -1, 0, -1, 1, -1, 0, -1, 2, 2144 -1, 0, -1, 1, -1, 0, -1, 4, 2145 -1, 0, -1, 1, -1, 0, -1, 2, 2146 -1, 0, -1, 1, -1, 0, -1, 3, 2147 -1, 0, -1, 1, -1, 0, -1, 2, 2148 -1, 0, -1, 1, -1, 0, -1, 5, 2149 -1, 0, -1, 1, -1, 0, -1, 2, 2150 -1, 0, -1, 1, -1, 0, -1, 3, 2151 -1, 0, -1, 1, -1, 0, -1, 2, 2152 -1, 0, -1, 1, -1, 0, -1, 4, 2153 -1, 0, -1, 1, -1, 0, -1, 2, 2154 -1, 0, -1, 1, -1, 0, -1, 3, 2155 -1, 0, -1, 1, -1, 0, -1, 2, 2156 -1, 0, -1, 1, -1, 0, -1, 7, 2157 }; 2158 2159 2160 void 2161 sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag) 2162 { 2163 /* 2164 * Now we also need to check the mapping array in a couple of ways. 2165 * 1) Did we move the cum-ack point? 2166 */ 2167 struct sctp_association *asoc; 2168 int i, at; 2169 int all_ones; 2170 int slide_from, slide_end, lgap, distance; 2171 2172 #ifdef SCTP_MAP_LOGGING 2173 uint32_t old_cumack, old_base, old_highest; 2174 unsigned char aux_array[64]; 2175 2176 #endif 2177 struct sctp_stream_reset_list *liste; 2178 2179 asoc = &stcb->asoc; 2180 at = 0; 2181 2182 #ifdef SCTP_MAP_LOGGING 2183 old_cumack = asoc->cumulative_tsn; 2184 old_base = asoc->mapping_array_base_tsn; 2185 old_highest = asoc->highest_tsn_inside_map; 2186 if (asoc->mapping_array_size < 64) 2187 memcpy(aux_array, asoc->mapping_array, 2188 asoc->mapping_array_size); 2189 else 2190 memcpy(aux_array, asoc->mapping_array, 64); 2191 #endif 2192 2193 /* 2194 * We could probably improve this a small bit by calculating the 2195 * offset of the current cum-ack as the starting point. 
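* As written, the scan below starts at the base TSN, counts 8 TSNs
* for every all-ones byte, and uses sctp_map_lookup_tab to locate the
* first missing TSN inside the first byte that is not 0xff.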
2196 */ 2197 all_ones = 1; 2198 at = 0; 2199 for (i = 0; i < stcb->asoc.mapping_array_size; i++) { 2200 if (asoc->mapping_array[i] == 0xff) { 2201 at += 8; 2202 } else { 2203 /* there is a 0 bit */ 2204 all_ones = 0; 2205 at += sctp_map_lookup_tab[asoc->mapping_array[i]]; 2206 break; 2207 } 2208 } 2209 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + at; 2210 /* at is one off, since in the table a embedded -1 is present */ 2211 at++; 2212 2213 if (compare_with_wrap(asoc->cumulative_tsn, 2214 asoc->highest_tsn_inside_map, 2215 MAX_TSN)) { 2216 #ifdef INVARIANTS 2217 panic("huh, cumack greater than high-tsn in map"); 2218 #else 2219 printf("huh, cumack greater than high-tsn in map - should panic?\n"); 2220 asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2221 #endif 2222 } 2223 if (all_ones || 2224 (asoc->cumulative_tsn == asoc->highest_tsn_inside_map && at >= 8)) { 2225 /* The complete array was completed by a single FR */ 2226 /* higest becomes the cum-ack */ 2227 int clr; 2228 2229 asoc->cumulative_tsn = asoc->highest_tsn_inside_map; 2230 /* clear the array */ 2231 if (all_ones) 2232 clr = asoc->mapping_array_size; 2233 else { 2234 clr = (at >> 3) + 1; 2235 /* 2236 * this should be the allones case but just in case 2237 * :> 2238 */ 2239 if (clr > asoc->mapping_array_size) 2240 clr = asoc->mapping_array_size; 2241 } 2242 memset(asoc->mapping_array, 0, clr); 2243 /* base becomes one ahead of the cum-ack */ 2244 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; 2245 #ifdef SCTP_MAP_LOGGING 2246 sctp_log_map(old_base, old_cumack, old_highest, 2247 SCTP_MAP_PREPARE_SLIDE); 2248 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2249 asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED); 2250 #endif 2251 } else if (at >= 8) { 2252 /* we can slide the mapping array down */ 2253 /* Calculate the new byte postion we can move down */ 2254 slide_from = at >> 3; 2255 /* 2256 * now calculate the ceiling of the move using our highest 2257 * TSN value 2258 */ 2259 if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) { 2260 lgap = asoc->highest_tsn_inside_map - 2261 asoc->mapping_array_base_tsn; 2262 } else { 2263 lgap = (MAX_TSN - asoc->mapping_array_base_tsn) + 2264 asoc->highest_tsn_inside_map + 1; 2265 } 2266 slide_end = lgap >> 3; 2267 if (slide_end < slide_from) { 2268 panic("impossible slide"); 2269 } 2270 distance = (slide_end - slide_from) + 1; 2271 #ifdef SCTP_MAP_LOGGING 2272 sctp_log_map(old_base, old_cumack, old_highest, 2273 SCTP_MAP_PREPARE_SLIDE); 2274 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end, 2275 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM); 2276 #endif 2277 if (distance + slide_from > asoc->mapping_array_size || 2278 distance < 0) { 2279 /* 2280 * Here we do NOT slide forward the array so that 2281 * hopefully when more data comes in to fill it up 2282 * we will be able to slide it forward. 
Really I 2283 * don't think this should happen :-0 2284 */ 2285 2286 #ifdef SCTP_MAP_LOGGING 2287 sctp_log_map((uint32_t) distance, (uint32_t) slide_from, 2288 (uint32_t) asoc->mapping_array_size, 2289 SCTP_MAP_SLIDE_NONE); 2290 #endif 2291 } else { 2292 int ii; 2293 2294 for (ii = 0; ii < distance; ii++) { 2295 asoc->mapping_array[ii] = 2296 asoc->mapping_array[slide_from + ii]; 2297 } 2298 for (ii = distance; ii <= slide_end; ii++) { 2299 asoc->mapping_array[ii] = 0; 2300 } 2301 asoc->mapping_array_base_tsn += (slide_from << 3); 2302 #ifdef SCTP_MAP_LOGGING 2303 sctp_log_map(asoc->mapping_array_base_tsn, 2304 asoc->cumulative_tsn, asoc->highest_tsn_inside_map, 2305 SCTP_MAP_SLIDE_RESULT); 2306 #endif 2307 } 2308 } 2309 /* check the special flag for stream resets */ 2310 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2311 ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) || 2312 (asoc->cumulative_tsn == liste->tsn)) 2313 ) { 2314 /* 2315 * we have finished working through the backlogged TSN's now 2316 * time to reset streams. 1: call reset function. 2: free 2317 * pending_reply space 3: distribute any chunks in 2318 * pending_reply_queue. 2319 */ 2320 struct sctp_queued_to_read *ctl; 2321 2322 sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams); 2323 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); 2324 SCTP_FREE(liste); 2325 liste = TAILQ_FIRST(&asoc->resetHead); 2326 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2327 if (ctl && (liste == NULL)) { 2328 /* All can be removed */ 2329 while (ctl) { 2330 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 2331 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); 2332 if (*abort_flag) { 2333 return; 2334 } 2335 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2336 } 2337 } else if (ctl) { 2338 /* more than one in queue */ 2339 while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) { 2340 /* 2341 * if ctl->sinfo_tsn is <= liste->tsn we can 2342 * process it which is the NOT of 2343 * ctl->sinfo_tsn > liste->tsn 2344 */ 2345 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 2346 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); 2347 if (*abort_flag) { 2348 return; 2349 } 2350 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2351 } 2352 } 2353 /* 2354 * Now service re-assembly to pick up anything that has been 2355 * held on reassembly queue? 2356 */ 2357 sctp_deliver_reasm_check(stcb, asoc); 2358 } 2359 /* 2360 * Now we need to see if we need to queue a sack or just start the 2361 * timer (if allowed). 2362 */ 2363 if (ok_to_sack) { 2364 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2365 /* 2366 * Ok special case, in SHUTDOWN-SENT case. here we 2367 * maker sure SACK timer is off and instead send a 2368 * SHUTDOWN and a SACK 2369 */ 2370 if (callout_pending(&stcb->asoc.dack_timer.timer)) { 2371 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 2372 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18); 2373 } 2374 sctp_send_shutdown(stcb, stcb->asoc.primary_destination); 2375 sctp_send_sack(stcb); 2376 } else { 2377 int is_a_gap; 2378 2379 /* is there a gap now ? 
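* (i.e. is highest_tsn_inside_map still beyond the cum-ack?)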
*/ 2380 is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map, 2381 stcb->asoc.cumulative_tsn, MAX_TSN); 2382 2383 /* 2384 * CMT DAC algorithm: increase number of packets 2385 * received since last ack 2386 */ 2387 stcb->asoc.cmt_dac_pkts_rcvd++; 2388 2389 if ((stcb->asoc.first_ack_sent == 0) || /* First time we send a 2390 * sack */ 2391 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no 2392 * longer is one */ 2393 (stcb->asoc.numduptsns) || /* we have dup's */ 2394 (is_a_gap) || /* is still a gap */ 2395 (stcb->asoc.delayed_ack == 0) || 2396 (callout_pending(&stcb->asoc.dack_timer.timer)) /* timer was up . second 2397 * packet */ 2398 ) { 2399 2400 if ((sctp_cmt_on_off) && (sctp_cmt_use_dac) && 2401 (stcb->asoc.first_ack_sent == 1) && 2402 (stcb->asoc.numduptsns == 0) && 2403 (stcb->asoc.delayed_ack) && 2404 (!callout_pending(&stcb->asoc.dack_timer.timer))) { 2405 2406 /* 2407 * CMT DAC algorithm: With CMT, 2408 * delay acks even in the face of 2409 * 2410 * reordering. Therefore, if acks that 2411 * do not have to be sent because of 2412 * the above reasons, will be 2413 * delayed. That is, acks that would 2414 * have been sent due to gap reports 2415 * will be delayed with DAC. Start 2416 * the delayed ack timer. 2417 */ 2418 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2419 stcb->sctp_ep, stcb, NULL); 2420 } else { 2421 /* 2422 * Ok we must build a SACK since the 2423 * timer is pending, we got our 2424 * first packet OR there are gaps or 2425 * duplicates. 2426 */ 2427 stcb->asoc.first_ack_sent = 1; 2428 2429 sctp_send_sack(stcb); 2430 /* The sending will stop the timer */ 2431 } 2432 } else { 2433 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2434 stcb->sctp_ep, stcb, NULL); 2435 } 2436 } 2437 } 2438 } 2439 2440 void 2441 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc) 2442 { 2443 struct sctp_tmit_chunk *chk; 2444 uint32_t tsize; 2445 uint16_t nxt_todel; 2446 2447 if (asoc->fragmented_delivery_inprogress) { 2448 sctp_service_reassembly(stcb, asoc); 2449 } 2450 /* Can we proceed further, i.e. the PD-API is complete */ 2451 if (asoc->fragmented_delivery_inprogress) { 2452 /* no */ 2453 return; 2454 } 2455 /* 2456 * Now is there some other chunk I can deliver from the reassembly 2457 * queue. 2458 */ 2459 chk = TAILQ_FIRST(&asoc->reasmqueue); 2460 if (chk == NULL) { 2461 asoc->size_on_reasm_queue = 0; 2462 asoc->cnt_on_reasm_queue = 0; 2463 return; 2464 } 2465 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1; 2466 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) && 2467 ((nxt_todel == chk->rec.data.stream_seq) || 2468 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) { 2469 /* 2470 * Yep the first one is here. We setup to start reception, 2471 * by backing down the TSN just in case we can't deliver. 2472 */ 2473 2474 /* 2475 * Before we start though either all of the message should 2476 * be here or 1/4 the socket buffer max or nothing on the 2477 * delivery queue and something can be delivered. 
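* That is the policy the sctp_is_all_msg_on_reasm() /
* partial_delivery_point test below implements.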
2478 */ 2479 if ((sctp_is_all_msg_on_reasm(asoc, &tsize) || 2480 (tsize > stcb->sctp_ep->partial_delivery_point))) { 2481 asoc->fragmented_delivery_inprogress = 1; 2482 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1; 2483 asoc->str_of_pdapi = chk->rec.data.stream_number; 2484 asoc->ssn_of_pdapi = chk->rec.data.stream_seq; 2485 asoc->pdapi_ppid = chk->rec.data.payloadtype; 2486 asoc->fragment_flags = chk->rec.data.rcv_flags; 2487 sctp_service_reassembly(stcb, asoc); 2488 } 2489 } 2490 } 2491 2492 extern int sctp_strict_data_order; 2493 2494 int 2495 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, 2496 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2497 struct sctp_nets *net, uint32_t * high_tsn) 2498 { 2499 struct sctp_data_chunk *ch, chunk_buf; 2500 struct sctp_association *asoc; 2501 int num_chunks = 0; /* number of control chunks processed */ 2502 int stop_proc = 0; 2503 int chk_length, break_flag, last_chunk; 2504 int abort_flag = 0, was_a_gap = 0; 2505 struct mbuf *m; 2506 2507 /* set the rwnd */ 2508 sctp_set_rwnd(stcb, &stcb->asoc); 2509 2510 m = *mm; 2511 SCTP_TCB_LOCK_ASSERT(stcb); 2512 asoc = &stcb->asoc; 2513 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 2514 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 2515 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 2516 /* 2517 * wait a minute, this guy is gone, there is no longer a 2518 * receiver. Send peer an ABORT! 2519 */ 2520 struct mbuf *op_err; 2521 2522 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 2523 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err); 2524 return (2); 2525 } 2526 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map, 2527 stcb->asoc.cumulative_tsn, MAX_TSN)) { 2528 /* there was a gap before this data was processed */ 2529 was_a_gap = 1; 2530 } 2531 /* 2532 * setup where we got the last DATA packet from for any SACK that 2533 * may need to go out. Don't bump the net. This is done ONLY when a 2534 * chunk is assigned. 2535 */ 2536 asoc->last_data_chunk_from = net; 2537 2538 /* 2539 * Now before we proceed we must figure out if this is a wasted 2540 * cluster... i.e. it is a small packet sent in and yet the driver 2541 * underneath allocated a full cluster for it. If so we must copy it 2542 * to a smaller mbuf and free up the cluster mbuf. This will help 2543 * with cluster starvation. 2544 */ 2545 if (m->m_len < (long)MHLEN && m->m_next == NULL) { 2546 /* we only handle mbufs that are singletons.. not chains */ 2547 m = sctp_get_mbuf_for_msg(m->m_len, 1, M_DONTWAIT, 1, MT_DATA); 2548 if (m) { 2549 /* ok lets see if we can copy the data up */ 2550 caddr_t *from, *to; 2551 2552 if ((*mm)->m_flags & M_PKTHDR) { 2553 /* got to copy the header first */ 2554 M_MOVE_PKTHDR(m, (*mm)); 2555 } 2556 /* get the pointers and copy */ 2557 to = mtod(m, caddr_t *); 2558 from = mtod((*mm), caddr_t *); 2559 memcpy(to, from, (*mm)->m_len); 2560 /* copy the length and free up the old */ 2561 m->m_len = (*mm)->m_len; 2562 sctp_m_freem(*mm); 2563 /* sucess, back copy */ 2564 *mm = m; 2565 } else { 2566 /* We are in trouble in the mbuf world .. yikes */ 2567 m = *mm; 2568 } 2569 } 2570 /* get pointer to the first chunk header */ 2571 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset, 2572 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf); 2573 if (ch == NULL) { 2574 return (1); 2575 } 2576 /* 2577 * process all DATA chunks... 
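* Each pass below validates one chunk header, hands DATA chunks to
* sctp_process_a_data_chunk(), and skips or aborts on anything else
* found in the data region.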
2578 */ 2579 *high_tsn = asoc->cumulative_tsn; 2580 break_flag = 0; 2581 while (stop_proc == 0) { 2582 /* validate chunk length */ 2583 chk_length = ntohs(ch->ch.chunk_length); 2584 if (length - *offset < chk_length) { 2585 /* all done, mutulated chunk */ 2586 stop_proc = 1; 2587 break; 2588 } 2589 if (ch->ch.chunk_type == SCTP_DATA) { 2590 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) { 2591 /* 2592 * Need to send an abort since we had a 2593 * invalid data chunk. 2594 */ 2595 struct mbuf *op_err; 2596 2597 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 2598 0, M_DONTWAIT, 1, MT_DATA); 2599 2600 if (op_err) { 2601 struct sctp_paramhdr *ph; 2602 uint32_t *ippp; 2603 2604 op_err->m_len = sizeof(struct sctp_paramhdr) + 2605 (2 * sizeof(uint32_t)); 2606 ph = mtod(op_err, struct sctp_paramhdr *); 2607 ph->param_type = 2608 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 2609 ph->param_length = htons(op_err->m_len); 2610 ippp = (uint32_t *) (ph + 1); 2611 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19); 2612 ippp++; 2613 *ippp = asoc->cumulative_tsn; 2614 2615 } 2616 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19; 2617 sctp_abort_association(inp, stcb, m, iphlen, sh, 2618 op_err); 2619 return (2); 2620 } 2621 #ifdef SCTP_AUDITING_ENABLED 2622 sctp_audit_log(0xB1, 0); 2623 #endif 2624 if (SCTP_SIZE32(chk_length) == (length - *offset)) { 2625 last_chunk = 1; 2626 } else { 2627 last_chunk = 0; 2628 } 2629 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch, 2630 chk_length, net, high_tsn, &abort_flag, &break_flag, 2631 last_chunk)) { 2632 num_chunks++; 2633 } 2634 if (abort_flag) 2635 return (2); 2636 2637 if (break_flag) { 2638 /* 2639 * Set because of out of rwnd space and no 2640 * drop rep space left. 2641 */ 2642 stop_proc = 1; 2643 break; 2644 } 2645 } else { 2646 /* not a data chunk in the data region */ 2647 switch (ch->ch.chunk_type) { 2648 case SCTP_INITIATION: 2649 case SCTP_INITIATION_ACK: 2650 case SCTP_SELECTIVE_ACK: 2651 case SCTP_HEARTBEAT_REQUEST: 2652 case SCTP_HEARTBEAT_ACK: 2653 case SCTP_ABORT_ASSOCIATION: 2654 case SCTP_SHUTDOWN: 2655 case SCTP_SHUTDOWN_ACK: 2656 case SCTP_OPERATION_ERROR: 2657 case SCTP_COOKIE_ECHO: 2658 case SCTP_COOKIE_ACK: 2659 case SCTP_ECN_ECHO: 2660 case SCTP_ECN_CWR: 2661 case SCTP_SHUTDOWN_COMPLETE: 2662 case SCTP_AUTHENTICATION: 2663 case SCTP_ASCONF_ACK: 2664 case SCTP_PACKET_DROPPED: 2665 case SCTP_STREAM_RESET: 2666 case SCTP_FORWARD_CUM_TSN: 2667 case SCTP_ASCONF: 2668 /* 2669 * Now, what do we do with KNOWN chunks that 2670 * are NOT in the right place? 2671 * 2672 * For now, I do nothing but ignore them. We 2673 * may later want to add sysctl stuff to 2674 * switch out and do either an ABORT() or 2675 * possibly process them. 2676 */ 2677 if (sctp_strict_data_order) { 2678 struct mbuf *op_err; 2679 2680 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION); 2681 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err); 2682 return (2); 2683 } 2684 break; 2685 default: 2686 /* unknown chunk type, use bit rules */ 2687 if (ch->ch.chunk_type & 0x40) { 2688 /* Add a error report to the queue */ 2689 struct mbuf *mm; 2690 struct sctp_paramhdr *phd; 2691 2692 mm = sctp_get_mbuf_for_msg(sizeof(*phd), 1, M_DONTWAIT, 1, MT_DATA); 2693 if (mm) { 2694 phd = mtod(mm, struct sctp_paramhdr *); 2695 /* 2696 * We cheat and use param 2697 * type since we did not 2698 * bother to define a error 2699 * cause struct. 
They are 2700 * the same basic format 2701 * with different names. 2702 */ 2703 phd->param_type = 2704 htons(SCTP_CAUSE_UNRECOG_CHUNK); 2705 phd->param_length = 2706 htons(chk_length + sizeof(*phd)); 2707 mm->m_len = sizeof(*phd); 2708 mm->m_next = sctp_m_copym(m, *offset, 2709 SCTP_SIZE32(chk_length), 2710 M_DONTWAIT); 2711 if (mm->m_next) { 2712 mm->m_pkthdr.len = 2713 SCTP_SIZE32(chk_length) + 2714 sizeof(*phd); 2715 sctp_queue_op_err(stcb, mm); 2716 } else { 2717 sctp_m_freem(mm); 2718 } 2719 } 2720 } 2721 if ((ch->ch.chunk_type & 0x80) == 0) { 2722 /* discard the rest of this packet */ 2723 stop_proc = 1; 2724 } /* else skip this bad chunk and 2725 * continue... */ 2726 break; 2727 }; /* switch of chunk type */ 2728 } 2729 *offset += SCTP_SIZE32(chk_length); 2730 if ((*offset >= length) || stop_proc) { 2731 /* no more data left in the mbuf chain */ 2732 stop_proc = 1; 2733 continue; 2734 } 2735 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset, 2736 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf); 2737 if (ch == NULL) { 2738 *offset = length; 2739 stop_proc = 1; 2740 break; 2741 2742 } 2743 } /* while */ 2744 if (break_flag) { 2745 /* 2746 * we need to report rwnd overrun drops. 2747 */ 2748 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0); 2749 } 2750 if (num_chunks) { 2751 /* 2752 * Did we get data, if so update the time for auto-close and 2753 * give peer credit for being alive. 2754 */ 2755 SCTP_STAT_INCR(sctps_recvpktwithdata); 2756 stcb->asoc.overall_error_count = 0; 2757 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd); 2758 } 2759 /* now service all of the reassm queue if needed */ 2760 if (!(TAILQ_EMPTY(&asoc->reasmqueue))) 2761 sctp_service_queues(stcb, asoc); 2762 2763 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2764 /* 2765 * Assure that we ack right away by making sure that a d-ack 2766 * timer is running. So the sack_check will send a sack. 
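* (Starting the timer makes callout_pending() true, which is one of
* the triggers sctp_sack_check() uses to send the SACK at once.)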
2767 */ 2768 sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, 2769 net); 2770 } 2771 /* Start a sack timer or QUEUE a SACK for sending */ 2772 if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) && 2773 (stcb->asoc.first_ack_sent)) { 2774 /* Everything is in order */ 2775 if (stcb->asoc.mapping_array[0] == 0xff) { 2776 /* need to do the slide */ 2777 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag); 2778 } else { 2779 if (callout_pending(&stcb->asoc.dack_timer.timer)) { 2780 stcb->asoc.first_ack_sent = 1; 2781 callout_stop(&stcb->asoc.dack_timer.timer); 2782 sctp_send_sack(stcb); 2783 } else { 2784 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2785 stcb->sctp_ep, stcb, NULL); 2786 } 2787 } 2788 } else { 2789 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag); 2790 } 2791 if (abort_flag) 2792 return (2); 2793 2794 return (0); 2795 } 2796 2797 static void 2798 sctp_handle_segments(struct sctp_tcb *stcb, struct sctp_association *asoc, 2799 struct sctp_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked, 2800 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack, int num_seg, int *ecn_seg_sums) 2801 { 2802 /************************************************/ 2803 /* process fragments and update sendqueue */ 2804 /************************************************/ 2805 struct sctp_sack *sack; 2806 struct sctp_gap_ack_block *frag; 2807 struct sctp_tmit_chunk *tp1; 2808 int i; 2809 unsigned int j; 2810 2811 #ifdef SCTP_FR_LOGGING 2812 int num_frs = 0; 2813 2814 #endif 2815 uint16_t frag_strt, frag_end, primary_flag_set; 2816 u_long last_frag_high; 2817 2818 /* 2819 * @@@ JRI : TODO: This flag is not used anywhere .. remove? 2820 */ 2821 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) { 2822 primary_flag_set = 1; 2823 } else { 2824 primary_flag_set = 0; 2825 } 2826 2827 sack = &ch->sack; 2828 frag = (struct sctp_gap_ack_block *)((caddr_t)sack + 2829 sizeof(struct sctp_sack)); 2830 tp1 = NULL; 2831 last_frag_high = 0; 2832 for (i = 0; i < num_seg; i++) { 2833 frag_strt = ntohs(frag->start); 2834 frag_end = ntohs(frag->end); 2835 /* some sanity checks on the fargment offsets */ 2836 if (frag_strt > frag_end) { 2837 /* this one is malformed, skip */ 2838 frag++; 2839 continue; 2840 } 2841 if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked, 2842 MAX_TSN)) 2843 *biggest_tsn_acked = frag_end + last_tsn; 2844 2845 /* mark acked dgs and find out the highestTSN being acked */ 2846 if (tp1 == NULL) { 2847 tp1 = TAILQ_FIRST(&asoc->sent_queue); 2848 2849 /* save the locations of the last frags */ 2850 last_frag_high = frag_end + last_tsn; 2851 } else { 2852 /* 2853 * now lets see if we need to reset the queue due to 2854 * a out-of-order SACK fragment 2855 */ 2856 if (compare_with_wrap(frag_strt + last_tsn, 2857 last_frag_high, MAX_TSN)) { 2858 /* 2859 * if the new frag starts after the last TSN 2860 * frag covered, we are ok and this one is 2861 * beyond the last one 2862 */ 2863 ; 2864 } else { 2865 /* 2866 * ok, they have reset us, so we need to 2867 * reset the queue this will cause extra 2868 * hunting but hey, they chose the 2869 * performance hit when they failed to order 2870 * there gaps.. 
2871 */ 2872 tp1 = TAILQ_FIRST(&asoc->sent_queue); 2873 } 2874 last_frag_high = frag_end + last_tsn; 2875 } 2876 for (j = frag_strt + last_tsn; j <= frag_end + last_tsn; j++) { 2877 while (tp1) { 2878 #ifdef SCTP_FR_LOGGING 2879 if (tp1->rec.data.doing_fast_retransmit) 2880 num_frs++; 2881 #endif 2882 2883 /* 2884 * CMT: CUCv2 algorithm. For each TSN being 2885 * processed from the sent queue, track the 2886 * next expected pseudo-cumack, or 2887 * rtx_pseudo_cumack, if required. Separate 2888 * cumack trackers for first transmissions, 2889 * and retransmissions. 2890 */ 2891 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) && 2892 (tp1->snd_count == 1)) { 2893 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq; 2894 tp1->whoTo->find_pseudo_cumack = 0; 2895 } 2896 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) && 2897 (tp1->snd_count > 1)) { 2898 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq; 2899 tp1->whoTo->find_rtx_pseudo_cumack = 0; 2900 } 2901 if (tp1->rec.data.TSN_seq == j) { 2902 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 2903 /* 2904 * must be held until 2905 * cum-ack passes 2906 */ 2907 /* 2908 * ECN Nonce: Add the nonce 2909 * value to the sender's 2910 * nonce sum 2911 */ 2912 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 2913 /* 2914 * If it is less 2915 * than ACKED, it is 2916 * now no-longer in 2917 * flight. Higher 2918 * values may 2919 * already be set 2920 * via previous Gap 2921 * Ack Blocks... 2922 * i.e. ACKED or 2923 * MARKED. 2924 */ 2925 if (compare_with_wrap(tp1->rec.data.TSN_seq, 2926 *biggest_newly_acked_tsn, MAX_TSN)) { 2927 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq; 2928 } 2929 /* 2930 * CMT: SFR algo 2931 * (and HTNA) - set 2932 * saw_newack to 1 2933 * for dest being 2934 * newly acked. 2935 * update 2936 * this_sack_highest_ 2937 * n ewack if 2938 * appropriate. 2939 */ 2940 if (tp1->rec.data.chunk_was_revoked == 0) 2941 tp1->whoTo->saw_newack = 1; 2942 2943 if (compare_with_wrap(tp1->rec.data.TSN_seq, 2944 tp1->whoTo->this_sack_highest_newack, 2945 MAX_TSN)) { 2946 tp1->whoTo->this_sack_highest_newack = 2947 tp1->rec.data.TSN_seq; 2948 } 2949 /* 2950 * CMT DAC algo: 2951 * also update 2952 * this_sack_lowest_n 2953 * e wack 2954 */ 2955 if (*this_sack_lowest_newack == 0) { 2956 #ifdef SCTP_SACK_LOGGING 2957 sctp_log_sack(*this_sack_lowest_newack, 2958 last_tsn, 2959 tp1->rec.data.TSN_seq, 2960 0, 2961 0, 2962 SCTP_LOG_TSN_ACKED); 2963 #endif 2964 *this_sack_lowest_newack = tp1->rec.data.TSN_seq; 2965 } 2966 /* 2967 * CMT: CUCv2 2968 * algorithm. If 2969 * (rtx-)pseudo-cumac 2970 * k for corresp 2971 * dest is being 2972 * acked, then we 2973 * have a new 2974 * (rtx-)pseudo-cumac 2975 * k . Set 2976 * new_(rtx_)pseudo_c 2977 * u mack to TRUE so 2978 * that the cwnd for 2979 * this dest can be 2980 * updated. Also 2981 * trigger search 2982 * for the next 2983 * expected 2984 * (rtx-)pseudo-cumac 2985 * k . Separate 2986 * pseudo_cumack 2987 * trackers for 2988 * first 2989 * transmissions and 2990 * retransmissions. 
2991 */ 2992 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) { 2993 if (tp1->rec.data.chunk_was_revoked == 0) { 2994 tp1->whoTo->new_pseudo_cumack = 1; 2995 } 2996 tp1->whoTo->find_pseudo_cumack = 1; 2997 } 2998 #ifdef SCTP_CWND_LOGGING 2999 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 3000 #endif 3001 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) { 3002 if (tp1->rec.data.chunk_was_revoked == 0) { 3003 tp1->whoTo->new_pseudo_cumack = 1; 3004 } 3005 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3006 } 3007 #ifdef SCTP_SACK_LOGGING 3008 sctp_log_sack(*biggest_newly_acked_tsn, 3009 last_tsn, 3010 tp1->rec.data.TSN_seq, 3011 frag_strt, 3012 frag_end, 3013 SCTP_LOG_TSN_ACKED); 3014 #endif 3015 #ifdef SCTP_FLIGHT_LOGGING 3016 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN, 3017 tp1->whoTo->flight_size, 3018 tp1->book_size, 3019 (uintptr_t) stcb, 3020 tp1->rec.data.TSN_seq); 3021 #endif 3022 if (tp1->whoTo->flight_size >= tp1->book_size) 3023 tp1->whoTo->flight_size -= tp1->book_size; 3024 else 3025 tp1->whoTo->flight_size = 0; 3026 if (asoc->total_flight >= tp1->book_size) { 3027 asoc->total_flight -= tp1->book_size; 3028 if (asoc->total_flight_count > 0) 3029 asoc->total_flight_count--; 3030 } else { 3031 asoc->total_flight = 0; 3032 asoc->total_flight_count = 0; 3033 } 3034 3035 tp1->whoTo->net_ack += tp1->send_size; 3036 3037 if (tp1->snd_count < 2) { 3038 /* 3039 * True 3040 * non-retran 3041 * smited 3042 * chunk */ 3043 tp1->whoTo->net_ack2 += tp1->send_size; 3044 3045 /* 3046 * update RTO 3047 * too ? */ 3048 if (tp1->do_rtt) { 3049 tp1->whoTo->RTO = 3050 sctp_calculate_rto(stcb, 3051 asoc, 3052 tp1->whoTo, 3053 &tp1->sent_rcv_time); 3054 tp1->whoTo->rto_pending = 0; 3055 tp1->do_rtt = 0; 3056 } 3057 } 3058 } 3059 if (tp1->sent <= SCTP_DATAGRAM_RESEND && 3060 tp1->sent != SCTP_DATAGRAM_UNSENT && 3061 compare_with_wrap(tp1->rec.data.TSN_seq, 3062 asoc->this_sack_highest_gap, 3063 MAX_TSN)) { 3064 asoc->this_sack_highest_gap = 3065 tp1->rec.data.TSN_seq; 3066 } 3067 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3068 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 3069 #ifdef SCTP_AUDITING_ENABLED 3070 sctp_audit_log(0xB2, 3071 (asoc->sent_queue_retran_cnt & 0x000000ff)); 3072 #endif 3073 3074 } 3075 (*ecn_seg_sums) += tp1->rec.data.ect_nonce; 3076 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM; 3077 3078 tp1->sent = SCTP_DATAGRAM_MARKED; 3079 } 3080 break; 3081 } /* if (tp1->TSN_seq == j) */ 3082 if (compare_with_wrap(tp1->rec.data.TSN_seq, j, 3083 MAX_TSN)) 3084 break; 3085 3086 tp1 = TAILQ_NEXT(tp1, sctp_next); 3087 } /* end while (tp1) */ 3088 } /* end for (j = fragStart */ 3089 frag++; /* next one */ 3090 } 3091 #ifdef SCTP_FR_LOGGING 3092 /* 3093 * if (num_frs) sctp_log_fr(*biggest_tsn_acked, 3094 * *biggest_newly_acked_tsn, last_tsn, SCTP_FR_LOG_BIGGEST_TSNS); 3095 */ 3096 #endif 3097 } 3098 3099 static void 3100 sctp_check_for_revoked(struct sctp_association *asoc, uint32_t cumack, 3101 u_long biggest_tsn_acked) 3102 { 3103 struct sctp_tmit_chunk *tp1; 3104 int tot_revoked = 0; 3105 3106 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3107 while (tp1) { 3108 if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack, 3109 MAX_TSN)) { 3110 /* 3111 * ok this guy is either ACK or MARKED. If it is 3112 * ACKED it has been previously acked but not this 3113 * time i.e. revoked. If it is MARKED it was ACK'ed 3114 * again. 
3115 */ 3116 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 3117 /* it has been revoked */ 3118 tp1->sent = SCTP_DATAGRAM_SENT; 3119 tp1->rec.data.chunk_was_revoked = 1; 3120 /* 3121 * We must add this stuff back in to assure 3122 * timers and such get started. 3123 */ 3124 tp1->whoTo->flight_size += tp1->book_size; 3125 asoc->total_flight_count++; 3126 asoc->total_flight += tp1->book_size; 3127 tot_revoked++; 3128 #ifdef SCTP_SACK_LOGGING 3129 sctp_log_sack(asoc->last_acked_seq, 3130 cumack, 3131 tp1->rec.data.TSN_seq, 3132 0, 3133 0, 3134 SCTP_LOG_TSN_REVOKED); 3135 #endif 3136 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) { 3137 /* it has been re-acked in this SACK */ 3138 tp1->sent = SCTP_DATAGRAM_ACKED; 3139 } 3140 } 3141 if (tp1->sent == SCTP_DATAGRAM_UNSENT) 3142 break; 3143 tp1 = TAILQ_NEXT(tp1, sctp_next); 3144 } 3145 if (tot_revoked > 0) { 3146 /* 3147 * Setup the ecn nonce re-sync point. We do this since once 3148 * data is revoked we begin to retransmit things, which do 3149 * NOT have the ECN bits set. This means we are now out of 3150 * sync and must wait until we get back in sync with the 3151 * peer to check ECN bits. 3152 */ 3153 tp1 = TAILQ_FIRST(&asoc->send_queue); 3154 if (tp1 == NULL) { 3155 asoc->nonce_resync_tsn = asoc->sending_seq; 3156 } else { 3157 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq; 3158 } 3159 asoc->nonce_wait_for_ecne = 0; 3160 asoc->nonce_sum_check = 0; 3161 } 3162 } 3163 3164 extern int sctp_peer_chunk_oh; 3165 3166 static void 3167 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, 3168 u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved) 3169 { 3170 struct sctp_tmit_chunk *tp1; 3171 int strike_flag = 0; 3172 struct timeval now; 3173 int tot_retrans = 0; 3174 uint32_t sending_seq; 3175 struct sctp_nets *net; 3176 int num_dests_sacked = 0; 3177 3178 /* 3179 * select the sending_seq, this is either the next thing ready to be 3180 * sent but not transmitted, OR, the next seq we assign. 3181 */ 3182 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 3183 if (tp1 == NULL) { 3184 sending_seq = asoc->sending_seq; 3185 } else { 3186 sending_seq = tp1->rec.data.TSN_seq; 3187 } 3188 3189 /* CMT DAC algo: finding out if SACK is a mixed SACK */ 3190 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3191 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3192 if (net->saw_newack) 3193 num_dests_sacked++; 3194 } 3195 } 3196 if (stcb->asoc.peer_supports_prsctp) { 3197 SCTP_GETTIME_TIMEVAL(&now); 3198 } 3199 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3200 while (tp1) { 3201 strike_flag = 0; 3202 if (tp1->no_fr_allowed) { 3203 /* this one had a timeout or something */ 3204 tp1 = TAILQ_NEXT(tp1, sctp_next); 3205 continue; 3206 } 3207 #ifdef SCTP_FR_LOGGING 3208 if (tp1->sent < SCTP_DATAGRAM_RESEND) 3209 sctp_log_fr(biggest_tsn_newly_acked, 3210 tp1->rec.data.TSN_seq, 3211 tp1->sent, 3212 SCTP_FR_LOG_CHECK_STRIKE); 3213 #endif 3214 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked, 3215 MAX_TSN) || 3216 tp1->sent == SCTP_DATAGRAM_UNSENT) { 3217 /* done */ 3218 break; 3219 } 3220 if (stcb->asoc.peer_supports_prsctp) { 3221 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3222 /* Is it expired? 
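* (PR-SCTP timed reliability: once timetodrop has passed, the chunk
* is abandoned via sctp_release_pr_sctp_chunk() below.)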
*/
3223 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3224 /* Yes so drop it */
3225 if (tp1->data != NULL) {
3226 sctp_release_pr_sctp_chunk(stcb, tp1,
3227 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3228 &asoc->sent_queue);
3229 }
3230 tp1 = TAILQ_NEXT(tp1, sctp_next);
3231 continue;
3232 }
3233 }
3234 if ((PR_SCTP_RTX_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3235 /* Has it been retransmitted tv_sec times? */
3236 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3237 /* Yes, so drop it */
3238 if (tp1->data != NULL) {
3239 sctp_release_pr_sctp_chunk(stcb, tp1,
3240 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3241 &asoc->sent_queue);
3242 }
3243 tp1 = TAILQ_NEXT(tp1, sctp_next);
3244 continue;
3245 }
3246 }
3247 }
3248 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3249 asoc->this_sack_highest_gap, MAX_TSN)) {
3250 /* we are beyond the tsn in the sack */
3251 break;
3252 }
3253 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3254 /* either a RESEND, ACKED, or MARKED */
3255 /* skip */
3256 tp1 = TAILQ_NEXT(tp1, sctp_next);
3257 continue;
3258 }
3259 /*
3260 * CMT : SFR algo (covers part of DAC and HTNA as well)
3261 */
3262 if (tp1->whoTo->saw_newack == 0) {
3263 /*
3264 * No new acks were received for data sent to this
3265 * dest. Therefore, according to the SFR algo for
3266 * CMT, no data sent to this dest can be marked for
3267 * FR using this SACK. (iyengar@cis.udel.edu,
3268 * 2005/05/12)
3269 */
3270 tp1 = TAILQ_NEXT(tp1, sctp_next);
3271 continue;
3272 } else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3273 tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
3274 /*
3275 * CMT: New acks were received for data sent to
3276 * this dest. But no new acks were seen for data
3277 * sent after tp1. Therefore, according to the SFR
3278 * algo for CMT, tp1 cannot be marked for FR using
3279 * this SACK. This step covers part of the DAC algo
3280 * and the HTNA algo as well.
3281 */
3282 tp1 = TAILQ_NEXT(tp1, sctp_next);
3283 continue;
3284 }
3285 /*
3286 * Here we check to see if we have already done a FR
3287 * and if so we see if the biggest TSN we saw in the sack is
3288 * smaller than the recovery point. If so we don't strike
3289 * the tsn... otherwise we CAN strike the TSN.
3290 */
3291 /*
3292 * @@@ JRI: Check for CMT
3293 */
3294 if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) {
3295 /*
3296 * Strike the TSN if in fast-recovery and cum-ack
3297 * moved.
3298 */
3299 #ifdef SCTP_FR_LOGGING
3300 sctp_log_fr(biggest_tsn_newly_acked,
3301 tp1->rec.data.TSN_seq,
3302 tp1->sent,
3303 SCTP_FR_LOG_STRIKE_CHUNK);
3304 #endif
3305 tp1->sent++;
3306 if (sctp_cmt_on_off && sctp_cmt_use_dac) {
3307 /*
3308 * CMT DAC algorithm: If SACK flag is set to
3309 * 0, then lowest_newack test will not pass
3310 * because it would have been set to the
3311 * cumack earlier. If not already to be
3312 * rtx'd, If not a mixed sack and if tp1 is
3313 * not between two sacked TSNs, then mark by
3314 * one more.
3315 */
3316 if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3317 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3318 #ifdef SCTP_FR_LOGGING
3319 sctp_log_fr(16 + num_dests_sacked,
3320 tp1->rec.data.TSN_seq,
3321 tp1->sent,
3322 SCTP_FR_LOG_STRIKE_CHUNK);
3323 #endif
3324 tp1->sent++;
3325 }
3326 }
3327 } else if (tp1->rec.data.doing_fast_retransmit) {
3328 /*
3329 * For those that have done a FR we must take
3330 * special consideration if we strike.
I.e the 3331 * biggest_newly_acked must be higher than the 3332 * sending_seq at the time we did the FR. 3333 */ 3334 #ifdef SCTP_FR_TO_ALTERNATE 3335 /* 3336 * If FR's go to new networks, then we must only do 3337 * this for singly homed asoc's. However if the FR's 3338 * go to the same network (Armando's work) then its 3339 * ok to FR multiple times. 3340 */ 3341 if (asoc->numnets < 2) 3342 #else 3343 if (1) 3344 #endif 3345 { 3346 if ((compare_with_wrap(biggest_tsn_newly_acked, 3347 tp1->rec.data.fast_retran_tsn, MAX_TSN)) || 3348 (biggest_tsn_newly_acked == 3349 tp1->rec.data.fast_retran_tsn)) { 3350 /* 3351 * Strike the TSN, since this ack is 3352 * beyond where things were when we 3353 * did a FR. 3354 */ 3355 #ifdef SCTP_FR_LOGGING 3356 sctp_log_fr(biggest_tsn_newly_acked, 3357 tp1->rec.data.TSN_seq, 3358 tp1->sent, 3359 SCTP_FR_LOG_STRIKE_CHUNK); 3360 #endif 3361 tp1->sent++; 3362 strike_flag = 1; 3363 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3364 /* 3365 * CMT DAC algorithm: If 3366 * SACK flag is set to 0, 3367 * then lowest_newack test 3368 * will not pass because it 3369 * would have been set to 3370 * the cumack earlier. If 3371 * not already to be rtx'd, 3372 * If not a mixed sack and 3373 * if tp1 is not between two 3374 * sacked TSNs, then mark by 3375 * one more. 3376 */ 3377 if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3378 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) { 3379 #ifdef SCTP_FR_LOGGING 3380 sctp_log_fr(32 + num_dests_sacked, 3381 tp1->rec.data.TSN_seq, 3382 tp1->sent, 3383 SCTP_FR_LOG_STRIKE_CHUNK); 3384 #endif 3385 tp1->sent++; 3386 } 3387 } 3388 } 3389 } 3390 /* 3391 * @@@ JRI: TODO: remove code for HTNA algo. CMT's 3392 * SFR algo covers HTNA. 3393 */ 3394 } else if (compare_with_wrap(tp1->rec.data.TSN_seq, 3395 biggest_tsn_newly_acked, MAX_TSN)) { 3396 /* 3397 * We don't strike these: This is the HTNA 3398 * algorithm i.e. we don't strike If our TSN is 3399 * larger than the Highest TSN Newly Acked. 3400 */ 3401 ; 3402 } else { 3403 /* Strike the TSN */ 3404 #ifdef SCTP_FR_LOGGING 3405 sctp_log_fr(biggest_tsn_newly_acked, 3406 tp1->rec.data.TSN_seq, 3407 tp1->sent, 3408 SCTP_FR_LOG_STRIKE_CHUNK); 3409 #endif 3410 tp1->sent++; 3411 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3412 /* 3413 * CMT DAC algorithm: If SACK flag is set to 3414 * 0, then lowest_newack test will not pass 3415 * because it would have been set to the 3416 * cumack earlier. If not already to be 3417 * rtx'd, If not a mixed sack and if tp1 is 3418 * not between two sacked TSNs, then mark by 3419 * one more. 3420 */ 3421 if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3422 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) { 3423 #ifdef SCTP_FR_LOGGING 3424 sctp_log_fr(48 + num_dests_sacked, 3425 tp1->rec.data.TSN_seq, 3426 tp1->sent, 3427 SCTP_FR_LOG_STRIKE_CHUNK); 3428 #endif 3429 tp1->sent++; 3430 } 3431 } 3432 } 3433 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3434 /* Increment the count to resend */ 3435 struct sctp_nets *alt; 3436 3437 /* printf("OK, we are now ready to FR this guy\n"); */ 3438 #ifdef SCTP_FR_LOGGING 3439 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count, 3440 0, SCTP_FR_MARKED); 3441 #endif 3442 if (strike_flag) { 3443 /* This is a subsequent FR */ 3444 SCTP_STAT_INCR(sctps_sendmultfastretrans); 3445 } 3446 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 3447 3448 if (sctp_cmt_on_off) { 3449 /* 3450 * CMT: Using RTX_SSTHRESH policy for CMT. 
3451 * If CMT is being used, then pick dest with 3452 * largest ssthresh for any retransmission. 3453 * (iyengar@cis.udel.edu, 2005/08/12) 3454 */ 3455 tp1->no_fr_allowed = 1; 3456 alt = tp1->whoTo; 3457 alt = sctp_find_alternate_net(stcb, alt, 1); 3458 /* 3459 * CUCv2: If a different dest is picked for 3460 * the retransmission, then new 3461 * (rtx-)pseudo_cumack needs to be tracked 3462 * for orig dest. Let CUCv2 track new (rtx-) 3463 * pseudo-cumack always. 3464 */ 3465 tp1->whoTo->find_pseudo_cumack = 1; 3466 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3467 3468 3469 } else {/* CMT is OFF */ 3470 3471 #ifdef SCTP_FR_TO_ALTERNATE 3472 /* Can we find an alternate? */ 3473 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); 3474 #else 3475 /* 3476 * default behavior is to NOT retransmit 3477 * FR's to an alternate. Armando Caro's 3478 * paper details why. 3479 */ 3480 alt = tp1->whoTo; 3481 #endif 3482 } 3483 3484 tp1->rec.data.doing_fast_retransmit = 1; 3485 tot_retrans++; 3486 /* mark the sending seq for possible subsequent FR's */ 3487 /* 3488 * printf("Marking TSN for FR new value %x\n", 3489 * (uint32_t)tpi->rec.data.TSN_seq); 3490 */ 3491 if (TAILQ_EMPTY(&asoc->send_queue)) { 3492 /* 3493 * If the queue of send is empty then its 3494 * the next sequence number that will be 3495 * assigned so we subtract one from this to 3496 * get the one we last sent. 3497 */ 3498 tp1->rec.data.fast_retran_tsn = sending_seq; 3499 } else { 3500 /* 3501 * If there are chunks on the send queue 3502 * (unsent data that has made it from the 3503 * stream queues but not out the door, we 3504 * take the first one (which will have the 3505 * lowest TSN) and subtract one to get the 3506 * one we last sent. 3507 */ 3508 struct sctp_tmit_chunk *ttt; 3509 3510 ttt = TAILQ_FIRST(&asoc->send_queue); 3511 tp1->rec.data.fast_retran_tsn = 3512 ttt->rec.data.TSN_seq; 3513 } 3514 3515 if (tp1->do_rtt) { 3516 /* 3517 * this guy had a RTO calculation pending on 3518 * it, cancel it 3519 */ 3520 tp1->whoTo->rto_pending = 0; 3521 tp1->do_rtt = 0; 3522 } 3523 /* fix counts and things */ 3524 #ifdef SCTP_FLIGHT_LOGGING 3525 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN, 3526 tp1->whoTo->flight_size, 3527 tp1->book_size, 3528 (uintptr_t) stcb, 3529 tp1->rec.data.TSN_seq); 3530 #endif 3531 tp1->whoTo->net_ack++; 3532 if (tp1->whoTo->flight_size >= tp1->book_size) 3533 tp1->whoTo->flight_size -= tp1->book_size; 3534 else 3535 tp1->whoTo->flight_size = 0; 3536 3537 #ifdef SCTP_LOG_RWND 3538 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, 3539 asoc->peers_rwnd, tp1->send_size, sctp_peer_chunk_oh); 3540 #endif 3541 /* add back to the rwnd */ 3542 asoc->peers_rwnd += (tp1->send_size + sctp_peer_chunk_oh); 3543 3544 /* remove from the total flight */ 3545 if (asoc->total_flight >= tp1->book_size) { 3546 asoc->total_flight -= tp1->book_size; 3547 if (asoc->total_flight_count > 0) 3548 asoc->total_flight_count--; 3549 } else { 3550 asoc->total_flight = 0; 3551 asoc->total_flight_count = 0; 3552 } 3553 3554 3555 if (alt != tp1->whoTo) { 3556 /* yes, there is an alternate. */ 3557 sctp_free_remote_addr(tp1->whoTo); 3558 tp1->whoTo = alt; 3559 atomic_add_int(&alt->ref_count, 1); 3560 } 3561 } 3562 tp1 = TAILQ_NEXT(tp1, sctp_next); 3563 } /* while (tp1) */ 3564 3565 if (tot_retrans > 0) { 3566 /* 3567 * Setup the ecn nonce re-sync point. We do this since once 3568 * we go to FR something we introduce a Karn's rule scenario 3569 * and won't know the totals for the ECN bits. 
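* (nonce_sum_check stays cleared, so no nonce mismatch can be
* declared until the cum-ack reaches nonce_resync_tsn.)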
3570 */ 3571 asoc->nonce_resync_tsn = sending_seq; 3572 asoc->nonce_wait_for_ecne = 0; 3573 asoc->nonce_sum_check = 0; 3574 } 3575 } 3576 3577 struct sctp_tmit_chunk * 3578 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb, 3579 struct sctp_association *asoc) 3580 { 3581 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL; 3582 struct timeval now; 3583 int now_filled = 0; 3584 3585 if (asoc->peer_supports_prsctp == 0) { 3586 return (NULL); 3587 } 3588 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3589 while (tp1) { 3590 if (tp1->sent != SCTP_FORWARD_TSN_SKIP && 3591 tp1->sent != SCTP_DATAGRAM_RESEND) { 3592 /* no chance to advance, out of here */ 3593 break; 3594 } 3595 if (!PR_SCTP_ENABLED(tp1->flags)) { 3596 /* 3597 * We can't fwd-tsn past any that are reliable aka 3598 * retransmitted until the asoc fails. 3599 */ 3600 break; 3601 } 3602 if (!now_filled) { 3603 SCTP_GETTIME_TIMEVAL(&now); 3604 now_filled = 1; 3605 } 3606 tp2 = TAILQ_NEXT(tp1, sctp_next); 3607 /* 3608 * now we got a chunk which is marked for another 3609 * retransmission to a PR-stream but has run out its chances 3610 * already maybe OR has been marked to skip now. Can we skip 3611 * it if its a resend? 3612 */ 3613 if (tp1->sent == SCTP_DATAGRAM_RESEND && 3614 (PR_SCTP_TTL_ENABLED(tp1->flags))) { 3615 /* 3616 * Now is this one marked for resend and its time is 3617 * now up? 3618 */ 3619 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3620 /* Yes so drop it */ 3621 if (tp1->data) { 3622 sctp_release_pr_sctp_chunk(stcb, tp1, 3623 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3624 &asoc->sent_queue); 3625 } 3626 } else { 3627 /* 3628 * No, we are done when hit one for resend 3629 * whos time as not expired. 3630 */ 3631 break; 3632 } 3633 } 3634 /* 3635 * Ok now if this chunk is marked to drop it we can clean up 3636 * the chunk, advance our peer ack point and we can check 3637 * the next chunk. 3638 */ 3639 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) { 3640 /* advance PeerAckPoint goes forward */ 3641 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq; 3642 a_adv = tp1; 3643 /* 3644 * we don't want to de-queue it here. Just wait for 3645 * the next peer SACK to come with a new cumTSN and 3646 * then the chunk will be droped in the normal 3647 * fashion. 3648 */ 3649 if (tp1->data) { 3650 sctp_free_bufspace(stcb, asoc, tp1, 1); 3651 /* 3652 * Maybe there should be another 3653 * notification type 3654 */ 3655 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, 3656 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3657 tp1); 3658 sctp_m_freem(tp1->data); 3659 tp1->data = NULL; 3660 if (stcb->sctp_socket) { 3661 sctp_sowwakeup(stcb->sctp_ep, 3662 stcb->sctp_socket); 3663 #ifdef SCTP_WAKE_LOGGING 3664 sctp_wakeup_log(stcb, tp1->rec.data.TSN_seq, 1, SCTP_WAKESND_FROM_FWDTSN); 3665 #endif 3666 } 3667 } 3668 } else { 3669 /* 3670 * If it is still in RESEND we can advance no 3671 * further 3672 */ 3673 break; 3674 } 3675 /* 3676 * If we hit here we just dumped tp1, move to next tsn on 3677 * sent queue. 
3678 */ 3679 tp1 = tp2; 3680 } 3681 return (a_adv); 3682 } 3683 3684 #ifdef SCTP_HIGH_SPEED 3685 struct sctp_hs_raise_drop { 3686 int32_t cwnd; 3687 int32_t increase; 3688 int32_t drop_percent; 3689 }; 3690 3691 #define SCTP_HS_TABLE_SIZE 73 3692 3693 struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = { 3694 {38, 1, 50}, /* 0 */ 3695 {118, 2, 44}, /* 1 */ 3696 {221, 3, 41}, /* 2 */ 3697 {347, 4, 38}, /* 3 */ 3698 {495, 5, 37}, /* 4 */ 3699 {663, 6, 35}, /* 5 */ 3700 {851, 7, 34}, /* 6 */ 3701 {1058, 8, 33}, /* 7 */ 3702 {1284, 9, 32}, /* 8 */ 3703 {1529, 10, 31}, /* 9 */ 3704 {1793, 11, 30}, /* 10 */ 3705 {2076, 12, 29}, /* 11 */ 3706 {2378, 13, 28}, /* 12 */ 3707 {2699, 14, 28}, /* 13 */ 3708 {3039, 15, 27}, /* 14 */ 3709 {3399, 16, 27}, /* 15 */ 3710 {3778, 17, 26}, /* 16 */ 3711 {4177, 18, 26}, /* 17 */ 3712 {4596, 19, 25}, /* 18 */ 3713 {5036, 20, 25}, /* 19 */ 3714 {5497, 21, 24}, /* 20 */ 3715 {5979, 22, 24}, /* 21 */ 3716 {6483, 23, 23}, /* 22 */ 3717 {7009, 24, 23}, /* 23 */ 3718 {7558, 25, 22}, /* 24 */ 3719 {8130, 26, 22}, /* 25 */ 3720 {8726, 27, 22}, /* 26 */ 3721 {9346, 28, 21}, /* 27 */ 3722 {9991, 29, 21}, /* 28 */ 3723 {10661, 30, 21}, /* 29 */ 3724 {11358, 31, 20}, /* 30 */ 3725 {12082, 32, 20}, /* 31 */ 3726 {12834, 33, 20}, /* 32 */ 3727 {13614, 34, 19}, /* 33 */ 3728 {14424, 35, 19}, /* 34 */ 3729 {15265, 36, 19}, /* 35 */ 3730 {16137, 37, 19}, /* 36 */ 3731 {17042, 38, 18}, /* 37 */ 3732 {17981, 39, 18}, /* 38 */ 3733 {18955, 40, 18}, /* 39 */ 3734 {19965, 41, 17}, /* 40 */ 3735 {21013, 42, 17}, /* 41 */ 3736 {22101, 43, 17}, /* 42 */ 3737 {23230, 44, 17}, /* 43 */ 3738 {24402, 45, 16}, /* 44 */ 3739 {25618, 46, 16}, /* 45 */ 3740 {26881, 47, 16}, /* 46 */ 3741 {28193, 48, 16}, /* 47 */ 3742 {29557, 49, 15}, /* 48 */ 3743 {30975, 50, 15}, /* 49 */ 3744 {32450, 51, 15}, /* 50 */ 3745 {33986, 52, 15}, /* 51 */ 3746 {35586, 53, 14}, /* 52 */ 3747 {37253, 54, 14}, /* 53 */ 3748 {38992, 55, 14}, /* 54 */ 3749 {40808, 56, 14}, /* 55 */ 3750 {42707, 57, 13}, /* 56 */ 3751 {44694, 58, 13}, /* 57 */ 3752 {46776, 59, 13}, /* 58 */ 3753 {48961, 60, 13}, /* 59 */ 3754 {51258, 61, 13}, /* 60 */ 3755 {53677, 62, 12}, /* 61 */ 3756 {56230, 63, 12}, /* 62 */ 3757 {58932, 64, 12}, /* 63 */ 3758 {61799, 65, 12}, /* 64 */ 3759 {64851, 66, 11}, /* 65 */ 3760 {68113, 67, 11}, /* 66 */ 3761 {71617, 68, 11}, /* 67 */ 3762 {75401, 69, 10}, /* 68 */ 3763 {79517, 70, 10}, /* 69 */ 3764 {84035, 71, 10}, /* 70 */ 3765 {89053, 72, 10}, /* 71 */ 3766 {94717, 73, 9} /* 72 */ 3767 }; 3768 3769 static void 3770 sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net) 3771 { 3772 int cur_val, i, indx, incr; 3773 3774 cur_val = net->cwnd >> 10; 3775 indx = SCTP_HS_TABLE_SIZE - 1; 3776 3777 if (cur_val < sctp_cwnd_adjust[0].cwnd) { 3778 /* normal mode */ 3779 if (net->net_ack > net->mtu) { 3780 net->cwnd += net->mtu; 3781 #ifdef SCTP_CWND_MONITOR 3782 sctp_log_cwnd(stcb, net, net->mtu, SCTP_CWND_LOG_FROM_SS); 3783 #endif 3784 } else { 3785 net->cwnd += net->net_ack; 3786 #ifdef SCTP_CWND_MONITOR 3787 sctp_log_cwnd(stcb, net, net->net_ack, SCTP_CWND_LOG_FROM_SS); 3788 #endif 3789 } 3790 } else { 3791 for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) { 3792 if (cur_val < sctp_cwnd_adjust[i].cwnd) { 3793 indx = i; 3794 break; 3795 } 3796 } 3797 net->last_hs_used = indx; 3798 incr = ((sctp_cwnd_adjust[indx].increase) << 10); 3799 net->cwnd += incr; 3800 #ifdef SCTP_CWND_MONITOR 3801 sctp_log_cwnd(stcb, net, incr, SCTP_CWND_LOG_FROM_SS); 3802 #endif 3803 } 3804 } 3805 3806 
static void
3807 sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net)
3808 {
3809 int cur_val, i, indx;
3810
3811 #ifdef SCTP_CWND_MONITOR
3812 int old_cwnd = net->cwnd;
3813
3814 #endif
3815
3816 cur_val = net->cwnd >> 10;
3817 indx = net->last_hs_used;
3818 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3819 /* normal mode */
3820 net->ssthresh = net->cwnd / 2;
3821 if (net->ssthresh < (net->mtu * 2)) {
3822 net->ssthresh = 2 * net->mtu;
3823 }
3824 net->cwnd = net->ssthresh;
3825 } else {
3826 /* drop by the proper amount */
3827 net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
3828 sctp_cwnd_adjust[net->last_hs_used].drop_percent);
3829 net->cwnd = net->ssthresh;
3830 /* now where are we */
3831 indx = net->last_hs_used;
3832 cur_val = net->cwnd >> 10;
3833 /* reset where we are in the table */
3834 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3835 /* fell out of HS */
3836 net->last_hs_used = 0;
3837 } else {
3838 for (i = indx; i >= 1; i--) {
3839 if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
3840 break;
3841 }
3842 }
3843 net->last_hs_used = i;	/* record the row we fell back to */
3844 }
3845 }
3846 #ifdef SCTP_CWND_MONITOR
3847 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR);
3848 #endif
3849
3850 }
3851
3852 #endif
3853
3854 extern int sctp_early_fr;
3855 extern int sctp_L2_abc_variable;
3856
3857
3858 static __inline void
3859 sctp_cwnd_update(struct sctp_tcb *stcb,
3860 struct sctp_association *asoc,
3861 int accum_moved, int reneged_all, int will_exit)
3862 {
3863 struct sctp_nets *net;
3864
3865 /******************************/
3866 /* update cwnd and Early FR */
3867 /******************************/
3868 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3869 #ifdef JANA_CODE_WHY_THIS
3870 /*
3871 * CMT fast recovery code. Need to debug.
3872 */
3873 if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
3874 if (compare_with_wrap(asoc->last_acked_seq,
3875 net->fast_recovery_tsn, MAX_TSN) ||
3876 (asoc->last_acked_seq == net->fast_recovery_tsn) ||
3877 compare_with_wrap(net->pseudo_cumack, net->fast_recovery_tsn, MAX_TSN) ||
3878 (net->pseudo_cumack == net->fast_recovery_tsn)) {
3879 net->will_exit_fast_recovery = 1;
3880 }
3881 }
3882 #endif
3883 if (sctp_early_fr) {
3884 /*
3885 * So, first of all, do we need to have an Early FR
3886 * timer running?
3887 */
3888 if (((TAILQ_FIRST(&asoc->sent_queue)) &&
3889 (net->ref_count > 1) &&
3890 (net->flight_size < net->cwnd)) ||
3891 (reneged_all)) {
3892 /*
3893 * yes, so in this case stop it if it's
3894 * running, and then restart it. Reneging
3895 * all is a special case where we want to
3896 * run the Early FR timer and then force the
3897 * last few unacked to be sent, causing us
3898 * to elicit a sack with gaps to force out
3899 * the others.
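 *
 * (In short: keep the Early FR timer armed whenever there is
 * still unacked data on the sent queue and cwnd headroom on
 * this net, or when a total reneg was detected; otherwise it
 * is stopped below.)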
3900 */ 3901 if (callout_pending(&net->fr_timer.timer)) { 3902 SCTP_STAT_INCR(sctps_earlyfrstpidsck2); 3903 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 3904 SCTP_FROM_SCTP_INDATA + SCTP_LOC_20); 3905 } 3906 SCTP_STAT_INCR(sctps_earlyfrstrid); 3907 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net); 3908 } else { 3909 /* No, stop it if its running */ 3910 if (callout_pending(&net->fr_timer.timer)) { 3911 SCTP_STAT_INCR(sctps_earlyfrstpidsck3); 3912 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 3913 SCTP_FROM_SCTP_INDATA + SCTP_LOC_21); 3914 } 3915 } 3916 } 3917 /* if nothing was acked on this destination skip it */ 3918 if (net->net_ack == 0) { 3919 #ifdef SCTP_CWND_LOGGING 3920 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK); 3921 #endif 3922 continue; 3923 } 3924 if (net->net_ack2 > 0) { 3925 /* 3926 * Karn's rule applies to clearing error count, this 3927 * is optional. 3928 */ 3929 net->error_count = 0; 3930 if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) == 3931 SCTP_ADDR_NOT_REACHABLE) { 3932 /* addr came good */ 3933 net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE; 3934 net->dest_state |= SCTP_ADDR_REACHABLE; 3935 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 3936 SCTP_RECEIVED_SACK, (void *)net); 3937 /* now was it the primary? if so restore */ 3938 if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) { 3939 sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net); 3940 } 3941 } 3942 } 3943 #ifdef JANA_CODE_WHY_THIS 3944 /* 3945 * Cannot skip for CMT. Need to come back and check these 3946 * variables for CMT. CMT fast recovery code. Need to debug. 3947 */ 3948 if (sctp_cmt_on_off == 1 && 3949 net->fast_retran_loss_recovery && 3950 net->will_exit_fast_recovery == 0) 3951 #endif 3952 if (sctp_cmt_on_off == 0 && asoc->fast_retran_loss_recovery && will_exit == 0) { 3953 /* 3954 * If we are in loss recovery we skip any 3955 * cwnd update 3956 */ 3957 goto skip_cwnd_update; 3958 } 3959 /* 3960 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has 3961 * moved. 3962 */ 3963 if (accum_moved || (sctp_cmt_on_off && net->new_pseudo_cumack)) { 3964 /* If the cumulative ack moved we can proceed */ 3965 if (net->cwnd <= net->ssthresh) { 3966 /* We are in slow start */ 3967 if (net->flight_size + net->net_ack >= 3968 net->cwnd) { 3969 #ifdef SCTP_HIGH_SPEED 3970 sctp_hs_cwnd_increase(stcb, net); 3971 #else 3972 if (net->net_ack > (net->mtu * sctp_L2_abc_variable)) { 3973 net->cwnd += (net->mtu * sctp_L2_abc_variable); 3974 #ifdef SCTP_CWND_MONITOR 3975 sctp_log_cwnd(stcb, net, net->mtu, 3976 SCTP_CWND_LOG_FROM_SS); 3977 #endif 3978 3979 } else { 3980 net->cwnd += net->net_ack; 3981 #ifdef SCTP_CWND_MONITOR 3982 sctp_log_cwnd(stcb, net, net->net_ack, 3983 SCTP_CWND_LOG_FROM_SS); 3984 #endif 3985 3986 } 3987 #endif 3988 } else { 3989 unsigned int dif; 3990 3991 dif = net->cwnd - (net->flight_size + 3992 net->net_ack); 3993 #ifdef SCTP_CWND_LOGGING 3994 sctp_log_cwnd(stcb, net, net->net_ack, 3995 SCTP_CWND_LOG_NOADV_SS); 3996 #endif 3997 } 3998 } else { 3999 /* We are in congestion avoidance */ 4000 if (net->flight_size + net->net_ack >= 4001 net->cwnd) { 4002 /* 4003 * add to pba only if we had a 4004 * cwnd's worth (or so) in flight OR 4005 * the burst limit was applied. 4006 */ 4007 net->partial_bytes_acked += 4008 net->net_ack; 4009 4010 /* 4011 * Do we need to increase (if pba is 4012 * > cwnd)? 
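 *
 * (This is the usual CA rule of roughly one MTU of growth per
 * cwnd's worth of data acked: with cwnd = 10 * MTU, about ten
 * full-MTU chunks must be newly acked before partial_bytes_acked
 * reaches cwnd, at which point cwnd grows by one MTU and the
 * counter is reduced by cwnd.)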
4013 */ 4014 if (net->partial_bytes_acked >= 4015 net->cwnd) { 4016 if (net->cwnd < 4017 net->partial_bytes_acked) { 4018 net->partial_bytes_acked -= 4019 net->cwnd; 4020 } else { 4021 net->partial_bytes_acked = 4022 0; 4023 } 4024 net->cwnd += net->mtu; 4025 #ifdef SCTP_CWND_MONITOR 4026 sctp_log_cwnd(stcb, net, net->mtu, 4027 SCTP_CWND_LOG_FROM_CA); 4028 #endif 4029 } 4030 #ifdef SCTP_CWND_LOGGING 4031 else { 4032 sctp_log_cwnd(stcb, net, net->net_ack, 4033 SCTP_CWND_LOG_NOADV_CA); 4034 } 4035 #endif 4036 } else { 4037 unsigned int dif; 4038 4039 #ifdef SCTP_CWND_LOGGING 4040 sctp_log_cwnd(stcb, net, net->net_ack, 4041 SCTP_CWND_LOG_NOADV_CA); 4042 #endif 4043 dif = net->cwnd - (net->flight_size + 4044 net->net_ack); 4045 } 4046 } 4047 } else { 4048 #ifdef SCTP_CWND_LOGGING 4049 sctp_log_cwnd(stcb, net, net->mtu, 4050 SCTP_CWND_LOG_NO_CUMACK); 4051 #endif 4052 } 4053 skip_cwnd_update: 4054 /* 4055 * NOW, according to Karn's rule do we need to restore the 4056 * RTO timer back? Check our net_ack2. If not set then we 4057 * have a ambiguity.. i.e. all data ack'd was sent to more 4058 * than one place. 4059 */ 4060 if (net->net_ack2) { 4061 /* restore any doubled timers */ 4062 net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1; 4063 if (net->RTO < stcb->asoc.minrto) { 4064 net->RTO = stcb->asoc.minrto; 4065 } 4066 if (net->RTO > stcb->asoc.maxrto) { 4067 net->RTO = stcb->asoc.maxrto; 4068 } 4069 } 4070 } 4071 } 4072 4073 4074 void 4075 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, 4076 uint32_t rwnd, int nonce_sum_flag, int *abort_now) 4077 { 4078 struct sctp_nets *net; 4079 struct sctp_association *asoc; 4080 struct sctp_tmit_chunk *tp1, *tp2; 4081 int j; 4082 4083 SCTP_TCB_LOCK_ASSERT(stcb); 4084 asoc = &stcb->asoc; 4085 /* First setup for CC stuff */ 4086 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4087 net->prev_cwnd = net->cwnd; 4088 net->net_ack = 0; 4089 net->net_ack2 = 0; 4090 } 4091 asoc->this_sack_highest_gap = cumack; 4092 stcb->asoc.overall_error_count = 0; 4093 /* process the new consecutive TSN first */ 4094 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4095 while (tp1) { 4096 tp2 = TAILQ_NEXT(tp1, sctp_next); 4097 if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq, 4098 MAX_TSN) || 4099 cumack == tp1->rec.data.TSN_seq) { 4100 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 4101 /* 4102 * ECN Nonce: Add the nonce to the sender's 4103 * nonce sum 4104 */ 4105 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce; 4106 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4107 /* 4108 * If it is less than ACKED, it is 4109 * now no-longer in flight. Higher 4110 * values may occur during marking 4111 */ 4112 #ifdef SCTP_FLIGHT_LOGGING 4113 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN, 4114 tp1->whoTo->flight_size, 4115 tp1->book_size, 4116 (uintptr_t) stcb, 4117 tp1->rec.data.TSN_seq); 4118 #endif 4119 4120 if (tp1->whoTo->flight_size >= tp1->book_size) { 4121 tp1->whoTo->flight_size -= tp1->book_size; 4122 } else { 4123 tp1->whoTo->flight_size = 0; 4124 } 4125 if (asoc->total_flight >= tp1->book_size) { 4126 asoc->total_flight -= tp1->book_size; 4127 if (asoc->total_flight_count > 0) 4128 asoc->total_flight_count--; 4129 } else { 4130 asoc->total_flight = 0; 4131 asoc->total_flight_count = 0; 4132 } 4133 tp1->whoTo->net_ack += tp1->send_size; 4134 if (tp1->snd_count < 2) { 4135 /* 4136 * True non-retransmited 4137 * chunk 4138 */ 4139 tp1->whoTo->net_ack2 += 4140 tp1->send_size; 4141 4142 /* update RTO too? 
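 * Per Karn's algorithm only a chunk that was transmitted
 * exactly once (snd_count < 2) may produce an RTT sample,
 * since the ACK of a retransmitted chunk is ambiguous; that
 * is why do_rtt is only honored inside this branch.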
*/ 4143 if ((tp1->do_rtt) && (tp1->whoTo->rto_pending)) { 4144 tp1->whoTo->RTO = 4145 sctp_calculate_rto(stcb, 4146 asoc, tp1->whoTo, 4147 &tp1->sent_rcv_time); 4148 tp1->whoTo->rto_pending = 0; 4149 tp1->do_rtt = 0; 4150 } 4151 } 4152 #ifdef SCTP_CWND_LOGGING 4153 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 4154 #endif 4155 } 4156 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4157 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4158 } 4159 tp1->sent = SCTP_DATAGRAM_ACKED; 4160 } 4161 } else { 4162 break; 4163 } 4164 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4165 if (tp1->data) { 4166 sctp_free_bufspace(stcb, asoc, tp1, 1); 4167 sctp_m_freem(tp1->data); 4168 } 4169 #ifdef SCTP_SACK_LOGGING 4170 sctp_log_sack(asoc->last_acked_seq, 4171 cumack, 4172 tp1->rec.data.TSN_seq, 4173 0, 4174 0, 4175 SCTP_LOG_FREE_SENT); 4176 #endif 4177 tp1->data = NULL; 4178 asoc->sent_queue_cnt--; 4179 sctp_free_remote_addr(tp1->whoTo); 4180 sctp_free_a_chunk(stcb, tp1); 4181 tp1 = tp2; 4182 } 4183 if (stcb->sctp_socket) { 4184 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4185 #ifdef SCTP_WAKE_LOGGING 4186 sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK); 4187 #endif 4188 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4189 #ifdef SCTP_WAKE_LOGGING 4190 } else { 4191 sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK); 4192 #endif 4193 } 4194 4195 if (asoc->last_acked_seq != cumack) 4196 sctp_cwnd_update(stcb, asoc, 1, 0, 0); 4197 asoc->last_acked_seq = cumack; 4198 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4199 /* nothing left in-flight */ 4200 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4201 net->flight_size = 0; 4202 net->partial_bytes_acked = 0; 4203 } 4204 asoc->total_flight = 0; 4205 asoc->total_flight_count = 0; 4206 } 4207 /* Fix up the a-p-a-p for future PR-SCTP sends */ 4208 if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) { 4209 asoc->advanced_peer_ack_point = cumack; 4210 } 4211 /* ECN Nonce updates */ 4212 if (asoc->ecn_nonce_allowed) { 4213 if (asoc->nonce_sum_check) { 4214 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) { 4215 if (asoc->nonce_wait_for_ecne == 0) { 4216 struct sctp_tmit_chunk *lchk; 4217 4218 lchk = TAILQ_FIRST(&asoc->send_queue); 4219 asoc->nonce_wait_for_ecne = 1; 4220 if (lchk) { 4221 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq; 4222 } else { 4223 asoc->nonce_wait_tsn = asoc->sending_seq; 4224 } 4225 } else { 4226 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) || 4227 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) { 4228 /* 4229 * Misbehaving peer. We need 4230 * to react to this guy 4231 */ 4232 asoc->ecn_allowed = 0; 4233 asoc->ecn_nonce_allowed = 0; 4234 } 4235 } 4236 } 4237 } else { 4238 /* See if Resynchronization Possible */ 4239 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) { 4240 asoc->nonce_sum_check = 1; 4241 /* 4242 * now we must calculate what the base is. 4243 * We do this based on two things, we know 4244 * the total's for all the segments 4245 * gap-acked in the SACK (none), We also 4246 * know the SACK's nonce sum, its in 4247 * nonce_sum_flag. 
So we can build a truth
4248 * table to back-calculate the new value of
4249 * asoc->nonce_sum_expect_base:
4250 *
4251 *   SACK-flag-Value   Seg-Sums   Base
4252 *          0              0        0
     *          1              0        1
4253 *          0              1        1
     *          1              1        0
4254 */
4255 asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
4256 }
4257 }
4258 }
4259 /* RWND update */
4260 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4261 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
4262 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4263 /* SWS sender side engages */
4264 asoc->peers_rwnd = 0;
4265 }
4266 /* Now assure a timer where data is queued at */
4267 again:
4268 j = 0;
4269 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4270 if (net->flight_size) {
4271 int to_ticks;
4272
4273 if (net->RTO == 0) {
4274 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4275 } else {
4276 to_ticks = MSEC_TO_TICKS(net->RTO);
4277 }
4278 j++;
4279 callout_reset(&net->rxt_timer.timer, to_ticks,
4280 sctp_timeout_handler, &net->rxt_timer);
4281 } else {
4282 if (callout_pending(&net->rxt_timer.timer)) {
4283 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4284 stcb, net,
4285 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4286 }
4287 if (sctp_early_fr) {
4288 if (callout_pending(&net->fr_timer.timer)) {
4289 SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4290 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4291 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4292 }
4293 }
4294 }
4295 }
4296 if ((j == 0) && (!TAILQ_EMPTY(&asoc->sent_queue)) && (asoc->sent_queue_retran_cnt == 0)) {
4297 /* huh, this should not happen */
4298 #ifdef INVARIANTS
4299 panic("Flight size incorrect? fixing??");
4300 #else
4301 printf("Flight size incorrect? fixing\n");
4302 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4303 net->flight_size = 0;
4304 }
4305 asoc->total_flight = 0;
4306 asoc->total_flight_count = 0;
4307 asoc->sent_queue_retran_cnt = 0;
4308 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4309 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4310 tp1->whoTo->flight_size += tp1->book_size;
4311 asoc->total_flight += tp1->book_size;
4312 asoc->total_flight_count++;
4313 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4314 asoc->sent_queue_retran_cnt++;
4315 }
4316 }
4317 #endif
4318 goto again;
4319 }
4320 /**********************************/
4321 /* Now what about shutdown issues */
4322 /**********************************/
4323 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4324 /* nothing left on sendqueue.. consider done */
4325 /* clean up */
4326 if ((asoc->stream_queue_cnt == 1) &&
4327 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4328 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4329 (asoc->locked_on_sending)
4330 ) {
4331 struct sctp_stream_queue_pending *sp;
4332
4333 /*
4334 * I may be in a state where we got all across.. but
4335 * cannot write more due to a shutdown... we abort
4336 * since the user did not indicate EOR in this case.
4337 * The sp will be cleaned during free of the asoc.
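 * (Concretely: locked_on_sending names the stream that still
 * holds a pending message; if its tail entry has length 0 and
 * msg_is_complete unset, the sender stalled mid-message, so we
 * flag SCTP_STATE_PARTIAL_MSG_LEFT and the abort path below
 * reports SCTP_CAUSE_USER_INITIATED_ABT.)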
4338 */ 4339 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), 4340 sctp_streamhead); 4341 if ((sp) && (sp->length == 0) && (sp->msg_is_complete == 0)) { 4342 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4343 asoc->locked_on_sending = NULL; 4344 asoc->stream_queue_cnt--; 4345 } 4346 } 4347 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4348 (asoc->stream_queue_cnt == 0)) { 4349 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4350 /* Need to abort here */ 4351 struct mbuf *oper; 4352 4353 abort_out_now: 4354 *abort_now = 1; 4355 /* XXX */ 4356 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4357 0, M_DONTWAIT, 1, MT_DATA); 4358 if (oper) { 4359 struct sctp_paramhdr *ph; 4360 uint32_t *ippp; 4361 4362 oper->m_len = sizeof(struct sctp_paramhdr) + 4363 sizeof(uint32_t); 4364 ph = mtod(oper, struct sctp_paramhdr *); 4365 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 4366 ph->param_length = htons(oper->m_len); 4367 ippp = (uint32_t *) (ph + 1); 4368 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24); 4369 } 4370 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; 4371 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper); 4372 } else { 4373 asoc->state = SCTP_STATE_SHUTDOWN_SENT; 4374 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4375 sctp_stop_timers_for_shutdown(stcb); 4376 sctp_send_shutdown(stcb, 4377 stcb->asoc.primary_destination); 4378 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4379 stcb->sctp_ep, stcb, asoc->primary_destination); 4380 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4381 stcb->sctp_ep, stcb, asoc->primary_destination); 4382 } 4383 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4384 (asoc->stream_queue_cnt == 0)) { 4385 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4386 goto abort_out_now; 4387 } 4388 asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT; 4389 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4390 sctp_send_shutdown_ack(stcb, 4391 stcb->asoc.primary_destination); 4392 4393 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4394 stcb->sctp_ep, stcb, asoc->primary_destination); 4395 } 4396 } 4397 #ifdef SCTP_SACK_RWND_LOGGING 4398 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 4399 rwnd, 4400 stcb->asoc.peers_rwnd, 4401 stcb->asoc.total_flight, 4402 stcb->asoc.total_output_queue_size); 4403 4404 #endif 4405 } 4406 4407 4408 4409 void 4410 sctp_handle_sack(struct sctp_sack_chunk *ch, struct sctp_tcb *stcb, 4411 struct sctp_nets *net_from, int *abort_now) 4412 { 4413 struct sctp_association *asoc; 4414 struct sctp_sack *sack; 4415 struct sctp_tmit_chunk *tp1, *tp2; 4416 uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, 4417 this_sack_lowest_newack; 4418 uint16_t num_seg, num_dup; 4419 uint16_t wake_him = 0; 4420 unsigned int sack_length; 4421 uint32_t send_s; 4422 long j; 4423 int accum_moved = 0; 4424 int will_exit_fast_recovery = 0; 4425 uint32_t a_rwnd; 4426 struct sctp_nets *net = NULL; 4427 int nonce_sum_flag, ecn_seg_sums = 0; 4428 uint8_t reneged_all = 0; 4429 uint8_t cmt_dac_flag; 4430 4431 /* 4432 * we take any chance we can to service our queues since we cannot 4433 * get awoken when the socket is read from :< 4434 */ 4435 /* 4436 * Now perform the actual SACK handling: 1) Verify that it is not an 4437 * old sack, if so discard. 2) If there is nothing left in the send 4438 * queue (cum-ack is equal to last acked) then you have a duplicate 4439 * too, update any rwnd change and verify no timers are running. 4440 * then return. 
3) Process any new consecutive data, i.e. cum-ack
4441 * moved; process these first and note that it moved. 4) Process any
4442 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4443 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4444 * sync up flightsizes and things, stop all timers and also check
4445 * for shutdown_pending state. If so then go ahead and send off the
4446 * shutdown. If in shutdown recv, send off the shutdown-ack and
4447 * start that timer, Ret. 9) Strike any non-acked things and do FR
4448 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4449 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4450 * if in shutdown_recv state.
4451 */
4452 SCTP_TCB_LOCK_ASSERT(stcb);
4453 sack = &ch->sack;
4454 /* CMT DAC algo */
4455 this_sack_lowest_newack = 0;
4456 j = 0;
4457 sack_length = ntohs(ch->ch.chunk_length);
4458 if (sack_length < sizeof(struct sctp_sack_chunk)) {
4459 #ifdef SCTP_DEBUG
4460 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
4461 printf("Bad size on sack chunk .. too small\n");
4462 }
4463 #endif
4464 return;
4465 }
4466 /* ECN Nonce */
4467 SCTP_STAT_INCR(sctps_slowpath_sack);
4468 nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
4469 cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
4470 num_seg = ntohs(sack->num_gap_ack_blks);
4471 a_rwnd = (uint32_t) ntohl(sack->a_rwnd);
4472
4473 /* CMT DAC algo */
4474 cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
4475 num_dup = ntohs(sack->num_dup_tsns);
4476
4477
4478 stcb->asoc.overall_error_count = 0;
4479 asoc = &stcb->asoc;
4480 #ifdef SCTP_SACK_LOGGING
4481 sctp_log_sack(asoc->last_acked_seq,
4482 cum_ack,
4483 0,
4484 num_seg,
4485 num_dup,
4486 SCTP_LOG_NEW_SACK);
4487 #endif
4488 #if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING)
4489 if (num_dup) {
4490 int off_to_dup, iii;
4491 uint32_t *dupdata;
4492
4493 off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk);
4494 if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= sack_length) {
4495 dupdata = (uint32_t *) ((caddr_t)ch + off_to_dup);
4496 for (iii = 0; iii < num_dup; iii++) {
4497 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4498 dupdata++;
4499
4500 }
4501 } else {
4502 printf("Size invalid offset to dups:%d number dups:%d sack_len:%d num gaps:%d\n",
4503 off_to_dup, num_dup, sack_length, num_seg);
4504 }
4505 }
4506 #endif
4507 /* reality check */
4508 if (TAILQ_EMPTY(&asoc->send_queue)) {
4509 send_s = asoc->sending_seq;
4510 } else {
4511 tp1 = TAILQ_FIRST(&asoc->send_queue);
4512 send_s = tp1->rec.data.TSN_seq;
4513 }
4514
4515 if (sctp_strict_sacks) {
4516 if (cum_ack == send_s ||
4517 compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
4518 struct mbuf *oper;
4519
4520 /*
4521 * no way, we have not even sent this TSN out yet.
4522 * Peer is hopelessly messed up with us.
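 * (A cum-ack at or beyond send_s, the next TSN we would put on
 * the wire, acknowledges data that was never sent; with strict
 * SACK checking on, that is treated as a protocol violation and
 * the association is aborted below.)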
4523 */ 4524 hopeless_peer: 4525 *abort_now = 1; 4526 /* XXX */ 4527 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4528 0, M_DONTWAIT, 1, MT_DATA); 4529 if (oper) { 4530 struct sctp_paramhdr *ph; 4531 uint32_t *ippp; 4532 4533 oper->m_len = sizeof(struct sctp_paramhdr) + 4534 sizeof(uint32_t); 4535 ph = mtod(oper, struct sctp_paramhdr *); 4536 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 4537 ph->param_length = htons(oper->m_len); 4538 ippp = (uint32_t *) (ph + 1); 4539 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25); 4540 } 4541 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 4542 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper); 4543 return; 4544 } 4545 } 4546 /**********************/ 4547 /* 1) check the range */ 4548 /**********************/ 4549 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) { 4550 /* acking something behind */ 4551 return; 4552 } 4553 /* update the Rwnd of the peer */ 4554 if (TAILQ_EMPTY(&asoc->sent_queue) && 4555 TAILQ_EMPTY(&asoc->send_queue) && 4556 (asoc->stream_queue_cnt == 0) 4557 ) { 4558 /* nothing left on send/sent and strmq */ 4559 #ifdef SCTP_LOG_RWND 4560 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4561 asoc->peers_rwnd, 0, 0, a_rwnd); 4562 #endif 4563 asoc->peers_rwnd = a_rwnd; 4564 if (asoc->sent_queue_retran_cnt) { 4565 asoc->sent_queue_retran_cnt = 0; 4566 } 4567 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4568 /* SWS sender side engages */ 4569 asoc->peers_rwnd = 0; 4570 } 4571 /* stop any timers */ 4572 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4573 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4574 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); 4575 if (sctp_early_fr) { 4576 if (callout_pending(&net->fr_timer.timer)) { 4577 SCTP_STAT_INCR(sctps_earlyfrstpidsck1); 4578 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 4579 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); 4580 } 4581 } 4582 net->partial_bytes_acked = 0; 4583 net->flight_size = 0; 4584 } 4585 asoc->total_flight = 0; 4586 asoc->total_flight_count = 0; 4587 return; 4588 } 4589 /* 4590 * We init netAckSz and netAckSz2 to 0. These are used to track 2 4591 * things. The total byte count acked is tracked in netAckSz AND 4592 * netAck2 is used to track the total bytes acked that are un- 4593 * amibguious and were never retransmitted. We track these on a per 4594 * destination address basis. 4595 */ 4596 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4597 net->prev_cwnd = net->cwnd; 4598 net->net_ack = 0; 4599 net->net_ack2 = 0; 4600 4601 /* 4602 * CMT: Reset CUC algo variable before SACK processing 4603 */ 4604 net->new_pseudo_cumack = 0; 4605 net->will_exit_fast_recovery = 0; 4606 } 4607 /* process the new consecutive TSN first */ 4608 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4609 while (tp1) { 4610 if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq, 4611 MAX_TSN) || 4612 last_tsn == tp1->rec.data.TSN_seq) { 4613 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 4614 /* 4615 * ECN Nonce: Add the nonce to the sender's 4616 * nonce sum 4617 */ 4618 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce; 4619 accum_moved = 1; 4620 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4621 /* 4622 * If it is less than ACKED, it is 4623 * now no-longer in flight. 
Higher 4624 * values may occur during marking 4625 */ 4626 if ((tp1->whoTo->dest_state & 4627 SCTP_ADDR_UNCONFIRMED) && 4628 (tp1->snd_count < 2)) { 4629 /* 4630 * If there was no retran 4631 * and the address is 4632 * un-confirmed and we sent 4633 * there and are now 4634 * sacked.. its confirmed, 4635 * mark it so. 4636 */ 4637 tp1->whoTo->dest_state &= 4638 ~SCTP_ADDR_UNCONFIRMED; 4639 } 4640 #ifdef SCTP_FLIGHT_LOGGING 4641 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN, 4642 tp1->whoTo->flight_size, 4643 tp1->book_size, 4644 (uintptr_t) stcb, 4645 tp1->rec.data.TSN_seq); 4646 #endif 4647 if (tp1->whoTo->flight_size >= tp1->book_size) { 4648 tp1->whoTo->flight_size -= tp1->book_size; 4649 } else { 4650 tp1->whoTo->flight_size = 0; 4651 } 4652 if (asoc->total_flight >= tp1->book_size) { 4653 asoc->total_flight -= tp1->book_size; 4654 if (asoc->total_flight_count > 0) 4655 asoc->total_flight_count--; 4656 } else { 4657 asoc->total_flight = 0; 4658 asoc->total_flight_count = 0; 4659 } 4660 tp1->whoTo->net_ack += tp1->send_size; 4661 4662 /* CMT SFR and DAC algos */ 4663 this_sack_lowest_newack = tp1->rec.data.TSN_seq; 4664 tp1->whoTo->saw_newack = 1; 4665 4666 if (tp1->snd_count < 2) { 4667 /* 4668 * True non-retransmited 4669 * chunk 4670 */ 4671 tp1->whoTo->net_ack2 += 4672 tp1->send_size; 4673 4674 /* update RTO too? */ 4675 if (tp1->do_rtt) { 4676 tp1->whoTo->RTO = 4677 sctp_calculate_rto(stcb, 4678 asoc, tp1->whoTo, 4679 &tp1->sent_rcv_time); 4680 tp1->whoTo->rto_pending = 0; 4681 tp1->do_rtt = 0; 4682 } 4683 } 4684 /* 4685 * CMT: CUCv2 algorithm. From the 4686 * cumack'd TSNs, for each TSN being 4687 * acked for the first time, set the 4688 * following variables for the 4689 * corresp destination. 4690 * new_pseudo_cumack will trigger a 4691 * cwnd update. 4692 * find_(rtx_)pseudo_cumack will 4693 * trigger search for the next 4694 * expected (rtx-)pseudo-cumack. 4695 */ 4696 tp1->whoTo->new_pseudo_cumack = 1; 4697 tp1->whoTo->find_pseudo_cumack = 1; 4698 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4699 4700 4701 #ifdef SCTP_SACK_LOGGING 4702 sctp_log_sack(asoc->last_acked_seq, 4703 cum_ack, 4704 tp1->rec.data.TSN_seq, 4705 0, 4706 0, 4707 SCTP_LOG_TSN_ACKED); 4708 #endif 4709 #ifdef SCTP_CWND_LOGGING 4710 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 4711 #endif 4712 } 4713 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4714 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4715 #ifdef SCTP_AUDITING_ENABLED 4716 sctp_audit_log(0xB3, 4717 (asoc->sent_queue_retran_cnt & 0x000000ff)); 4718 #endif 4719 } 4720 tp1->sent = SCTP_DATAGRAM_ACKED; 4721 } 4722 } else { 4723 break; 4724 } 4725 tp1 = TAILQ_NEXT(tp1, sctp_next); 4726 } 4727 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; 4728 /* always set this up to cum-ack */ 4729 asoc->this_sack_highest_gap = last_tsn; 4730 4731 if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) { 4732 4733 /* skip corrupt segments */ 4734 goto skip_segments; 4735 } 4736 if (num_seg > 0) { 4737 4738 /* 4739 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has 4740 * to be greater than the cumack. Also reset saw_newack to 0 4741 * for all dests. 4742 */ 4743 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4744 net->saw_newack = 0; 4745 net->this_sack_highest_newack = last_tsn; 4746 } 4747 4748 /* 4749 * thisSackHighestGap will increase while handling NEW 4750 * segments this_sack_highest_newack will increase while 4751 * handling NEWLY ACKED chunks. 
this_sack_lowest_newack is 4752 * used for CMT DAC algo. saw_newack will also change. 4753 */ 4754 sctp_handle_segments(stcb, asoc, ch, last_tsn, 4755 &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack, 4756 num_seg, &ecn_seg_sums); 4757 4758 if (sctp_strict_sacks) { 4759 /* 4760 * validate the biggest_tsn_acked in the gap acks if 4761 * strict adherence is wanted. 4762 */ 4763 if ((biggest_tsn_acked == send_s) || 4764 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) { 4765 /* 4766 * peer is either confused or we are under 4767 * attack. We must abort. 4768 */ 4769 goto hopeless_peer; 4770 } 4771 } 4772 } 4773 skip_segments: 4774 /*******************************************/ 4775 /* cancel ALL T3-send timer if accum moved */ 4776 /*******************************************/ 4777 if (sctp_cmt_on_off) { 4778 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4779 if (net->new_pseudo_cumack) 4780 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4781 stcb, net, 4782 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27); 4783 4784 } 4785 } else { 4786 if (accum_moved) { 4787 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4788 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4789 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28); 4790 } 4791 } 4792 } 4793 /********************************************/ 4794 /* drop the acked chunks from the sendqueue */ 4795 /********************************************/ 4796 asoc->last_acked_seq = cum_ack; 4797 4798 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4799 if (tp1 == NULL) 4800 goto done_with_it; 4801 do { 4802 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack, 4803 MAX_TSN)) { 4804 break; 4805 } 4806 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 4807 /* no more sent on list */ 4808 break; 4809 } 4810 tp2 = TAILQ_NEXT(tp1, sctp_next); 4811 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4812 /* 4813 * Friendlier printf in lieu of panic now that I think its 4814 * fixed 4815 */ 4816 4817 if (tp1->pr_sctp_on) { 4818 if (asoc->pr_sctp_cnt != 0) 4819 asoc->pr_sctp_cnt--; 4820 } 4821 if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) && 4822 (asoc->total_flight > 0)) { 4823 printf("Warning flight size incorrect should be 0 is %d\n", 4824 asoc->total_flight); 4825 asoc->total_flight = 0; 4826 } 4827 if (tp1->data) { 4828 sctp_free_bufspace(stcb, asoc, tp1, 1); 4829 sctp_m_freem(tp1->data); 4830 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 4831 asoc->sent_queue_cnt_removeable--; 4832 } 4833 } 4834 #ifdef SCTP_SACK_LOGGING 4835 sctp_log_sack(asoc->last_acked_seq, 4836 cum_ack, 4837 tp1->rec.data.TSN_seq, 4838 0, 4839 0, 4840 SCTP_LOG_FREE_SENT); 4841 #endif 4842 tp1->data = NULL; 4843 asoc->sent_queue_cnt--; 4844 sctp_free_remote_addr(tp1->whoTo); 4845 4846 sctp_free_a_chunk(stcb, tp1); 4847 wake_him++; 4848 tp1 = tp2; 4849 } while (tp1 != NULL); 4850 4851 done_with_it: 4852 if ((wake_him) && (stcb->sctp_socket)) { 4853 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4854 #ifdef SCTP_WAKE_LOGGING 4855 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK); 4856 #endif 4857 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4858 #ifdef SCTP_WAKE_LOGGING 4859 } else { 4860 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK); 4861 #endif 4862 } 4863 4864 if ((sctp_cmt_on_off == 0) && asoc->fast_retran_loss_recovery && accum_moved) { 4865 if (compare_with_wrap(asoc->last_acked_seq, 4866 asoc->fast_recovery_tsn, MAX_TSN) || 4867 asoc->last_acked_seq == asoc->fast_recovery_tsn) { 4868 /* Setup so we will exit RFC2582 fast recovery */ 4869 
will_exit_fast_recovery = 1; 4870 } 4871 } 4872 /* 4873 * Check for revoked fragments: 4874 * 4875 * if Previous sack - Had no frags then we can't have any revoked if 4876 * Previous sack - Had frag's then - If we now have frags aka 4877 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked 4878 * some of them. else - The peer revoked all ACKED fragments, since 4879 * we had some before and now we have NONE. 4880 */ 4881 4882 if (sctp_cmt_on_off) { 4883 /* 4884 * Don't check for revoked if CMT is ON. CMT causes 4885 * reordering of data and acks (received on different 4886 * interfaces) can be persistently reordered. Acking 4887 * followed by apparent revoking and re-acking causes 4888 * unexpected weird behavior. So, at this time, CMT does not 4889 * respect renegs. Renegs will have to be recovered through 4890 * a timeout. Not a big deal for such a rare event. 4891 */ 4892 } else if (num_seg) 4893 sctp_check_for_revoked(asoc, cum_ack, biggest_tsn_acked); 4894 else if (asoc->saw_sack_with_frags) { 4895 int cnt_revoked = 0; 4896 4897 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4898 if (tp1 != NULL) { 4899 /* Peer revoked all dg's marked or acked */ 4900 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4901 if ((tp1->sent > SCTP_DATAGRAM_RESEND) && 4902 (tp1->sent < SCTP_FORWARD_TSN_SKIP)) { 4903 tp1->sent = SCTP_DATAGRAM_SENT; 4904 tp1->rec.data.chunk_was_revoked = 1; 4905 tp1->whoTo->flight_size += tp1->book_size; 4906 asoc->total_flight_count++; 4907 asoc->total_flight += tp1->book_size; 4908 cnt_revoked++; 4909 } 4910 } 4911 if (cnt_revoked) { 4912 reneged_all = 1; 4913 } 4914 } 4915 asoc->saw_sack_with_frags = 0; 4916 } 4917 if (num_seg) 4918 asoc->saw_sack_with_frags = 1; 4919 else 4920 asoc->saw_sack_with_frags = 0; 4921 4922 4923 sctp_cwnd_update(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery); 4924 4925 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4926 /* nothing left in-flight */ 4927 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4928 /* stop all timers */ 4929 if (sctp_early_fr) { 4930 if (callout_pending(&net->fr_timer.timer)) { 4931 SCTP_STAT_INCR(sctps_earlyfrstpidsck4); 4932 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 4933 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29); 4934 } 4935 } 4936 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4937 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30); 4938 net->flight_size = 0; 4939 net->partial_bytes_acked = 0; 4940 } 4941 asoc->total_flight = 0; 4942 asoc->total_flight_count = 0; 4943 } 4944 /**********************************/ 4945 /* Now what about shutdown issues */ 4946 /**********************************/ 4947 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4948 /* nothing left on sendqueue.. consider done */ 4949 #ifdef SCTP_LOG_RWND 4950 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4951 asoc->peers_rwnd, 0, 0, a_rwnd); 4952 #endif 4953 asoc->peers_rwnd = a_rwnd; 4954 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4955 /* SWS sender side engages */ 4956 asoc->peers_rwnd = 0; 4957 } 4958 /* clean up */ 4959 if ((asoc->stream_queue_cnt == 1) && 4960 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4961 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 4962 (asoc->locked_on_sending) 4963 ) { 4964 struct sctp_stream_queue_pending *sp; 4965 4966 /* 4967 * I may be in a state where we got all across.. but 4968 * cannot write more due to a shutdown... we abort 4969 * since the user did not indicate EOR in this case. 
4970 */ 4971 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), 4972 sctp_streamhead); 4973 if ((sp) && (sp->length == 0) && (sp->msg_is_complete == 0)) { 4974 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4975 asoc->locked_on_sending = NULL; 4976 asoc->stream_queue_cnt--; 4977 } 4978 } 4979 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4980 (asoc->stream_queue_cnt == 0)) { 4981 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4982 /* Need to abort here */ 4983 struct mbuf *oper; 4984 4985 abort_out_now: 4986 *abort_now = 1; 4987 /* XXX */ 4988 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4989 0, M_DONTWAIT, 1, MT_DATA); 4990 if (oper) { 4991 struct sctp_paramhdr *ph; 4992 uint32_t *ippp; 4993 4994 oper->m_len = sizeof(struct sctp_paramhdr) + 4995 sizeof(uint32_t); 4996 ph = mtod(oper, struct sctp_paramhdr *); 4997 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 4998 ph->param_length = htons(oper->m_len); 4999 ippp = (uint32_t *) (ph + 1); 5000 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31); 5001 } 5002 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31; 5003 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper); 5004 return; 5005 } else { 5006 asoc->state = SCTP_STATE_SHUTDOWN_SENT; 5007 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 5008 sctp_stop_timers_for_shutdown(stcb); 5009 sctp_send_shutdown(stcb, 5010 stcb->asoc.primary_destination); 5011 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 5012 stcb->sctp_ep, stcb, asoc->primary_destination); 5013 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 5014 stcb->sctp_ep, stcb, asoc->primary_destination); 5015 } 5016 return; 5017 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 5018 (asoc->stream_queue_cnt == 0)) { 5019 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 5020 goto abort_out_now; 5021 } 5022 asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT; 5023 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 5024 sctp_send_shutdown_ack(stcb, 5025 stcb->asoc.primary_destination); 5026 5027 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 5028 stcb->sctp_ep, stcb, asoc->primary_destination); 5029 return; 5030 } 5031 } 5032 /* 5033 * Now here we are going to recycle net_ack for a different use... 5034 * HEADS UP. 5035 */ 5036 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5037 net->net_ack = 0; 5038 } 5039 5040 /* 5041 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking 5042 * to be done. Setting this_sack_lowest_newack to the cum_ack will 5043 * automatically ensure that. 5044 */ 5045 if (sctp_cmt_on_off && sctp_cmt_use_dac && (cmt_dac_flag == 0)) { 5046 this_sack_lowest_newack = cum_ack; 5047 } 5048 if (num_seg > 0) { 5049 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, 5050 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved); 5051 } 5052 /*********************************************/ 5053 /* Here we perform PR-SCTP procedures */ 5054 /* (section 4.2) */ 5055 /*********************************************/ 5056 /* C1. update advancedPeerAckPoint */ 5057 if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) { 5058 asoc->advanced_peer_ack_point = cum_ack; 5059 } 5060 /* C2. try to further move advancedPeerAckPoint ahead */ 5061 5062 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) { 5063 struct sctp_tmit_chunk *lchk; 5064 5065 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 5066 /* C3. 
See if we need to send a Fwd-TSN */ 5067 if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack, 5068 MAX_TSN)) { 5069 /* 5070 * ISSUE with ECN, see FWD-TSN processing for notes 5071 * on issues that will occur when the ECN NONCE 5072 * stuff is put into SCTP for cross checking. 5073 */ 5074 send_forward_tsn(stcb, asoc); 5075 5076 /* 5077 * ECN Nonce: Disable Nonce Sum check when FWD TSN 5078 * is sent and store resync tsn 5079 */ 5080 asoc->nonce_sum_check = 0; 5081 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point; 5082 if (lchk) { 5083 /* Assure a timer is up */ 5084 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5085 stcb->sctp_ep, stcb, lchk->whoTo); 5086 } 5087 } 5088 } 5089 /* 5090 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off == 1) && 5091 * (net->fast_retran_loss_recovery == 0))) 5092 */ 5093 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5094 if ((asoc->fast_retran_loss_recovery == 0) || (sctp_cmt_on_off == 1)) { 5095 /* out of a RFC2582 Fast recovery window? */ 5096 if (net->net_ack > 0) { 5097 /* 5098 * per section 7.2.3, are there any 5099 * destinations that had a fast retransmit 5100 * to them. If so what we need to do is 5101 * adjust ssthresh and cwnd. 5102 */ 5103 struct sctp_tmit_chunk *lchk; 5104 5105 #ifdef SCTP_HIGH_SPEED 5106 sctp_hs_cwnd_decrease(stcb, net); 5107 #else 5108 #ifdef SCTP_CWND_MONITOR 5109 int old_cwnd = net->cwnd; 5110 5111 #endif 5112 net->ssthresh = net->cwnd / 2; 5113 if (net->ssthresh < (net->mtu * 2)) { 5114 net->ssthresh = 2 * net->mtu; 5115 } 5116 net->cwnd = net->ssthresh; 5117 #ifdef SCTP_CWND_MONITOR 5118 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), 5119 SCTP_CWND_LOG_FROM_FR); 5120 #endif 5121 #endif 5122 5123 lchk = TAILQ_FIRST(&asoc->send_queue); 5124 5125 net->partial_bytes_acked = 0; 5126 /* Turn on fast recovery window */ 5127 asoc->fast_retran_loss_recovery = 1; 5128 if (lchk == NULL) { 5129 /* Mark end of the window */ 5130 asoc->fast_recovery_tsn = asoc->sending_seq - 1; 5131 } else { 5132 asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1; 5133 } 5134 5135 /* 5136 * CMT fast recovery -- per destination 5137 * recovery variable. 5138 */ 5139 net->fast_retran_loss_recovery = 1; 5140 5141 if (lchk == NULL) { 5142 /* Mark end of the window */ 5143 net->fast_recovery_tsn = asoc->sending_seq - 1; 5144 } else { 5145 net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1; 5146 } 5147 5148 5149 5150 /* 5151 * Disable Nonce Sum Checking and store the 5152 * resync tsn 5153 */ 5154 asoc->nonce_sum_check = 0; 5155 asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1; 5156 5157 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, 5158 stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32); 5159 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5160 stcb->sctp_ep, stcb, net); 5161 } 5162 } else if (net->net_ack > 0) { 5163 /* 5164 * Mark a peg that we WOULD have done a cwnd 5165 * reduction but RFC2582 prevented this action. 5166 */ 5167 SCTP_STAT_INCR(sctps_fastretransinrtt); 5168 } 5169 } 5170 5171 5172 /****************************************************************** 5173 * Here we do the stuff with ECN Nonce checking. 5174 * We basically check to see if the nonce sum flag was incorrect 5175 * or if resynchronization needs to be done. Also if we catch a 5176 * misbehaving receiver we give him the kick. 
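 * In short: the SACK's one-bit nonce sum must equal
 * (nonce_sum_expect_base + ecn_seg_sums) mod 2, the parity of the
 * ECT nonces of everything the peer claims received. A mismatch
 * that persists past nonce_wait_tsn marks the peer as misbehaving
 * and both ECN and nonce processing are turned off.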
5177 ******************************************************************/
5178
5179 if (asoc->ecn_nonce_allowed) {
5180 if (asoc->nonce_sum_check) {
5181 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
5182 if (asoc->nonce_wait_for_ecne == 0) {
5183 struct sctp_tmit_chunk *lchk;
5184
5185 lchk = TAILQ_FIRST(&asoc->send_queue);
5186 asoc->nonce_wait_for_ecne = 1;
5187 if (lchk) {
5188 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
5189 } else {
5190 asoc->nonce_wait_tsn = asoc->sending_seq;
5191 }
5192 } else {
5193 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
5194 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
5195 /*
5196 * Misbehaving peer. We need
5197 * to react to this guy
5198 */
5199 asoc->ecn_allowed = 0;
5200 asoc->ecn_nonce_allowed = 0;
5201 }
5202 }
5203 }
5204 } else {
5205 /* See if Resynchronization Possible */
5206 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
5207 asoc->nonce_sum_check = 1;
5208 /*
5209 * now we must calculate what the base is.
5210 * We do this based on two things: we know
5211 * the total for all the segments
5212 * gap-acked in the SACK; it's stored in
5213 * ecn_seg_sums. We also know the SACK's
5214 * nonce sum; it's in nonce_sum_flag. So we
5215 * can build a truth table to back-calculate
5216 * the new value of
5217 * asoc->nonce_sum_expect_base:
5218 *
5219 *   SACK-flag-Value   Seg-Sums   Base
5220 *          0              0        0
     *          1              0        1
5221 *          0              1        1
     *          1              1        0
5222 */
5223 asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
5224 }
5225 }
5226 }
5227 /* Now are we exiting loss recovery ? */
5228 if (will_exit_fast_recovery) {
5229 /* Ok, we must exit fast recovery */
5230 asoc->fast_retran_loss_recovery = 0;
5231 }
5232 if ((asoc->sat_t3_loss_recovery) &&
5233 ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
5234 MAX_TSN) ||
5235 (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
5236 /* end satellite t3 loss recovery */
5237 asoc->sat_t3_loss_recovery = 0;
5238 }
5239 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5240 if (net->will_exit_fast_recovery) {
5241 /* Ok, we must exit fast recovery */
5242 net->fast_retran_loss_recovery = 0;
5243 }
5244 }
5245
5246 /* Adjust and set the new rwnd value */
5247 #ifdef SCTP_LOG_RWND
5248 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5249 asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * sctp_peer_chunk_oh), a_rwnd);
5250 #endif
5251
5252 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5253 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
5254 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5255 /* SWS sender side engages */
5256 asoc->peers_rwnd = 0;
5257 }
5258 /*
5259 * Now we must setup so we have a timer up for anyone with
5260 * outstanding data.
5261 */
5262 again:
5263 j = 0;
5264 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5265 if (net->flight_size) {
5266 j++;
5267 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5268 stcb->sctp_ep, stcb, net);
5269 }
5270 }
5271 if ((j == 0) && (!TAILQ_EMPTY(&asoc->sent_queue)) && (asoc->sent_queue_retran_cnt == 0)) {
5272 /* huh, this should not happen */
5273 #ifdef INVARIANTS
5274 panic("Flight size incorrect? fixing??");
5275 #else
5276 printf("Flight size incorrect?
fixing??\n"); 5277 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5278 net->flight_size = 0; 5279 } 5280 asoc->total_flight = 0; 5281 asoc->total_flight_count = 0; 5282 asoc->sent_queue_retran_cnt = 0; 5283 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5284 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5285 tp1->whoTo->flight_size += tp1->book_size; 5286 asoc->total_flight += tp1->book_size; 5287 asoc->total_flight_count++; 5288 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 5289 asoc->sent_queue_retran_cnt++; 5290 } 5291 } 5292 #endif 5293 goto again; 5294 } 5295 #ifdef SCTP_SACK_RWND_LOGGING 5296 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 5297 a_rwnd, 5298 stcb->asoc.peers_rwnd, 5299 stcb->asoc.total_flight, 5300 stcb->asoc.total_output_queue_size); 5301 5302 #endif 5303 5304 } 5305 5306 void 5307 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, 5308 struct sctp_nets *netp, int *abort_flag) 5309 { 5310 /* Copy cum-ack */ 5311 uint32_t cum_ack, a_rwnd; 5312 5313 cum_ack = ntohl(cp->cumulative_tsn_ack); 5314 /* Arrange so a_rwnd does NOT change */ 5315 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight; 5316 5317 /* Now call the express sack handling */ 5318 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag); 5319 } 5320 5321 static void 5322 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, 5323 struct sctp_stream_in *strmin) 5324 { 5325 struct sctp_queued_to_read *ctl, *nctl; 5326 struct sctp_association *asoc; 5327 int tt; 5328 5329 asoc = &stcb->asoc; 5330 tt = strmin->last_sequence_delivered; 5331 /* 5332 * First deliver anything prior to and including the stream no that 5333 * came in 5334 */ 5335 ctl = TAILQ_FIRST(&strmin->inqueue); 5336 while (ctl) { 5337 nctl = TAILQ_NEXT(ctl, next); 5338 if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) || 5339 (tt == ctl->sinfo_ssn)) { 5340 /* this is deliverable now */ 5341 TAILQ_REMOVE(&strmin->inqueue, ctl, next); 5342 /* subtract pending on streams */ 5343 asoc->size_on_all_streams -= ctl->length; 5344 sctp_ucount_decr(asoc->cnt_on_all_streams); 5345 /* deliver it to at least the delivery-q */ 5346 if (stcb->sctp_socket) { 5347 sctp_add_to_readq(stcb->sctp_ep, stcb, 5348 ctl, 5349 &stcb->sctp_socket->so_rcv, 1); 5350 } 5351 } else { 5352 /* no more delivery now. */ 5353 break; 5354 } 5355 ctl = nctl; 5356 } 5357 /* 5358 * now we must deliver things in queue the normal way if any are 5359 * now ready. 5360 */ 5361 tt = strmin->last_sequence_delivered + 1; 5362 ctl = TAILQ_FIRST(&strmin->inqueue); 5363 while (ctl) { 5364 nctl = TAILQ_NEXT(ctl, next); 5365 if (tt == ctl->sinfo_ssn) { 5366 /* this is deliverable now */ 5367 TAILQ_REMOVE(&strmin->inqueue, ctl, next); 5368 /* subtract pending on streams */ 5369 asoc->size_on_all_streams -= ctl->length; 5370 sctp_ucount_decr(asoc->cnt_on_all_streams); 5371 /* deliver it to at least the delivery-q */ 5372 strmin->last_sequence_delivered = ctl->sinfo_ssn; 5373 if (stcb->sctp_socket) { 5374 sctp_add_to_readq(stcb->sctp_ep, stcb, 5375 ctl, 5376 &stcb->sctp_socket->so_rcv, 1); 5377 } 5378 tt = strmin->last_sequence_delivered + 1; 5379 } else { 5380 break; 5381 } 5382 ctl = nctl; 5383 } 5384 } 5385 5386 void 5387 sctp_handle_forward_tsn(struct sctp_tcb *stcb, 5388 struct sctp_forward_tsn_chunk *fwd, int *abort_flag) 5389 { 5390 /* 5391 * ISSUES that MUST be fixed for ECN! When we are the sender of the 5392 * forward TSN, when the SACK comes back that acknowledges the 5393 * FWD-TSN we must reset the NONCE sum to match correctly. 
This will
5394 * get quite tricky since we may have sent more data intervening
5395 * and must carefully account for what the SACK says on the nonce
5396 * and any gaps that are reported. This work will NOT be done here,
5397 * but I note it here since it is really related to PR-SCTP and
5398 * FWD-TSN's
5399 */
5400
5401 /* The pr-sctp fwd tsn */
5402 /*
5403 * here we will perform all the data receiver side steps for
5404 * processing FwdTSN, as required by the PR-SCTP draft:
5405 *
5406 * Assume we get FwdTSN(x):
5407 *
5408 * 1) update local cumTSN to x
 * 2) try to further advance cumTSN to x + others we have
5409 * 3) examine and update re-ordering queue on pr-in-streams
5410 * 4) clean up re-assembly queue
5411 * 5) Send a sack to report where we are.
5412 */
5413 struct sctp_strseq *stseq;
5414 struct sctp_association *asoc;
5415 uint32_t new_cum_tsn, gap, back_out_htsn;
5416 unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size;
5417 struct sctp_stream_in *strm;
5418 struct sctp_tmit_chunk *chk, *at;
5419
5420 cumack_set_flag = 0;
5421 asoc = &stcb->asoc;
5422 cnt_gone = 0;
5423 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5424 #ifdef SCTP_DEBUG
5425 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
5426 printf("Bad size on fwd-tsn chunk .. too small\n");
5427 }
5428 #endif
5429 return;
5430 }
5431 m_size = (stcb->asoc.mapping_array_size << 3);
5432 /*************************************************************/
5433 /* 1. Here we update local cumTSN and shift the bitmap array */
5434 /*************************************************************/
5435 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5436
5437 if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
5438 asoc->cumulative_tsn == new_cum_tsn) {
5439 /* Already got there ... */
5440 return;
5441 }
5442 back_out_htsn = asoc->highest_tsn_inside_map;
5443 if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
5444 MAX_TSN)) {
5445 asoc->highest_tsn_inside_map = new_cum_tsn;
5446 #ifdef SCTP_MAP_LOGGING
5447 sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5448 #endif
5449 }
5450 /*
5451 * now we know the new TSN is more advanced, let's find the actual
5452 * gap
5453 */
5454 if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn,
5455 MAX_TSN)) ||
5456 (new_cum_tsn == asoc->mapping_array_base_tsn)) {
5457 gap = new_cum_tsn - asoc->mapping_array_base_tsn;
5458 } else {
5459 /* try to prevent underflow here */
5460 gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
5461 }
5462
5463 if (gap > m_size) {	/* gap is unsigned, it cannot be negative */
5464 asoc->highest_tsn_inside_map = back_out_htsn;
5465 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5466 /*
5467 * out of range (of single byte chunks in the rwnd I
5468 * give out); too questionable. Better to drop it
5469 * silently.
5470 */
5471 return;
5472 }
5473 if (asoc->highest_tsn_inside_map >
5474 asoc->mapping_array_base_tsn) {
5475 gap = asoc->highest_tsn_inside_map -
5476 asoc->mapping_array_base_tsn;
5477 } else {
5478 gap = asoc->highest_tsn_inside_map +
5479 (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
5480 }
5481 cumack_set_flag = 1;
5482 }
5483 for (i = 0; i <= gap; i++) {
5484 SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
5485 }
5486 /*
5487 * Now after marking all, slide things forward, but no sack please.
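 * (The gap computed above is plain serial-number arithmetic:
 * e.g. with mapping_array_base_tsn = 0xfffffff0 and new_cum_tsn
 * = 0x10 the subtraction wraps to gap = 0x20, so mapping bits
 * 0 through 0x20 are set before the map is slid.)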
5488 */ 5489 sctp_sack_check(stcb, 0, 0, abort_flag); 5490 if (*abort_flag) 5491 return; 5492 5493 if (cumack_set_flag) { 5494 /* 5495 * fwd-tsn went outside my gap array - not a common 5496 * occurance. Do the same thing we do when a cookie-echo 5497 * arrives. 5498 */ 5499 asoc->highest_tsn_inside_map = new_cum_tsn - 1; 5500 asoc->mapping_array_base_tsn = new_cum_tsn; 5501 asoc->cumulative_tsn = asoc->highest_tsn_inside_map; 5502 #ifdef SCTP_MAP_LOGGING 5503 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 5504 #endif 5505 asoc->last_echo_tsn = asoc->highest_tsn_inside_map; 5506 } 5507 /*************************************************************/ 5508 /* 2. Clear up re-assembly queue */ 5509 /*************************************************************/ 5510 5511 /* 5512 * First service it if pd-api is up, just in case we can progress it 5513 * forward 5514 */ 5515 if (asoc->fragmented_delivery_inprogress) { 5516 sctp_service_reassembly(stcb, asoc); 5517 } 5518 if (!TAILQ_EMPTY(&asoc->reasmqueue)) { 5519 /* For each one on here see if we need to toss it */ 5520 /* 5521 * For now large messages held on the reasmqueue that are 5522 * complete will be tossed too. We could in theory do more 5523 * work to spin through and stop after dumping one msg aka 5524 * seeing the start of a new msg at the head, and call the 5525 * delivery function... to see if it can be delivered... But 5526 * for now we just dump everything on the queue. 5527 */ 5528 chk = TAILQ_FIRST(&asoc->reasmqueue); 5529 while (chk) { 5530 at = TAILQ_NEXT(chk, sctp_next); 5531 if (compare_with_wrap(asoc->cumulative_tsn, 5532 chk->rec.data.TSN_seq, MAX_TSN) || 5533 asoc->cumulative_tsn == chk->rec.data.TSN_seq) { 5534 /* It needs to be tossed */ 5535 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); 5536 if (compare_with_wrap(chk->rec.data.TSN_seq, 5537 asoc->tsn_last_delivered, MAX_TSN)) { 5538 asoc->tsn_last_delivered = 5539 chk->rec.data.TSN_seq; 5540 asoc->str_of_pdapi = 5541 chk->rec.data.stream_number; 5542 asoc->ssn_of_pdapi = 5543 chk->rec.data.stream_seq; 5544 asoc->fragment_flags = 5545 chk->rec.data.rcv_flags; 5546 } 5547 asoc->size_on_reasm_queue -= chk->send_size; 5548 sctp_ucount_decr(asoc->cnt_on_reasm_queue); 5549 cnt_gone++; 5550 5551 /* Clear up any stream problem */ 5552 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != 5553 SCTP_DATA_UNORDERED && 5554 (compare_with_wrap(chk->rec.data.stream_seq, 5555 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered, 5556 MAX_SEQ))) { 5557 /* 5558 * We must dump forward this streams 5559 * sequence number if the chunk is 5560 * not unordered that is being 5561 * skipped. There is a chance that 5562 * if the peer does not include the 5563 * last fragment in its FWD-TSN we 5564 * WILL have a problem here since 5565 * you would have a partial chunk in 5566 * queue that may not be 5567 * deliverable. Also if a Partial 5568 * delivery API as started the user 5569 * may get a partial chunk. The next 5570 * read returning a new chunk... 5571 * really ugly but I see no way 5572 * around it! Maybe a notify?? 5573 */ 5574 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = 5575 chk->rec.data.stream_seq; 5576 } 5577 if (chk->data) { 5578 sctp_m_freem(chk->data); 5579 chk->data = NULL; 5580 } 5581 sctp_free_remote_addr(chk->whoTo); 5582 sctp_free_a_chunk(stcb, chk); 5583 } else { 5584 /* 5585 * Ok we have gone beyond the end of the 5586 * fwd-tsn's mark. Some checks... 
5587 */ 5588 if ((asoc->fragmented_delivery_inprogress) && 5589 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) { 5590 /* 5591 * Special case PD-API is up and 5592 * what we fwd-tsn' over includes 5593 * one that had the LAST_FRAG. We no 5594 * longer need to do the PD-API. 5595 */ 5596 asoc->fragmented_delivery_inprogress = 0; 5597 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION, 5598 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL); 5599 5600 } 5601 break; 5602 } 5603 chk = at; 5604 } 5605 } 5606 if (asoc->fragmented_delivery_inprogress) { 5607 /* 5608 * Ok we removed cnt_gone chunks in the PD-API queue that 5609 * were being delivered. So now we must turn off the flag. 5610 */ 5611 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION, 5612 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL); 5613 asoc->fragmented_delivery_inprogress = 0; 5614 } 5615 /*************************************************************/ 5616 /* 3. Update the PR-stream re-ordering queues */ 5617 /*************************************************************/ 5618 stseq = (struct sctp_strseq *)((caddr_t)fwd + sizeof(*fwd)); 5619 fwd_sz -= sizeof(*fwd); 5620 { 5621 /* New method. */ 5622 int num_str, i; 5623 5624 num_str = fwd_sz / sizeof(struct sctp_strseq); 5625 for (i = 0; i < num_str; i++) { 5626 uint16_t st; 5627 unsigned char *xx; 5628 5629 /* Convert */ 5630 xx = (unsigned char *)&stseq[i]; 5631 st = ntohs(stseq[i].stream); 5632 stseq[i].stream = st; 5633 st = ntohs(stseq[i].sequence); 5634 stseq[i].sequence = st; 5635 /* now process */ 5636 if (stseq[i].stream > asoc->streamincnt) { 5637 /* 5638 * It is arguable if we should continue. 5639 * Since the peer sent bogus stream info we 5640 * may be in deep trouble.. a return may be 5641 * a better choice? 5642 */ 5643 continue; 5644 } 5645 strm = &asoc->strmin[stseq[i].stream]; 5646 if (compare_with_wrap(stseq[i].sequence, 5647 strm->last_sequence_delivered, MAX_SEQ)) { 5648 /* Update the sequence number */ 5649 strm->last_sequence_delivered = 5650 stseq[i].sequence; 5651 } 5652 /* now kick the stream the new way */ 5653 sctp_kick_prsctp_reorder_queue(stcb, strm); 5654 } 5655 } 5656 } 5657
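/*
 * Illustrative sketch (not compiled, hence the #if 0): the wrapped
 * TSN/SSN comparisons used throughout this file follow serial-number
 * arithmetic in the style of RFC 1982. The real compare_with_wrap()
 * is defined elsewhere in the stack; the helpers below, with
 * hypothetical names, only demonstrate the semantics that the
 * FWD-TSN gap computation above relies on.
 */
#if 0
static int
tsn_gt(uint32_t a, uint32_t b)
{
	/* "a is newer than b", tolerating 32-bit wrap */
	return ((a != b) && ((uint32_t)(a - b) < (1U << 31)));
}

static uint32_t
tsn_gap(uint32_t new_cum_tsn, uint32_t base_tsn)
{
	/* Mirrors the wrap handling in sctp_handle_forward_tsn() */
	if (tsn_gt(new_cum_tsn, base_tsn) || new_cum_tsn == base_tsn)
		return (new_cum_tsn - base_tsn);
	return (new_cum_tsn + (0xffffffffU - base_tsn) + 1);
}
/* tsn_gap(0x10, 0xfffffff0) == 0x20, so bits 0..0x20 get marked. */
#endif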