/*-
 * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ipsec.h"
#include "opt_inet6.h"
#include "opt_inet.h"

#include "opt_sctp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/route.h>

#include <sys/limits.h>
#include <machine/cpu.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet6/ip6_var.h>
#endif /* INET6 */
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netkey/key.h>
#endif /* IPSEC */

#ifdef SCTP_DEBUG
extern uint32_t sctp_debug_on;
#endif

/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is) and will be sending it ... for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */
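/*
 * Editor's note (worked example, not in the original source): the receive
 * window computed by sctp_set_rwnd()/sctp_calc_rwnd() below is roughly
 *
 *     rwnd = sbspace(so_rcv) - size_on_reasm_queue - size_on_all_streams
 *
 * e.g. 64000 bytes of socket-buffer space with 1200 bytes held on the
 * reassembly queue and 800 bytes on the stream queues advertises 62000.
 * The result is clamped to 1 when control-mbuf overhead consumes it all,
 * or when it falls below the receiver's silly-window (SWS) threshold.
 */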
extern int sctp_strict_sacks;

__inline void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc, calc_w_oh;

	/*
	 * This is really set wrong with respect to a one-to-many socket,
	 * since sb_cc is the count that everyone has put up. When we
	 * re-write sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return;

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		asoc->my_rwnd = max(stcb->sctp_socket->so_rcv.sb_hiwat,
		    SCTP_MINIMAL_RWND);
		return;
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * take out what has NOT been put on the socket queue and what we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);

	if (calc == 0) {
		/* out of space */
		asoc->my_rwnd = 0;
		return;
	}
	/* what is the overhead of all these rwnd's */
	calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	asoc->my_rwnd = calc;
	if (calc_w_oh == 0) {
		/*
		 * If our overhead is greater than the advertised rwnd, we
		 * clamp the rwnd to 1. This lets us still accept inbound
		 * segments, but hopefully will shut the sender down when he
		 * finally gets the message.
		 */
		asoc->my_rwnd = 1;
	} else {
		/* SWS threshold */
		if (asoc->my_rwnd &&
		    (asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
			/* SWS engaged, tell peer none left */
			asoc->my_rwnd = 1;
		}
	}
}
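/*
 * Editor's note: sctp_calc_rwnd() below performs the same computation as
 * sctp_set_rwnd() above but returns the value instead of storing it in
 * asoc->my_rwnd, so a caller can preview the window without side effects.
 */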
/* Calculate what the rwnd would be */
__inline uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0, calc_w_oh;

	/*
	 * This is really set wrong with respect to a one-to-many socket,
	 * since sb_cc is the count that everyone has put up. When we
	 * re-write sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(stcb->sctp_socket->so_rcv.sb_hiwat,
		    SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * take out what has NOT been put on the socket queue and what we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnd's */
	calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	if (calc_w_oh == 0) {
		/*
		 * If our overhead is greater than the advertised rwnd, we
		 * clamp the rwnd to 1. This lets us still accept inbound
		 * segments, but hopefully will shut the sender down when he
		 * finally gets the message.
		 */
		calc = 1;
	} else {
		/* SWS threshold */
		if (calc &&
		    (calc < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
			/* SWS engaged, tell peer none left */
			calc = 1;
		}
	}
	return (calc);
}

/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}

/*
 * Build out our readq entry based on an incoming chunk.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}

struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}

	ret = sctp_get_mbuf_for_msg(len,
	    1, M_DONTWAIT, 1, MT_DATA);

	if (ret == NULL) {
		/* No space */
		return (ret);
	}
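	/*
	 * Editor's note (illustrative, not in the original source): the mbuf
	 * built here carries exactly one control message:
	 *
	 *     struct cmsghdr { cmsg_len = len, cmsg_level = IPPROTO_SCTP,
	 *                      cmsg_type = SCTP_SNDRCV or SCTP_EXTRCV }
	 *     followed by a struct sctp_sndrcvinfo (or sctp_extrcvinfo),
	 *
	 * which is the layout CMSG_LEN() and CMSG_DATA() account for below.
	 */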
	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		memcpy(outinfo, sinfo, len);
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	ret->m_len = cmh->cmsg_len;
	ret->m_pkthdr.len = ret->m_len;
	return (ret);
}

/*
 * We are currently delivering from the reassembly queue. We must continue
 * to deliver until we either: 1) run out of space, 2) run out of sequential
 * TSN's, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	struct mbuf *m;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;
	struct sctp_queued_to_read *control, *ctl, *ctlat;

	cntDel = stream_no = 0;
	if (stcb && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
	    ) {
		/* socket above is long gone */
		asoc->fragmented_delivery_inprogress = 0;
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		while (chk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it is in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_remote_addr(chk->whoTo);
			sctp_free_a_chunk(stcb, chk);
			chk = TAILQ_FIRST(&asoc->reasmqueue);
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	do {
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		if (chk == NULL) {
			return;
		}
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream,
			 * and not unordered
			 */
			return;
		}
		if ((chk->data->m_flags & M_PKTHDR) == 0) {
			m = sctp_get_mbuf_for_msg(1,
			    1, M_DONTWAIT, 1, MT_DATA);
			if (m == NULL) {
				/* no room! */
				return;
			}
			m->m_pkthdr.len = chk->send_size;
			m->m_len = 0;
			m->m_next = chk->data;
			chk->data = m;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			if (chk->data->m_next == NULL) {
				/* hopefully we hit here most of the time */
				chk->data->m_flags |= M_EOR;
			} else {
				/*
				 * Add the flag to the LAST mbuf in the
				 * chain
				 */
				m = chk->data;
				while (m->m_next != NULL) {
					m = m->m_next;
				}
				m->m_flags |= M_EOR;
			}
		}
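		/*
		 * Editor's note: M_EOR marks the end of a record in the
		 * socket buffer, so soreceive() can report the message
		 * boundary to the application once the LAST fragment has
		 * been appended.
		 */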
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong: either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.control_pdapi == NULL) {
					panic("This should not happen control_pdapi NULL?");
				}
				if (stcb->asoc.control_pdapi->tail_mbuf == NULL) {
					panic("This should not happen, tail_mbuf not being maintained?");
				}
				/* if we did not panic, it was an EOM */
				panic("Bad chunking ??");
			}
			cntDel++;
		}
		/* pull it off, we delivered it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_remote_addr(chk->whoTo);
		sctp_free_a_chunk(stcb, chk);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now lets see if we can deliver the next one on
			 * the stream
			 */
			uint16_t nxt_todel;
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			ctl = TAILQ_FIRST(&strm->inqueue);
			if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
				while (ctl != NULL) {
					/* Deliver more if we can. */
					if (nxt_todel == ctl->sinfo_ssn) {
						ctlat = TAILQ_NEXT(ctl, next);
						TAILQ_REMOVE(&strm->inqueue, ctl, next);
						asoc->size_on_all_streams -= ctl->length;
						sctp_ucount_decr(asoc->cnt_on_all_streams);
						strm->last_sequence_delivered++;
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    ctl,
						    &stcb->sctp_socket->so_rcv, 1);
						ctl = ctlat;
					} else {
						break;
					}
					nxt_todel = strm->last_sequence_delivered + 1;
				}
			}
			return;
		}
		chk = TAILQ_FIRST(&asoc->reasmqueue);
	} while (chk);
}
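/*
 * Editor's note (worked example, not in the original source): in the
 * in-order path above and in sctp_queue_data_to_stream() below, if
 * last_sequence_delivered is 4 and SSNs 5, 6 and 7 sit on the stream
 * inqueue, all three are moved to so_rcv in one pass; an entry with SSN 9
 * stays queued until 8 arrives.
 */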
/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go, OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains: what do we do when the socket buffer is FULL?
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
#ifdef SCTP_STR_LOGGING
	sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
#endif
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
		printf("queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
		    (uint32_t) control->sinfo_stream,
		    (uint32_t) strm->last_sequence_delivered, (uint32_t) nxt_todel);
	}
#endif
	if (compare_with_wrap(strm->last_sequence_delivered,
	    control->sinfo_ssn, MAX_SEQ) ||
	    (strm->last_sequence_delivered == control->sinfo_ssn)) {
		/* The incoming sseq is at or behind where we last delivered */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
			    control->sinfo_ssn,
			    strm->last_sequence_delivered);
		}
#endif
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			oper->m_len = sizeof(struct sctp_paramhdr) +
			    (sizeof(uint32_t) * 3);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(oper->m_len);
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(0x00000001);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper);

		*abort_flag = 1;
		return;
	}
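	/*
	 * Editor's note (illustrative): the PROTOCOL_VIOLATION operational
	 * error built above, and repeated throughout this file, is a
	 * parameter header followed by three 32-bit words: a debug code
	 * (0x00000001 here), the offending TSN, and
	 * (stream number << 16 | stream sequence), 16 bytes in all.
	 */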
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away */
#ifdef SCTP_STR_LOGGING
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
#endif
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1);
		control = TAILQ_FIRST(&strm->inqueue);
		while (control != NULL) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				at = TAILQ_NEXT(control, next);
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
#ifdef SCTP_STR_LOGGING
				sctp_log_strm_del(control, NULL,
				    SCTP_STR_LOG_FROM_IMMED_DEL);
#endif
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1);
				control = at;
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
#ifdef SCTP_STR_LOGGING
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
#endif
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (compare_with_wrap(at->sinfo_ssn,
				    control->sinfo_ssn, MAX_SEQ)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
#ifdef SCTP_STR_LOGGING
					sctp_log_strm_del(control, at,
					    SCTP_STR_LOG_FROM_INSERT_MD);
#endif
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, he sent me a duplicate stream
					 * seq number; I guess I will just
					 * free this new guy. Should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSN's have wrapped. Maybe
					 * I should compare to TSN somehow...
					 * sigh, for now just blow away the
					 * chunk!
					 */
					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					sctp_free_remote_addr(control->whoFrom);
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
#ifdef SCTP_STR_LOGGING
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
#endif
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}
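/*
 * Editor's note (worked example, not in the original source): for
 * sctp_is_all_msg_on_reasm() below, a queue holding TSNs 10 (FIRST),
 * 11 (MIDDLE) and 12 (LAST) yields *t_size = the sum of the three
 * send_size values and a return of 1; with TSN 12 still missing, or a
 * hole at 11, it returns 0 because the message is incomplete.
 */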
/*
 * Returns two things: you get the total size of the deliverable parts of
 * the first fragmented message on the reassembly queue, and you get a 1
 * back if all of the message is ready, or a 0 back if the message is still
 * incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t *t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	while (chk) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
		chk = TAILQ_NEXT(chk, sctp_next);
	}
	return (0);
}

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize;

	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep, the first one is here and it is ok to
			 * deliver, but should we?
			 */
			if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
			    (tsize > stcb->sctp_ep->partial_delivery_point))) {
				/*
				 * Yes, we set up to start reception by
				 * backing down the TSN just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		sctp_service_reassembly(stcb, asoc);
	}
}
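/*
 * Editor's note: partial delivery (PD-API) is started above only when the
 * whole fragmented message is already queued, or when the part that is
 * queued exceeds the endpoint's partial_delivery_point, so small
 * incomplete messages wait instead of being handed up piecemeal.
 */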
/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
	u_char last_flags;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery in progress;
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
#ifdef SCTP_DEBUG
				if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
					printf("Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				}
#endif
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);

				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					oper->m_len =
					    sizeof(struct sctp_paramhdr) +
					    (sizeof(uint32_t) * 3);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(oper->m_len);
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(0x10000001);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or a
				 * MIDDLE fragment, NOT a FIRST
				 */
#ifdef SCTP_DEBUG
				if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
					printf("Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				}
#endif
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					oper->m_len =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(oper->m_len);
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(0x10000002);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Gak, Evil plot, it IS not same stream number %d vs %d\n",
						    chk->rec.data.stream_number,
						    asoc->str_of_pdapi);
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    (sizeof(uint32_t) * 3);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(0x10000003);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
				    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq !=
				    asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Gak, Evil plot, it IS not same stream seq %d vs %d\n",
						    chk->rec.data.stream_seq,
						    asoc->ssn_of_pdapi);
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(0x10000004);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(at->rec.data.TSN_seq,
		    chk->rec.data.TSN_seq, MAX_TSN)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/*
			 * Gak, he sent me a duplicate TSN; I guess I will
			 * just free this new guy. Should we abort too? FIX
			 * ME MAYBE? Or it COULD be that the SSN's have
			 * wrapped. Maybe I should compare to TSN somehow...
			 * sigh, for now just blow away the chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_remote_addr(chk->whoTo);
			sctp_free_a_chunk(stcb, chk);
			return;
		} else {
			last_flags = at->rec.data.rcv_flags;
			last_tsn = at->rec.data.TSN_seq;
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
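	/*
	 * Editor's note (summary, not in the original source): the audits
	 * below enforce fragment adjacency. If the previous TSN is a FIRST
	 * or MIDDLE fragment, this chunk must be a MIDDLE or LAST of the
	 * same stream (and, when ordered, the same stream seq); if the
	 * previous TSN is a LAST, this chunk must begin a new message as a
	 * FIRST. Mirror-image rules are applied against the next TSN.
	 */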
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok, the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Prev check - It can be a middle or last but not a first\n");
						printf("Gak, Evil plot, it's a FIRST!\n");
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(0x10000005);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, the stream numbers must be
					 * the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
						    chk->rec.data.stream_number,
						    prev->rec.data.stream_number);
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(0x10000006);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, the stream sequence numbers
					 * must be the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
						    chk->rec.data.stream_seq,
						    prev->rec.data.stream_seq);
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(0x10000007);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Prev check - Gak, evil plot, its not FIRST and it must be!\n");
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(0x10000008);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok, the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Next chk - Next is FIRST, we must be LAST\n");
						printf("Gak, Evil plot, its not a last!\n");
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(0x10000009);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG ||
			    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST, NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Next chk - Next is a MIDDLE/LAST\n");
						printf("Gak, Evil plot, new prev chunk is a LAST\n");
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(0x1000000a);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, the stream numbers must be
					 * the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
						    chk->rec.data.stream_number,
						    next->rec.data.stream_number);
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(0x1000000b);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, the stream sequence numbers
					 * must be the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
						    chk->rec.data.stream_seq,
						    next->rec.data.stream_seq);
					}
#endif
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(0x1000000c);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}
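/*
 * Editor's note (worked example, not in the original source): for the
 * routine below, if the reassembly queue holds TSN 10 marked MIDDLE, an
 * arriving TSN 11 that claims to be a complete, self-contained message
 * cannot be legal: TSN 11 would have to be the MIDDLE or LAST of the
 * message in progress, so the routine returns 1 and the caller treats
 * the chunk as bogus.
 */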
/*
 * This is an unfortunate routine. It checks to make sure an evil guy is
 * not stuffing us full of bad packet fragments. A broken peer could also
 * do this, but that is doubtful. It is too bad I must worry about evil
 * crackers, sigh :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(TSN_seq,
		    at->rec.data.TSN_seq, MAX_TSN)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok this guy belongs next to a guy
					 * that is NOT last, it should be a
					 * middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since its a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than the new chunk, but does it
			 * need to be right before it?
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, it better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}

extern unsigned int sctp_max_chunks_on_queue;
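/*
 * Editor's note (worked example, not in the original source): the mapping
 * array used below tracks received TSNs as a bitmask relative to
 * mapping_array_base_tsn, e.g. base = 0xfffffffe and tsn = 1 gives
 * gap = (MAX_TSN - base) + tsn + 1 = 3, i.e. bit 3 of the array.
 */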
static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
	/* Process a data chunk */
	struct sctp_tmit_chunk *chk;
	uint32_t tsn, gap;
	struct mbuf *dmbuf;
	int indx, the_len;
	uint16_t strmno, strmseq;
	struct mbuf *oper;
	struct sctp_queued_to_read *control;

	chk = NULL;
	tsn = ntohl(ch->dp.tsn);
#ifdef SCTP_MAP_LOGGING
	sctp_log_map(0, tsn, asoc->cumulative_tsn, SCTP_MAP_PREPARE_SLIDE);
#endif
	if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
	    asoc->cumulative_tsn == tsn) {
		/* It is a duplicate */
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		return (0);
	}
	/* Calculate the number of TSN's between the base and this TSN */
	if (tsn >= asoc->mapping_array_base_tsn) {
		gap = tsn - asoc->mapping_array_base_tsn;
	} else {
		gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1;
	}
	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
		/* Can't hold the bit in the mapping at max array, toss it */
		return (0);
	}
	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
		if (sctp_expand_mapping_array(asoc)) {
			/* Can't expand, drop it */
			return (0);
		}
	}
	if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
		*high_tsn = tsn;
	}
	/* See if we have received this one already */
	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		if (!callout_pending(&asoc->dack_timer.timer)) {
			/*
			 * By starting the timer we assure that we WILL sack
			 * at the end of the packet when sctp_sack_check
			 * gets called.
			 */
			sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep,
			    stcb, NULL);
		}
		return (0);
	}
	/*
	 * Check to see about the GONE flag; duplicates would cause a sack
	 * to be sent up above
	 */
	if (stcb && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
	    ) {
		/*
		 * wait a minute, this guy is gone, there is no longer a
		 * receiver. Send peer an ABORT!
		 */
		struct mbuf *op_err;

		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err);
		*abort_flag = 1;
		return (0);
	}
	/*
	 * Now before going further we see if there is room. If NOT then we
	 * MAY let one through only IF this TSN is the one we are waiting
	 * for on a partial delivery API.
	 */

	/* now do the tests */
	if (((asoc->cnt_on_all_streams +
	    asoc->cnt_on_reasm_queue +
	    asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) ||
	    (((int)asoc->my_rwnd) <= 0)) {
		/*
		 * When we have NO room in the rwnd we check to make sure
		 * the reader is doing its job...
		 */
		if (stcb->sctp_socket->so_rcv.sb_cc) {
			/* some to read, wake-up */
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
		}
		/* now is it in the mapping array of what we have accepted? */
		if (compare_with_wrap(tsn,
		    asoc->highest_tsn_inside_map, MAX_TSN)) {
			/* Nope, not in the valid range; dump it */
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
				printf("My rwnd overrun1:tsn:%lx rwnd %lu sbspace:%ld\n",
				    (u_long)tsn, (u_long)asoc->my_rwnd,
				    sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv));
			}
#endif
			sctp_set_rwnd(stcb, asoc);
			if ((asoc->cnt_on_all_streams +
			    asoc->cnt_on_reasm_queue +
			    asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) {
				SCTP_STAT_INCR(sctps_datadropchklmt);
			} else {
				SCTP_STAT_INCR(sctps_datadroprwnd);
			}
			indx = *break_flag;
			*break_flag = 1;
			return (0);
		}
	}
	strmno = ntohs(ch->dp.stream_id);
	if (strmno >= asoc->streamincnt) {
		struct sctp_paramhdr *phdr;
		struct mbuf *mb;

		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
		    1, M_DONTWAIT, 1, MT_DATA);
		if (mb != NULL) {
			/* add some space up front so prepend will work well */
			mb->m_data += sizeof(struct sctp_chunkhdr);
			phdr = mtod(mb, struct sctp_paramhdr *);
			/*
			 * Error causes are just param's and this one has
			 * two back to back phdr, one with the error type
			 * and size, the other with the streamid and a rsvd
			 */
			mb->m_pkthdr.len = mb->m_len =
			    (sizeof(struct sctp_paramhdr) * 2);
			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
			phdr->param_length =
			    htons(sizeof(struct sctp_paramhdr) * 2);
			phdr++;
			/* We insert the stream in the type field */
			phdr->param_type = ch->dp.stream_id;
			/* And set the length to 0 for the rsvd field */
			phdr->param_length = 0;
			sctp_queue_op_err(stcb, mb);
		}
		SCTP_STAT_INCR(sctps_badsid);
		return (0);
	}
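	/*
	 * Editor's note (illustrative): the invalid-stream error cause built
	 * above is two back-to-back parameter headers; for stream id 7 it
	 * reads { SCTP_CAUSE_INVALID_STREAM, length 8 } { 0x0007, 0x0000 },
	 * the second "header" carrying the stream id and a reserved word.
	 */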
	/*
	 * Before we continue lets validate that we are not being fooled by
	 * an evil attacker. We can only have 4k chunks based on our TSN
	 * spread allowed by the mapping array, 512 * 8 bits, so there is no
	 * way our stream sequence numbers could have wrapped. We of course
	 * only validate the FIRST fragment so the bit must be set.
	 */
	strmseq = ntohs(ch->dp.stream_sequence);
	if ((ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG) &&
	    (ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
	    (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
	    strmseq, MAX_SEQ) ||
	    asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
		/* The incoming sseq is at or behind where we last delivered */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
			    strmseq,
			    asoc->strmin[strmno].last_sequence_delivered);
		}
#endif
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			oper->m_len = sizeof(struct sctp_paramhdr) +
			    (3 * sizeof(uint32_t));
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(oper->m_len);
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(0x20000001);
			ippp++;
			*ippp = tsn;
			ippp++;
			*ippp = ((strmno << 16) | strmseq);
		}
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper);
		*abort_flag = 1;
		return (0);
	}
	the_len = (chk_length - sizeof(struct sctp_data_chunk));
	if (last_chunk == 0) {
		dmbuf = sctp_m_copym(*m,
		    (offset + sizeof(struct sctp_data_chunk)),
		    the_len, M_DONTWAIT);
#ifdef SCTP_MBUF_LOGGING
		{
			struct mbuf *mat;

			mat = dmbuf;
			while (mat) {
				if (mat->m_flags & M_EXT) {
					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
				}
				mat = mat->m_next;
			}
		}
#endif
	} else {
		/* We can steal the last chunk */
		dmbuf = *m;
		/* lop off the top part */
		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
		if (dmbuf->m_pkthdr.len > the_len) {
			/* Trim the extra bytes off the end too */
			m_adj(dmbuf, -(dmbuf->m_pkthdr.len - the_len));
		}
	}
	if (dmbuf == NULL) {
		SCTP_STAT_INCR(sctps_nomem);
		return (0);
	}
	if ((ch->ch.chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
	    asoc->fragmented_delivery_inprogress == 0 &&
	    TAILQ_EMPTY(&asoc->resetHead) &&
	    ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) ||
	    ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
		/* Candidate for express delivery */
		/*
		 * It is not fragmented, no PD-API is up, nothing is in the
		 * delivery queue, it is unordered OR ordered and the next
		 * to deliver AND nothing else is stuck on the stream
		 * queue, AND there is room for it in the socket buffer.
		 * Let's just stuff it up the buffer....
		 */

		/* It would be nice to avoid this copy if we could :< */
		sctp_alloc_a_readq(stcb, control);
		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
		    ch->dp.protocol_id,
		    stcb->asoc.context,
		    strmno, strmseq,
		    ch->ch.chunk_flags,
		    dmbuf);
		if (control == NULL) {
			goto failed_express_del;
		}
		sctp_add_to_readq(stcb->sctp_ep, stcb, control, &stcb->sctp_socket->so_rcv, 1);
		if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) {
			/* for ordered, bump what we delivered */
			asoc->strmin[strmno].last_sequence_delivered++;
		}
		SCTP_STAT_INCR(sctps_recvexpress);
#ifdef SCTP_STR_LOGGING
		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
		    SCTP_STR_LOG_FROM_EXPRS_DEL);
#endif
		control = NULL;
		goto finish_express_del;
	}
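	/*
	 * Editor's note: the block below is a second fast path. If a partial
	 * delivery is already in progress on this stream and ssn, and this
	 * chunk carries the next TSN of that message, it is appended
	 * straight to the in-progress read entry instead of going through
	 * the reassembly queue.
	 */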
failed_express_del:
	/* If we reach here this is a new chunk */
	chk = NULL;
	control = NULL;
	/* Express for fragmented delivery? */
	if ((asoc->fragmented_delivery_inprogress) &&
	    (stcb->asoc.control_pdapi) &&
	    (asoc->str_of_pdapi == strmno) &&
	    (asoc->ssn_of_pdapi == strmseq)
	    ) {
		control = stcb->asoc.control_pdapi;
		if ((ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
			/* Can't be another first? */
			goto failed_pdapi_express_del;
		}
		if (tsn == (control->sinfo_tsn + 1)) {
			/* Yep, we can add it on */
			int end = 0;
			uint32_t cumack;

			if (ch->ch.chunk_flags & SCTP_DATA_LAST_FRAG) {
				end = 1;
			}
			cumack = asoc->cumulative_tsn;
			if ((cumack + 1) == tsn)
				cumack = tsn;

			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
			    tsn,
			    &stcb->sctp_socket->so_rcv)) {
				printf("Append fails end:%d\n", end);
				goto failed_pdapi_express_del;
			}
			SCTP_STAT_INCR(sctps_recvexpressm);
			control->sinfo_tsn = tsn;
			asoc->tsn_last_delivered = tsn;
			asoc->fragment_flags = ch->ch.chunk_flags;
			asoc->tsn_of_pdapi_last_delivered = tsn;
			asoc->last_flags_delivered = ch->ch.chunk_flags;
			asoc->last_strm_seq_delivered = strmseq;
			asoc->last_strm_no_delivered = strmno;

			if (end) {
				/* clean up the flags and such */
				asoc->fragmented_delivery_inprogress = 0;
				asoc->strmin[strmno].last_sequence_delivered++;
				stcb->asoc.control_pdapi = NULL;
			}
			control = NULL;
			goto finish_express_del;
		}
	}
failed_pdapi_express_del:
	control = NULL;
	if ((ch->ch.chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
		sctp_alloc_a_chunk(stcb, chk);
		if (chk == NULL) {
			/* No memory so we drop the chunk */
			SCTP_STAT_INCR(sctps_nomem);
			if (last_chunk == 0) {
				/* we copied it, free the copy */
				sctp_m_freem(dmbuf);
			}
			return (0);
		}
		chk->rec.data.TSN_seq = tsn;
		chk->no_fr_allowed = 0;
		chk->rec.data.stream_seq = strmseq;
		chk->rec.data.stream_number = strmno;
		chk->rec.data.payloadtype = ch->dp.protocol_id;
		chk->rec.data.context = stcb->asoc.context;
		chk->rec.data.doing_fast_retransmit = 0;
		chk->rec.data.rcv_flags = ch->ch.chunk_flags;
		chk->asoc = asoc;
		chk->send_size = the_len;
		chk->whoTo = net;
		atomic_add_int(&net->ref_count, 1);
		chk->data = dmbuf;
	} else {
		sctp_alloc_a_readq(stcb, control);
		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
		    ch->dp.protocol_id,
		    stcb->asoc.context,
		    strmno, strmseq,
		    ch->ch.chunk_flags,
		    dmbuf);
		if (control == NULL) {
			/* No memory so we drop the chunk */
			SCTP_STAT_INCR(sctps_nomem);
			if (last_chunk == 0) {
				/* we copied it, free the copy */
				sctp_m_freem(dmbuf);
			}
			return (0);
		}
		control->length = the_len;
	}
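	/*
	 * Editor's note: from here on the data lives in one of two forms. A
	 * fragment keeps its sctp_tmit_chunk and is filed on the reassembly
	 * queue by TSN; a complete message has been wrapped in an
	 * sctp_queued_to_read (control) and is queued by stream/SSN or
	 * handed directly to the socket buffer.
	 */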
1878 */ 1879 uint32_t estimate_tsn; 1880 1881 estimate_tsn = asoc->tsn_last_delivered + 1; 1882 if (TAILQ_EMPTY(&asoc->reasmqueue) && 1883 (estimate_tsn == control->sinfo_tsn)) { 1884 /* Evil/Broke peer */ 1885 sctp_m_freem(control->data); 1886 control->data = NULL; 1887 sctp_free_remote_addr(control->whoFrom); 1888 sctp_free_a_readq(stcb, control); 1889 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1890 0, M_DONTWAIT, 1, MT_DATA); 1891 if (oper) { 1892 struct sctp_paramhdr *ph; 1893 uint32_t *ippp; 1894 1895 oper->m_len = 1896 sizeof(struct sctp_paramhdr) + 1897 (3 * sizeof(uint32_t)); 1898 ph = mtod(oper, struct sctp_paramhdr *); 1899 ph->param_type = 1900 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1901 ph->param_length = htons(oper->m_len); 1902 ippp = (uint32_t *) (ph + 1); 1903 *ippp = htonl(0x20000002); 1904 ippp++; 1905 *ippp = tsn; 1906 ippp++; 1907 *ippp = ((strmno << 16) | strmseq); 1908 } 1909 sctp_abort_an_association(stcb->sctp_ep, stcb, 1910 SCTP_PEER_FAULTY, oper); 1911 1912 *abort_flag = 1; 1913 return (0); 1914 } else { 1915 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) { 1916 sctp_m_freem(control->data); 1917 control->data = NULL; 1918 sctp_free_remote_addr(control->whoFrom); 1919 sctp_free_a_readq(stcb, control); 1920 1921 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1922 0, M_DONTWAIT, 1, MT_DATA); 1923 if (oper) { 1924 struct sctp_paramhdr *ph; 1925 uint32_t *ippp; 1926 1927 oper->m_len = 1928 sizeof(struct sctp_paramhdr) + 1929 (3 * sizeof(uint32_t)); 1930 ph = mtod(oper, 1931 struct sctp_paramhdr *); 1932 ph->param_type = 1933 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1934 ph->param_length = 1935 htons(oper->m_len); 1936 ippp = (uint32_t *) (ph + 1); 1937 *ippp = htonl(0x20000003); 1938 ippp++; 1939 *ippp = tsn; 1940 ippp++; 1941 *ippp = ((strmno << 16) | strmseq); 1942 } 1943 sctp_abort_an_association(stcb->sctp_ep, 1944 stcb, SCTP_PEER_FAULTY, oper); 1945 1946 *abort_flag = 1; 1947 return (0); 1948 } 1949 } 1950 } else { 1951 /* No PDAPI running */ 1952 if (!TAILQ_EMPTY(&asoc->reasmqueue)) { 1953 /* 1954 * Reassembly queue is NOT empty validate 1955 * that this tsn does not need to be in 1956 * reasembly queue. If it does then our peer 1957 * is broken or evil. 
1958 */ 1959 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) { 1960 sctp_m_freem(control->data); 1961 control->data = NULL; 1962 sctp_free_remote_addr(control->whoFrom); 1963 sctp_free_a_readq(stcb, control); 1964 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1965 0, M_DONTWAIT, 1, MT_DATA); 1966 if (oper) { 1967 struct sctp_paramhdr *ph; 1968 uint32_t *ippp; 1969 1970 oper->m_len = 1971 sizeof(struct sctp_paramhdr) + 1972 (3 * sizeof(uint32_t)); 1973 ph = mtod(oper, 1974 struct sctp_paramhdr *); 1975 ph->param_type = 1976 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1977 ph->param_length = 1978 htons(oper->m_len); 1979 ippp = (uint32_t *) (ph + 1); 1980 *ippp = htonl(0x20000004); 1981 ippp++; 1982 *ippp = tsn; 1983 ippp++; 1984 *ippp = ((strmno << 16) | strmseq); 1985 } 1986 sctp_abort_an_association(stcb->sctp_ep, 1987 stcb, SCTP_PEER_FAULTY, oper); 1988 1989 *abort_flag = 1; 1990 return (0); 1991 } 1992 } 1993 } 1994 /* ok, if we reach here we have passed the sanity checks */ 1995 if (ch->ch.chunk_flags & SCTP_DATA_UNORDERED) { 1996 /* queue directly into socket buffer */ 1997 sctp_add_to_readq(stcb->sctp_ep, stcb, 1998 control, 1999 &stcb->sctp_socket->so_rcv, 1); 2000 } else { 2001 /* 2002 * Special check for when streams are resetting. We 2003 * could be more smart about this and check the 2004 * actual stream to see if it is not being reset.. 2005 * that way we would not create a HOLB when amongst 2006 * streams being reset and those not being reset. 2007 * 2008 * We take complete messages that have a stream reset 2009 * intervening (aka the TSN is after where our 2010 * cum-ack needs to be) off and put them on a 2011 * pending_reply_queue. The reassembly ones we do 2012 * not have to worry about since they are all sorted 2013 * and proceessed by TSN order. It is only the 2014 * singletons I must worry about. 2015 */ 2016 struct sctp_stream_reset_list *liste; 2017 2018 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2019 ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)) || 2020 (tsn == ntohl(liste->tsn))) 2021 ) { 2022 /* 2023 * yep its past where we need to reset... go 2024 * ahead and queue it. 2025 */ 2026 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) { 2027 /* first one on */ 2028 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2029 } else { 2030 struct sctp_queued_to_read *ctlOn; 2031 unsigned char inserted = 0; 2032 2033 ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue); 2034 while (ctlOn) { 2035 if (compare_with_wrap(control->sinfo_tsn, 2036 ctlOn->sinfo_tsn, MAX_TSN)) { 2037 ctlOn = TAILQ_NEXT(ctlOn, next); 2038 } else { 2039 /* found it */ 2040 TAILQ_INSERT_BEFORE(ctlOn, control, next); 2041 inserted = 1; 2042 break; 2043 } 2044 } 2045 if (inserted == 0) { 2046 /* 2047 * must be put at end, use 2048 * prevP (all setup from 2049 * loop) to setup nextP. 
2050 */ 2051 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2052 } 2053 } 2054 } else { 2055 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag); 2056 if (*abort_flag) { 2057 return (0); 2058 } 2059 } 2060 } 2061 } else { 2062 /* Into the re-assembly queue */ 2063 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag); 2064 if (*abort_flag) { 2065 return (0); 2066 } 2067 } 2068 finish_express_del: 2069 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) { 2070 /* we have a new high score */ 2071 asoc->highest_tsn_inside_map = tsn; 2072 #ifdef SCTP_MAP_LOGGING 2073 sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 2074 #endif 2075 } 2076 if (tsn == (asoc->cumulative_tsn + 1)) { 2077 /* Update cum-ack */ 2078 asoc->cumulative_tsn = tsn; 2079 } 2080 if (last_chunk) { 2081 *m = NULL; 2082 } 2083 if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) { 2084 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); 2085 } else { 2086 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); 2087 } 2088 SCTP_STAT_INCR(sctps_recvdata); 2089 /* Set it present please */ 2090 #ifdef SCTP_STR_LOGGING 2091 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN); 2092 #endif 2093 #ifdef SCTP_MAP_LOGGING 2094 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2095 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); 2096 #endif 2097 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); 2098 return (1); 2099 } 2100 2101 int8_t sctp_map_lookup_tab[256] = { 2102 -1, 0, -1, 1, -1, 0, -1, 2, 2103 -1, 0, -1, 1, -1, 0, -1, 3, 2104 -1, 0, -1, 1, -1, 0, -1, 2, 2105 -1, 0, -1, 1, -1, 0, -1, 4, 2106 -1, 0, -1, 1, -1, 0, -1, 2, 2107 -1, 0, -1, 1, -1, 0, -1, 3, 2108 -1, 0, -1, 1, -1, 0, -1, 2, 2109 -1, 0, -1, 1, -1, 0, -1, 5, 2110 -1, 0, -1, 1, -1, 0, -1, 2, 2111 -1, 0, -1, 1, -1, 0, -1, 3, 2112 -1, 0, -1, 1, -1, 0, -1, 2, 2113 -1, 0, -1, 1, -1, 0, -1, 4, 2114 -1, 0, -1, 1, -1, 0, -1, 2, 2115 -1, 0, -1, 1, -1, 0, -1, 3, 2116 -1, 0, -1, 1, -1, 0, -1, 2, 2117 -1, 0, -1, 1, -1, 0, -1, 6, 2118 -1, 0, -1, 1, -1, 0, -1, 2, 2119 -1, 0, -1, 1, -1, 0, -1, 3, 2120 -1, 0, -1, 1, -1, 0, -1, 2, 2121 -1, 0, -1, 1, -1, 0, -1, 4, 2122 -1, 0, -1, 1, -1, 0, -1, 2, 2123 -1, 0, -1, 1, -1, 0, -1, 3, 2124 -1, 0, -1, 1, -1, 0, -1, 2, 2125 -1, 0, -1, 1, -1, 0, -1, 5, 2126 -1, 0, -1, 1, -1, 0, -1, 2, 2127 -1, 0, -1, 1, -1, 0, -1, 3, 2128 -1, 0, -1, 1, -1, 0, -1, 2, 2129 -1, 0, -1, 1, -1, 0, -1, 4, 2130 -1, 0, -1, 1, -1, 0, -1, 2, 2131 -1, 0, -1, 1, -1, 0, -1, 3, 2132 -1, 0, -1, 1, -1, 0, -1, 2, 2133 -1, 0, -1, 1, -1, 0, -1, 7, 2134 }; 2135 2136 2137 void 2138 sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag) 2139 { 2140 /* 2141 * Now we also need to check the mapping array in a couple of ways. 2142 * 1) Did we move the cum-ack point? 
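 */

/*
 * A worked example of the table above (illustrative only):
 * sctp_map_lookup_tab[b] is one less than the number of consecutive 1
 * bits in b counting from bit 0 (-1 when the low bit is clear); bytes
 * of 0xff are special-cased below with "at += 8".  Assuming a base TSN
 * B and mapping bytes { 0xff, 0x3f }, the scan gives
 * at = 8 + sctp_map_lookup_tab[0x3f] = 8 + 5 = 13, so TSNs B..B+13 are
 * all present and the cum-ack becomes B + 13.
 */

/*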
	 */
	struct sctp_association *asoc;
	int i, at;
	int all_ones;
	int slide_from, slide_end, lgap, distance;

#ifdef SCTP_MAP_LOGGING
	uint32_t old_cumack, old_base, old_highest;
	unsigned char aux_array[64];

#endif
	struct sctp_stream_reset_list *liste;

	asoc = &stcb->asoc;
	at = 0;

#ifdef SCTP_MAP_LOGGING
	old_cumack = asoc->cumulative_tsn;
	old_base = asoc->mapping_array_base_tsn;
	old_highest = asoc->highest_tsn_inside_map;
	if (asoc->mapping_array_size < 64)
		memcpy(aux_array, asoc->mapping_array,
		    asoc->mapping_array_size);
	else
		memcpy(aux_array, asoc->mapping_array, 64);
#endif

	/*
	 * We could probably improve this a small bit by calculating the
	 * offset of the current cum-ack as the starting point.
	 */
	all_ones = 1;
	at = 0;
	for (i = 0; i < stcb->asoc.mapping_array_size; i++) {
		if (asoc->mapping_array[i] == 0xff) {
			at += 8;
		} else {
			/* there is a 0 bit */
			all_ones = 0;
			at += sctp_map_lookup_tab[asoc->mapping_array[i]];
			break;
		}
	}
	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + at;
	/* at is one off, since the table embeds a -1 */
	at++;

	if (compare_with_wrap(asoc->cumulative_tsn,
	    asoc->highest_tsn_inside_map,
	    MAX_TSN)) {
#ifdef INVARIANTS
		panic("huh, cumack greater than high-tsn in map");
#else
		printf("huh, cumack greater than high-tsn in map - should panic?\n");
		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
#endif
	}
	if (all_ones ||
	    (asoc->cumulative_tsn == asoc->highest_tsn_inside_map && at >= 8)) {
		/* The complete array is covered by one contiguous run */
		/* highest becomes the cum-ack */
		int clr;

		asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
		/* clear the array */
		if (all_ones)
			clr = asoc->mapping_array_size;
		else {
			clr = (at >> 3) + 1;
			/*
			 * this should be the all-ones case but just in
			 * case :>
			 */
			if (clr > asoc->mapping_array_size)
				clr = asoc->mapping_array_size;
		}
		memset(asoc->mapping_array, 0, clr);
		/* base becomes one ahead of the cum-ack */
		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
#ifdef SCTP_MAP_LOGGING
		sctp_log_map(old_base, old_cumack, old_highest,
		    SCTP_MAP_PREPARE_SLIDE);
		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
		    asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED);
#endif
	} else if (at >= 8) {
		/* we can slide the mapping array down */
		/* Calculate the new byte position we can move down to */
		slide_from = at >> 3;
		/*
		 * now calculate the ceiling of the move using our highest
		 * TSN value
		 */
		if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
			lgap = asoc->highest_tsn_inside_map -
			    asoc->mapping_array_base_tsn;
		} else {
			lgap = (MAX_TSN - asoc->mapping_array_base_tsn) +
			    asoc->highest_tsn_inside_map + 1;
		}
		slide_end = lgap >> 3;
		if (slide_end < slide_from) {
			panic("impossible slide");
		}
		distance = (slide_end - slide_from) + 1;
#ifdef SCTP_MAP_LOGGING
		sctp_log_map(old_base, old_cumack, old_highest,
		    SCTP_MAP_PREPARE_SLIDE);
		sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
		    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
#endif
		if (distance + slide_from >
asoc->mapping_array_size || 2255 distance < 0) { 2256 /* 2257 * Here we do NOT slide forward the array so that 2258 * hopefully when more data comes in to fill it up 2259 * we will be able to slide it forward. Really I 2260 * don't think this should happen :-0 2261 */ 2262 2263 #ifdef SCTP_MAP_LOGGING 2264 sctp_log_map((uint32_t) distance, (uint32_t) slide_from, 2265 (uint32_t) asoc->mapping_array_size, 2266 SCTP_MAP_SLIDE_NONE); 2267 #endif 2268 } else { 2269 int ii; 2270 2271 for (ii = 0; ii < distance; ii++) { 2272 asoc->mapping_array[ii] = 2273 asoc->mapping_array[slide_from + ii]; 2274 } 2275 for (ii = distance; ii <= slide_end; ii++) { 2276 asoc->mapping_array[ii] = 0; 2277 } 2278 asoc->mapping_array_base_tsn += (slide_from << 3); 2279 #ifdef SCTP_MAP_LOGGING 2280 sctp_log_map(asoc->mapping_array_base_tsn, 2281 asoc->cumulative_tsn, asoc->highest_tsn_inside_map, 2282 SCTP_MAP_SLIDE_RESULT); 2283 #endif 2284 } 2285 } 2286 /* check the special flag for stream resets */ 2287 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2288 ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) || 2289 (asoc->cumulative_tsn == liste->tsn)) 2290 ) { 2291 /* 2292 * we have finished working through the backlogged TSN's now 2293 * time to reset streams. 1: call reset function. 2: free 2294 * pending_reply space 3: distribute any chunks in 2295 * pending_reply_queue. 2296 */ 2297 struct sctp_queued_to_read *ctl; 2298 2299 sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams); 2300 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); 2301 SCTP_FREE(liste); 2302 liste = TAILQ_FIRST(&asoc->resetHead); 2303 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2304 if (ctl && (liste == NULL)) { 2305 /* All can be removed */ 2306 while (ctl) { 2307 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 2308 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); 2309 if (*abort_flag) { 2310 return; 2311 } 2312 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2313 } 2314 } else if (ctl) { 2315 /* more than one in queue */ 2316 while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) { 2317 /* 2318 * if ctl->sinfo_tsn is <= liste->tsn we can 2319 * process it which is the NOT of 2320 * ctl->sinfo_tsn > liste->tsn 2321 */ 2322 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 2323 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); 2324 if (*abort_flag) { 2325 return; 2326 } 2327 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2328 } 2329 } 2330 /* 2331 * Now service re-assembly to pick up anything that has been 2332 * held on reassembly queue? 2333 */ 2334 sctp_deliver_reasm_check(stcb, asoc); 2335 } 2336 /* 2337 * Now we need to see if we need to queue a sack or just start the 2338 * timer (if allowed). 2339 */ 2340 if (ok_to_sack) { 2341 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2342 /* 2343 * Ok special case, in SHUTDOWN-SENT case. here we 2344 * maker sure SACK timer is off and instead send a 2345 * SHUTDOWN and a SACK 2346 */ 2347 if (callout_pending(&stcb->asoc.dack_timer.timer)) { 2348 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 2349 stcb->sctp_ep, stcb, NULL); 2350 } 2351 sctp_send_shutdown(stcb, stcb->asoc.primary_destination); 2352 sctp_send_sack(stcb); 2353 } else { 2354 int is_a_gap; 2355 2356 /* is there a gap now ? 
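 */

			/*
			 * A sketch of the decision below (restating the
			 * conditions, not new behavior): a SACK is built
			 * immediately when this is the very first SACK,
			 * when a gap just closed or is still open, when
			 * duplicate TSNs were reported, when delayed acks
			 * are disabled, or when the delayed-ack timer is
			 * already pending (i.e. this is the second
			 * packet).  Otherwise the delayed-ack timer is
			 * started.  The CMT DAC clause is the one
			 * exception: it delays acks whose only trigger was
			 * a gap report.
			 */

			/*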
*/ 2357 is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map, 2358 stcb->asoc.cumulative_tsn, MAX_TSN); 2359 2360 /* 2361 * CMT DAC algorithm: increase number of packets 2362 * received since last ack 2363 */ 2364 stcb->asoc.cmt_dac_pkts_rcvd++; 2365 2366 if ((stcb->asoc.first_ack_sent == 0) || /* First time we send a 2367 * sack */ 2368 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no 2369 * longer is one */ 2370 (stcb->asoc.numduptsns) || /* we have dup's */ 2371 (is_a_gap) || /* is still a gap */ 2372 (stcb->asoc.delayed_ack == 0) || 2373 (callout_pending(&stcb->asoc.dack_timer.timer)) /* timer was up . second 2374 * packet */ 2375 ) { 2376 2377 if ((sctp_cmt_on_off) && (sctp_cmt_use_dac) && 2378 (stcb->asoc.first_ack_sent == 1) && 2379 (stcb->asoc.numduptsns == 0) && 2380 (stcb->asoc.delayed_ack) && 2381 (!callout_pending(&stcb->asoc.dack_timer.timer))) { 2382 2383 /* 2384 * CMT DAC algorithm: With CMT, 2385 * delay acks even in the face of 2386 * 2387 * reordering. Therefore, if acks that 2388 * do not have to be sent because of 2389 * the above reasons, will be 2390 * delayed. That is, acks that would 2391 * have been sent due to gap reports 2392 * will be delayed with DAC. Start 2393 * the delayed ack timer. 2394 */ 2395 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2396 stcb->sctp_ep, stcb, NULL); 2397 } else { 2398 /* 2399 * Ok we must build a SACK since the 2400 * timer is pending, we got our 2401 * first packet OR there are gaps or 2402 * duplicates. 2403 */ 2404 stcb->asoc.first_ack_sent = 1; 2405 2406 sctp_send_sack(stcb); 2407 /* The sending will stop the timer */ 2408 } 2409 } else { 2410 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2411 stcb->sctp_ep, stcb, NULL); 2412 } 2413 } 2414 } 2415 } 2416 2417 void 2418 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc) 2419 { 2420 struct sctp_tmit_chunk *chk; 2421 uint32_t tsize; 2422 uint16_t nxt_todel; 2423 2424 if (asoc->fragmented_delivery_inprogress) { 2425 sctp_service_reassembly(stcb, asoc); 2426 } 2427 /* Can we proceed further, i.e. the PD-API is complete */ 2428 if (asoc->fragmented_delivery_inprogress) { 2429 /* no */ 2430 return; 2431 } 2432 /* 2433 * Now is there some other chunk I can deliver from the reassembly 2434 * queue. 2435 */ 2436 chk = TAILQ_FIRST(&asoc->reasmqueue); 2437 if (chk == NULL) { 2438 asoc->size_on_reasm_queue = 0; 2439 asoc->cnt_on_reasm_queue = 0; 2440 return; 2441 } 2442 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1; 2443 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) && 2444 ((nxt_todel == chk->rec.data.stream_seq) || 2445 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) { 2446 /* 2447 * Yep the first one is here. We setup to start reception, 2448 * by backing down the TSN just in case we can't deliver. 2449 */ 2450 2451 /* 2452 * Before we start though either all of the message should 2453 * be here or 1/4 the socket buffer max or nothing on the 2454 * delivery queue and something can be delivered. 
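 */

		/*
		 * In other words (an illustrative reading of the test
		 * below): with partial_delivery_point at, say, a quarter
		 * of a 64KB receive buffer, the PD-API starts either when
		 * the whole message is already on the reassembly queue or
		 * once at least 16KB of it has accumulated.
		 */

		/*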
2455 */ 2456 if ((sctp_is_all_msg_on_reasm(asoc, &tsize) || 2457 (tsize > stcb->sctp_ep->partial_delivery_point))) { 2458 asoc->fragmented_delivery_inprogress = 1; 2459 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1; 2460 asoc->str_of_pdapi = chk->rec.data.stream_number; 2461 asoc->ssn_of_pdapi = chk->rec.data.stream_seq; 2462 asoc->pdapi_ppid = chk->rec.data.payloadtype; 2463 asoc->fragment_flags = chk->rec.data.rcv_flags; 2464 sctp_service_reassembly(stcb, asoc); 2465 } 2466 } 2467 } 2468 2469 extern int sctp_strict_data_order; 2470 2471 int 2472 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, 2473 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2474 struct sctp_nets *net, uint32_t * high_tsn) 2475 { 2476 struct sctp_data_chunk *ch, chunk_buf; 2477 struct sctp_association *asoc; 2478 int num_chunks = 0; /* number of control chunks processed */ 2479 int stop_proc = 0; 2480 int chk_length, break_flag, last_chunk; 2481 int abort_flag = 0, was_a_gap = 0; 2482 struct mbuf *m; 2483 2484 /* set the rwnd */ 2485 sctp_set_rwnd(stcb, &stcb->asoc); 2486 2487 m = *mm; 2488 SCTP_TCB_LOCK_ASSERT(stcb); 2489 asoc = &stcb->asoc; 2490 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 2491 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 2492 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 2493 /* 2494 * wait a minute, this guy is gone, there is no longer a 2495 * receiver. Send peer an ABORT! 2496 */ 2497 struct mbuf *op_err; 2498 2499 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 2500 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err); 2501 return (2); 2502 } 2503 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map, 2504 stcb->asoc.cumulative_tsn, MAX_TSN)) { 2505 /* there was a gap before this data was processed */ 2506 was_a_gap = 1; 2507 } 2508 /* 2509 * setup where we got the last DATA packet from for any SACK that 2510 * may need to go out. Don't bump the net. This is done ONLY when a 2511 * chunk is assigned. 2512 */ 2513 asoc->last_data_chunk_from = net; 2514 2515 /* 2516 * Now before we proceed we must figure out if this is a wasted 2517 * cluster... i.e. it is a small packet sent in and yet the driver 2518 * underneath allocated a full cluster for it. If so we must copy it 2519 * to a smaller mbuf and free up the cluster mbuf. This will help 2520 * with cluster starvation. 2521 */ 2522 if (m->m_len < (long)MHLEN && m->m_next == NULL) { 2523 /* we only handle mbufs that are singletons.. not chains */ 2524 m = sctp_get_mbuf_for_msg(m->m_len, 1, M_DONTWAIT, 1, MT_DATA); 2525 if (m) { 2526 /* ok lets see if we can copy the data up */ 2527 caddr_t *from, *to; 2528 2529 if ((*mm)->m_flags & M_PKTHDR) { 2530 /* got to copy the header first */ 2531 M_MOVE_PKTHDR(m, (*mm)); 2532 } 2533 /* get the pointers and copy */ 2534 to = mtod(m, caddr_t *); 2535 from = mtod((*mm), caddr_t *); 2536 memcpy(to, from, (*mm)->m_len); 2537 /* copy the length and free up the old */ 2538 m->m_len = (*mm)->m_len; 2539 sctp_m_freem(*mm); 2540 /* sucess, back copy */ 2541 *mm = m; 2542 } else { 2543 /* We are in trouble in the mbuf world .. yikes */ 2544 m = *mm; 2545 } 2546 } 2547 /* get pointer to the first chunk header */ 2548 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset, 2549 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf); 2550 if (ch == NULL) { 2551 return (1); 2552 } 2553 /* 2554 * process all DATA chunks... 
	 */
	*high_tsn = asoc->cumulative_tsn;
	break_flag = 0;
	while (stop_proc == 0) {
		/* validate chunk length */
		chk_length = ntohs(ch->ch.chunk_length);
		if (length - *offset < chk_length) {
			/* all done, mutilated chunk */
			stop_proc = 1;
			break;
		}
		if (ch->ch.chunk_type == SCTP_DATA) {
			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
				/*
				 * Need to send an abort since we had an
				 * invalid data chunk.
				 */
				struct mbuf *op_err;

				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);

				if (op_err) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					op_err->m_len = sizeof(struct sctp_paramhdr) +
					    (2 * sizeof(uint32_t));
					ph = mtod(op_err, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(op_err->m_len);
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(0x30000001);
					ippp++;
					*ippp = asoc->cumulative_tsn;

				}
				sctp_abort_association(inp, stcb, m, iphlen, sh,
				    op_err);
				return (2);
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_audit_log(0xB1, 0);
#endif
			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
				last_chunk = 1;
			} else {
				last_chunk = 0;
			}
			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
			    chk_length, net, high_tsn, &abort_flag, &break_flag,
			    last_chunk)) {
				num_chunks++;
			}
			if (abort_flag)
				return (2);

			if (break_flag) {
				/*
				 * Set because of out of rwnd space and no
				 * drop rep space left.
				 */
				stop_proc = 1;
				break;
			}
		} else {
			/* not a data chunk in the data region */
			switch (ch->ch.chunk_type) {
			case SCTP_INITIATION:
			case SCTP_INITIATION_ACK:
			case SCTP_SELECTIVE_ACK:
			case SCTP_HEARTBEAT_REQUEST:
			case SCTP_HEARTBEAT_ACK:
			case SCTP_ABORT_ASSOCIATION:
			case SCTP_SHUTDOWN:
			case SCTP_SHUTDOWN_ACK:
			case SCTP_OPERATION_ERROR:
			case SCTP_COOKIE_ECHO:
			case SCTP_COOKIE_ACK:
			case SCTP_ECN_ECHO:
			case SCTP_ECN_CWR:
			case SCTP_SHUTDOWN_COMPLETE:
			case SCTP_AUTHENTICATION:
			case SCTP_ASCONF_ACK:
			case SCTP_PACKET_DROPPED:
			case SCTP_STREAM_RESET:
			case SCTP_FORWARD_CUM_TSN:
			case SCTP_ASCONF:
				/*
				 * Now, what do we do with KNOWN chunks that
				 * are NOT in the right place?
				 *
				 * For now, I do nothing but ignore them. We
				 * may later want to add sysctl stuff to
				 * switch out and do either an ABORT() or
				 * possibly process them.
				 */
				if (sctp_strict_data_order) {
					struct mbuf *op_err;

					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
					sctp_abort_association(inp, stcb, m, iphlen, sh, op_err);
					return (2);
				}
				break;
			default:
				/* unknown chunk type, use bit rules */
				if (ch->ch.chunk_type & 0x40) {
					/* Add an error report to the queue */
					struct mbuf *mm;
					struct sctp_paramhdr *phd;

					mm = sctp_get_mbuf_for_msg(sizeof(*phd), 1, M_DONTWAIT, 1, MT_DATA);
					if (mm) {
						phd = mtod(mm, struct sctp_paramhdr *);
						/*
						 * We cheat and use the param
						 * type since we did not
						 * bother to define an error
						 * cause struct: they share
						 * the same basic format
						 * under different names.
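 */

						/*
						 * For reference, the top two
						 * bits of an unrecognized
						 * chunk type encode the
						 * required action (RFC 2960,
						 * section 3.2):
						 *
						 * 00 - stop, discard silently
						 * 01 (0x40) - stop and report
						 *      in an ERROR chunk
						 * 10 (0x80) - skip, continue
						 * 11 (0xc0) - skip, continue
						 *      and report
						 *
						 * which is exactly what the
						 * 0x40/0x80 tests here do.
						 */

						/*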
2678 */ 2679 phd->param_type = 2680 htons(SCTP_CAUSE_UNRECOG_CHUNK); 2681 phd->param_length = 2682 htons(chk_length + sizeof(*phd)); 2683 mm->m_len = sizeof(*phd); 2684 mm->m_next = sctp_m_copym(m, *offset, 2685 SCTP_SIZE32(chk_length), 2686 M_DONTWAIT); 2687 if (mm->m_next) { 2688 mm->m_pkthdr.len = 2689 SCTP_SIZE32(chk_length) + 2690 sizeof(*phd); 2691 sctp_queue_op_err(stcb, mm); 2692 } else { 2693 sctp_m_freem(mm); 2694 } 2695 } 2696 } 2697 if ((ch->ch.chunk_type & 0x80) == 0) { 2698 /* discard the rest of this packet */ 2699 stop_proc = 1; 2700 } /* else skip this bad chunk and 2701 * continue... */ 2702 break; 2703 }; /* switch of chunk type */ 2704 } 2705 *offset += SCTP_SIZE32(chk_length); 2706 if ((*offset >= length) || stop_proc) { 2707 /* no more data left in the mbuf chain */ 2708 stop_proc = 1; 2709 continue; 2710 } 2711 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset, 2712 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf); 2713 if (ch == NULL) { 2714 *offset = length; 2715 stop_proc = 1; 2716 break; 2717 2718 } 2719 } /* while */ 2720 if (break_flag) { 2721 /* 2722 * we need to report rwnd overrun drops. 2723 */ 2724 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0); 2725 } 2726 if (num_chunks) { 2727 /* 2728 * Did we get data, if so update the time for auto-close and 2729 * give peer credit for being alive. 2730 */ 2731 SCTP_STAT_INCR(sctps_recvpktwithdata); 2732 stcb->asoc.overall_error_count = 0; 2733 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd); 2734 } 2735 /* now service all of the reassm queue if needed */ 2736 if (!(TAILQ_EMPTY(&asoc->reasmqueue))) 2737 sctp_service_queues(stcb, asoc); 2738 2739 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2740 /* 2741 * Assure that we ack right away by making sure that a d-ack 2742 * timer is running. So the sack_check will send a sack. 2743 */ 2744 sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, 2745 net); 2746 } 2747 /* Start a sack timer or QUEUE a SACK for sending */ 2748 if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) && 2749 (stcb->asoc.first_ack_sent)) { 2750 /* Everything is in order */ 2751 if (stcb->asoc.mapping_array[0] == 0xff) { 2752 /* need to do the slide */ 2753 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag); 2754 } else { 2755 if (callout_pending(&stcb->asoc.dack_timer.timer)) { 2756 stcb->asoc.first_ack_sent = 1; 2757 callout_stop(&stcb->asoc.dack_timer.timer); 2758 sctp_send_sack(stcb); 2759 } else { 2760 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2761 stcb->sctp_ep, stcb, NULL); 2762 } 2763 } 2764 } else { 2765 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag); 2766 } 2767 if (abort_flag) 2768 return (2); 2769 2770 return (0); 2771 } 2772 2773 static void 2774 sctp_handle_segments(struct sctp_tcb *stcb, struct sctp_association *asoc, 2775 struct sctp_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked, 2776 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack, int num_seg, int *ecn_seg_sums) 2777 { 2778 /************************************************/ 2779 /* process fragments and update sendqueue */ 2780 /************************************************/ 2781 struct sctp_sack *sack; 2782 struct sctp_gap_ack_block *frag; 2783 struct sctp_tmit_chunk *tp1; 2784 int i; 2785 unsigned int j; 2786 2787 #ifdef SCTP_FR_LOGGING 2788 int num_frs = 0; 2789 2790 #endif 2791 uint16_t frag_strt, frag_end, primary_flag_set; 2792 u_long last_frag_high; 2793 2794 /* 2795 * @@@ JRI : TODO: This flag is not used anywhere .. remove? 
2796 */ 2797 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) { 2798 primary_flag_set = 1; 2799 } else { 2800 primary_flag_set = 0; 2801 } 2802 2803 sack = &ch->sack; 2804 frag = (struct sctp_gap_ack_block *)((caddr_t)sack + 2805 sizeof(struct sctp_sack)); 2806 tp1 = NULL; 2807 last_frag_high = 0; 2808 for (i = 0; i < num_seg; i++) { 2809 frag_strt = ntohs(frag->start); 2810 frag_end = ntohs(frag->end); 2811 /* some sanity checks on the fargment offsets */ 2812 if (frag_strt > frag_end) { 2813 /* this one is malformed, skip */ 2814 frag++; 2815 continue; 2816 } 2817 if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked, 2818 MAX_TSN)) 2819 *biggest_tsn_acked = frag_end + last_tsn; 2820 2821 /* mark acked dgs and find out the highestTSN being acked */ 2822 if (tp1 == NULL) { 2823 tp1 = TAILQ_FIRST(&asoc->sent_queue); 2824 2825 /* save the locations of the last frags */ 2826 last_frag_high = frag_end + last_tsn; 2827 } else { 2828 /* 2829 * now lets see if we need to reset the queue due to 2830 * a out-of-order SACK fragment 2831 */ 2832 if (compare_with_wrap(frag_strt + last_tsn, 2833 last_frag_high, MAX_TSN)) { 2834 /* 2835 * if the new frag starts after the last TSN 2836 * frag covered, we are ok and this one is 2837 * beyond the last one 2838 */ 2839 ; 2840 } else { 2841 /* 2842 * ok, they have reset us, so we need to 2843 * reset the queue this will cause extra 2844 * hunting but hey, they chose the 2845 * performance hit when they failed to order 2846 * there gaps.. 2847 */ 2848 tp1 = TAILQ_FIRST(&asoc->sent_queue); 2849 } 2850 last_frag_high = frag_end + last_tsn; 2851 } 2852 for (j = frag_strt + last_tsn; j <= frag_end + last_tsn; j++) { 2853 while (tp1) { 2854 #ifdef SCTP_FR_LOGGING 2855 if (tp1->rec.data.doing_fast_retransmit) 2856 num_frs++; 2857 #endif 2858 2859 /* 2860 * CMT: CUCv2 algorithm. For each TSN being 2861 * processed from the sent queue, track the 2862 * next expected pseudo-cumack, or 2863 * rtx_pseudo_cumack, if required. Separate 2864 * cumack trackers for first transmissions, 2865 * and retransmissions. 2866 */ 2867 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) && 2868 (tp1->snd_count == 1)) { 2869 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq; 2870 tp1->whoTo->find_pseudo_cumack = 0; 2871 } 2872 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) && 2873 (tp1->snd_count > 1)) { 2874 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq; 2875 tp1->whoTo->find_rtx_pseudo_cumack = 0; 2876 } 2877 if (tp1->rec.data.TSN_seq == j) { 2878 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 2879 /* 2880 * must be held until 2881 * cum-ack passes 2882 */ 2883 /* 2884 * ECN Nonce: Add the nonce 2885 * value to the sender's 2886 * nonce sum 2887 */ 2888 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 2889 /* 2890 * If it is less 2891 * than ACKED, it is 2892 * now no-longer in 2893 * flight. Higher 2894 * values may 2895 * already be set 2896 * via previous Gap 2897 * Ack Blocks... 2898 * i.e. ACKED or 2899 * MARKED. 2900 */ 2901 if (compare_with_wrap(tp1->rec.data.TSN_seq, 2902 *biggest_newly_acked_tsn, MAX_TSN)) { 2903 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq; 2904 } 2905 /* 2906 * CMT: SFR algo 2907 * (and HTNA) - set 2908 * saw_newack to 1 2909 * for dest being 2910 * newly acked. 2911 * update 2912 * this_sack_highest_ 2913 * n ewack if 2914 * appropriate. 
							 */
							if (tp1->rec.data.chunk_was_revoked == 0)
								tp1->whoTo->saw_newack = 1;

							if (compare_with_wrap(tp1->rec.data.TSN_seq,
							    tp1->whoTo->this_sack_highest_newack,
							    MAX_TSN)) {
								tp1->whoTo->this_sack_highest_newack =
								    tp1->rec.data.TSN_seq;
							}
							/*
							 * CMT DAC algo: also
							 * update
							 * this_sack_lowest_newack
							 */
							if (*this_sack_lowest_newack == 0) {
#ifdef SCTP_SACK_LOGGING
								sctp_log_sack(*this_sack_lowest_newack,
								    last_tsn,
								    tp1->rec.data.TSN_seq,
								    0,
								    0,
								    SCTP_LOG_TSN_ACKED);
#endif
								*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
							}
							/*
							 * CMT: CUCv2
							 * algorithm. If the
							 * (rtx-)pseudo-cumack
							 * for the
							 * corresponding dest
							 * is being acked, we
							 * have a new
							 * (rtx-)pseudo-cumack:
							 * set
							 * new_(rtx_)pseudo_cumack
							 * to TRUE so that the
							 * cwnd for this dest
							 * can be updated, and
							 * trigger the search
							 * for the next
							 * expected
							 * (rtx-)pseudo-cumack.
							 * Separate
							 * pseudo_cumack
							 * trackers are kept
							 * for first
							 * transmissions and
							 * retransmissions.
							 */
							if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
								if (tp1->rec.data.chunk_was_revoked == 0) {
									tp1->whoTo->new_pseudo_cumack = 1;
								}
								tp1->whoTo->find_pseudo_cumack = 1;
							}
#ifdef SCTP_CWND_LOGGING
							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
#endif
							if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
								if (tp1->rec.data.chunk_was_revoked == 0) {
									tp1->whoTo->new_pseudo_cumack = 1;
								}
								tp1->whoTo->find_rtx_pseudo_cumack = 1;
							}
#ifdef SCTP_SACK_LOGGING
							sctp_log_sack(*biggest_newly_acked_tsn,
							    last_tsn,
							    tp1->rec.data.TSN_seq,
							    frag_strt,
							    frag_end,
							    SCTP_LOG_TSN_ACKED);
#endif

							if (tp1->rec.data.chunk_was_revoked == 0) {
								/*
								 * Revoked
								 * chunks don't
								 * count, since
								 * we
								 * previously
								 * pulled them
								 * from the
								 * flight size.
								 */
								if (tp1->whoTo->flight_size >= tp1->book_size)
									tp1->whoTo->flight_size -= tp1->book_size;
								else
									tp1->whoTo->flight_size = 0;
								if (asoc->total_flight >= tp1->book_size) {
									asoc->total_flight -= tp1->book_size;
									if (asoc->total_flight_count > 0)
										asoc->total_flight_count--;
								} else {
									asoc->total_flight = 0;
									asoc->total_flight_count = 0;
								}

								tp1->whoTo->net_ack += tp1->send_size;

								if (tp1->snd_count < 2) {
									/*
									 * True
									 * non-retransmitted
									 * chunk
									 */
									tp1->whoTo->net_ack2 += tp1->send_size;

									/*
									 * update
									 * RTO
									 * too?
*/ 3049 if (tp1->do_rtt) { 3050 tp1->whoTo->RTO = 3051 sctp_calculate_rto(stcb, 3052 asoc, 3053 tp1->whoTo, 3054 &tp1->sent_rcv_time); 3055 tp1->whoTo->rto_pending = 0; 3056 tp1->do_rtt = 0; 3057 } 3058 } 3059 } 3060 } 3061 if (tp1->sent <= SCTP_DATAGRAM_RESEND && 3062 tp1->sent != SCTP_DATAGRAM_UNSENT && 3063 compare_with_wrap(tp1->rec.data.TSN_seq, 3064 asoc->this_sack_highest_gap, 3065 MAX_TSN)) { 3066 asoc->this_sack_highest_gap = 3067 tp1->rec.data.TSN_seq; 3068 } 3069 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3070 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 3071 #ifdef SCTP_AUDITING_ENABLED 3072 sctp_audit_log(0xB2, 3073 (asoc->sent_queue_retran_cnt & 0x000000ff)); 3074 #endif 3075 3076 } 3077 (*ecn_seg_sums) += tp1->rec.data.ect_nonce; 3078 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM; 3079 3080 tp1->sent = SCTP_DATAGRAM_MARKED; 3081 } 3082 break; 3083 } /* if (tp1->TSN_seq == j) */ 3084 if (compare_with_wrap(tp1->rec.data.TSN_seq, j, 3085 MAX_TSN)) 3086 break; 3087 3088 tp1 = TAILQ_NEXT(tp1, sctp_next); 3089 } /* end while (tp1) */ 3090 } /* end for (j = fragStart */ 3091 frag++; /* next one */ 3092 } 3093 #ifdef SCTP_FR_LOGGING 3094 /* 3095 * if (num_frs) sctp_log_fr(*biggest_tsn_acked, 3096 * *biggest_newly_acked_tsn, last_tsn, SCTP_FR_LOG_BIGGEST_TSNS); 3097 */ 3098 #endif 3099 } 3100 3101 static void 3102 sctp_check_for_revoked(struct sctp_association *asoc, uint32_t cumack, 3103 u_long biggest_tsn_acked) 3104 { 3105 struct sctp_tmit_chunk *tp1; 3106 int tot_revoked = 0; 3107 3108 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3109 while (tp1) { 3110 if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack, 3111 MAX_TSN)) { 3112 /* 3113 * ok this guy is either ACK or MARKED. If it is 3114 * ACKED it has been previously acked but not this 3115 * time i.e. revoked. If it is MARKED it was ACK'ed 3116 * again. 3117 */ 3118 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 3119 /* it has been revoked */ 3120 /* 3121 * We do NOT add back to flight size here 3122 * since it is really NOT in flight. Resend 3123 * (when/if it occurs will add to flight 3124 * size 3125 */ 3126 tp1->sent = SCTP_DATAGRAM_SENT; 3127 tp1->rec.data.chunk_was_revoked = 1; 3128 tot_revoked++; 3129 #ifdef SCTP_SACK_LOGGING 3130 sctp_log_sack(asoc->last_acked_seq, 3131 cumack, 3132 tp1->rec.data.TSN_seq, 3133 0, 3134 0, 3135 SCTP_LOG_TSN_REVOKED); 3136 #endif 3137 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) { 3138 /* it has been re-acked in this SACK */ 3139 tp1->sent = SCTP_DATAGRAM_ACKED; 3140 } 3141 } 3142 if (tp1->sent == SCTP_DATAGRAM_UNSENT) 3143 break; 3144 tp1 = TAILQ_NEXT(tp1, sctp_next); 3145 } 3146 if (tot_revoked > 0) { 3147 /* 3148 * Setup the ecn nonce re-sync point. We do this since once 3149 * data is revoked we begin to retransmit things, which do 3150 * NOT have the ECN bits set. This means we are now out of 3151 * sync and must wait until we get back in sync with the 3152 * peer to check ECN bits. 
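 */

	/*
	 * Put differently (a restatement, not new logic): a TSN that an
	 * earlier SACK gap-acked but that is absent from this one has
	 * been "revoked" by the peer; it was flipped back to SENT above
	 * and forces the nonce re-sync point chosen here.
	 */

	/*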
3153 */ 3154 tp1 = TAILQ_FIRST(&asoc->send_queue); 3155 if (tp1 == NULL) { 3156 asoc->nonce_resync_tsn = asoc->sending_seq; 3157 } else { 3158 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq; 3159 } 3160 asoc->nonce_wait_for_ecne = 0; 3161 asoc->nonce_sum_check = 0; 3162 } 3163 } 3164 3165 extern int sctp_peer_chunk_oh; 3166 3167 static void 3168 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, 3169 u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved) 3170 { 3171 struct sctp_tmit_chunk *tp1; 3172 int strike_flag = 0; 3173 struct timeval now; 3174 int tot_retrans = 0; 3175 uint32_t sending_seq; 3176 struct sctp_nets *net; 3177 int num_dests_sacked = 0; 3178 3179 /* 3180 * select the sending_seq, this is either the next thing ready to be 3181 * sent but not transmitted, OR, the next seq we assign. 3182 */ 3183 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 3184 if (tp1 == NULL) { 3185 sending_seq = asoc->sending_seq; 3186 } else { 3187 sending_seq = tp1->rec.data.TSN_seq; 3188 } 3189 3190 /* CMT DAC algo: finding out if SACK is a mixed SACK */ 3191 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3192 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3193 if (net->saw_newack) 3194 num_dests_sacked++; 3195 } 3196 } 3197 if (stcb->asoc.peer_supports_prsctp) { 3198 SCTP_GETTIME_TIMEVAL(&now); 3199 } 3200 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3201 while (tp1) { 3202 strike_flag = 0; 3203 if (tp1->no_fr_allowed) { 3204 /* this one had a timeout or something */ 3205 tp1 = TAILQ_NEXT(tp1, sctp_next); 3206 continue; 3207 } 3208 #ifdef SCTP_FR_LOGGING 3209 if (tp1->sent < SCTP_DATAGRAM_RESEND) 3210 sctp_log_fr(biggest_tsn_newly_acked, 3211 tp1->rec.data.TSN_seq, 3212 tp1->sent, 3213 SCTP_FR_LOG_CHECK_STRIKE); 3214 #endif 3215 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked, 3216 MAX_TSN) || 3217 tp1->sent == SCTP_DATAGRAM_UNSENT) { 3218 /* done */ 3219 break; 3220 } 3221 if (stcb->asoc.peer_supports_prsctp) { 3222 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3223 /* Is it expired? */ 3224 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3225 /* Yes so drop it */ 3226 if (tp1->data != NULL) { 3227 sctp_release_pr_sctp_chunk(stcb, tp1, 3228 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3229 &asoc->sent_queue); 3230 } 3231 tp1 = TAILQ_NEXT(tp1, sctp_next); 3232 continue; 3233 } 3234 } 3235 if ((PR_SCTP_RTX_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3236 /* Has it been retransmitted tv_sec times? */ 3237 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) { 3238 /* Yes, so drop it */ 3239 if (tp1->data != NULL) { 3240 sctp_release_pr_sctp_chunk(stcb, tp1, 3241 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3242 &asoc->sent_queue); 3243 } 3244 tp1 = TAILQ_NEXT(tp1, sctp_next); 3245 continue; 3246 } 3247 } 3248 } 3249 if (compare_with_wrap(tp1->rec.data.TSN_seq, 3250 asoc->this_sack_highest_gap, MAX_TSN)) { 3251 /* we are beyond the tsn in the sack */ 3252 break; 3253 } 3254 if (tp1->sent >= SCTP_DATAGRAM_RESEND) { 3255 /* either a RESEND, ACKED, or MARKED */ 3256 /* skip */ 3257 tp1 = TAILQ_NEXT(tp1, sctp_next); 3258 continue; 3259 } 3260 /* 3261 * CMT : SFR algo (covers part of DAC and HTNA as well) 3262 */ 3263 if (tp1->whoTo->saw_newack == 0) { 3264 /* 3265 * No new acks were receieved for data sent to this 3266 * dest. Therefore, according to the SFR algo for 3267 * CMT, no data sent to this dest can be marked for 3268 * FR using this SACK. 
(iyengar@cis.udel.edu, 3269 * 2005/05/12) 3270 */ 3271 tp1 = TAILQ_NEXT(tp1, sctp_next); 3272 continue; 3273 } else if (compare_with_wrap(tp1->rec.data.TSN_seq, 3274 tp1->whoTo->this_sack_highest_newack, MAX_TSN)) { 3275 /* 3276 * CMT: New acks were receieved for data sent to 3277 * this dest. But no new acks were seen for data 3278 * sent after tp1. Therefore, according to the SFR 3279 * algo for CMT, tp1 cannot be marked for FR using 3280 * this SACK. This step covers part of the DAC algo 3281 * and the HTNA algo as well. 3282 */ 3283 tp1 = TAILQ_NEXT(tp1, sctp_next); 3284 continue; 3285 } 3286 /* 3287 * Here we check to see if we were have already done a FR 3288 * and if so we see if the biggest TSN we saw in the sack is 3289 * smaller than the recovery point. If so we don't strike 3290 * the tsn... otherwise we CAN strike the TSN. 3291 */ 3292 /* 3293 * @@@ JRI: Check for CMT 3294 */ 3295 if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) { 3296 /* 3297 * Strike the TSN if in fast-recovery and cum-ack 3298 * moved. 3299 */ 3300 #ifdef SCTP_FR_LOGGING 3301 sctp_log_fr(biggest_tsn_newly_acked, 3302 tp1->rec.data.TSN_seq, 3303 tp1->sent, 3304 SCTP_FR_LOG_STRIKE_CHUNK); 3305 #endif 3306 tp1->sent++; 3307 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3308 /* 3309 * CMT DAC algorithm: If SACK flag is set to 3310 * 0, then lowest_newack test will not pass 3311 * because it would have been set to the 3312 * cumack earlier. If not already to be 3313 * rtx'd, If not a mixed sack and if tp1 is 3314 * not between two sacked TSNs, then mark by 3315 * one more. 3316 */ 3317 if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3318 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) { 3319 #ifdef SCTP_FR_LOGGING 3320 sctp_log_fr(16 + num_dests_sacked, 3321 tp1->rec.data.TSN_seq, 3322 tp1->sent, 3323 SCTP_FR_LOG_STRIKE_CHUNK); 3324 #endif 3325 tp1->sent++; 3326 } 3327 } 3328 } else if (tp1->rec.data.doing_fast_retransmit) { 3329 /* 3330 * For those that have done a FR we must take 3331 * special consideration if we strike. I.e the 3332 * biggest_newly_acked must be higher than the 3333 * sending_seq at the time we did the FR. 3334 */ 3335 #ifdef SCTP_FR_TO_ALTERNATE 3336 /* 3337 * If FR's go to new networks, then we must only do 3338 * this for singly homed asoc's. However if the FR's 3339 * go to the same network (Armando's work) then its 3340 * ok to FR multiple times. 3341 */ 3342 if (asoc->numnets < 2) 3343 #else 3344 if (1) 3345 #endif 3346 { 3347 if ((compare_with_wrap(biggest_tsn_newly_acked, 3348 tp1->rec.data.fast_retran_tsn, MAX_TSN)) || 3349 (biggest_tsn_newly_acked == 3350 tp1->rec.data.fast_retran_tsn)) { 3351 /* 3352 * Strike the TSN, since this ack is 3353 * beyond where things were when we 3354 * did a FR. 3355 */ 3356 #ifdef SCTP_FR_LOGGING 3357 sctp_log_fr(biggest_tsn_newly_acked, 3358 tp1->rec.data.TSN_seq, 3359 tp1->sent, 3360 SCTP_FR_LOG_STRIKE_CHUNK); 3361 #endif 3362 tp1->sent++; 3363 strike_flag = 1; 3364 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3365 /* 3366 * CMT DAC algorithm: If 3367 * SACK flag is set to 0, 3368 * then lowest_newack test 3369 * will not pass because it 3370 * would have been set to 3371 * the cumack earlier. If 3372 * not already to be rtx'd, 3373 * If not a mixed sack and 3374 * if tp1 is not between two 3375 * sacked TSNs, then mark by 3376 * one more. 
3377 */ 3378 if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3379 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) { 3380 #ifdef SCTP_FR_LOGGING 3381 sctp_log_fr(32 + num_dests_sacked, 3382 tp1->rec.data.TSN_seq, 3383 tp1->sent, 3384 SCTP_FR_LOG_STRIKE_CHUNK); 3385 #endif 3386 tp1->sent++; 3387 } 3388 } 3389 } 3390 } 3391 /* 3392 * @@@ JRI: TODO: remove code for HTNA algo. CMT's 3393 * SFR algo covers HTNA. 3394 */ 3395 } else if (compare_with_wrap(tp1->rec.data.TSN_seq, 3396 biggest_tsn_newly_acked, MAX_TSN)) { 3397 /* 3398 * We don't strike these: This is the HTNA 3399 * algorithm i.e. we don't strike If our TSN is 3400 * larger than the Highest TSN Newly Acked. 3401 */ 3402 ; 3403 } else { 3404 /* Strike the TSN */ 3405 #ifdef SCTP_FR_LOGGING 3406 sctp_log_fr(biggest_tsn_newly_acked, 3407 tp1->rec.data.TSN_seq, 3408 tp1->sent, 3409 SCTP_FR_LOG_STRIKE_CHUNK); 3410 #endif 3411 tp1->sent++; 3412 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3413 /* 3414 * CMT DAC algorithm: If SACK flag is set to 3415 * 0, then lowest_newack test will not pass 3416 * because it would have been set to the 3417 * cumack earlier. If not already to be 3418 * rtx'd, If not a mixed sack and if tp1 is 3419 * not between two sacked TSNs, then mark by 3420 * one more. 3421 */ 3422 if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3423 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) { 3424 #ifdef SCTP_FR_LOGGING 3425 sctp_log_fr(48 + num_dests_sacked, 3426 tp1->rec.data.TSN_seq, 3427 tp1->sent, 3428 SCTP_FR_LOG_STRIKE_CHUNK); 3429 #endif 3430 tp1->sent++; 3431 } 3432 } 3433 } 3434 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3435 /* Increment the count to resend */ 3436 struct sctp_nets *alt; 3437 3438 /* printf("OK, we are now ready to FR this guy\n"); */ 3439 #ifdef SCTP_FR_LOGGING 3440 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count, 3441 0, SCTP_FR_MARKED); 3442 #endif 3443 if (strike_flag) { 3444 /* This is a subsequent FR */ 3445 SCTP_STAT_INCR(sctps_sendmultfastretrans); 3446 } 3447 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 3448 3449 if (sctp_cmt_on_off) { 3450 /* 3451 * CMT: Using RTX_SSTHRESH policy for CMT. 3452 * If CMT is being used, then pick dest with 3453 * largest ssthresh for any retransmission. 3454 * (iyengar@cis.udel.edu, 2005/08/12) 3455 */ 3456 tp1->no_fr_allowed = 1; 3457 alt = tp1->whoTo; 3458 alt = sctp_find_alternate_net(stcb, alt, 1); 3459 /* 3460 * CUCv2: If a different dest is picked for 3461 * the retransmission, then new 3462 * (rtx-)pseudo_cumack needs to be tracked 3463 * for orig dest. Let CUCv2 track new (rtx-) 3464 * pseudo-cumack always. 3465 */ 3466 tp1->whoTo->find_pseudo_cumack = 1; 3467 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3468 3469 3470 } else {/* CMT is OFF */ 3471 3472 #ifdef SCTP_FR_TO_ALTERNATE 3473 /* Can we find an alternate? */ 3474 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); 3475 #else 3476 /* 3477 * default behavior is to NOT retransmit 3478 * FR's to an alternate. Armando Caro's 3479 * paper details why. 
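 */

			/*
			 * Summary of the destination choice (restating the
			 * two branches above): with CMT on, the chunk is
			 * steered to the alternate with the largest
			 * ssthresh; with CMT off it stays on the same path
			 * unless SCTP_FR_TO_ALTERNATE is compiled in.
			 */

			/*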
3480 */ 3481 alt = tp1->whoTo; 3482 #endif 3483 } 3484 3485 tp1->rec.data.doing_fast_retransmit = 1; 3486 tot_retrans++; 3487 /* mark the sending seq for possible subsequent FR's */ 3488 /* 3489 * printf("Marking TSN for FR new value %x\n", 3490 * (uint32_t)tpi->rec.data.TSN_seq); 3491 */ 3492 if (TAILQ_EMPTY(&asoc->send_queue)) { 3493 /* 3494 * If the queue of send is empty then its 3495 * the next sequence number that will be 3496 * assigned so we subtract one from this to 3497 * get the one we last sent. 3498 */ 3499 tp1->rec.data.fast_retran_tsn = sending_seq; 3500 } else { 3501 /* 3502 * If there are chunks on the send queue 3503 * (unsent data that has made it from the 3504 * stream queues but not out the door, we 3505 * take the first one (which will have the 3506 * lowest TSN) and subtract one to get the 3507 * one we last sent. 3508 */ 3509 struct sctp_tmit_chunk *ttt; 3510 3511 ttt = TAILQ_FIRST(&asoc->send_queue); 3512 tp1->rec.data.fast_retran_tsn = 3513 ttt->rec.data.TSN_seq; 3514 } 3515 3516 if (tp1->do_rtt) { 3517 /* 3518 * this guy had a RTO calculation pending on 3519 * it, cancel it 3520 */ 3521 tp1->whoTo->rto_pending = 0; 3522 tp1->do_rtt = 0; 3523 } 3524 /* fix counts and things */ 3525 3526 tp1->whoTo->net_ack++; 3527 if (tp1->whoTo->flight_size >= tp1->book_size) 3528 tp1->whoTo->flight_size -= tp1->book_size; 3529 else 3530 tp1->whoTo->flight_size = 0; 3531 3532 #ifdef SCTP_LOG_RWND 3533 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, 3534 asoc->peers_rwnd, tp1->send_size, sctp_peer_chunk_oh); 3535 #endif 3536 /* add back to the rwnd */ 3537 asoc->peers_rwnd += (tp1->send_size + sctp_peer_chunk_oh); 3538 3539 /* remove from the total flight */ 3540 if (asoc->total_flight >= tp1->book_size) { 3541 asoc->total_flight -= tp1->book_size; 3542 if (asoc->total_flight_count > 0) 3543 asoc->total_flight_count--; 3544 } else { 3545 asoc->total_flight = 0; 3546 asoc->total_flight_count = 0; 3547 } 3548 3549 3550 if (alt != tp1->whoTo) { 3551 /* yes, there is an alternate. */ 3552 sctp_free_remote_addr(tp1->whoTo); 3553 tp1->whoTo = alt; 3554 atomic_add_int(&alt->ref_count, 1); 3555 } 3556 } 3557 tp1 = TAILQ_NEXT(tp1, sctp_next); 3558 } /* while (tp1) */ 3559 3560 if (tot_retrans > 0) { 3561 /* 3562 * Setup the ecn nonce re-sync point. We do this since once 3563 * we go to FR something we introduce a Karn's rule scenario 3564 * and won't know the totals for the ECN bits. 3565 */ 3566 asoc->nonce_resync_tsn = sending_seq; 3567 asoc->nonce_wait_for_ecne = 0; 3568 asoc->nonce_sum_check = 0; 3569 } 3570 } 3571 3572 struct sctp_tmit_chunk * 3573 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb, 3574 struct sctp_association *asoc) 3575 { 3576 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL; 3577 struct timeval now; 3578 int now_filled = 0; 3579 3580 if (asoc->peer_supports_prsctp == 0) { 3581 return (NULL); 3582 } 3583 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3584 while (tp1) { 3585 if (tp1->sent != SCTP_FORWARD_TSN_SKIP && 3586 tp1->sent != SCTP_DATAGRAM_RESEND) { 3587 /* no chance to advance, out of here */ 3588 break; 3589 } 3590 if (!PR_SCTP_ENABLED(tp1->flags)) { 3591 /* 3592 * We can't fwd-tsn past any that are reliable aka 3593 * retransmitted until the asoc fails. 
			 */
			break;
		}
		if (!now_filled) {
			SCTP_GETTIME_TIMEVAL(&now);
			now_filled = 1;
		}
		tp2 = TAILQ_NEXT(tp1, sctp_next);
		/*
		 * now we have a chunk which is marked for another
		 * retransmission to a PR-stream but has already run out of
		 * its chances, OR has been marked to be skipped now. Can we
		 * skip it if it's a resend?
		 */
		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
			/*
			 * Now is this one marked for resend and its time is
			 * now up?
			 */
			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
				/* Yes so drop it */
				if (tp1->data) {
					sctp_release_pr_sctp_chunk(stcb, tp1,
					    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
					    &asoc->sent_queue);
				}
			} else {
				/*
				 * No, we are done when we hit one marked
				 * for resend whose time has not expired.
				 */
				break;
			}
		}
		/*
		 * Ok now if this chunk is marked to drop it we can clean up
		 * the chunk, advance our peer ack point and we can check
		 * the next chunk.
		 */
		if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
			/* advance PeerAckPoint goes forward */
			asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
			a_adv = tp1;
			/*
			 * we don't want to de-queue it here. Just wait for
			 * the next peer SACK to come with a new cumTSN and
			 * then the chunk will be dropped in the normal
			 * fashion.
			 */
			if (tp1->data) {
				sctp_free_bufspace(stcb, asoc, tp1, 1);
				/*
				 * Maybe there should be another
				 * notification type
				 */
				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
				    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
				    tp1);
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
				if (stcb->sctp_socket) {
					sctp_sowwakeup(stcb->sctp_ep,
					    stcb->sctp_socket);
#ifdef SCTP_WAKE_LOGGING
					sctp_wakeup_log(stcb, tp1->rec.data.TSN_seq, 1, SCTP_WAKESND_FROM_FWDTSN);
#endif
				}
			}
		} else {
			/*
			 * If it is still in RESEND we can advance no
			 * further
			 */
			break;
		}
		/*
		 * If we hit here we just dumped tp1, move to the next TSN
		 * on the sent queue.
3673 */ 3674 tp1 = tp2; 3675 } 3676 return (a_adv); 3677 } 3678 3679 #ifdef SCTP_HIGH_SPEED 3680 struct sctp_hs_raise_drop { 3681 int32_t cwnd; 3682 int32_t increase; 3683 int32_t drop_percent; 3684 }; 3685 3686 #define SCTP_HS_TABLE_SIZE 73 3687 3688 struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = { 3689 {38, 1, 50}, /* 0 */ 3690 {118, 2, 44}, /* 1 */ 3691 {221, 3, 41}, /* 2 */ 3692 {347, 4, 38}, /* 3 */ 3693 {495, 5, 37}, /* 4 */ 3694 {663, 6, 35}, /* 5 */ 3695 {851, 7, 34}, /* 6 */ 3696 {1058, 8, 33}, /* 7 */ 3697 {1284, 9, 32}, /* 8 */ 3698 {1529, 10, 31}, /* 9 */ 3699 {1793, 11, 30}, /* 10 */ 3700 {2076, 12, 29}, /* 11 */ 3701 {2378, 13, 28}, /* 12 */ 3702 {2699, 14, 28}, /* 13 */ 3703 {3039, 15, 27}, /* 14 */ 3704 {3399, 16, 27}, /* 15 */ 3705 {3778, 17, 26}, /* 16 */ 3706 {4177, 18, 26}, /* 17 */ 3707 {4596, 19, 25}, /* 18 */ 3708 {5036, 20, 25}, /* 19 */ 3709 {5497, 21, 24}, /* 20 */ 3710 {5979, 22, 24}, /* 21 */ 3711 {6483, 23, 23}, /* 22 */ 3712 {7009, 24, 23}, /* 23 */ 3713 {7558, 25, 22}, /* 24 */ 3714 {8130, 26, 22}, /* 25 */ 3715 {8726, 27, 22}, /* 26 */ 3716 {9346, 28, 21}, /* 27 */ 3717 {9991, 29, 21}, /* 28 */ 3718 {10661, 30, 21}, /* 29 */ 3719 {11358, 31, 20}, /* 30 */ 3720 {12082, 32, 20}, /* 31 */ 3721 {12834, 33, 20}, /* 32 */ 3722 {13614, 34, 19}, /* 33 */ 3723 {14424, 35, 19}, /* 34 */ 3724 {15265, 36, 19}, /* 35 */ 3725 {16137, 37, 19}, /* 36 */ 3726 {17042, 38, 18}, /* 37 */ 3727 {17981, 39, 18}, /* 38 */ 3728 {18955, 40, 18}, /* 39 */ 3729 {19965, 41, 17}, /* 40 */ 3730 {21013, 42, 17}, /* 41 */ 3731 {22101, 43, 17}, /* 42 */ 3732 {23230, 44, 17}, /* 43 */ 3733 {24402, 45, 16}, /* 44 */ 3734 {25618, 46, 16}, /* 45 */ 3735 {26881, 47, 16}, /* 46 */ 3736 {28193, 48, 16}, /* 47 */ 3737 {29557, 49, 15}, /* 48 */ 3738 {30975, 50, 15}, /* 49 */ 3739 {32450, 51, 15}, /* 50 */ 3740 {33986, 52, 15}, /* 51 */ 3741 {35586, 53, 14}, /* 52 */ 3742 {37253, 54, 14}, /* 53 */ 3743 {38992, 55, 14}, /* 54 */ 3744 {40808, 56, 14}, /* 55 */ 3745 {42707, 57, 13}, /* 56 */ 3746 {44694, 58, 13}, /* 57 */ 3747 {46776, 59, 13}, /* 58 */ 3748 {48961, 60, 13}, /* 59 */ 3749 {51258, 61, 13}, /* 60 */ 3750 {53677, 62, 12}, /* 61 */ 3751 {56230, 63, 12}, /* 62 */ 3752 {58932, 64, 12}, /* 63 */ 3753 {61799, 65, 12}, /* 64 */ 3754 {64851, 66, 11}, /* 65 */ 3755 {68113, 67, 11}, /* 66 */ 3756 {71617, 68, 11}, /* 67 */ 3757 {75401, 69, 10}, /* 68 */ 3758 {79517, 70, 10}, /* 69 */ 3759 {84035, 71, 10}, /* 70 */ 3760 {89053, 72, 10}, /* 71 */ 3761 {94717, 73, 9} /* 72 */ 3762 }; 3763 3764 static void 3765 sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net) 3766 { 3767 int cur_val, i, indx, incr; 3768 3769 cur_val = net->cwnd >> 10; 3770 indx = SCTP_HS_TABLE_SIZE - 1; 3771 3772 if (cur_val < sctp_cwnd_adjust[0].cwnd) { 3773 /* normal mode */ 3774 if (net->net_ack > net->mtu) { 3775 net->cwnd += net->mtu; 3776 #ifdef SCTP_CWND_MONITOR 3777 sctp_log_cwnd(stcb, net, net->mtu, SCTP_CWND_LOG_FROM_SS); 3778 #endif 3779 } else { 3780 net->cwnd += net->net_ack; 3781 #ifdef SCTP_CWND_MONITOR 3782 sctp_log_cwnd(stcb, net, net->net_ack, SCTP_CWND_LOG_FROM_SS); 3783 #endif 3784 } 3785 } else { 3786 for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) { 3787 if (cur_val < sctp_cwnd_adjust[i].cwnd) { 3788 indx = i; 3789 break; 3790 } 3791 } 3792 net->last_hs_used = indx; 3793 incr = ((sctp_cwnd_adjust[indx].increase) << 10); 3794 net->cwnd += incr; 3795 #ifdef SCTP_CWND_MONITOR 3796 sctp_log_cwnd(stcb, net, incr, SCTP_CWND_LOG_FROM_SS); 3797 #endif 3798 } 3799 } 3800 3801 
static void 3802 sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net) 3803 { 3804 int cur_val, i, indx; 3805 3806 #ifdef SCTP_CWND_MONITOR 3807 int old_cwnd = net->cwnd; 3808 3809 #endif 3810 3811 cur_val = net->cwnd >> 10; 3812 indx = net->last_hs_used; 3813 if (cur_val < sctp_cwnd_adjust[0].cwnd) { 3814 /* normal mode */ 3815 net->ssthresh = net->cwnd / 2; 3816 if (net->ssthresh < (net->mtu * 2)) { 3817 net->ssthresh = 2 * net->mtu; 3818 } 3819 net->cwnd = net->ssthresh; 3820 } else { 3821 /* drop by the proper amount */ 3822 net->ssthresh = net->cwnd - (int)((net->cwnd / 100) * 3823 sctp_cwnd_adjust[net->last_hs_used].drop_percent); 3824 net->cwnd = net->ssthresh; 3825 /* now where are we */ 3826 indx = net->last_hs_used; 3827 cur_val = net->cwnd >> 10; 3828 /* reset where we are in the table */ 3829 if (cur_val < sctp_cwnd_adjust[0].cwnd) { 3830 /* feel out of hs */ 3831 net->last_hs_used = 0; 3832 } else { 3833 for (i = indx; i >= 1; i--) { 3834 if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) { 3835 break; 3836 } 3837 } 3838 net->last_hs_used = indx; 3839 } 3840 } 3841 #ifdef SCTP_CWND_MONITOR 3842 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR); 3843 #endif 3844 3845 } 3846 3847 #endif 3848 3849 extern int sctp_early_fr; 3850 extern int sctp_L2_abc_variable; 3851 3852 3853 static __inline void 3854 sctp_cwnd_update(struct sctp_tcb *stcb, 3855 struct sctp_association *asoc, 3856 int accum_moved, int reneged_all, int will_exit) 3857 { 3858 struct sctp_nets *net; 3859 3860 /******************************/ 3861 /* update cwnd and Early FR */ 3862 /******************************/ 3863 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3864 #ifdef JANA_CODE_WHY_THIS 3865 /* 3866 * CMT fast recovery code. Need to debug. 3867 */ 3868 if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) { 3869 if (compare_with_wrap(asoc->last_acked_seq, 3870 net->fast_recovery_tsn, MAX_TSN) || 3871 (asoc->last_acked_seq == net->fast_recovery_tsn) || 3872 compare_with_wrap(net->pseudo_cumack, net->fast_recovery_tsn, MAX_TSN) || 3873 (net->pseudo_cumack == net->fast_recovery_tsn)) { 3874 net->will_exit_fast_recovery = 1; 3875 } 3876 } 3877 #endif 3878 if (sctp_early_fr) { 3879 /* 3880 * So, first of all do we need to have a Early FR 3881 * timer running? 3882 */ 3883 if (((TAILQ_FIRST(&asoc->sent_queue)) && 3884 (net->ref_count > 1) && 3885 (net->flight_size < net->cwnd)) || 3886 (reneged_all)) { 3887 /* 3888 * yes, so in this case stop it if its 3889 * running, and then restart it. Reneging 3890 * all is a special case where we want to 3891 * run the Early FR timer and then force the 3892 * last few unacked to be sent, causing us 3893 * to illicit a sack with gaps to force out 3894 * the others. 
3895 */ 3896 if (callout_pending(&net->fr_timer.timer)) { 3897 SCTP_STAT_INCR(sctps_earlyfrstpidsck2); 3898 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net); 3899 } 3900 SCTP_STAT_INCR(sctps_earlyfrstrid); 3901 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net); 3902 } else { 3903 /* No, stop it if its running */ 3904 if (callout_pending(&net->fr_timer.timer)) { 3905 SCTP_STAT_INCR(sctps_earlyfrstpidsck3); 3906 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net); 3907 } 3908 } 3909 } 3910 /* if nothing was acked on this destination skip it */ 3911 if (net->net_ack == 0) { 3912 #ifdef SCTP_CWND_LOGGING 3913 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK); 3914 #endif 3915 continue; 3916 } 3917 if (net->net_ack2 > 0) { 3918 /* 3919 * Karn's rule applies to clearing error count, this 3920 * is optional. 3921 */ 3922 net->error_count = 0; 3923 if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) == 3924 SCTP_ADDR_NOT_REACHABLE) { 3925 /* addr came good */ 3926 net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE; 3927 net->dest_state |= SCTP_ADDR_REACHABLE; 3928 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 3929 SCTP_RECEIVED_SACK, (void *)net); 3930 /* now was it the primary? if so restore */ 3931 if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) { 3932 sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net); 3933 } 3934 } 3935 } 3936 #ifdef JANA_CODE_WHY_THIS 3937 /* 3938 * Cannot skip for CMT. Need to come back and check these 3939 * variables for CMT. CMT fast recovery code. Need to debug. 3940 */ 3941 if (sctp_cmt_on_off == 1 && 3942 net->fast_retran_loss_recovery && 3943 net->will_exit_fast_recovery == 0) 3944 #endif 3945 if (sctp_cmt_on_off == 0 && asoc->fast_retran_loss_recovery && will_exit == 0) { 3946 /* 3947 * If we are in loss recovery we skip any 3948 * cwnd update 3949 */ 3950 goto skip_cwnd_update; 3951 } 3952 /* 3953 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has 3954 * moved. 3955 */ 3956 if (accum_moved || (sctp_cmt_on_off && net->new_pseudo_cumack)) { 3957 /* If the cumulative ack moved we can proceed */ 3958 if (net->cwnd <= net->ssthresh) { 3959 /* We are in slow start */ 3960 if (net->flight_size + net->net_ack >= 3961 net->cwnd) { 3962 #ifdef SCTP_HIGH_SPEED 3963 sctp_hs_cwnd_increase(stcb, net); 3964 #else 3965 if (net->net_ack > (net->mtu * sctp_L2_abc_variable)) { 3966 net->cwnd += (net->mtu * sctp_L2_abc_variable); 3967 #ifdef SCTP_CWND_MONITOR 3968 sctp_log_cwnd(stcb, net, net->mtu, 3969 SCTP_CWND_LOG_FROM_SS); 3970 #endif 3971 3972 } else { 3973 net->cwnd += net->net_ack; 3974 #ifdef SCTP_CWND_MONITOR 3975 sctp_log_cwnd(stcb, net, net->net_ack, 3976 SCTP_CWND_LOG_FROM_SS); 3977 #endif 3978 3979 } 3980 #endif 3981 } else { 3982 unsigned int dif; 3983 3984 dif = net->cwnd - (net->flight_size + 3985 net->net_ack); 3986 #ifdef SCTP_CWND_LOGGING 3987 sctp_log_cwnd(stcb, net, net->net_ack, 3988 SCTP_CWND_LOG_NOADV_SS); 3989 #endif 3990 } 3991 } else { 3992 /* We are in congestion avoidance */ 3993 if (net->flight_size + net->net_ack >= 3994 net->cwnd) { 3995 /* 3996 * add to pba only if we had a 3997 * cwnd's worth (or so) in flight OR 3998 * the burst limit was applied. 3999 */ 4000 net->partial_bytes_acked += 4001 net->net_ack; 4002 4003 /* 4004 * Do we need to increase (if pba is 4005 * > cwnd)? 
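*/
#if 0
	/*
	 * Editor's worked example (hypothetical numbers): with cwnd =
	 * 12000 bytes and MTU = 1500, a full window of acked data banks
	 * 12000 bytes into partial_bytes_acked, at which point the code
	 * below does roughly:
	 */
	net->partial_bytes_acked -= net->cwnd;	/* 12000 -> 0 */
	net->cwnd += net->mtu;			/* 12000 -> 13500 */
	/* i.e. about one MTU of growth per cwnd's worth of data acked. */
#endif
/*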
4006 */ 4007 if (net->partial_bytes_acked >= 4008 net->cwnd) { 4009 if (net->cwnd < 4010 net->partial_bytes_acked) { 4011 net->partial_bytes_acked -= 4012 net->cwnd; 4013 } else { 4014 net->partial_bytes_acked = 4015 0; 4016 } 4017 net->cwnd += net->mtu; 4018 #ifdef SCTP_CWND_MONITOR 4019 sctp_log_cwnd(stcb, net, net->mtu, 4020 SCTP_CWND_LOG_FROM_CA); 4021 #endif 4022 } 4023 #ifdef SCTP_CWND_LOGGING 4024 else { 4025 sctp_log_cwnd(stcb, net, net->net_ack, 4026 SCTP_CWND_LOG_NOADV_CA); 4027 } 4028 #endif 4029 } else { 4030 unsigned int dif; 4031 4032 #ifdef SCTP_CWND_LOGGING 4033 sctp_log_cwnd(stcb, net, net->net_ack, 4034 SCTP_CWND_LOG_NOADV_CA); 4035 #endif 4036 dif = net->cwnd - (net->flight_size + 4037 net->net_ack); 4038 } 4039 } 4040 } else { 4041 #ifdef SCTP_CWND_LOGGING 4042 sctp_log_cwnd(stcb, net, net->mtu, 4043 SCTP_CWND_LOG_NO_CUMACK); 4044 #endif 4045 } 4046 skip_cwnd_update: 4047 /* 4048 * NOW, according to Karn's rule do we need to restore the 4049 * RTO timer back? Check our net_ack2. If not set then we 4050 * have a ambiguity.. i.e. all data ack'd was sent to more 4051 * than one place. 4052 */ 4053 if (net->net_ack2) { 4054 /* restore any doubled timers */ 4055 net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1; 4056 if (net->RTO < stcb->asoc.minrto) { 4057 net->RTO = stcb->asoc.minrto; 4058 } 4059 if (net->RTO > stcb->asoc.maxrto) { 4060 net->RTO = stcb->asoc.maxrto; 4061 } 4062 } 4063 } 4064 } 4065 4066 4067 void 4068 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, 4069 uint32_t rwnd, int nonce_sum_flag, int *abort_now) 4070 { 4071 struct sctp_nets *net; 4072 struct sctp_association *asoc; 4073 struct sctp_tmit_chunk *tp1, *tp2; 4074 4075 SCTP_TCB_LOCK_ASSERT(stcb); 4076 asoc = &stcb->asoc; 4077 /* First setup for CC stuff */ 4078 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4079 net->prev_cwnd = net->cwnd; 4080 net->net_ack = 0; 4081 net->net_ack2 = 0; 4082 } 4083 asoc->this_sack_highest_gap = cumack; 4084 stcb->asoc.overall_error_count = 0; 4085 /* process the new consecutive TSN first */ 4086 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4087 while (tp1) { 4088 tp2 = TAILQ_NEXT(tp1, sctp_next); 4089 if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq, 4090 MAX_TSN) || 4091 cumack == tp1->rec.data.TSN_seq) { 4092 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 4093 /* 4094 * ECN Nonce: Add the nonce to the sender's 4095 * nonce sum 4096 */ 4097 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce; 4098 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4099 /* 4100 * If it is less than ACKED, it is 4101 * now no-longer in flight. Higher 4102 * values may occur during marking 4103 */ 4104 if (tp1->rec.data.chunk_was_revoked == 1) { 4105 /* 4106 * If its been revoked, and 4107 * now ack'd we do NOT take 4108 * away fs etc. since when 4109 * it is retransmitted we 4110 * clear this flag. 4111 */ 4112 goto skip_fs_update; 4113 } 4114 if (tp1->whoTo->flight_size >= tp1->book_size) { 4115 tp1->whoTo->flight_size -= tp1->book_size; 4116 } else { 4117 tp1->whoTo->flight_size = 0; 4118 } 4119 if (asoc->total_flight >= tp1->book_size) { 4120 asoc->total_flight -= tp1->book_size; 4121 if (asoc->total_flight_count > 0) 4122 asoc->total_flight_count--; 4123 } else { 4124 asoc->total_flight = 0; 4125 asoc->total_flight_count = 0; 4126 } 4127 tp1->whoTo->net_ack += tp1->send_size; 4128 if (tp1->snd_count < 2) { 4129 /* 4130 * True non-retransmited 4131 * chunk 4132 */ 4133 tp1->whoTo->net_ack2 += 4134 tp1->send_size; 4135 4136 /* update RTO too? 
*/ 4137 if ((tp1->do_rtt) && (tp1->whoTo->rto_pending)) { 4138 tp1->whoTo->RTO = 4139 sctp_calculate_rto(stcb, 4140 asoc, tp1->whoTo, 4141 &tp1->sent_rcv_time); 4142 tp1->whoTo->rto_pending = 0; 4143 tp1->do_rtt = 0; 4144 } 4145 } 4146 #ifdef SCTP_CWND_LOGGING 4147 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 4148 #endif 4149 } 4150 skip_fs_update: 4151 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4152 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4153 } 4154 tp1->sent = SCTP_DATAGRAM_ACKED; 4155 } 4156 } else { 4157 break; 4158 } 4159 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4160 if (tp1->data) { 4161 sctp_free_bufspace(stcb, asoc, tp1, 1); 4162 sctp_m_freem(tp1->data); 4163 } 4164 #ifdef SCTP_SACK_LOGGING 4165 sctp_log_sack(asoc->last_acked_seq, 4166 cumack, 4167 tp1->rec.data.TSN_seq, 4168 0, 4169 0, 4170 SCTP_LOG_FREE_SENT); 4171 #endif 4172 tp1->data = NULL; 4173 asoc->sent_queue_cnt--; 4174 sctp_free_remote_addr(tp1->whoTo); 4175 sctp_free_a_chunk(stcb, tp1); 4176 tp1 = tp2; 4177 } 4178 if (stcb->sctp_socket) { 4179 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4180 #ifdef SCTP_WAKE_LOGGING 4181 sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK); 4182 #endif 4183 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4184 #ifdef SCTP_WAKE_LOGGING 4185 } else { 4186 sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK); 4187 #endif 4188 } 4189 4190 if (asoc->last_acked_seq != cumack) 4191 sctp_cwnd_update(stcb, asoc, 1, 0, 0); 4192 asoc->last_acked_seq = cumack; 4193 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4194 /* nothing left in-flight */ 4195 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4196 net->flight_size = 0; 4197 net->partial_bytes_acked = 0; 4198 } 4199 asoc->total_flight = 0; 4200 asoc->total_flight_count = 0; 4201 } 4202 /* Fix up the a-p-a-p for future PR-SCTP sends */ 4203 if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) { 4204 asoc->advanced_peer_ack_point = cumack; 4205 } 4206 /* ECN Nonce updates */ 4207 if (asoc->ecn_nonce_allowed) { 4208 if (asoc->nonce_sum_check) { 4209 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) { 4210 if (asoc->nonce_wait_for_ecne == 0) { 4211 struct sctp_tmit_chunk *lchk; 4212 4213 lchk = TAILQ_FIRST(&asoc->send_queue); 4214 asoc->nonce_wait_for_ecne = 1; 4215 if (lchk) { 4216 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq; 4217 } else { 4218 asoc->nonce_wait_tsn = asoc->sending_seq; 4219 } 4220 } else { 4221 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) || 4222 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) { 4223 /* 4224 * Misbehaving peer. We need 4225 * to react to this guy 4226 */ 4227 asoc->ecn_allowed = 0; 4228 asoc->ecn_nonce_allowed = 0; 4229 } 4230 } 4231 } 4232 } else { 4233 /* See if Resynchronization Possible */ 4234 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) { 4235 asoc->nonce_sum_check = 1; 4236 /* 4237 * now we must calculate what the base is. 4238 * We do this based on two things, we know 4239 * the total's for all the segments 4240 * gap-acked in the SACK (none), We also 4241 * know the SACK's nonce sum, its in 4242 * nonce_sum_flag. 
So we can build a truth 4243 * table to back-calculate the new value of 4244 * asoc->nonce_sum_expect_base: 4245 * 4246 * (SACK-flag, Seg-Sums) -> Base 4247 * (0, 0) -> 0   (1, 0) -> 1 4248 * (0, 1) -> 1   (1, 1) -> 0 4249 */ 4250 asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM; 4251 } 4252 } 4253 } 4254 /* RWND update */ 4255 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 4256 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh))); 4257 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4258 /* SWS sender side engages */ 4259 asoc->peers_rwnd = 0; 4260 } 4261 /* Now ensure a timer is running wherever data is still queued */ 4262 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4263 if (net->flight_size) { 4264 int to_ticks; 4265 4266 if (net->RTO == 0) { 4267 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 4268 } else { 4269 to_ticks = MSEC_TO_TICKS(net->RTO); 4270 } 4271 callout_reset(&net->rxt_timer.timer, to_ticks, 4272 sctp_timeout_handler, &net->rxt_timer); 4273 } else { 4274 if (callout_pending(&net->rxt_timer.timer)) { 4275 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4276 stcb, net); 4277 } 4278 if (sctp_early_fr) { 4279 if (callout_pending(&net->fr_timer.timer)) { 4280 SCTP_STAT_INCR(sctps_earlyfrstpidsck4); 4281 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net); 4282 } 4283 } 4284 } 4285 } 4286 4287 /**********************************/ 4288 /* Now what about shutdown issues */ 4289 /**********************************/ 4290 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4291 /* nothing left on sendqueue.. consider done */ 4292 /* clean up */ 4293 if ((asoc->stream_queue_cnt == 1) && 4294 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4295 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 4296 (asoc->locked_on_sending) 4297 ) { 4298 struct sctp_stream_queue_pending *sp; 4299 4300 /* 4301 * I may be in a state where we got it all across.. but 4302 * cannot write more due to a shutdown... we abort 4303 * since the user did not indicate EOR in this case. 4304 * The sp will be cleaned during free of the asoc.
4305 */ 4306 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), 4307 sctp_streamhead); 4308 if ((sp) && (sp->length == 0) && (sp->msg_is_complete == 0)) { 4309 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4310 asoc->locked_on_sending = NULL; 4311 asoc->stream_queue_cnt--; 4312 } 4313 } 4314 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4315 (asoc->stream_queue_cnt == 0)) { 4316 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4317 /* Need to abort here */ 4318 struct mbuf *oper; 4319 4320 abort_out_now: 4321 *abort_now = 1; 4322 /* XXX */ 4323 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4324 0, M_DONTWAIT, 1, MT_DATA); 4325 if (oper) { 4326 struct sctp_paramhdr *ph; 4327 uint32_t *ippp; 4328 4329 oper->m_len = sizeof(struct sctp_paramhdr) + 4330 sizeof(uint32_t); 4331 ph = mtod(oper, struct sctp_paramhdr *); 4332 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 4333 ph->param_length = htons(oper->m_len); 4334 ippp = (uint32_t *) (ph + 1); 4335 *ippp = htonl(0x30000003); 4336 } 4337 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper); 4338 } else { 4339 asoc->state = SCTP_STATE_SHUTDOWN_SENT; 4340 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4341 sctp_stop_timers_for_shutdown(stcb); 4342 sctp_send_shutdown(stcb, 4343 stcb->asoc.primary_destination); 4344 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4345 stcb->sctp_ep, stcb, asoc->primary_destination); 4346 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4347 stcb->sctp_ep, stcb, asoc->primary_destination); 4348 } 4349 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4350 (asoc->stream_queue_cnt == 0)) { 4351 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4352 goto abort_out_now; 4353 } 4354 asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT; 4355 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4356 sctp_send_shutdown_ack(stcb, 4357 stcb->asoc.primary_destination); 4358 4359 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4360 stcb->sctp_ep, stcb, asoc->primary_destination); 4361 } 4362 } 4363 #ifdef SCTP_SACK_RWND_LOGGING 4364 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 4365 rwnd, 4366 stcb->asoc.peers_rwnd, 4367 stcb->asoc.total_flight, 4368 stcb->asoc.total_output_queue_size); 4369 4370 #endif 4371 } 4372 4373 4374 4375 void 4376 sctp_handle_sack(struct sctp_sack_chunk *ch, struct sctp_tcb *stcb, 4377 struct sctp_nets *net_from, int *abort_now) 4378 { 4379 struct sctp_association *asoc; 4380 struct sctp_sack *sack; 4381 struct sctp_tmit_chunk *tp1, *tp2; 4382 uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, 4383 this_sack_lowest_newack; 4384 uint16_t num_seg, num_dup; 4385 uint16_t wake_him = 0; 4386 unsigned int sack_length; 4387 uint32_t send_s; 4388 long j; 4389 int accum_moved = 0; 4390 int will_exit_fast_recovery = 0; 4391 uint32_t a_rwnd; 4392 struct sctp_nets *net = NULL; 4393 int nonce_sum_flag, ecn_seg_sums = 0; 4394 uint8_t reneged_all = 0; 4395 uint8_t cmt_dac_flag; 4396 4397 /* 4398 * we take any chance we can to service our queues since we cannot 4399 * get awoken when the socket is read from :< 4400 */ 4401 /* 4402 * Now perform the actual SACK handling: 1) Verify that it is not an 4403 * old sack, if so discard. 2) If there is nothing left in the send 4404 * queue (cum-ack is equal to last acked) then you have a duplicate 4405 * too, update any rwnd change and verify no timers are running. 4406 * then return. 3) Process any new consequtive data i.e. cum-ack 4407 * moved process these first and note that it moved. 
4) Process any 4408 * sack blocks. 5) Drop any acked from the queue. 6) Check for any 4409 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left, 4410 * sync up flightsizes and things, stop all timers and also check 4411 * for shutdown_pending state. If so then go ahead and send off the 4412 * shutdown. If in shutdown recv, send off the shutdown-ack and 4413 * start that timer, return. 9) Strike any non-acked things and do FR 4414 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp 4415 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK 4416 * if in shutdown_recv state. 4417 */ 4418 SCTP_TCB_LOCK_ASSERT(stcb); 4419 sack = &ch->sack; 4420 /* CMT DAC algo */ 4421 this_sack_lowest_newack = 0; 4422 j = 0; 4423 sack_length = ntohs(ch->ch.chunk_length); 4424 if (sack_length < sizeof(struct sctp_sack_chunk)) { 4425 #ifdef SCTP_DEBUG 4426 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 4427 printf("Bad size on sack chunk... too small\n"); 4428 } 4429 #endif 4430 return; 4431 } 4432 /* ECN Nonce */ 4433 SCTP_STAT_INCR(sctps_slowpath_sack); 4434 nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM; 4435 cum_ack = last_tsn = ntohl(sack->cum_tsn_ack); 4436 num_seg = ntohs(sack->num_gap_ack_blks); 4437 a_rwnd = (uint32_t) ntohl(sack->a_rwnd); 4438 4439 /* CMT DAC algo */ 4440 cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC; 4441 num_dup = ntohs(sack->num_dup_tsns); 4442 4443 4444 stcb->asoc.overall_error_count = 0; 4445 asoc = &stcb->asoc; 4446 #ifdef SCTP_SACK_LOGGING 4447 sctp_log_sack(asoc->last_acked_seq, 4448 cum_ack, 4449 0, 4450 num_seg, 4451 num_dup, 4452 SCTP_LOG_NEW_SACK); 4453 #endif 4454 #if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING) 4455 if (num_dup) { 4456 int off_to_dup, iii; 4457 uint32_t *dupdata; 4458 4459 off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk); 4460 if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= sack_length) { 4461 dupdata = (uint32_t *) ((caddr_t)ch + off_to_dup); 4462 for (iii = 0; iii < num_dup; iii++) { 4463 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); 4464 dupdata++; 4465 4466 } 4467 } else { 4468 printf("Size invalid offset to dups:%d number dups:%d sack_len:%d num gaps:%d\n", 4469 off_to_dup, num_dup, sack_length, num_seg); 4470 } 4471 } 4472 #endif 4473 /* reality check */ 4474 if (TAILQ_EMPTY(&asoc->send_queue)) { 4475 send_s = asoc->sending_seq; 4476 } else { 4477 tp1 = TAILQ_FIRST(&asoc->send_queue); 4478 send_s = tp1->rec.data.TSN_seq; 4479 } 4480 4481 if (sctp_strict_sacks) { 4482 if (cum_ack == send_s || 4483 compare_with_wrap(cum_ack, send_s, MAX_TSN)) { 4484 struct mbuf *oper; 4485 4486 /* 4487 * no way, we have not even sent this TSN out yet. 4488 * Peer is hopelessly messed up with us.
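*/
/*
 * Editor's note: compare_with_wrap() implements a serial-number style
 * comparison over the 32-bit TSN space, conceptually
 *
 *	a "newer than" b  <=>  (int32_t)(a - b) > 0
 *
 * so the strict check above rejects a cum-ack at or beyond the next TSN
 * we would send (send_s), even across a TSN wrap; e.g. cum_ack
 * 0x00000001 is "newer than" send_s 0xfffffffe.
 */
/*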
4489 */ 4490 hopeless_peer: 4491 *abort_now = 1; 4492 /* XXX */ 4493 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4494 0, M_DONTWAIT, 1, MT_DATA); 4495 if (oper) { 4496 struct sctp_paramhdr *ph; 4497 uint32_t *ippp; 4498 4499 oper->m_len = sizeof(struct sctp_paramhdr) + 4500 sizeof(uint32_t); 4501 ph = mtod(oper, struct sctp_paramhdr *); 4502 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 4503 ph->param_length = htons(oper->m_len); 4504 ippp = (uint32_t *) (ph + 1); 4505 *ippp = htonl(0x30000002); 4506 } 4507 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper); 4508 return; 4509 } 4510 } 4511 /**********************/ 4512 /* 1) check the range */ 4513 /**********************/ 4514 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) { 4515 /* acking something behind */ 4516 return; 4517 } 4518 /* update the Rwnd of the peer */ 4519 if (TAILQ_EMPTY(&asoc->sent_queue) && 4520 TAILQ_EMPTY(&asoc->send_queue) && 4521 (asoc->stream_queue_cnt == 0) 4522 ) { 4523 /* nothing left on send/sent and strmq */ 4524 #ifdef SCTP_LOG_RWND 4525 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4526 asoc->peers_rwnd, 0, 0, a_rwnd); 4527 #endif 4528 asoc->peers_rwnd = a_rwnd; 4529 if (asoc->sent_queue_retran_cnt) { 4530 asoc->sent_queue_retran_cnt = 0; 4531 } 4532 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4533 /* SWS sender side engages */ 4534 asoc->peers_rwnd = 0; 4535 } 4536 /* stop any timers */ 4537 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4538 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4539 stcb, net); 4540 if (sctp_early_fr) { 4541 if (callout_pending(&net->fr_timer.timer)) { 4542 SCTP_STAT_INCR(sctps_earlyfrstpidsck1); 4543 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net); 4544 } 4545 } 4546 net->partial_bytes_acked = 0; 4547 net->flight_size = 0; 4548 } 4549 asoc->total_flight = 0; 4550 asoc->total_flight_count = 0; 4551 return; 4552 } 4553 /* 4554 * We init net_ack and net_ack2 to 0. These are used to track two 4555 * things. The total byte count acked is tracked in net_ack, AND 4556 * net_ack2 is used to track the total bytes acked that are 4557 * unambiguous and were never retransmitted. We track these on a per 4558 * destination address basis. 4559 */ 4560 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4561 net->prev_cwnd = net->cwnd; 4562 net->net_ack = 0; 4563 net->net_ack2 = 0; 4564 4565 /* 4566 * CMT: Reset CUC algo variable before SACK processing 4567 */ 4568 net->new_pseudo_cumack = 0; 4569 net->will_exit_fast_recovery = 0; 4570 } 4571 /* process the new consecutive TSN first */ 4572 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4573 while (tp1) { 4574 if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq, 4575 MAX_TSN) || 4576 last_tsn == tp1->rec.data.TSN_seq) { 4577 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 4578 /* 4579 * ECN Nonce: Add the nonce to the sender's 4580 * nonce sum 4581 */ 4582 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce; 4583 accum_moved = 1; 4584 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4585 /* 4586 * If it is less than ACKED, it is 4587 * now no longer in flight. Higher 4588 * values may occur during marking 4589 */ 4590 if ((tp1->whoTo->dest_state & 4591 SCTP_ADDR_UNCONFIRMED) && 4592 (tp1->snd_count < 2)) { 4593 /* 4594 * If there was no retran 4595 * and the address is 4596 * un-confirmed and we sent 4597 * there and are now 4598 * sacked.. it's confirmed; 4599 * mark it so.
4600 */ 4601 tp1->whoTo->dest_state &= 4602 ~SCTP_ADDR_UNCONFIRMED; 4603 } 4604 if (tp1->rec.data.chunk_was_revoked == 1) { 4605 /* 4606 * If its been revoked, and 4607 * now ack'd we do NOT take 4608 * away fs etc. since when 4609 * it is retransmitted we 4610 * clear this flag. 4611 */ 4612 goto skip_fs_update; 4613 } 4614 if (tp1->whoTo->flight_size >= tp1->book_size) { 4615 tp1->whoTo->flight_size -= tp1->book_size; 4616 } else { 4617 tp1->whoTo->flight_size = 0; 4618 } 4619 if (asoc->total_flight >= tp1->book_size) { 4620 asoc->total_flight -= tp1->book_size; 4621 if (asoc->total_flight_count > 0) 4622 asoc->total_flight_count--; 4623 } else { 4624 asoc->total_flight = 0; 4625 asoc->total_flight_count = 0; 4626 } 4627 tp1->whoTo->net_ack += tp1->send_size; 4628 4629 /* CMT SFR and DAC algos */ 4630 this_sack_lowest_newack = tp1->rec.data.TSN_seq; 4631 tp1->whoTo->saw_newack = 1; 4632 4633 if (tp1->snd_count < 2) { 4634 /* 4635 * True non-retransmited 4636 * chunk 4637 */ 4638 tp1->whoTo->net_ack2 += 4639 tp1->send_size; 4640 4641 /* update RTO too? */ 4642 if (tp1->do_rtt) { 4643 tp1->whoTo->RTO = 4644 sctp_calculate_rto(stcb, 4645 asoc, tp1->whoTo, 4646 &tp1->sent_rcv_time); 4647 tp1->whoTo->rto_pending = 0; 4648 tp1->do_rtt = 0; 4649 } 4650 } 4651 skip_fs_update: 4652 /* 4653 * CMT: CUCv2 algorithm. From the 4654 * cumack'd TSNs, for each TSN being 4655 * acked for the first time, set the 4656 * following variables for the 4657 * corresp destination. 4658 * new_pseudo_cumack will trigger a 4659 * cwnd update. 4660 * find_(rtx_)pseudo_cumack will 4661 * trigger search for the next 4662 * expected (rtx-)pseudo-cumack. 4663 */ 4664 tp1->whoTo->new_pseudo_cumack = 1; 4665 tp1->whoTo->find_pseudo_cumack = 1; 4666 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4667 4668 4669 #ifdef SCTP_SACK_LOGGING 4670 sctp_log_sack(asoc->last_acked_seq, 4671 cum_ack, 4672 tp1->rec.data.TSN_seq, 4673 0, 4674 0, 4675 SCTP_LOG_TSN_ACKED); 4676 #endif 4677 #ifdef SCTP_CWND_LOGGING 4678 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 4679 #endif 4680 } 4681 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4682 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4683 #ifdef SCTP_AUDITING_ENABLED 4684 sctp_audit_log(0xB3, 4685 (asoc->sent_queue_retran_cnt & 0x000000ff)); 4686 #endif 4687 } 4688 tp1->sent = SCTP_DATAGRAM_ACKED; 4689 } 4690 } else { 4691 break; 4692 } 4693 tp1 = TAILQ_NEXT(tp1, sctp_next); 4694 } 4695 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; 4696 /* always set this up to cum-ack */ 4697 asoc->this_sack_highest_gap = last_tsn; 4698 4699 if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) { 4700 4701 /* skip corrupt segments */ 4702 goto skip_segments; 4703 } 4704 if (num_seg > 0) { 4705 4706 /* 4707 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has 4708 * to be greater than the cumack. Also reset saw_newack to 0 4709 * for all dests. 4710 */ 4711 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4712 net->saw_newack = 0; 4713 net->this_sack_highest_newack = last_tsn; 4714 } 4715 4716 /* 4717 * thisSackHighestGap will increase while handling NEW 4718 * segments this_sack_highest_newack will increase while 4719 * handling NEWLY ACKED chunks. this_sack_lowest_newack is 4720 * used for CMT DAC algo. saw_newack will also change. 
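*/
#if 0
	/*
	 * Editor's sketch (assuming the RFC 2960 gap-ack wire format and
	 * the struct sctp_gap_ack_block start/end fields) of what
	 * sctp_handle_segments() walks below: each block carries 16-bit
	 * offsets relative to the cum-ack, acknowledging the TSN range
	 * [last_tsn + start, last_tsn + end]:
	 */
	struct sctp_gap_ack_block *frag;
	uint32_t tsn_start, tsn_end;

	frag = (struct sctp_gap_ack_block *)((caddr_t)ch +
	    sizeof(struct sctp_sack_chunk));
	tsn_start = last_tsn + ntohs(frag->start);
	tsn_end = last_tsn + ntohs(frag->end);
#endif
/*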
4721 */ 4722 sctp_handle_segments(stcb, asoc, ch, last_tsn, 4723 &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack, 4724 num_seg, &ecn_seg_sums); 4725 4726 if (sctp_strict_sacks) { 4727 /* 4728 * validate the biggest_tsn_acked in the gap acks if 4729 * strict adherence is wanted. 4730 */ 4731 if ((biggest_tsn_acked == send_s) || 4732 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) { 4733 /* 4734 * peer is either confused or we are under 4735 * attack. We must abort. 4736 */ 4737 goto hopeless_peer; 4738 } 4739 } 4740 } 4741 skip_segments: 4742 /*******************************************/ 4743 /* cancel ALL T3-send timer if accum moved */ 4744 /*******************************************/ 4745 if (sctp_cmt_on_off) { 4746 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4747 if (net->new_pseudo_cumack) 4748 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4749 stcb, net); 4750 4751 } 4752 } else { 4753 if (accum_moved) { 4754 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4755 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4756 stcb, net); 4757 } 4758 } 4759 } 4760 /********************************************/ 4761 /* drop the acked chunks from the sendqueue */ 4762 /********************************************/ 4763 asoc->last_acked_seq = cum_ack; 4764 4765 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4766 if (tp1 == NULL) 4767 goto done_with_it; 4768 do { 4769 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack, 4770 MAX_TSN)) { 4771 break; 4772 } 4773 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 4774 /* no more sent on list */ 4775 break; 4776 } 4777 tp2 = TAILQ_NEXT(tp1, sctp_next); 4778 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4779 /* 4780 * Friendlier printf in lieu of panic now that I think its 4781 * fixed 4782 */ 4783 4784 if (tp1->pr_sctp_on) { 4785 if (asoc->pr_sctp_cnt != 0) 4786 asoc->pr_sctp_cnt--; 4787 } 4788 if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) && 4789 (asoc->total_flight > 0)) { 4790 printf("Warning flight size incorrect should be 0 is %d\n", 4791 asoc->total_flight); 4792 asoc->total_flight = 0; 4793 } 4794 if (tp1->data) { 4795 sctp_free_bufspace(stcb, asoc, tp1, 1); 4796 sctp_m_freem(tp1->data); 4797 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 4798 asoc->sent_queue_cnt_removeable--; 4799 } 4800 } 4801 #ifdef SCTP_SACK_LOGGING 4802 sctp_log_sack(asoc->last_acked_seq, 4803 cum_ack, 4804 tp1->rec.data.TSN_seq, 4805 0, 4806 0, 4807 SCTP_LOG_FREE_SENT); 4808 #endif 4809 tp1->data = NULL; 4810 asoc->sent_queue_cnt--; 4811 sctp_free_remote_addr(tp1->whoTo); 4812 4813 sctp_free_a_chunk(stcb, tp1); 4814 wake_him++; 4815 tp1 = tp2; 4816 } while (tp1 != NULL); 4817 4818 done_with_it: 4819 if ((wake_him) && (stcb->sctp_socket)) { 4820 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4821 #ifdef SCTP_WAKE_LOGGING 4822 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK); 4823 #endif 4824 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4825 #ifdef SCTP_WAKE_LOGGING 4826 } else { 4827 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK); 4828 #endif 4829 } 4830 4831 if ((sctp_cmt_on_off == 0) && asoc->fast_retran_loss_recovery && accum_moved) { 4832 if (compare_with_wrap(asoc->last_acked_seq, 4833 asoc->fast_recovery_tsn, MAX_TSN) || 4834 asoc->last_acked_seq == asoc->fast_recovery_tsn) { 4835 /* Setup so we will exit RFC2582 fast recovery */ 4836 will_exit_fast_recovery = 1; 4837 } 4838 } 4839 /* 4840 * Check for revoked fragments: 4841 * 4842 * if Previous sack - Had no frags then we can't have any revoked if 4843 * 
Previous sack - Had frag's then - If we now have frags aka 4844 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked 4845 * some of them. else - The peer revoked all ACKED fragments, since 4846 * we had some before and now we have NONE. 4847 */ 4848 4849 if (sctp_cmt_on_off) { 4850 /* 4851 * Don't check for revoked if CMT is ON. CMT causes 4852 * reordering of data and acks (received on different 4853 * interfaces) can be persistently reordered. Acking 4854 * followed by apparent revoking and re-acking causes 4855 * unexpected weird behavior. So, at this time, CMT does not 4856 * respect renegs. Renegs will have to be recovered through 4857 * a timeout. Not a big deal for such a rare event. 4858 */ 4859 } else if (num_seg) 4860 sctp_check_for_revoked(asoc, cum_ack, biggest_tsn_acked); 4861 else if (asoc->saw_sack_with_frags) { 4862 int cnt_revoked = 0; 4863 4864 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4865 if (tp1 != NULL) { 4866 /* Peer revoked all dg's marked or acked */ 4867 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4868 if ((tp1->sent > SCTP_DATAGRAM_RESEND) && 4869 (tp1->sent < SCTP_FORWARD_TSN_SKIP)) { 4870 tp1->sent = SCTP_DATAGRAM_SENT; 4871 cnt_revoked++; 4872 } 4873 } 4874 if (cnt_revoked) { 4875 reneged_all = 1; 4876 } 4877 } 4878 asoc->saw_sack_with_frags = 0; 4879 } 4880 if (num_seg) 4881 asoc->saw_sack_with_frags = 1; 4882 else 4883 asoc->saw_sack_with_frags = 0; 4884 4885 4886 sctp_cwnd_update(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery); 4887 4888 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4889 /* nothing left in-flight */ 4890 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4891 /* stop all timers */ 4892 if (sctp_early_fr) { 4893 if (callout_pending(&net->fr_timer.timer)) { 4894 SCTP_STAT_INCR(sctps_earlyfrstpidsck4); 4895 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net); 4896 } 4897 } 4898 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4899 stcb, net); 4900 net->flight_size = 0; 4901 net->partial_bytes_acked = 0; 4902 } 4903 asoc->total_flight = 0; 4904 asoc->total_flight_count = 0; 4905 } 4906 /**********************************/ 4907 /* Now what about shutdown issues */ 4908 /**********************************/ 4909 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4910 /* nothing left on sendqueue.. consider done */ 4911 #ifdef SCTP_LOG_RWND 4912 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4913 asoc->peers_rwnd, 0, 0, a_rwnd); 4914 #endif 4915 asoc->peers_rwnd = a_rwnd; 4916 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4917 /* SWS sender side engages */ 4918 asoc->peers_rwnd = 0; 4919 } 4920 /* clean up */ 4921 if ((asoc->stream_queue_cnt == 1) && 4922 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4923 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 4924 (asoc->locked_on_sending) 4925 ) { 4926 struct sctp_stream_queue_pending *sp; 4927 4928 /* 4929 * I may be in a state where we got all across.. but 4930 * cannot write more due to a shutdown... we abort 4931 * since the user did not indicate EOR in this case. 
4932 */ 4933 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), 4934 sctp_streamhead); 4935 if ((sp) && (sp->length == 0) && (sp->msg_is_complete == 0)) { 4936 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4937 asoc->locked_on_sending = NULL; 4938 asoc->stream_queue_cnt--; 4939 } 4940 } 4941 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4942 (asoc->stream_queue_cnt == 0)) { 4943 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4944 /* Need to abort here */ 4945 struct mbuf *oper; 4946 4947 abort_out_now: 4948 *abort_now = 1; 4949 /* XXX */ 4950 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4951 0, M_DONTWAIT, 1, MT_DATA); 4952 if (oper) { 4953 struct sctp_paramhdr *ph; 4954 uint32_t *ippp; 4955 4956 oper->m_len = sizeof(struct sctp_paramhdr) + 4957 sizeof(uint32_t); 4958 ph = mtod(oper, struct sctp_paramhdr *); 4959 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 4960 ph->param_length = htons(oper->m_len); 4961 ippp = (uint32_t *) (ph + 1); 4962 *ippp = htonl(0x30000003); 4963 } 4964 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper); 4965 return; 4966 } else { 4967 asoc->state = SCTP_STATE_SHUTDOWN_SENT; 4968 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4969 sctp_stop_timers_for_shutdown(stcb); 4970 sctp_send_shutdown(stcb, 4971 stcb->asoc.primary_destination); 4972 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4973 stcb->sctp_ep, stcb, asoc->primary_destination); 4974 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4975 stcb->sctp_ep, stcb, asoc->primary_destination); 4976 } 4977 return; 4978 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4979 (asoc->stream_queue_cnt == 0)) { 4980 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4981 goto abort_out_now; 4982 } 4983 asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT; 4984 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4985 sctp_send_shutdown_ack(stcb, 4986 stcb->asoc.primary_destination); 4987 4988 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4989 stcb->sctp_ep, stcb, asoc->primary_destination); 4990 return; 4991 } 4992 } 4993 /* 4994 * Now here we are going to recycle net_ack for a different use... 4995 * HEADS UP. 4996 */ 4997 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4998 net->net_ack = 0; 4999 } 5000 5001 /* 5002 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking 5003 * to be done. Setting this_sack_lowest_newack to the cum_ack will 5004 * automatically ensure that. 5005 */ 5006 if (sctp_cmt_on_off && sctp_cmt_use_dac && (cmt_dac_flag == 0)) { 5007 this_sack_lowest_newack = cum_ack; 5008 } 5009 if (num_seg > 0) { 5010 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, 5011 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved); 5012 } 5013 /*********************************************/ 5014 /* Here we perform PR-SCTP procedures */ 5015 /* (section 4.2) */ 5016 /*********************************************/ 5017 /* C1. update advancedPeerAckPoint */ 5018 if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) { 5019 asoc->advanced_peer_ack_point = cum_ack; 5020 } 5021 /* C2. try to further move advancedPeerAckPoint ahead */ 5022 5023 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) { 5024 struct sctp_tmit_chunk *lchk; 5025 5026 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 5027 /* C3. 
See if we need to send a Fwd-TSN */ 5028 if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack, 5029 MAX_TSN)) { 5030 /* 5031 * ISSUE with ECN, see FWD-TSN processing for notes 5032 * on issues that will occur when the ECN NONCE 5033 * stuff is put into SCTP for cross checking. 5034 */ 5035 send_forward_tsn(stcb, asoc); 5036 5037 /* 5038 * ECN Nonce: Disable Nonce Sum check when FWD TSN 5039 * is sent and store resync tsn 5040 */ 5041 asoc->nonce_sum_check = 0; 5042 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point; 5043 if (lchk) { 5044 /* Assure a timer is up */ 5045 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5046 stcb->sctp_ep, stcb, lchk->whoTo); 5047 } 5048 } 5049 } 5050 /* 5051 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off == 1) && 5052 * (net->fast_retran_loss_recovery == 0))) 5053 */ 5054 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5055 if ((asoc->fast_retran_loss_recovery == 0) || (sctp_cmt_on_off == 1)) { 5056 /* out of a RFC2582 Fast recovery window? */ 5057 if (net->net_ack > 0) { 5058 /* 5059 * per section 7.2.3, are there any 5060 * destinations that had a fast retransmit 5061 * to them. If so what we need to do is 5062 * adjust ssthresh and cwnd. 5063 */ 5064 struct sctp_tmit_chunk *lchk; 5065 5066 #ifdef SCTP_HIGH_SPEED 5067 sctp_hs_cwnd_decrease(stcb, net); 5068 #else 5069 #ifdef SCTP_CWND_MONITOR 5070 int old_cwnd = net->cwnd; 5071 5072 #endif 5073 net->ssthresh = net->cwnd / 2; 5074 if (net->ssthresh < (net->mtu * 2)) { 5075 net->ssthresh = 2 * net->mtu; 5076 } 5077 net->cwnd = net->ssthresh; 5078 #ifdef SCTP_CWND_MONITOR 5079 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), 5080 SCTP_CWND_LOG_FROM_FR); 5081 #endif 5082 #endif 5083 5084 lchk = TAILQ_FIRST(&asoc->send_queue); 5085 5086 net->partial_bytes_acked = 0; 5087 /* Turn on fast recovery window */ 5088 asoc->fast_retran_loss_recovery = 1; 5089 if (lchk == NULL) { 5090 /* Mark end of the window */ 5091 asoc->fast_recovery_tsn = asoc->sending_seq - 1; 5092 } else { 5093 asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1; 5094 } 5095 5096 /* 5097 * CMT fast recovery -- per destination 5098 * recovery variable. 5099 */ 5100 net->fast_retran_loss_recovery = 1; 5101 5102 if (lchk == NULL) { 5103 /* Mark end of the window */ 5104 net->fast_recovery_tsn = asoc->sending_seq - 1; 5105 } else { 5106 net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1; 5107 } 5108 5109 5110 5111 /* 5112 * Disable Nonce Sum Checking and store the 5113 * resync tsn 5114 */ 5115 asoc->nonce_sum_check = 0; 5116 asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1; 5117 5118 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, 5119 stcb->sctp_ep, stcb, net); 5120 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5121 stcb->sctp_ep, stcb, net); 5122 } 5123 } else if (net->net_ack > 0) { 5124 /* 5125 * Mark a peg that we WOULD have done a cwnd 5126 * reduction but RFC2582 prevented this action. 5127 */ 5128 SCTP_STAT_INCR(sctps_fastretransinrtt); 5129 } 5130 } 5131 5132 5133 /****************************************************************** 5134 * Here we do the stuff with ECN Nonce checking. 5135 * We basically check to see if the nonce sum flag was incorrect 5136 * or if resynchronization needs to be done. Also if we catch a 5137 * misbehaving receiver we give him the kick. 
5138 ******************************************************************/ 5139 5140 if (asoc->ecn_nonce_allowed) { 5141 if (asoc->nonce_sum_check) { 5142 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) { 5143 if (asoc->nonce_wait_for_ecne == 0) { 5144 struct sctp_tmit_chunk *lchk; 5145 5146 lchk = TAILQ_FIRST(&asoc->send_queue); 5147 asoc->nonce_wait_for_ecne = 1; 5148 if (lchk) { 5149 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq; 5150 } else { 5151 asoc->nonce_wait_tsn = asoc->sending_seq; 5152 } 5153 } else { 5154 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) || 5155 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) { 5156 /* 5157 * Misbehaving peer. We need 5158 * to react to this guy 5159 */ 5160 asoc->ecn_allowed = 0; 5161 asoc->ecn_nonce_allowed = 0; 5162 } 5163 } 5164 } 5165 } else { 5166 /* See if Resynchronization Possible */ 5167 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) { 5168 asoc->nonce_sum_check = 1; 5169 /* 5170 * now we must calculate what the base is. 5171 * We do this based on two things: we know 5172 * the totals for all the segments 5173 * gap-acked in the SACK, it's stored in 5174 * ecn_seg_sums. We also know the SACK's 5175 * nonce sum, it's in nonce_sum_flag. So we 5176 * can build a truth table to back-calculate 5177 * the new value of 5178 * asoc->nonce_sum_expect_base: 5179 * 5180 * (SACK-flag, Seg-Sums) -> Base 5181 * (0, 0) -> 0   (1, 0) -> 1 5182 * (0, 1) -> 1   (1, 1) -> 0 5183 */ 5184 asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM; 5185 } 5186 } 5187 } 5188 /* Now are we exiting loss recovery? */ 5189 if (will_exit_fast_recovery) { 5190 /* Ok, we must exit fast recovery */ 5191 asoc->fast_retran_loss_recovery = 0; 5192 } 5193 if ((asoc->sat_t3_loss_recovery) && 5194 ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn, 5195 MAX_TSN) || 5196 (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) { 5197 /* end satellite t3 loss recovery */ 5198 asoc->sat_t3_loss_recovery = 0; 5199 } 5200 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5201 if (net->will_exit_fast_recovery) { 5202 /* Ok, we must exit fast recovery */ 5203 net->fast_retran_loss_recovery = 0; 5204 } 5205 } 5206 5207 /* Adjust and set the new rwnd value */ 5208 #ifdef SCTP_LOG_RWND 5209 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 5210 asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * sctp_peer_chunk_oh), a_rwnd); 5211 #endif 5212 5213 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd, 5214 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh))); 5215 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 5216 /* SWS sender side engages */ 5217 asoc->peers_rwnd = 0; 5218 } 5219 /* 5220 * Now we must setup so we have a timer up for anyone with 5221 * outstanding data.
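*/
/*
 * Editor's worked example (hypothetical numbers) for the rwnd update
 * above: with a_rwnd = 65536 from the SACK, 20480 bytes still in
 * flight, and 10 queued chunks charged sctp_peer_chunk_oh bytes of
 * assumed per-chunk overhead each, the usable window is
 *
 *	peers_rwnd = 65536 - (20480 + 10 * sctp_peer_chunk_oh)
 *
 * and any result below the sctp_sws_sender threshold is clamped to 0,
 * the sender-side silly window syndrome (SWS) avoidance.
 */
/*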
5222 */ 5223 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5224 if (net->flight_size) { 5225 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5226 stcb->sctp_ep, stcb, net); 5227 } 5228 } 5229 #ifdef SCTP_SACK_RWND_LOGGING 5230 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 5231 a_rwnd, 5232 stcb->asoc.peers_rwnd, 5233 stcb->asoc.total_flight, 5234 stcb->asoc.total_output_queue_size); 5235 5236 #endif 5237 5238 } 5239 5240 void 5241 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, 5242 struct sctp_nets *netp, int *abort_flag) 5243 { 5244 /* Copy cum-ack */ 5245 uint32_t cum_ack, a_rwnd; 5246 5247 cum_ack = ntohl(cp->cumulative_tsn_ack); 5248 /* Arrange so a_rwnd does NOT change */ 5249 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight; 5250 5251 /* Now call the express sack handling */ 5252 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag); 5253 } 5254 5255 static void 5256 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, 5257 struct sctp_stream_in *strmin) 5258 { 5259 struct sctp_queued_to_read *ctl, *nctl; 5260 struct sctp_association *asoc; 5261 int tt; 5262 5263 asoc = &stcb->asoc; 5264 tt = strmin->last_sequence_delivered; 5265 /* 5266 * First deliver anything prior to and including the sequence number 5267 * that came in 5268 */ 5269 ctl = TAILQ_FIRST(&strmin->inqueue); 5270 while (ctl) { 5271 nctl = TAILQ_NEXT(ctl, next); 5272 if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) || 5273 (tt == ctl->sinfo_ssn)) { 5274 /* this is deliverable now */ 5275 TAILQ_REMOVE(&strmin->inqueue, ctl, next); 5276 /* subtract pending on streams */ 5277 asoc->size_on_all_streams -= ctl->length; 5278 sctp_ucount_decr(asoc->cnt_on_all_streams); 5279 /* deliver it to at least the delivery-q */ 5280 if (stcb->sctp_socket) { 5281 sctp_add_to_readq(stcb->sctp_ep, stcb, 5282 ctl, 5283 &stcb->sctp_socket->so_rcv, 1); 5284 } 5285 } else { 5286 /* no more delivery now. */ 5287 break; 5288 } 5289 ctl = nctl; 5290 } 5291 /* 5292 * now we must deliver things in queue the normal way if any are 5293 * now ready. 5294 */ 5295 tt = strmin->last_sequence_delivered + 1; 5296 ctl = TAILQ_FIRST(&strmin->inqueue); 5297 while (ctl) { 5298 nctl = TAILQ_NEXT(ctl, next); 5299 if (tt == ctl->sinfo_ssn) { 5300 /* this is deliverable now */ 5301 TAILQ_REMOVE(&strmin->inqueue, ctl, next); 5302 /* subtract pending on streams */ 5303 asoc->size_on_all_streams -= ctl->length; 5304 sctp_ucount_decr(asoc->cnt_on_all_streams); 5305 /* deliver it to at least the delivery-q */ 5306 strmin->last_sequence_delivered = ctl->sinfo_ssn; 5307 if (stcb->sctp_socket) { 5308 sctp_add_to_readq(stcb->sctp_ep, stcb, 5309 ctl, 5310 &stcb->sctp_socket->so_rcv, 1); 5311 } 5312 tt = strmin->last_sequence_delivered + 1; 5313 } else { 5314 break; 5315 } 5316 ctl = nctl; 5317 } 5318 } 5319 5320 void 5321 sctp_handle_forward_tsn(struct sctp_tcb *stcb, 5322 struct sctp_forward_tsn_chunk *fwd, int *abort_flag) 5323 { 5324 /* 5325 * ISSUES that MUST be fixed for ECN! When we are the sender of the 5326 * forward TSN, when the SACK comes back that acknowledges the 5327 * FWD-TSN we must reset the NONCE sum to match correctly. This will 5328 * get quite tricky since we may have sent more data intervening 5329 * and must carefully account for what the SACK says on the nonce 5330 * and any gaps that are reported.
This work will NOT be done here, 5331 * but I note it here since it is really related to PR-SCTP and 5332 * FWD-TSNs 5333 */ 5334 5335 /* The pr-sctp fwd tsn */ 5336 /* 5337 * here we will perform all the data receiver side steps for 5338 * processing FwdTSN, as required by the PR-SCTP draft: 5339 * 5340 * Assume we get FwdTSN(x): 5341 * 5342 * 1) update local cumTSN to x 2) try to further advance cumTSN to x + 5343 * others we have 3) examine and update re-ordering queue on 5344 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to 5345 * report where we are. 5346 */ 5347 struct sctp_strseq *stseq; 5348 struct sctp_association *asoc; 5349 uint32_t new_cum_tsn, gap, back_out_htsn; 5350 unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size; 5351 struct sctp_stream_in *strm; 5352 struct sctp_tmit_chunk *chk, *at; 5353 5354 cumack_set_flag = 0; 5355 asoc = &stcb->asoc; 5356 cnt_gone = 0; 5357 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) { 5358 #ifdef SCTP_DEBUG 5359 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 5360 printf("Bad size on fwd-tsn chunk... too small\n"); 5361 } 5362 #endif 5363 return; 5364 } 5365 m_size = (stcb->asoc.mapping_array_size << 3); 5366 /*************************************************************/ 5367 /* 1. Here we update local cumTSN and shift the bitmap array */ 5368 /*************************************************************/ 5369 new_cum_tsn = ntohl(fwd->new_cumulative_tsn); 5370 5371 if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) || 5372 asoc->cumulative_tsn == new_cum_tsn) { 5373 /* Already got there ... */ 5374 return; 5375 } 5376 back_out_htsn = asoc->highest_tsn_inside_map; 5377 if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map, 5378 MAX_TSN)) { 5379 asoc->highest_tsn_inside_map = new_cum_tsn; 5380 #ifdef SCTP_MAP_LOGGING 5381 sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 5382 #endif 5383 } 5384 /* 5385 * now we know the new TSN is more advanced, let's find the actual 5386 * gap 5387 */ 5388 if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn, 5389 MAX_TSN)) || 5390 (new_cum_tsn == asoc->mapping_array_base_tsn)) { 5391 gap = new_cum_tsn - asoc->mapping_array_base_tsn; 5392 } else { 5393 /* try to prevent underflow here */ 5394 gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1; 5395 } 5396 5397 if (gap > m_size) { /* gap is unsigned; a "< 0" test could never fire */ 5398 asoc->highest_tsn_inside_map = back_out_htsn; 5399 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) { 5400 /* 5401 * out of range (of single byte chunks in the rwnd I 5402 * give out) too questionable. better to drop it 5403 * silently 5404 */ 5405 return; 5406 } 5407 if (asoc->highest_tsn_inside_map > 5408 asoc->mapping_array_base_tsn) { 5409 gap = asoc->highest_tsn_inside_map - 5410 asoc->mapping_array_base_tsn; 5411 } else { 5412 gap = asoc->highest_tsn_inside_map + 5413 (MAX_TSN - asoc->mapping_array_base_tsn) + 1; 5414 } 5415 cumack_set_flag = 1; 5416 } 5417 for (i = 0; i <= gap; i++) { 5418 SCTP_SET_TSN_PRESENT(asoc->mapping_array, i); 5419 } 5420 /* 5421 * Now after marking all, slide things forward but no sack please. 5422 */ 5423 sctp_sack_check(stcb, 0, 0, abort_flag); 5424 if (*abort_flag) 5425 return; 5426 5427 if (cumack_set_flag) { 5428 /* 5429 * fwd-tsn went outside my gap array - not a common 5430 * occurrence. Do the same thing we do when a cookie-echo 5431 * arrives.
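*/
/*
 * Editor's worked example (hypothetical numbers) for the gap arithmetic
 * above: with mapping_array_base_tsn = 0xfffffff0 and new_cum_tsn =
 * 0x00000005, direct subtraction would underflow, so the wrap branch
 * computes gap = 0x5 + (0xffffffff - 0xfffffff0) + 1 = 21, and the
 * marking loop then sets offsets 0..21 (22 TSNs) present in the map.
 */
/*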
5432 */ 5433 asoc->highest_tsn_inside_map = new_cum_tsn - 1; 5434 asoc->mapping_array_base_tsn = new_cum_tsn; 5435 asoc->cumulative_tsn = asoc->highest_tsn_inside_map; 5436 #ifdef SCTP_MAP_LOGGING 5437 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 5438 #endif 5439 asoc->last_echo_tsn = asoc->highest_tsn_inside_map; 5440 } 5441 /*************************************************************/ 5442 /* 2. Clear up re-assembly queue */ 5443 /*************************************************************/ 5444 5445 /* 5446 * First service it if pd-api is up, just in case we can progress it 5447 * forward 5448 */ 5449 if (asoc->fragmented_delivery_inprogress) { 5450 sctp_service_reassembly(stcb, asoc); 5451 } 5452 if (!TAILQ_EMPTY(&asoc->reasmqueue)) { 5453 /* For each one on here see if we need to toss it */ 5454 /* 5455 * For now large messages held on the reasmqueue that are 5456 * complete will be tossed too. We could in theory do more 5457 * work to spin through and stop after dumping one msg aka 5458 * seeing the start of a new msg at the head, and call the 5459 * delivery function... to see if it can be delivered... But 5460 * for now we just dump everything on the queue. 5461 */ 5462 chk = TAILQ_FIRST(&asoc->reasmqueue); 5463 while (chk) { 5464 at = TAILQ_NEXT(chk, sctp_next); 5465 if (compare_with_wrap(asoc->cumulative_tsn, 5466 chk->rec.data.TSN_seq, MAX_TSN) || 5467 asoc->cumulative_tsn == chk->rec.data.TSN_seq) { 5468 /* It needs to be tossed */ 5469 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); 5470 if (compare_with_wrap(chk->rec.data.TSN_seq, 5471 asoc->tsn_last_delivered, MAX_TSN)) { 5472 asoc->tsn_last_delivered = 5473 chk->rec.data.TSN_seq; 5474 asoc->str_of_pdapi = 5475 chk->rec.data.stream_number; 5476 asoc->ssn_of_pdapi = 5477 chk->rec.data.stream_seq; 5478 asoc->fragment_flags = 5479 chk->rec.data.rcv_flags; 5480 } 5481 asoc->size_on_reasm_queue -= chk->send_size; 5482 sctp_ucount_decr(asoc->cnt_on_reasm_queue); 5483 cnt_gone++; 5484 5485 /* Clear up any stream problem */ 5486 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != 5487 SCTP_DATA_UNORDERED && 5488 (compare_with_wrap(chk->rec.data.stream_seq, 5489 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered, 5490 MAX_SEQ))) { 5491 /* 5492 * We must advance this stream's 5493 * sequence number if the chunk 5494 * being skipped is not unordered. 5495 * There is a chance that 5496 * if the peer does not include the 5497 * last fragment in its FWD-TSN we 5498 * WILL have a problem here since 5499 * you would have a partial chunk in 5500 * queue that may not be 5501 * deliverable. Also if a partial 5502 * delivery API has started, the user 5503 * may get a partial chunk. The next 5504 * read returning a new chunk... 5505 * really ugly but I see no way 5506 * around it! Maybe a notify?? 5507 */ 5508 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = 5509 chk->rec.data.stream_seq; 5510 } 5511 if (chk->data) { 5512 sctp_m_freem(chk->data); 5513 chk->data = NULL; 5514 } 5515 sctp_free_remote_addr(chk->whoTo); 5516 sctp_free_a_chunk(stcb, chk); 5517 } else { 5518 /* 5519 * Ok we have gone beyond the end of the 5520 * fwd-tsn's mark. Some checks... 5521 */ 5522 if ((asoc->fragmented_delivery_inprogress) && 5523 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) { 5524 /* 5525 * Special case PD-API is up and 5526 * what we fwd-tsn'd over includes 5527 * one that had the LAST_FRAG. We no 5528 * longer need to do the PD-API.
5529 */ 5530 asoc->fragmented_delivery_inprogress = 0; 5531 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION, 5532 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL); 5533 5534 } 5535 break; 5536 } 5537 chk = at; 5538 } 5539 } 5540 if (asoc->fragmented_delivery_inprogress) { 5541 /* 5542 * Ok we removed cnt_gone chunks in the PD-API queue that 5543 * were being delivered. So now we must turn off the flag. 5544 */ 5545 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION, 5546 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL); 5547 asoc->fragmented_delivery_inprogress = 0; 5548 } 5549 /*************************************************************/ 5550 /* 3. Update the PR-stream re-ordering queues */ 5551 /*************************************************************/ 5552 stseq = (struct sctp_strseq *)((caddr_t)fwd + sizeof(*fwd)); 5553 fwd_sz -= sizeof(*fwd); 5554 { 5555 /* New method. */ 5556 int num_str, i; 5557 5558 num_str = fwd_sz / sizeof(struct sctp_strseq); 5559 for (i = 0; i < num_str; i++) { 5560 uint16_t st; 5561 unsigned char *xx; 5562 5563 /* Convert */ 5564 xx = (unsigned char *)&stseq[i]; 5565 st = ntohs(stseq[i].stream); 5566 stseq[i].stream = st; 5567 st = ntohs(stseq[i].sequence); 5568 stseq[i].sequence = st; 5569 /* now process */ 5570 if (stseq[i].stream > asoc->streamincnt) { 5571 /* 5572 * It is arguable if we should continue. 5573 * Since the peer sent bogus stream info we 5574 * may be in deep trouble.. a return may be 5575 * a better choice? 5576 */ 5577 continue; 5578 } 5579 strm = &asoc->strmin[stseq[i].stream]; 5580 if (compare_with_wrap(stseq[i].sequence, 5581 strm->last_sequence_delivered, MAX_SEQ)) { 5582 /* Update the sequence number */ 5583 strm->last_sequence_delivered = 5584 stseq[i].sequence; 5585 } 5586 /* now kick the stream the new way */ 5587 sctp_kick_prsctp_reorder_queue(stcb, strm); 5588 } 5589 } 5590 } 5591
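/*
 * Editor's illustration (not part of the original sources): the two-pass
 * delivery rule used by sctp_kick_prsctp_reorder_queue() above, modeled
 * over a plain array of stream sequence numbers.  Pass one releases
 * everything at or before the new cumulative SSN; pass two releases any
 * run of consecutive SSNs that then becomes deliverable.  A hypothetical,
 * simplified model, not kernel code:
 */
#if 0
static int
prsctp_kick_model(uint16_t *ssns, int n, uint16_t last_delivered)
{
	int i, delivered = 0;

	/* pass 1: anything the FWD-TSN already skipped us past */
	for (i = 0; i < n; i++) {
		if ((int16_t)(last_delivered - ssns[i]) >= 0)
			delivered++;
	}
	/* pass 2: any run of consecutive SSNs that follows */
	for (i = 0; i < n; i++) {
		if (ssns[i] == (uint16_t)(last_delivered + 1)) {
			last_delivered = ssns[i];
			delivered++;
			i = -1;	/* naive rescan keeps the model short */
		}
	}
	return (delivered);
}
#endif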