1 /*- 2 * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * a) Redistributions of source code must retain the above copyright notice, 8 * this list of conditions and the following disclaimer. 9 * 10 * b) Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the distribution. 13 * 14 * c) Neither the name of Cisco Systems, Inc. nor the names of its 15 * contributors may be used to endorse or promote products derived 16 * from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 29 */ 30 31 /* $kejKAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 37 #include "opt_ipsec.h" 38 #include "opt_inet6.h" 39 #include "opt_inet.h" 40 41 #include "opt_sctp.h" 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/mbuf.h> 46 #include <sys/malloc.h> 47 #include <sys/socket.h> 48 #include <sys/socketvar.h> 49 #include <sys/sysctl.h> 50 51 #include <net/if.h> 52 #include <net/route.h> 53 54 55 #include <sys/limits.h> 56 #include <machine/cpu.h> 57 58 #include <netinet/in.h> 59 #include <netinet/in_systm.h> 60 #include <netinet/ip.h> 61 #ifdef INET6 62 #include <netinet/ip6.h> 63 #endif /* INET6 */ 64 #include <netinet/in_pcb.h> 65 #include <netinet/in_var.h> 66 #include <netinet/ip_var.h> 67 #ifdef INET6 68 #include <netinet6/ip6_var.h> 69 #endif /* INET6 */ 70 #include <netinet/ip_icmp.h> 71 #include <netinet/icmp_var.h> 72 73 #include <netinet/sctp_os.h> 74 #include <netinet/sctp_var.h> 75 #include <netinet/sctp_pcb.h> 76 #include <netinet/sctp_header.h> 77 #include <netinet/sctputil.h> 78 #include <netinet/sctp_output.h> 79 #include <netinet/sctp_input.h> 80 #include <netinet/sctp_indata.h> 81 #include <netinet/sctp_uio.h> 82 #include <netinet/sctp_timer.h> 83 #ifdef IPSEC 84 #include <netinet6/ipsec.h> 85 #include <netkey/key.h> 86 #endif /* IPSEC */ 87 88 89 #ifdef SCTP_DEBUG 90 extern uint32_t sctp_debug_on; 91 92 #endif 93 94 /* 95 * NOTES: On the outbound side of things I need to check the sack timer to 96 * see if I should generate a sack into the chunk queue (if I have data to 97 * send that is and will be sending it .. for bundling. 98 * 99 * The callback in sctp_usrreq.c will get called when the socket is read from. 100 * This will cause sctp_service_queues() to get called on the top entry in 101 * the list. 102 */ 103 104 extern int sctp_strict_sacks; 105 106 __inline void 107 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc) 108 { 109 uint32_t calc, calc_w_oh; 110 111 /* 112 * This is really set wrong with respect to a 1-2-m socket. Since 113 * the sb_cc is the count that everyone as put up. When we re-write 114 * sctp_soreceive then we will fix this so that ONLY this 115 * associations data is taken into account. 116 */ 117 if (stcb->sctp_socket == NULL) 118 return; 119 120 if (stcb->asoc.sb_cc == 0 && 121 asoc->size_on_reasm_queue == 0 && 122 asoc->size_on_all_streams == 0) { 123 /* Full rwnd granted */ 124 asoc->my_rwnd = max(stcb->sctp_socket->so_rcv.sb_hiwat, 125 SCTP_MINIMAL_RWND); 126 return; 127 } 128 /* get actual space */ 129 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv); 130 131 /* 132 * take out what has NOT been put on socket queue and we yet hold 133 * for putting up. 134 */ 135 calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue); 136 calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams); 137 138 if (calc == 0) { 139 /* out of space */ 140 asoc->my_rwnd = 0; 141 return; 142 } 143 /* what is the overhead of all these rwnd's */ 144 calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len); 145 asoc->my_rwnd = calc; 146 if (calc_w_oh == 0) { 147 /* 148 * If our overhead is greater than the advertised rwnd, we 149 * clamp the rwnd to 1. This lets us still accept inbound 150 * segments, but hopefully will shut the sender down when he 151 * finally gets the message. 152 */ 153 asoc->my_rwnd = 1; 154 } else { 155 /* SWS threshold */ 156 if (asoc->my_rwnd && 157 (asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) { 158 /* SWS engaged, tell peer none left */ 159 asoc->my_rwnd = 1; 160 } 161 } 162 } 163 164 /* Calculate what the rwnd would be */ 165 166 __inline uint32_t 167 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc) 168 { 169 uint32_t calc = 0, calc_w_oh; 170 171 /* 172 * This is really set wrong with respect to a 1-2-m socket. Since 173 * the sb_cc is the count that everyone as put up. When we re-write 174 * sctp_soreceive then we will fix this so that ONLY this 175 * associations data is taken into account. 176 */ 177 if (stcb->sctp_socket == NULL) 178 return (calc); 179 180 if (stcb->asoc.sb_cc == 0 && 181 asoc->size_on_reasm_queue == 0 && 182 asoc->size_on_all_streams == 0) { 183 /* Full rwnd granted */ 184 calc = max(stcb->sctp_socket->so_rcv.sb_hiwat, 185 SCTP_MINIMAL_RWND); 186 return (calc); 187 } 188 /* get actual space */ 189 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv); 190 191 /* 192 * take out what has NOT been put on socket queue and we yet hold 193 * for putting up. 194 */ 195 calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue); 196 calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams); 197 198 if (calc == 0) { 199 /* out of space */ 200 return (calc); 201 } 202 /* what is the overhead of all these rwnd's */ 203 calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len); 204 if (calc_w_oh == 0) { 205 /* 206 * If our overhead is greater than the advertised rwnd, we 207 * clamp the rwnd to 1. This lets us still accept inbound 208 * segments, but hopefully will shut the sender down when he 209 * finally gets the message. 210 */ 211 calc = 1; 212 } else { 213 /* SWS threshold */ 214 if (calc && 215 (calc < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) { 216 /* SWS engaged, tell peer none left */ 217 calc = 1; 218 } 219 } 220 return (calc); 221 } 222 223 224 225 /* 226 * Build out our readq entry based on the incoming packet. 227 */ 228 struct sctp_queued_to_read * 229 sctp_build_readq_entry(struct sctp_tcb *stcb, 230 struct sctp_nets *net, 231 uint32_t tsn, uint32_t ppid, 232 uint32_t context, uint16_t stream_no, 233 uint16_t stream_seq, uint8_t flags, 234 struct mbuf *dm) 235 { 236 struct sctp_queued_to_read *read_queue_e = NULL; 237 238 sctp_alloc_a_readq(stcb, read_queue_e); 239 if (read_queue_e == NULL) { 240 goto failed_build; 241 } 242 read_queue_e->sinfo_stream = stream_no; 243 read_queue_e->sinfo_ssn = stream_seq; 244 read_queue_e->sinfo_flags = (flags << 8); 245 read_queue_e->sinfo_ppid = ppid; 246 read_queue_e->sinfo_context = stcb->asoc.context; 247 read_queue_e->sinfo_timetolive = 0; 248 read_queue_e->sinfo_tsn = tsn; 249 read_queue_e->sinfo_cumtsn = tsn; 250 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb); 251 read_queue_e->whoFrom = net; 252 read_queue_e->length = 0; 253 atomic_add_int(&net->ref_count, 1); 254 read_queue_e->data = dm; 255 read_queue_e->tail_mbuf = NULL; 256 read_queue_e->stcb = stcb; 257 read_queue_e->port_from = stcb->rport; 258 read_queue_e->do_not_ref_stcb = 0; 259 read_queue_e->end_added = 0; 260 failed_build: 261 return (read_queue_e); 262 } 263 264 265 /* 266 * Build out our readq entry based on the incoming packet. 267 */ 268 static struct sctp_queued_to_read * 269 sctp_build_readq_entry_chk(struct sctp_tcb *stcb, 270 struct sctp_tmit_chunk *chk) 271 { 272 struct sctp_queued_to_read *read_queue_e = NULL; 273 274 sctp_alloc_a_readq(stcb, read_queue_e); 275 if (read_queue_e == NULL) { 276 goto failed_build; 277 } 278 read_queue_e->sinfo_stream = chk->rec.data.stream_number; 279 read_queue_e->sinfo_ssn = chk->rec.data.stream_seq; 280 read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8); 281 read_queue_e->sinfo_ppid = chk->rec.data.payloadtype; 282 read_queue_e->sinfo_context = stcb->asoc.context; 283 read_queue_e->sinfo_timetolive = 0; 284 read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq; 285 read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq; 286 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb); 287 read_queue_e->whoFrom = chk->whoTo; 288 read_queue_e->length = 0; 289 atomic_add_int(&chk->whoTo->ref_count, 1); 290 read_queue_e->data = chk->data; 291 read_queue_e->tail_mbuf = NULL; 292 read_queue_e->stcb = stcb; 293 read_queue_e->port_from = stcb->rport; 294 read_queue_e->do_not_ref_stcb = 0; 295 read_queue_e->end_added = 0; 296 failed_build: 297 return (read_queue_e); 298 } 299 300 301 struct mbuf * 302 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, 303 struct sctp_sndrcvinfo *sinfo) 304 { 305 struct sctp_sndrcvinfo *outinfo; 306 struct cmsghdr *cmh; 307 struct mbuf *ret; 308 int len; 309 int use_extended = 0; 310 311 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) { 312 /* user does not want the sndrcv ctl */ 313 return (NULL); 314 } 315 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) { 316 use_extended = 1; 317 len = CMSG_LEN(sizeof(struct sctp_extrcvinfo)); 318 } else { 319 len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo)); 320 } 321 322 323 ret = sctp_get_mbuf_for_msg(len, 324 1, M_DONTWAIT, 1, MT_DATA); 325 326 if (ret == NULL) { 327 /* No space */ 328 return (ret); 329 } 330 /* We need a CMSG header followed by the struct */ 331 cmh = mtod(ret, struct cmsghdr *); 332 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh); 333 cmh->cmsg_level = IPPROTO_SCTP; 334 if (use_extended) { 335 cmh->cmsg_type = SCTP_EXTRCV; 336 cmh->cmsg_len = len; 337 memcpy(outinfo, sinfo, len); 338 } else { 339 cmh->cmsg_type = SCTP_SNDRCV; 340 cmh->cmsg_len = len; 341 *outinfo = *sinfo; 342 } 343 ret->m_len = cmh->cmsg_len; 344 ret->m_pkthdr.len = ret->m_len; 345 return (ret); 346 } 347 348 /* 349 * We are delivering currently from the reassembly queue. We must continue to 350 * deliver until we either: 1) run out of space. 2) run out of sequential 351 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag. 352 */ 353 static void 354 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc) 355 { 356 struct sctp_tmit_chunk *chk; 357 struct mbuf *m; 358 uint16_t nxt_todel; 359 uint16_t stream_no; 360 int end = 0; 361 int cntDel; 362 363 cntDel = stream_no = 0; 364 struct sctp_queued_to_read *control, *ctl, *ctlat; 365 366 if (stcb && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 367 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) 368 ) { 369 /* socket above is long gone */ 370 asoc->fragmented_delivery_inprogress = 0; 371 chk = TAILQ_FIRST(&asoc->reasmqueue); 372 while (chk) { 373 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); 374 asoc->size_on_reasm_queue -= chk->send_size; 375 sctp_ucount_decr(asoc->cnt_on_reasm_queue); 376 /* 377 * Lose the data pointer, since its in the socket 378 * buffer 379 */ 380 if (chk->data) { 381 sctp_m_freem(chk->data); 382 chk->data = NULL; 383 } 384 /* Now free the address and data */ 385 sctp_free_remote_addr(chk->whoTo); 386 sctp_free_a_chunk(stcb, chk); 387 chk = TAILQ_FIRST(&asoc->reasmqueue); 388 } 389 return; 390 } 391 SCTP_TCB_LOCK_ASSERT(stcb); 392 do { 393 chk = TAILQ_FIRST(&asoc->reasmqueue); 394 if (chk == NULL) { 395 return; 396 } 397 if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) { 398 /* Can't deliver more :< */ 399 return; 400 } 401 stream_no = chk->rec.data.stream_number; 402 nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1; 403 if (nxt_todel != chk->rec.data.stream_seq && 404 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) { 405 /* 406 * Not the next sequence to deliver in its stream OR 407 * unordered 408 */ 409 return; 410 } 411 if ((chk->data->m_flags & M_PKTHDR) == 0) { 412 m = sctp_get_mbuf_for_msg(1, 413 1, M_DONTWAIT, 1, MT_DATA); 414 if (m == NULL) { 415 /* no room! */ 416 return; 417 } 418 m->m_pkthdr.len = chk->send_size; 419 m->m_len = 0; 420 m->m_next = chk->data; 421 chk->data = m; 422 } 423 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 424 if (chk->data->m_next == NULL) { 425 /* hopefully we hit here most of the time */ 426 chk->data->m_flags |= M_EOR; 427 } else { 428 /* 429 * Add the flag to the LAST mbuf in the 430 * chain 431 */ 432 m = chk->data; 433 while (m->m_next != NULL) { 434 m = m->m_next; 435 } 436 m->m_flags |= M_EOR; 437 } 438 } 439 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 440 441 control = sctp_build_readq_entry_chk(stcb, chk); 442 if (control == NULL) { 443 /* out of memory? */ 444 return; 445 } 446 /* save it off for our future deliveries */ 447 stcb->asoc.control_pdapi = control; 448 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) 449 end = 1; 450 else 451 end = 0; 452 sctp_add_to_readq(stcb->sctp_ep, 453 stcb, control, &stcb->sctp_socket->so_rcv, end); 454 cntDel++; 455 } else { 456 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) 457 end = 1; 458 else 459 end = 0; 460 if (sctp_append_to_readq(stcb->sctp_ep, stcb, 461 stcb->asoc.control_pdapi, 462 chk->data, end, chk->rec.data.TSN_seq, 463 &stcb->sctp_socket->so_rcv)) { 464 /* 465 * something is very wrong, either 466 * control_pdapi is NULL, or the tail_mbuf 467 * is corrupt, or there is a EOM already on 468 * the mbuf chain. 469 */ 470 if (stcb->asoc.control_pdapi == NULL) { 471 panic("This should not happen control_pdapi NULL?"); 472 } 473 if (stcb->asoc.control_pdapi->tail_mbuf == NULL) { 474 panic("This should not happen, tail_mbuf not being maintained?"); 475 } 476 /* if we did not panic, it was a EOM */ 477 panic("Bad chunking ??"); 478 } 479 cntDel++; 480 } 481 /* pull it we did it */ 482 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); 483 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 484 asoc->fragmented_delivery_inprogress = 0; 485 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) { 486 asoc->strmin[stream_no].last_sequence_delivered++; 487 } 488 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) { 489 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); 490 } 491 } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 492 /* 493 * turn the flag back on since we just delivered 494 * yet another one. 495 */ 496 asoc->fragmented_delivery_inprogress = 1; 497 } 498 asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq; 499 asoc->last_flags_delivered = chk->rec.data.rcv_flags; 500 asoc->last_strm_seq_delivered = chk->rec.data.stream_seq; 501 asoc->last_strm_no_delivered = chk->rec.data.stream_number; 502 503 asoc->tsn_last_delivered = chk->rec.data.TSN_seq; 504 asoc->size_on_reasm_queue -= chk->send_size; 505 sctp_ucount_decr(asoc->cnt_on_reasm_queue); 506 /* free up the chk */ 507 chk->data = NULL; 508 sctp_free_remote_addr(chk->whoTo); 509 sctp_free_a_chunk(stcb, chk); 510 511 if (asoc->fragmented_delivery_inprogress == 0) { 512 /* 513 * Now lets see if we can deliver the next one on 514 * the stream 515 */ 516 uint16_t nxt_todel; 517 struct sctp_stream_in *strm; 518 519 strm = &asoc->strmin[stream_no]; 520 nxt_todel = strm->last_sequence_delivered + 1; 521 ctl = TAILQ_FIRST(&strm->inqueue); 522 if (ctl && (nxt_todel == ctl->sinfo_ssn)) { 523 while (ctl != NULL) { 524 /* Deliver more if we can. */ 525 if (nxt_todel == ctl->sinfo_ssn) { 526 ctlat = TAILQ_NEXT(ctl, next); 527 TAILQ_REMOVE(&strm->inqueue, ctl, next); 528 asoc->size_on_all_streams -= ctl->length; 529 sctp_ucount_decr(asoc->cnt_on_all_streams); 530 strm->last_sequence_delivered++; 531 sctp_add_to_readq(stcb->sctp_ep, stcb, 532 ctl, 533 &stcb->sctp_socket->so_rcv, 1); 534 ctl = ctlat; 535 } else { 536 break; 537 } 538 nxt_todel = strm->last_sequence_delivered + 1; 539 } 540 } 541 return; 542 } 543 chk = TAILQ_FIRST(&asoc->reasmqueue); 544 } while (chk); 545 } 546 547 /* 548 * Queue the chunk either right into the socket buffer if it is the next one 549 * to go OR put it in the correct place in the delivery queue. If we do 550 * append to the so_buf, keep doing so until we are out of order. One big 551 * question still remains, what to do when the socket buffer is FULL?? 552 */ 553 static void 554 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc, 555 struct sctp_queued_to_read *control, int *abort_flag) 556 { 557 /* 558 * FIX-ME maybe? What happens when the ssn wraps? If we are getting 559 * all the data in one stream this could happen quite rapidly. One 560 * could use the TSN to keep track of things, but this scheme breaks 561 * down in the other type of stream useage that could occur. Send a 562 * single msg to stream 0, send 4Billion messages to stream 1, now 563 * send a message to stream 0. You have a situation where the TSN 564 * has wrapped but not in the stream. Is this worth worrying about 565 * or should we just change our queue sort at the bottom to be by 566 * TSN. 567 * 568 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2 569 * with TSN 1? If the peer is doing some sort of funky TSN/SSN 570 * assignment this could happen... and I don't see how this would be 571 * a violation. So for now I am undecided an will leave the sort by 572 * SSN alone. Maybe a hybred approach is the answer 573 * 574 */ 575 struct sctp_stream_in *strm; 576 struct sctp_queued_to_read *at; 577 int queue_needed; 578 uint16_t nxt_todel; 579 struct mbuf *oper; 580 581 queue_needed = 1; 582 asoc->size_on_all_streams += control->length; 583 sctp_ucount_incr(asoc->cnt_on_all_streams); 584 strm = &asoc->strmin[control->sinfo_stream]; 585 nxt_todel = strm->last_sequence_delivered + 1; 586 #ifdef SCTP_STR_LOGGING 587 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD); 588 #endif 589 #ifdef SCTP_DEBUG 590 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 591 printf("queue to stream called for ssn:%u lastdel:%u nxt:%u\n", 592 (uint32_t) control->sinfo_stream, 593 (uint32_t) strm->last_sequence_delivered, (uint32_t) nxt_todel); 594 } 595 #endif 596 if (compare_with_wrap(strm->last_sequence_delivered, 597 control->sinfo_ssn, MAX_SEQ) || 598 (strm->last_sequence_delivered == control->sinfo_ssn)) { 599 /* The incoming sseq is behind where we last delivered? */ 600 #ifdef SCTP_DEBUG 601 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 602 printf("Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n", 603 control->sinfo_ssn, 604 strm->last_sequence_delivered); 605 } 606 #endif 607 /* 608 * throw it in the stream so it gets cleaned up in 609 * association destruction 610 */ 611 TAILQ_INSERT_HEAD(&strm->inqueue, control, next); 612 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 613 0, M_DONTWAIT, 1, MT_DATA); 614 if (oper) { 615 struct sctp_paramhdr *ph; 616 uint32_t *ippp; 617 618 oper->m_len = sizeof(struct sctp_paramhdr) + 619 (sizeof(uint32_t) * 3); 620 ph = mtod(oper, struct sctp_paramhdr *); 621 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 622 ph->param_length = htons(oper->m_len); 623 ippp = (uint32_t *) (ph + 1); 624 *ippp = htonl(0x00000001); 625 ippp++; 626 *ippp = control->sinfo_tsn; 627 ippp++; 628 *ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn); 629 } 630 sctp_abort_an_association(stcb->sctp_ep, stcb, 631 SCTP_PEER_FAULTY, oper); 632 633 *abort_flag = 1; 634 return; 635 636 } 637 if (nxt_todel == control->sinfo_ssn) { 638 /* can be delivered right away? */ 639 #ifdef SCTP_STR_LOGGING 640 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL); 641 #endif 642 queue_needed = 0; 643 asoc->size_on_all_streams -= control->length; 644 sctp_ucount_decr(asoc->cnt_on_all_streams); 645 strm->last_sequence_delivered++; 646 sctp_add_to_readq(stcb->sctp_ep, stcb, 647 control, 648 &stcb->sctp_socket->so_rcv, 1); 649 control = TAILQ_FIRST(&strm->inqueue); 650 while (control != NULL) { 651 /* all delivered */ 652 nxt_todel = strm->last_sequence_delivered + 1; 653 if (nxt_todel == control->sinfo_ssn) { 654 at = TAILQ_NEXT(control, next); 655 TAILQ_REMOVE(&strm->inqueue, control, next); 656 asoc->size_on_all_streams -= control->length; 657 sctp_ucount_decr(asoc->cnt_on_all_streams); 658 strm->last_sequence_delivered++; 659 /* 660 * We ignore the return of deliver_data here 661 * since we always can hold the chunk on the 662 * d-queue. And we have a finite number that 663 * can be delivered from the strq. 664 */ 665 #ifdef SCTP_STR_LOGGING 666 sctp_log_strm_del(control, NULL, 667 SCTP_STR_LOG_FROM_IMMED_DEL); 668 #endif 669 sctp_add_to_readq(stcb->sctp_ep, stcb, 670 control, 671 &stcb->sctp_socket->so_rcv, 1); 672 control = at; 673 continue; 674 } 675 break; 676 } 677 } 678 if (queue_needed) { 679 /* 680 * Ok, we did not deliver this guy, find the correct place 681 * to put it on the queue. 682 */ 683 if (TAILQ_EMPTY(&strm->inqueue)) { 684 /* Empty queue */ 685 #ifdef SCTP_STR_LOGGING 686 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD); 687 #endif 688 TAILQ_INSERT_HEAD(&strm->inqueue, control, next); 689 } else { 690 TAILQ_FOREACH(at, &strm->inqueue, next) { 691 if (compare_with_wrap(at->sinfo_ssn, 692 control->sinfo_ssn, MAX_SEQ)) { 693 /* 694 * one in queue is bigger than the 695 * new one, insert before this one 696 */ 697 #ifdef SCTP_STR_LOGGING 698 sctp_log_strm_del(control, at, 699 SCTP_STR_LOG_FROM_INSERT_MD); 700 #endif 701 TAILQ_INSERT_BEFORE(at, control, next); 702 break; 703 } else if (at->sinfo_ssn == control->sinfo_ssn) { 704 /* 705 * Gak, He sent me a duplicate str 706 * seq number 707 */ 708 /* 709 * foo bar, I guess I will just free 710 * this new guy, should we abort 711 * too? FIX ME MAYBE? Or it COULD be 712 * that the SSN's have wrapped. 713 * Maybe I should compare to TSN 714 * somehow... sigh for now just blow 715 * away the chunk! 716 */ 717 718 if (control->data) 719 sctp_m_freem(control->data); 720 control->data = NULL; 721 asoc->size_on_all_streams -= control->length; 722 sctp_ucount_decr(asoc->cnt_on_all_streams); 723 sctp_free_remote_addr(control->whoFrom); 724 sctp_free_a_readq(stcb, control); 725 return; 726 } else { 727 if (TAILQ_NEXT(at, next) == NULL) { 728 /* 729 * We are at the end, insert 730 * it after this one 731 */ 732 #ifdef SCTP_STR_LOGGING 733 sctp_log_strm_del(control, at, 734 SCTP_STR_LOG_FROM_INSERT_TL); 735 #endif 736 TAILQ_INSERT_AFTER(&strm->inqueue, 737 at, control, next); 738 break; 739 } 740 } 741 } 742 } 743 } 744 } 745 746 /* 747 * Returns two things: You get the total size of the deliverable parts of the 748 * first fragmented message on the reassembly queue. And you get a 1 back if 749 * all of the message is ready or a 0 back if the message is still incomplete 750 */ 751 static int 752 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size) 753 { 754 struct sctp_tmit_chunk *chk; 755 uint32_t tsn; 756 757 *t_size = 0; 758 chk = TAILQ_FIRST(&asoc->reasmqueue); 759 if (chk == NULL) { 760 /* nothing on the queue */ 761 return (0); 762 } 763 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) { 764 /* Not a first on the queue */ 765 return (0); 766 } 767 tsn = chk->rec.data.TSN_seq; 768 while (chk) { 769 if (tsn != chk->rec.data.TSN_seq) { 770 return (0); 771 } 772 *t_size += chk->send_size; 773 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 774 return (1); 775 } 776 tsn++; 777 chk = TAILQ_NEXT(chk, sctp_next); 778 } 779 return (0); 780 } 781 782 static void 783 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc) 784 { 785 struct sctp_tmit_chunk *chk; 786 uint16_t nxt_todel; 787 uint32_t tsize; 788 789 chk = TAILQ_FIRST(&asoc->reasmqueue); 790 if (chk == NULL) { 791 /* Huh? */ 792 asoc->size_on_reasm_queue = 0; 793 asoc->cnt_on_reasm_queue = 0; 794 return; 795 } 796 if (asoc->fragmented_delivery_inprogress == 0) { 797 nxt_todel = 798 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1; 799 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) && 800 (nxt_todel == chk->rec.data.stream_seq || 801 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) { 802 /* 803 * Yep the first one is here and its ok to deliver 804 * but should we? 805 */ 806 if ((sctp_is_all_msg_on_reasm(asoc, &tsize) || 807 (tsize > stcb->sctp_ep->partial_delivery_point))) { 808 809 /* 810 * Yes, we setup to start reception, by 811 * backing down the TSN just in case we 812 * can't deliver. If we 813 */ 814 asoc->fragmented_delivery_inprogress = 1; 815 asoc->tsn_last_delivered = 816 chk->rec.data.TSN_seq - 1; 817 asoc->str_of_pdapi = 818 chk->rec.data.stream_number; 819 asoc->ssn_of_pdapi = chk->rec.data.stream_seq; 820 asoc->pdapi_ppid = chk->rec.data.payloadtype; 821 asoc->fragment_flags = chk->rec.data.rcv_flags; 822 sctp_service_reassembly(stcb, asoc); 823 } 824 } 825 } else { 826 sctp_service_reassembly(stcb, asoc); 827 } 828 } 829 830 /* 831 * Dump onto the re-assembly queue, in its proper place. After dumping on the 832 * queue, see if anthing can be delivered. If so pull it off (or as much as 833 * we can. If we run out of space then we must dump what we can and set the 834 * appropriate flag to say we queued what we could. 835 */ 836 static void 837 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc, 838 struct sctp_tmit_chunk *chk, int *abort_flag) 839 { 840 struct mbuf *oper; 841 uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn; 842 u_char last_flags; 843 struct sctp_tmit_chunk *at, *prev, *next; 844 845 prev = next = NULL; 846 cum_ackp1 = asoc->tsn_last_delivered + 1; 847 if (TAILQ_EMPTY(&asoc->reasmqueue)) { 848 /* This is the first one on the queue */ 849 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next); 850 /* 851 * we do not check for delivery of anything when only one 852 * fragment is here 853 */ 854 asoc->size_on_reasm_queue = chk->send_size; 855 sctp_ucount_incr(asoc->cnt_on_reasm_queue); 856 if (chk->rec.data.TSN_seq == cum_ackp1) { 857 if (asoc->fragmented_delivery_inprogress == 0 && 858 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) != 859 SCTP_DATA_FIRST_FRAG) { 860 /* 861 * An empty queue, no delivery inprogress, 862 * we hit the next one and it does NOT have 863 * a FIRST fragment mark. 864 */ 865 #ifdef SCTP_DEBUG 866 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 867 printf("Gak, Evil plot, its not first, no fragmented delivery in progress\n"); 868 } 869 #endif 870 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 871 0, M_DONTWAIT, 1, MT_DATA); 872 873 if (oper) { 874 struct sctp_paramhdr *ph; 875 uint32_t *ippp; 876 877 oper->m_len = 878 sizeof(struct sctp_paramhdr) + 879 (sizeof(uint32_t) * 3); 880 ph = mtod(oper, struct sctp_paramhdr *); 881 ph->param_type = 882 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 883 ph->param_length = htons(oper->m_len); 884 ippp = (uint32_t *) (ph + 1); 885 *ippp = htonl(0x10000001); 886 ippp++; 887 *ippp = chk->rec.data.TSN_seq; 888 ippp++; 889 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 890 891 } 892 sctp_abort_an_association(stcb->sctp_ep, stcb, 893 SCTP_PEER_FAULTY, oper); 894 *abort_flag = 1; 895 } else if (asoc->fragmented_delivery_inprogress && 896 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) { 897 /* 898 * We are doing a partial delivery and the 899 * NEXT chunk MUST be either the LAST or 900 * MIDDLE fragment NOT a FIRST 901 */ 902 #ifdef SCTP_DEBUG 903 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 904 printf("Gak, Evil plot, it IS a first and fragmented delivery in progress\n"); 905 } 906 #endif 907 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 908 0, M_DONTWAIT, 1, MT_DATA); 909 if (oper) { 910 struct sctp_paramhdr *ph; 911 uint32_t *ippp; 912 913 oper->m_len = 914 sizeof(struct sctp_paramhdr) + 915 (3 * sizeof(uint32_t)); 916 ph = mtod(oper, struct sctp_paramhdr *); 917 ph->param_type = 918 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 919 ph->param_length = htons(oper->m_len); 920 ippp = (uint32_t *) (ph + 1); 921 *ippp = htonl(0x10000002); 922 ippp++; 923 *ippp = chk->rec.data.TSN_seq; 924 ippp++; 925 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 926 } 927 sctp_abort_an_association(stcb->sctp_ep, stcb, 928 SCTP_PEER_FAULTY, oper); 929 *abort_flag = 1; 930 } else if (asoc->fragmented_delivery_inprogress) { 931 /* 932 * Here we are ok with a MIDDLE or LAST 933 * piece 934 */ 935 if (chk->rec.data.stream_number != 936 asoc->str_of_pdapi) { 937 /* Got to be the right STR No */ 938 #ifdef SCTP_DEBUG 939 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 940 printf("Gak, Evil plot, it IS not same stream number %d vs %d\n", 941 chk->rec.data.stream_number, 942 asoc->str_of_pdapi); 943 } 944 #endif 945 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 946 0, M_DONTWAIT, 1, MT_DATA); 947 if (oper) { 948 struct sctp_paramhdr *ph; 949 uint32_t *ippp; 950 951 oper->m_len = 952 sizeof(struct sctp_paramhdr) + 953 (sizeof(uint32_t) * 3); 954 ph = mtod(oper, 955 struct sctp_paramhdr *); 956 ph->param_type = 957 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 958 ph->param_length = 959 htons(oper->m_len); 960 ippp = (uint32_t *) (ph + 1); 961 *ippp = htonl(0x10000003); 962 ippp++; 963 *ippp = chk->rec.data.TSN_seq; 964 ippp++; 965 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 966 } 967 sctp_abort_an_association(stcb->sctp_ep, 968 stcb, SCTP_PEER_FAULTY, oper); 969 *abort_flag = 1; 970 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) != 971 SCTP_DATA_UNORDERED && 972 chk->rec.data.stream_seq != 973 asoc->ssn_of_pdapi) { 974 /* Got to be the right STR Seq */ 975 #ifdef SCTP_DEBUG 976 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 977 printf("Gak, Evil plot, it IS not same stream seq %d vs %d\n", 978 chk->rec.data.stream_seq, 979 asoc->ssn_of_pdapi); 980 } 981 #endif 982 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 983 0, M_DONTWAIT, 1, MT_DATA); 984 if (oper) { 985 struct sctp_paramhdr *ph; 986 uint32_t *ippp; 987 988 oper->m_len = 989 sizeof(struct sctp_paramhdr) + 990 (3 * sizeof(uint32_t)); 991 ph = mtod(oper, 992 struct sctp_paramhdr *); 993 ph->param_type = 994 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 995 ph->param_length = 996 htons(oper->m_len); 997 ippp = (uint32_t *) (ph + 1); 998 *ippp = htonl(0x10000004); 999 ippp++; 1000 *ippp = chk->rec.data.TSN_seq; 1001 ippp++; 1002 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1003 1004 } 1005 sctp_abort_an_association(stcb->sctp_ep, 1006 stcb, SCTP_PEER_FAULTY, oper); 1007 *abort_flag = 1; 1008 } 1009 } 1010 } 1011 return; 1012 } 1013 /* Find its place */ 1014 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) { 1015 if (compare_with_wrap(at->rec.data.TSN_seq, 1016 chk->rec.data.TSN_seq, MAX_TSN)) { 1017 /* 1018 * one in queue is bigger than the new one, insert 1019 * before this one 1020 */ 1021 /* A check */ 1022 asoc->size_on_reasm_queue += chk->send_size; 1023 sctp_ucount_incr(asoc->cnt_on_reasm_queue); 1024 next = at; 1025 TAILQ_INSERT_BEFORE(at, chk, sctp_next); 1026 break; 1027 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) { 1028 /* Gak, He sent me a duplicate str seq number */ 1029 /* 1030 * foo bar, I guess I will just free this new guy, 1031 * should we abort too? FIX ME MAYBE? Or it COULD be 1032 * that the SSN's have wrapped. Maybe I should 1033 * compare to TSN somehow... sigh for now just blow 1034 * away the chunk! 1035 */ 1036 if (chk->data) { 1037 sctp_m_freem(chk->data); 1038 chk->data = NULL; 1039 } 1040 sctp_free_remote_addr(chk->whoTo); 1041 sctp_free_a_chunk(stcb, chk); 1042 return; 1043 } else { 1044 last_flags = at->rec.data.rcv_flags; 1045 last_tsn = at->rec.data.TSN_seq; 1046 prev = at; 1047 if (TAILQ_NEXT(at, sctp_next) == NULL) { 1048 /* 1049 * We are at the end, insert it after this 1050 * one 1051 */ 1052 /* check it first */ 1053 asoc->size_on_reasm_queue += chk->send_size; 1054 sctp_ucount_incr(asoc->cnt_on_reasm_queue); 1055 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next); 1056 break; 1057 } 1058 } 1059 } 1060 /* Now the audits */ 1061 if (prev) { 1062 prev_tsn = chk->rec.data.TSN_seq - 1; 1063 if (prev_tsn == prev->rec.data.TSN_seq) { 1064 /* 1065 * Ok the one I am dropping onto the end is the 1066 * NEXT. A bit of valdiation here. 1067 */ 1068 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 1069 SCTP_DATA_FIRST_FRAG || 1070 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 1071 SCTP_DATA_MIDDLE_FRAG) { 1072 /* 1073 * Insert chk MUST be a MIDDLE or LAST 1074 * fragment 1075 */ 1076 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 1077 SCTP_DATA_FIRST_FRAG) { 1078 #ifdef SCTP_DEBUG 1079 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1080 printf("Prev check - It can be a midlle or last but not a first\n"); 1081 printf("Gak, Evil plot, it's a FIRST!\n"); 1082 } 1083 #endif 1084 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1085 0, M_DONTWAIT, 1, MT_DATA); 1086 if (oper) { 1087 struct sctp_paramhdr *ph; 1088 uint32_t *ippp; 1089 1090 oper->m_len = 1091 sizeof(struct sctp_paramhdr) + 1092 (3 * sizeof(uint32_t)); 1093 ph = mtod(oper, 1094 struct sctp_paramhdr *); 1095 ph->param_type = 1096 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1097 ph->param_length = 1098 htons(oper->m_len); 1099 ippp = (uint32_t *) (ph + 1); 1100 *ippp = htonl(0x10000005); 1101 ippp++; 1102 *ippp = chk->rec.data.TSN_seq; 1103 ippp++; 1104 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1105 1106 } 1107 sctp_abort_an_association(stcb->sctp_ep, 1108 stcb, SCTP_PEER_FAULTY, oper); 1109 *abort_flag = 1; 1110 return; 1111 } 1112 if (chk->rec.data.stream_number != 1113 prev->rec.data.stream_number) { 1114 /* 1115 * Huh, need the correct STR here, 1116 * they must be the same. 1117 */ 1118 #ifdef SCTP_DEBUG 1119 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1120 printf("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n", 1121 chk->rec.data.stream_number, 1122 prev->rec.data.stream_number); 1123 } 1124 #endif 1125 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1126 0, M_DONTWAIT, 1, MT_DATA); 1127 if (oper) { 1128 struct sctp_paramhdr *ph; 1129 uint32_t *ippp; 1130 1131 oper->m_len = 1132 sizeof(struct sctp_paramhdr) + 1133 (3 * sizeof(uint32_t)); 1134 ph = mtod(oper, 1135 struct sctp_paramhdr *); 1136 ph->param_type = 1137 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1138 ph->param_length = 1139 htons(oper->m_len); 1140 ippp = (uint32_t *) (ph + 1); 1141 *ippp = htonl(0x10000006); 1142 ippp++; 1143 *ippp = chk->rec.data.TSN_seq; 1144 ippp++; 1145 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1146 } 1147 sctp_abort_an_association(stcb->sctp_ep, 1148 stcb, SCTP_PEER_FAULTY, oper); 1149 1150 *abort_flag = 1; 1151 return; 1152 } 1153 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 && 1154 chk->rec.data.stream_seq != 1155 prev->rec.data.stream_seq) { 1156 /* 1157 * Huh, need the correct STR here, 1158 * they must be the same. 1159 */ 1160 #ifdef SCTP_DEBUG 1161 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1162 printf("Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n", 1163 chk->rec.data.stream_seq, 1164 prev->rec.data.stream_seq); 1165 } 1166 #endif 1167 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1168 0, M_DONTWAIT, 1, MT_DATA); 1169 if (oper) { 1170 struct sctp_paramhdr *ph; 1171 uint32_t *ippp; 1172 1173 oper->m_len = 1174 sizeof(struct sctp_paramhdr) + 1175 (3 * sizeof(uint32_t)); 1176 ph = mtod(oper, 1177 struct sctp_paramhdr *); 1178 ph->param_type = 1179 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1180 ph->param_length = 1181 htons(oper->m_len); 1182 ippp = (uint32_t *) (ph + 1); 1183 *ippp = htonl(0x10000007); 1184 ippp++; 1185 *ippp = chk->rec.data.TSN_seq; 1186 ippp++; 1187 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1188 } 1189 sctp_abort_an_association(stcb->sctp_ep, 1190 stcb, SCTP_PEER_FAULTY, oper); 1191 1192 *abort_flag = 1; 1193 return; 1194 } 1195 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 1196 SCTP_DATA_LAST_FRAG) { 1197 /* Insert chk MUST be a FIRST */ 1198 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) != 1199 SCTP_DATA_FIRST_FRAG) { 1200 #ifdef SCTP_DEBUG 1201 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1202 printf("Prev check - Gak, evil plot, its not FIRST and it must be!\n"); 1203 } 1204 #endif 1205 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1206 0, M_DONTWAIT, 1, MT_DATA); 1207 if (oper) { 1208 struct sctp_paramhdr *ph; 1209 uint32_t *ippp; 1210 1211 oper->m_len = 1212 sizeof(struct sctp_paramhdr) + 1213 (3 * sizeof(uint32_t)); 1214 ph = mtod(oper, 1215 struct sctp_paramhdr *); 1216 ph->param_type = 1217 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1218 ph->param_length = 1219 htons(oper->m_len); 1220 ippp = (uint32_t *) (ph + 1); 1221 *ippp = htonl(0x10000008); 1222 ippp++; 1223 *ippp = chk->rec.data.TSN_seq; 1224 ippp++; 1225 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1226 1227 } 1228 sctp_abort_an_association(stcb->sctp_ep, 1229 stcb, SCTP_PEER_FAULTY, oper); 1230 1231 *abort_flag = 1; 1232 return; 1233 } 1234 } 1235 } 1236 } 1237 if (next) { 1238 post_tsn = chk->rec.data.TSN_seq + 1; 1239 if (post_tsn == next->rec.data.TSN_seq) { 1240 /* 1241 * Ok the one I am inserting ahead of is my NEXT 1242 * one. A bit of valdiation here. 1243 */ 1244 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 1245 /* Insert chk MUST be a last fragment */ 1246 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) 1247 != SCTP_DATA_LAST_FRAG) { 1248 #ifdef SCTP_DEBUG 1249 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1250 printf("Next chk - Next is FIRST, we must be LAST\n"); 1251 printf("Gak, Evil plot, its not a last!\n"); 1252 } 1253 #endif 1254 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1255 0, M_DONTWAIT, 1, MT_DATA); 1256 if (oper) { 1257 struct sctp_paramhdr *ph; 1258 uint32_t *ippp; 1259 1260 oper->m_len = 1261 sizeof(struct sctp_paramhdr) + 1262 (3 * sizeof(uint32_t)); 1263 ph = mtod(oper, 1264 struct sctp_paramhdr *); 1265 ph->param_type = 1266 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1267 ph->param_length = 1268 htons(oper->m_len); 1269 ippp = (uint32_t *) (ph + 1); 1270 *ippp = htonl(0x10000009); 1271 ippp++; 1272 *ippp = chk->rec.data.TSN_seq; 1273 ippp++; 1274 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1275 } 1276 sctp_abort_an_association(stcb->sctp_ep, 1277 stcb, SCTP_PEER_FAULTY, oper); 1278 1279 *abort_flag = 1; 1280 return; 1281 } 1282 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 1283 SCTP_DATA_MIDDLE_FRAG || 1284 (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 1285 SCTP_DATA_LAST_FRAG) { 1286 /* 1287 * Insert chk CAN be MIDDLE or FIRST NOT 1288 * LAST 1289 */ 1290 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 1291 SCTP_DATA_LAST_FRAG) { 1292 #ifdef SCTP_DEBUG 1293 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1294 printf("Next chk - Next is a MIDDLE/LAST\n"); 1295 printf("Gak, Evil plot, new prev chunk is a LAST\n"); 1296 } 1297 #endif 1298 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1299 0, M_DONTWAIT, 1, MT_DATA); 1300 if (oper) { 1301 struct sctp_paramhdr *ph; 1302 uint32_t *ippp; 1303 1304 oper->m_len = 1305 sizeof(struct sctp_paramhdr) + 1306 (3 * sizeof(uint32_t)); 1307 ph = mtod(oper, 1308 struct sctp_paramhdr *); 1309 ph->param_type = 1310 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1311 ph->param_length = 1312 htons(oper->m_len); 1313 ippp = (uint32_t *) (ph + 1); 1314 *ippp = htonl(0x1000000a); 1315 ippp++; 1316 *ippp = chk->rec.data.TSN_seq; 1317 ippp++; 1318 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1319 1320 } 1321 sctp_abort_an_association(stcb->sctp_ep, 1322 stcb, SCTP_PEER_FAULTY, oper); 1323 1324 *abort_flag = 1; 1325 return; 1326 } 1327 if (chk->rec.data.stream_number != 1328 next->rec.data.stream_number) { 1329 /* 1330 * Huh, need the correct STR here, 1331 * they must be the same. 1332 */ 1333 #ifdef SCTP_DEBUG 1334 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1335 printf("Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n", 1336 chk->rec.data.stream_number, 1337 next->rec.data.stream_number); 1338 } 1339 #endif 1340 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1341 0, M_DONTWAIT, 1, MT_DATA); 1342 if (oper) { 1343 struct sctp_paramhdr *ph; 1344 uint32_t *ippp; 1345 1346 oper->m_len = 1347 sizeof(struct sctp_paramhdr) + 1348 (3 * sizeof(uint32_t)); 1349 ph = mtod(oper, 1350 struct sctp_paramhdr *); 1351 ph->param_type = 1352 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1353 ph->param_length = 1354 htons(oper->m_len); 1355 ippp = (uint32_t *) (ph + 1); 1356 *ippp = htonl(0x1000000b); 1357 ippp++; 1358 *ippp = chk->rec.data.TSN_seq; 1359 ippp++; 1360 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1361 1362 } 1363 sctp_abort_an_association(stcb->sctp_ep, 1364 stcb, SCTP_PEER_FAULTY, oper); 1365 1366 *abort_flag = 1; 1367 return; 1368 } 1369 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 && 1370 chk->rec.data.stream_seq != 1371 next->rec.data.stream_seq) { 1372 /* 1373 * Huh, need the correct STR here, 1374 * they must be the same. 1375 */ 1376 #ifdef SCTP_DEBUG 1377 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1378 printf("Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n", 1379 chk->rec.data.stream_seq, 1380 next->rec.data.stream_seq); 1381 } 1382 #endif 1383 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1384 0, M_DONTWAIT, 1, MT_DATA); 1385 if (oper) { 1386 struct sctp_paramhdr *ph; 1387 uint32_t *ippp; 1388 1389 oper->m_len = 1390 sizeof(struct sctp_paramhdr) + 1391 (3 * sizeof(uint32_t)); 1392 ph = mtod(oper, 1393 struct sctp_paramhdr *); 1394 ph->param_type = 1395 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1396 ph->param_length = 1397 htons(oper->m_len); 1398 ippp = (uint32_t *) (ph + 1); 1399 *ippp = htonl(0x1000000c); 1400 ippp++; 1401 *ippp = chk->rec.data.TSN_seq; 1402 ippp++; 1403 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1404 } 1405 sctp_abort_an_association(stcb->sctp_ep, 1406 stcb, SCTP_PEER_FAULTY, oper); 1407 1408 *abort_flag = 1; 1409 return; 1410 1411 } 1412 } 1413 } 1414 } 1415 /* Do we need to do some delivery? check */ 1416 sctp_deliver_reasm_check(stcb, asoc); 1417 } 1418 1419 /* 1420 * This is an unfortunate routine. It checks to make sure a evil guy is not 1421 * stuffing us full of bad packet fragments. A broken peer could also do this 1422 * but this is doubtful. It is to bad I must worry about evil crackers sigh 1423 * :< more cycles. 1424 */ 1425 static int 1426 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc, 1427 uint32_t TSN_seq) 1428 { 1429 struct sctp_tmit_chunk *at; 1430 uint32_t tsn_est; 1431 1432 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) { 1433 if (compare_with_wrap(TSN_seq, 1434 at->rec.data.TSN_seq, MAX_TSN)) { 1435 /* is it one bigger? */ 1436 tsn_est = at->rec.data.TSN_seq + 1; 1437 if (tsn_est == TSN_seq) { 1438 /* yep. It better be a last then */ 1439 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) != 1440 SCTP_DATA_LAST_FRAG) { 1441 /* 1442 * Ok this guy belongs next to a guy 1443 * that is NOT last, it should be a 1444 * middle/last, not a complete 1445 * chunk. 1446 */ 1447 return (1); 1448 } else { 1449 /* 1450 * This guy is ok since its a LAST 1451 * and the new chunk is a fully 1452 * self- contained one. 1453 */ 1454 return (0); 1455 } 1456 } 1457 } else if (TSN_seq == at->rec.data.TSN_seq) { 1458 /* Software error since I have a dup? */ 1459 return (1); 1460 } else { 1461 /* 1462 * Ok, 'at' is larger than new chunk but does it 1463 * need to be right before it. 1464 */ 1465 tsn_est = TSN_seq + 1; 1466 if (tsn_est == at->rec.data.TSN_seq) { 1467 /* Yep, It better be a first */ 1468 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) != 1469 SCTP_DATA_FIRST_FRAG) { 1470 return (1); 1471 } else { 1472 return (0); 1473 } 1474 } 1475 } 1476 } 1477 return (0); 1478 } 1479 1480 1481 extern unsigned int sctp_max_chunks_on_queue; 1482 static int 1483 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc, 1484 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length, 1485 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag, 1486 int *break_flag, int last_chunk) 1487 { 1488 /* Process a data chunk */ 1489 /* struct sctp_tmit_chunk *chk; */ 1490 struct sctp_tmit_chunk *chk; 1491 uint32_t tsn, gap; 1492 struct mbuf *dmbuf; 1493 int indx, the_len; 1494 uint16_t strmno, strmseq; 1495 struct mbuf *oper; 1496 struct sctp_queued_to_read *control; 1497 1498 chk = NULL; 1499 tsn = ntohl(ch->dp.tsn); 1500 #ifdef SCTP_MAP_LOGGING 1501 sctp_log_map(0, tsn, asoc->cumulative_tsn, SCTP_MAP_PREPARE_SLIDE); 1502 #endif 1503 if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) || 1504 asoc->cumulative_tsn == tsn) { 1505 /* It is a duplicate */ 1506 SCTP_STAT_INCR(sctps_recvdupdata); 1507 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { 1508 /* Record a dup for the next outbound sack */ 1509 asoc->dup_tsns[asoc->numduptsns] = tsn; 1510 asoc->numduptsns++; 1511 } 1512 return (0); 1513 } 1514 /* Calculate the number of TSN's between the base and this TSN */ 1515 if (tsn >= asoc->mapping_array_base_tsn) { 1516 gap = tsn - asoc->mapping_array_base_tsn; 1517 } else { 1518 gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1; 1519 } 1520 if (gap >= (SCTP_MAPPING_ARRAY << 3)) { 1521 /* Can't hold the bit in the mapping at max array, toss it */ 1522 return (0); 1523 } 1524 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) { 1525 if (sctp_expand_mapping_array(asoc)) { 1526 /* Can't expand, drop it */ 1527 return (0); 1528 } 1529 } 1530 if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) { 1531 *high_tsn = tsn; 1532 } 1533 /* See if we have received this one already */ 1534 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) { 1535 SCTP_STAT_INCR(sctps_recvdupdata); 1536 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { 1537 /* Record a dup for the next outbound sack */ 1538 asoc->dup_tsns[asoc->numduptsns] = tsn; 1539 asoc->numduptsns++; 1540 } 1541 if (!callout_pending(&asoc->dack_timer.timer)) { 1542 /* 1543 * By starting the timer we assure that we WILL sack 1544 * at the end of the packet when sctp_sack_check 1545 * gets called. 1546 */ 1547 sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, 1548 stcb, NULL); 1549 } 1550 return (0); 1551 } 1552 /* 1553 * Check to see about the GONE flag, duplicates would cause a sack 1554 * to be sent up above 1555 */ 1556 if (stcb && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 1557 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 1558 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) 1559 ) { 1560 /* 1561 * wait a minute, this guy is gone, there is no longer a 1562 * receiver. Send peer an ABORT! 1563 */ 1564 struct mbuf *op_err; 1565 1566 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 1567 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err); 1568 *abort_flag = 1; 1569 return (0); 1570 } 1571 /* 1572 * Now before going further we see if there is room. If NOT then we 1573 * MAY let one through only IF this TSN is the one we are waiting 1574 * for on a partial delivery API. 1575 */ 1576 1577 /* now do the tests */ 1578 if (((asoc->cnt_on_all_streams + 1579 asoc->cnt_on_reasm_queue + 1580 asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) || 1581 (((int)asoc->my_rwnd) <= 0)) { 1582 /* 1583 * When we have NO room in the rwnd we check to make sure 1584 * the reader is doing its job... 1585 */ 1586 if (stcb->sctp_socket->so_rcv.sb_cc) { 1587 /* some to read, wake-up */ 1588 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 1589 } 1590 /* now is it in the mapping array of what we have accepted? */ 1591 if (compare_with_wrap(tsn, 1592 asoc->highest_tsn_inside_map, MAX_TSN)) { 1593 1594 /* Nope not in the valid range dump it */ 1595 #ifdef SCTP_DEBUG 1596 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1597 printf("My rwnd overrun1:tsn:%lx rwnd %lu sbspace:%ld\n", 1598 (u_long)tsn, (u_long)asoc->my_rwnd, 1599 sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)); 1600 1601 } 1602 #endif 1603 sctp_set_rwnd(stcb, asoc); 1604 if ((asoc->cnt_on_all_streams + 1605 asoc->cnt_on_reasm_queue + 1606 asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) { 1607 SCTP_STAT_INCR(sctps_datadropchklmt); 1608 } else { 1609 SCTP_STAT_INCR(sctps_datadroprwnd); 1610 } 1611 indx = *break_flag; 1612 *break_flag = 1; 1613 return (0); 1614 } 1615 } 1616 strmno = ntohs(ch->dp.stream_id); 1617 if (strmno >= asoc->streamincnt) { 1618 struct sctp_paramhdr *phdr; 1619 struct mbuf *mb; 1620 1621 mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2), 1622 1, M_DONTWAIT, 1, MT_DATA); 1623 if (mb != NULL) { 1624 /* add some space up front so prepend will work well */ 1625 mb->m_data += sizeof(struct sctp_chunkhdr); 1626 phdr = mtod(mb, struct sctp_paramhdr *); 1627 /* 1628 * Error causes are just param's and this one has 1629 * two back to back phdr, one with the error type 1630 * and size, the other with the streamid and a rsvd 1631 */ 1632 mb->m_pkthdr.len = mb->m_len = 1633 (sizeof(struct sctp_paramhdr) * 2); 1634 phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM); 1635 phdr->param_length = 1636 htons(sizeof(struct sctp_paramhdr) * 2); 1637 phdr++; 1638 /* We insert the stream in the type field */ 1639 phdr->param_type = ch->dp.stream_id; 1640 /* And set the length to 0 for the rsvd field */ 1641 phdr->param_length = 0; 1642 sctp_queue_op_err(stcb, mb); 1643 } 1644 SCTP_STAT_INCR(sctps_badsid); 1645 return (0); 1646 } 1647 /* 1648 * Before we continue lets validate that we are not being fooled by 1649 * an evil attacker. We can only have 4k chunks based on our TSN 1650 * spread allowed by the mapping array 512 * 8 bits, so there is no 1651 * way our stream sequence numbers could have wrapped. We of course 1652 * only validate the FIRST fragment so the bit must be set. 1653 */ 1654 strmseq = ntohs(ch->dp.stream_sequence); 1655 if ((ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG) && 1656 (ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0 && 1657 (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered, 1658 strmseq, MAX_SEQ) || 1659 asoc->strmin[strmno].last_sequence_delivered == strmseq)) { 1660 /* The incoming sseq is behind where we last delivered? */ 1661 #ifdef SCTP_DEBUG 1662 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1663 printf("EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n", 1664 strmseq, 1665 asoc->strmin[strmno].last_sequence_delivered); 1666 } 1667 #endif 1668 /* 1669 * throw it in the stream so it gets cleaned up in 1670 * association destruction 1671 */ 1672 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1673 0, M_DONTWAIT, 1, MT_DATA); 1674 if (oper) { 1675 struct sctp_paramhdr *ph; 1676 uint32_t *ippp; 1677 1678 oper->m_len = sizeof(struct sctp_paramhdr) + 1679 (3 * sizeof(uint32_t)); 1680 ph = mtod(oper, struct sctp_paramhdr *); 1681 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1682 ph->param_length = htons(oper->m_len); 1683 ippp = (uint32_t *) (ph + 1); 1684 *ippp = htonl(0x20000001); 1685 ippp++; 1686 *ippp = tsn; 1687 ippp++; 1688 *ippp = ((strmno << 16) | strmseq); 1689 1690 } 1691 sctp_abort_an_association(stcb->sctp_ep, stcb, 1692 SCTP_PEER_FAULTY, oper); 1693 *abort_flag = 1; 1694 return (0); 1695 } 1696 the_len = (chk_length - sizeof(struct sctp_data_chunk)); 1697 if (last_chunk == 0) { 1698 dmbuf = sctp_m_copym(*m, 1699 (offset + sizeof(struct sctp_data_chunk)), 1700 the_len, M_DONTWAIT); 1701 #ifdef SCTP_MBUF_LOGGING 1702 { 1703 struct mbuf *mat; 1704 1705 mat = dmbuf; 1706 while (mat) { 1707 if (mat->m_flags & M_EXT) { 1708 sctp_log_mb(mat, SCTP_MBUF_ICOPY); 1709 } 1710 mat = mat->m_next; 1711 } 1712 } 1713 #endif 1714 } else { 1715 /* We can steal the last chunk */ 1716 dmbuf = *m; 1717 /* lop off the top part */ 1718 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk))); 1719 if (dmbuf->m_pkthdr.len > the_len) { 1720 /* Trim the end round bytes off too */ 1721 m_adj(dmbuf, -(dmbuf->m_pkthdr.len - the_len)); 1722 } 1723 } 1724 if (dmbuf == NULL) { 1725 SCTP_STAT_INCR(sctps_nomem); 1726 return (0); 1727 } 1728 if ((ch->ch.chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG && 1729 asoc->fragmented_delivery_inprogress == 0 && 1730 TAILQ_EMPTY(&asoc->resetHead) && 1731 ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) || 1732 ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq && 1733 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) { 1734 /* Candidate for express delivery */ 1735 /* 1736 * Its not fragmented, No PD-API is up, Nothing in the 1737 * delivery queue, Its un-ordered OR ordered and the next to 1738 * deliver AND nothing else is stuck on the stream queue, 1739 * And there is room for it in the socket buffer. Lets just 1740 * stuff it up the buffer.... 1741 */ 1742 1743 /* It would be nice to avoid this copy if we could :< */ 1744 sctp_alloc_a_readq(stcb, control); 1745 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 1746 ch->dp.protocol_id, 1747 stcb->asoc.context, 1748 strmno, strmseq, 1749 ch->ch.chunk_flags, 1750 dmbuf); 1751 if (control == NULL) { 1752 goto failed_express_del; 1753 } 1754 sctp_add_to_readq(stcb->sctp_ep, stcb, control, &stcb->sctp_socket->so_rcv, 1); 1755 if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) { 1756 /* for ordered, bump what we delivered */ 1757 asoc->strmin[strmno].last_sequence_delivered++; 1758 } 1759 SCTP_STAT_INCR(sctps_recvexpress); 1760 #ifdef SCTP_STR_LOGGING 1761 sctp_log_strm_del_alt(tsn, strmseq, 1762 SCTP_STR_LOG_FROM_EXPRS_DEL); 1763 #endif 1764 control = NULL; 1765 goto finish_express_del; 1766 } 1767 failed_express_del: 1768 /* If we reach here this is a new chunk */ 1769 chk = NULL; 1770 control = NULL; 1771 /* Express for fragmented delivery? */ 1772 if ((asoc->fragmented_delivery_inprogress) && 1773 (stcb->asoc.control_pdapi) && 1774 (asoc->str_of_pdapi == strmno) && 1775 (asoc->ssn_of_pdapi == strmseq) 1776 ) { 1777 control = stcb->asoc.control_pdapi; 1778 if ((ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) { 1779 /* Can't be another first? */ 1780 goto failed_pdapi_express_del; 1781 } 1782 if (tsn == (control->sinfo_tsn + 1)) { 1783 /* Yep, we can add it on */ 1784 int end = 0; 1785 uint32_t cumack; 1786 1787 if (ch->ch.chunk_flags & SCTP_DATA_LAST_FRAG) { 1788 end = 1; 1789 } 1790 cumack = asoc->cumulative_tsn; 1791 if ((cumack + 1) == tsn) 1792 cumack = tsn; 1793 1794 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end, 1795 tsn, 1796 &stcb->sctp_socket->so_rcv)) { 1797 printf("Append fails end:%d\n", end); 1798 goto failed_pdapi_express_del; 1799 } 1800 SCTP_STAT_INCR(sctps_recvexpressm); 1801 control->sinfo_tsn = tsn; 1802 asoc->tsn_last_delivered = tsn; 1803 asoc->fragment_flags = ch->ch.chunk_flags; 1804 asoc->tsn_of_pdapi_last_delivered = tsn; 1805 asoc->last_flags_delivered = ch->ch.chunk_flags; 1806 asoc->last_strm_seq_delivered = strmseq; 1807 asoc->last_strm_no_delivered = strmno; 1808 asoc->tsn_last_delivered = tsn; 1809 1810 if (end) { 1811 /* clean up the flags and such */ 1812 asoc->fragmented_delivery_inprogress = 0; 1813 asoc->strmin[strmno].last_sequence_delivered++; 1814 stcb->asoc.control_pdapi = NULL; 1815 } 1816 control = NULL; 1817 goto finish_express_del; 1818 } 1819 } 1820 failed_pdapi_express_del: 1821 control = NULL; 1822 if ((ch->ch.chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 1823 sctp_alloc_a_chunk(stcb, chk); 1824 if (chk == NULL) { 1825 /* No memory so we drop the chunk */ 1826 SCTP_STAT_INCR(sctps_nomem); 1827 if (last_chunk == 0) { 1828 /* we copied it, free the copy */ 1829 sctp_m_freem(dmbuf); 1830 } 1831 return (0); 1832 } 1833 chk->rec.data.TSN_seq = tsn; 1834 chk->no_fr_allowed = 0; 1835 chk->rec.data.stream_seq = strmseq; 1836 chk->rec.data.stream_number = strmno; 1837 chk->rec.data.payloadtype = ch->dp.protocol_id; 1838 chk->rec.data.context = stcb->asoc.context; 1839 chk->rec.data.doing_fast_retransmit = 0; 1840 chk->rec.data.rcv_flags = ch->ch.chunk_flags; 1841 chk->asoc = asoc; 1842 chk->send_size = the_len; 1843 chk->whoTo = net; 1844 atomic_add_int(&net->ref_count, 1); 1845 chk->data = dmbuf; 1846 } else { 1847 sctp_alloc_a_readq(stcb, control); 1848 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 1849 ch->dp.protocol_id, 1850 stcb->asoc.context, 1851 strmno, strmseq, 1852 ch->ch.chunk_flags, 1853 dmbuf); 1854 if (control == NULL) { 1855 /* No memory so we drop the chunk */ 1856 SCTP_STAT_INCR(sctps_nomem); 1857 if (last_chunk == 0) { 1858 /* we copied it, free the copy */ 1859 sctp_m_freem(dmbuf); 1860 } 1861 return (0); 1862 } 1863 control->length = the_len; 1864 } 1865 1866 /* Mark it as received */ 1867 /* Now queue it where it belongs */ 1868 if (control != NULL) { 1869 /* First a sanity check */ 1870 if (asoc->fragmented_delivery_inprogress) { 1871 /* 1872 * Ok, we have a fragmented delivery in progress if 1873 * this chunk is next to deliver OR belongs in our 1874 * view to the reassembly, the peer is evil or 1875 * broken. 1876 */ 1877 uint32_t estimate_tsn; 1878 1879 estimate_tsn = asoc->tsn_last_delivered + 1; 1880 if (TAILQ_EMPTY(&asoc->reasmqueue) && 1881 (estimate_tsn == control->sinfo_tsn)) { 1882 /* Evil/Broke peer */ 1883 sctp_m_freem(control->data); 1884 control->data = NULL; 1885 sctp_free_remote_addr(control->whoFrom); 1886 sctp_free_a_readq(stcb, control); 1887 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1888 0, M_DONTWAIT, 1, MT_DATA); 1889 if (oper) { 1890 struct sctp_paramhdr *ph; 1891 uint32_t *ippp; 1892 1893 oper->m_len = 1894 sizeof(struct sctp_paramhdr) + 1895 (3 * sizeof(uint32_t)); 1896 ph = mtod(oper, struct sctp_paramhdr *); 1897 ph->param_type = 1898 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1899 ph->param_length = htons(oper->m_len); 1900 ippp = (uint32_t *) (ph + 1); 1901 *ippp = htonl(0x20000002); 1902 ippp++; 1903 *ippp = tsn; 1904 ippp++; 1905 *ippp = ((strmno << 16) | strmseq); 1906 } 1907 sctp_abort_an_association(stcb->sctp_ep, stcb, 1908 SCTP_PEER_FAULTY, oper); 1909 1910 *abort_flag = 1; 1911 return (0); 1912 } else { 1913 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) { 1914 sctp_m_freem(control->data); 1915 control->data = NULL; 1916 sctp_free_remote_addr(control->whoFrom); 1917 sctp_free_a_readq(stcb, control); 1918 1919 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1920 0, M_DONTWAIT, 1, MT_DATA); 1921 if (oper) { 1922 struct sctp_paramhdr *ph; 1923 uint32_t *ippp; 1924 1925 oper->m_len = 1926 sizeof(struct sctp_paramhdr) + 1927 (3 * sizeof(uint32_t)); 1928 ph = mtod(oper, 1929 struct sctp_paramhdr *); 1930 ph->param_type = 1931 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1932 ph->param_length = 1933 htons(oper->m_len); 1934 ippp = (uint32_t *) (ph + 1); 1935 *ippp = htonl(0x20000003); 1936 ippp++; 1937 *ippp = tsn; 1938 ippp++; 1939 *ippp = ((strmno << 16) | strmseq); 1940 } 1941 sctp_abort_an_association(stcb->sctp_ep, 1942 stcb, SCTP_PEER_FAULTY, oper); 1943 1944 *abort_flag = 1; 1945 return (0); 1946 } 1947 } 1948 } else { 1949 /* No PDAPI running */ 1950 if (!TAILQ_EMPTY(&asoc->reasmqueue)) { 1951 /* 1952 * Reassembly queue is NOT empty validate 1953 * that this tsn does not need to be in 1954 * reasembly queue. If it does then our peer 1955 * is broken or evil. 1956 */ 1957 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) { 1958 sctp_m_freem(control->data); 1959 control->data = NULL; 1960 sctp_free_remote_addr(control->whoFrom); 1961 sctp_free_a_readq(stcb, control); 1962 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1963 0, M_DONTWAIT, 1, MT_DATA); 1964 if (oper) { 1965 struct sctp_paramhdr *ph; 1966 uint32_t *ippp; 1967 1968 oper->m_len = 1969 sizeof(struct sctp_paramhdr) + 1970 (3 * sizeof(uint32_t)); 1971 ph = mtod(oper, 1972 struct sctp_paramhdr *); 1973 ph->param_type = 1974 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1975 ph->param_length = 1976 htons(oper->m_len); 1977 ippp = (uint32_t *) (ph + 1); 1978 *ippp = htonl(0x20000004); 1979 ippp++; 1980 *ippp = tsn; 1981 ippp++; 1982 *ippp = ((strmno << 16) | strmseq); 1983 } 1984 sctp_abort_an_association(stcb->sctp_ep, 1985 stcb, SCTP_PEER_FAULTY, oper); 1986 1987 *abort_flag = 1; 1988 return (0); 1989 } 1990 } 1991 } 1992 /* ok, if we reach here we have passed the sanity checks */ 1993 if (ch->ch.chunk_flags & SCTP_DATA_UNORDERED) { 1994 /* queue directly into socket buffer */ 1995 sctp_add_to_readq(stcb->sctp_ep, stcb, 1996 control, 1997 &stcb->sctp_socket->so_rcv, 1); 1998 } else { 1999 /* 2000 * Special check for when streams are resetting. We 2001 * could be more smart about this and check the 2002 * actual stream to see if it is not being reset.. 2003 * that way we would not create a HOLB when amongst 2004 * streams being reset and those not being reset. 2005 * 2006 * We take complete messages that have a stream reset 2007 * intervening (aka the TSN is after where our 2008 * cum-ack needs to be) off and put them on a 2009 * pending_reply_queue. The reassembly ones we do 2010 * not have to worry about since they are all sorted 2011 * and proceessed by TSN order. It is only the 2012 * singletons I must worry about. 2013 */ 2014 struct sctp_stream_reset_list *liste; 2015 2016 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2017 ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)) || 2018 (tsn == ntohl(liste->tsn))) 2019 ) { 2020 /* 2021 * yep its past where we need to reset... go 2022 * ahead and queue it. 2023 */ 2024 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) { 2025 /* first one on */ 2026 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2027 } else { 2028 struct sctp_queued_to_read *ctlOn; 2029 unsigned char inserted = 0; 2030 2031 ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue); 2032 while (ctlOn) { 2033 if (compare_with_wrap(control->sinfo_tsn, 2034 ctlOn->sinfo_tsn, MAX_TSN)) { 2035 ctlOn = TAILQ_NEXT(ctlOn, next); 2036 } else { 2037 /* found it */ 2038 TAILQ_INSERT_BEFORE(ctlOn, control, next); 2039 inserted = 1; 2040 break; 2041 } 2042 } 2043 if (inserted == 0) { 2044 /* 2045 * must be put at end, use 2046 * prevP (all setup from 2047 * loop) to setup nextP. 2048 */ 2049 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2050 } 2051 } 2052 } else { 2053 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag); 2054 if (*abort_flag) { 2055 return (0); 2056 } 2057 } 2058 } 2059 } else { 2060 /* Into the re-assembly queue */ 2061 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag); 2062 if (*abort_flag) { 2063 return (0); 2064 } 2065 } 2066 finish_express_del: 2067 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) { 2068 /* we have a new high score */ 2069 asoc->highest_tsn_inside_map = tsn; 2070 #ifdef SCTP_MAP_LOGGING 2071 sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 2072 #endif 2073 } 2074 if (tsn == (asoc->cumulative_tsn + 1)) { 2075 /* Update cum-ack */ 2076 asoc->cumulative_tsn = tsn; 2077 } 2078 if (last_chunk) { 2079 *m = NULL; 2080 } 2081 if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) { 2082 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); 2083 } else { 2084 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); 2085 } 2086 SCTP_STAT_INCR(sctps_recvdata); 2087 /* Set it present please */ 2088 #ifdef SCTP_STR_LOGGING 2089 sctp_log_strm_del_alt(tsn, strmseq, SCTP_STR_LOG_FROM_MARK_TSN); 2090 #endif 2091 #ifdef SCTP_MAP_LOGGING 2092 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2093 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); 2094 #endif 2095 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); 2096 return (1); 2097 } 2098 2099 int8_t sctp_map_lookup_tab[256] = { 2100 -1, 0, -1, 1, -1, 0, -1, 2, 2101 -1, 0, -1, 1, -1, 0, -1, 3, 2102 -1, 0, -1, 1, -1, 0, -1, 2, 2103 -1, 0, -1, 1, -1, 0, -1, 4, 2104 -1, 0, -1, 1, -1, 0, -1, 2, 2105 -1, 0, -1, 1, -1, 0, -1, 3, 2106 -1, 0, -1, 1, -1, 0, -1, 2, 2107 -1, 0, -1, 1, -1, 0, -1, 5, 2108 -1, 0, -1, 1, -1, 0, -1, 2, 2109 -1, 0, -1, 1, -1, 0, -1, 3, 2110 -1, 0, -1, 1, -1, 0, -1, 2, 2111 -1, 0, -1, 1, -1, 0, -1, 4, 2112 -1, 0, -1, 1, -1, 0, -1, 2, 2113 -1, 0, -1, 1, -1, 0, -1, 3, 2114 -1, 0, -1, 1, -1, 0, -1, 2, 2115 -1, 0, -1, 1, -1, 0, -1, 6, 2116 -1, 0, -1, 1, -1, 0, -1, 2, 2117 -1, 0, -1, 1, -1, 0, -1, 3, 2118 -1, 0, -1, 1, -1, 0, -1, 2, 2119 -1, 0, -1, 1, -1, 0, -1, 4, 2120 -1, 0, -1, 1, -1, 0, -1, 2, 2121 -1, 0, -1, 1, -1, 0, -1, 3, 2122 -1, 0, -1, 1, -1, 0, -1, 2, 2123 -1, 0, -1, 1, -1, 0, -1, 5, 2124 -1, 0, -1, 1, -1, 0, -1, 2, 2125 -1, 0, -1, 1, -1, 0, -1, 3, 2126 -1, 0, -1, 1, -1, 0, -1, 2, 2127 -1, 0, -1, 1, -1, 0, -1, 4, 2128 -1, 0, -1, 1, -1, 0, -1, 2, 2129 -1, 0, -1, 1, -1, 0, -1, 3, 2130 -1, 0, -1, 1, -1, 0, -1, 2, 2131 -1, 0, -1, 1, -1, 0, -1, 7, 2132 }; 2133 2134 2135 void 2136 sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag) 2137 { 2138 /* 2139 * Now we also need to check the mapping array in a couple of ways. 2140 * 1) Did we move the cum-ack point? 2141 */ 2142 struct sctp_association *asoc; 2143 int i, at; 2144 int all_ones; 2145 int slide_from, slide_end, lgap, distance; 2146 2147 #ifdef SCTP_MAP_LOGGING 2148 uint32_t old_cumack, old_base, old_highest; 2149 unsigned char aux_array[64]; 2150 2151 #endif 2152 struct sctp_stream_reset_list *liste; 2153 2154 asoc = &stcb->asoc; 2155 at = 0; 2156 2157 #ifdef SCTP_MAP_LOGGING 2158 old_cumack = asoc->cumulative_tsn; 2159 old_base = asoc->mapping_array_base_tsn; 2160 old_highest = asoc->highest_tsn_inside_map; 2161 if (asoc->mapping_array_size < 64) 2162 memcpy(aux_array, asoc->mapping_array, 2163 asoc->mapping_array_size); 2164 else 2165 memcpy(aux_array, asoc->mapping_array, 64); 2166 #endif 2167 2168 /* 2169 * We could probably improve this a small bit by calculating the 2170 * offset of the current cum-ack as the starting point. 2171 */ 2172 all_ones = 1; 2173 at = 0; 2174 for (i = 0; i < stcb->asoc.mapping_array_size; i++) { 2175 if (asoc->mapping_array[i] == 0xff) { 2176 at += 8; 2177 } else { 2178 /* there is a 0 bit */ 2179 all_ones = 0; 2180 at += sctp_map_lookup_tab[asoc->mapping_array[i]]; 2181 break; 2182 } 2183 } 2184 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + at; 2185 /* at is one off, since in the table a embedded -1 is present */ 2186 at++; 2187 2188 if (compare_with_wrap(asoc->cumulative_tsn, 2189 asoc->highest_tsn_inside_map, 2190 MAX_TSN)) { 2191 #ifdef INVARIENTS 2192 panic("huh, cumack greater than high-tsn in map"); 2193 #else 2194 printf("huh, cumack greater than high-tsn in map - should panic?\n"); 2195 asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2196 #endif 2197 } 2198 if (all_ones || 2199 (asoc->cumulative_tsn == asoc->highest_tsn_inside_map && at >= 8)) { 2200 /* The complete array was completed by a single FR */ 2201 /* higest becomes the cum-ack */ 2202 int clr; 2203 2204 asoc->cumulative_tsn = asoc->highest_tsn_inside_map; 2205 /* clear the array */ 2206 if (all_ones) 2207 clr = asoc->mapping_array_size; 2208 else { 2209 clr = (at >> 3) + 1; 2210 /* 2211 * this should be the allones case but just in case 2212 * :> 2213 */ 2214 if (clr > asoc->mapping_array_size) 2215 clr = asoc->mapping_array_size; 2216 } 2217 memset(asoc->mapping_array, 0, clr); 2218 /* base becomes one ahead of the cum-ack */ 2219 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; 2220 #ifdef SCTP_MAP_LOGGING 2221 sctp_log_map(old_base, old_cumack, old_highest, 2222 SCTP_MAP_PREPARE_SLIDE); 2223 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2224 asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED); 2225 #endif 2226 } else if (at >= 8) { 2227 /* we can slide the mapping array down */ 2228 /* Calculate the new byte postion we can move down */ 2229 slide_from = at >> 3; 2230 /* 2231 * now calculate the ceiling of the move using our highest 2232 * TSN value 2233 */ 2234 if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) { 2235 lgap = asoc->highest_tsn_inside_map - 2236 asoc->mapping_array_base_tsn; 2237 } else { 2238 lgap = (MAX_TSN - asoc->mapping_array_base_tsn) + 2239 asoc->highest_tsn_inside_map + 1; 2240 } 2241 slide_end = lgap >> 3; 2242 if (slide_end < slide_from) { 2243 panic("impossible slide"); 2244 } 2245 distance = (slide_end - slide_from) + 1; 2246 #ifdef SCTP_MAP_LOGGING 2247 sctp_log_map(old_base, old_cumack, old_highest, 2248 SCTP_MAP_PREPARE_SLIDE); 2249 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end, 2250 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM); 2251 #endif 2252 if (distance + slide_from > asoc->mapping_array_size || 2253 distance < 0) { 2254 /* 2255 * Here we do NOT slide forward the array so that 2256 * hopefully when more data comes in to fill it up 2257 * we will be able to slide it forward. Really I 2258 * don't think this should happen :-0 2259 */ 2260 2261 #ifdef SCTP_MAP_LOGGING 2262 sctp_log_map((uint32_t) distance, (uint32_t) slide_from, 2263 (uint32_t) asoc->mapping_array_size, 2264 SCTP_MAP_SLIDE_NONE); 2265 #endif 2266 } else { 2267 int ii; 2268 2269 for (ii = 0; ii < distance; ii++) { 2270 asoc->mapping_array[ii] = 2271 asoc->mapping_array[slide_from + ii]; 2272 } 2273 for (ii = distance; ii <= slide_end; ii++) { 2274 asoc->mapping_array[ii] = 0; 2275 } 2276 asoc->mapping_array_base_tsn += (slide_from << 3); 2277 #ifdef SCTP_MAP_LOGGING 2278 sctp_log_map(asoc->mapping_array_base_tsn, 2279 asoc->cumulative_tsn, asoc->highest_tsn_inside_map, 2280 SCTP_MAP_SLIDE_RESULT); 2281 #endif 2282 } 2283 } 2284 /* check the special flag for stream resets */ 2285 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2286 ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) || 2287 (asoc->cumulative_tsn == liste->tsn)) 2288 ) { 2289 /* 2290 * we have finished working through the backlogged TSN's now 2291 * time to reset streams. 1: call reset function. 2: free 2292 * pending_reply space 3: distribute any chunks in 2293 * pending_reply_queue. 2294 */ 2295 struct sctp_queued_to_read *ctl; 2296 2297 sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams); 2298 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); 2299 SCTP_FREE(liste); 2300 liste = TAILQ_FIRST(&asoc->resetHead); 2301 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2302 if (ctl && (liste == NULL)) { 2303 /* All can be removed */ 2304 while (ctl) { 2305 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 2306 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); 2307 if (*abort_flag) { 2308 return; 2309 } 2310 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2311 } 2312 } else if (ctl) { 2313 /* more than one in queue */ 2314 while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) { 2315 /* 2316 * if ctl->sinfo_tsn is <= liste->tsn we can 2317 * process it which is the NOT of 2318 * ctl->sinfo_tsn > liste->tsn 2319 */ 2320 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 2321 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); 2322 if (*abort_flag) { 2323 return; 2324 } 2325 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2326 } 2327 } 2328 /* 2329 * Now service re-assembly to pick up anything that has been 2330 * held on reassembly queue? 2331 */ 2332 sctp_deliver_reasm_check(stcb, asoc); 2333 } 2334 /* 2335 * Now we need to see if we need to queue a sack or just start the 2336 * timer (if allowed). 2337 */ 2338 if (ok_to_sack) { 2339 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2340 /* 2341 * Ok special case, in SHUTDOWN-SENT case. here we 2342 * maker sure SACK timer is off and instead send a 2343 * SHUTDOWN and a SACK 2344 */ 2345 if (callout_pending(&stcb->asoc.dack_timer.timer)) { 2346 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 2347 stcb->sctp_ep, stcb, NULL); 2348 } 2349 sctp_send_shutdown(stcb, stcb->asoc.primary_destination); 2350 sctp_send_sack(stcb); 2351 } else { 2352 int is_a_gap; 2353 2354 /* is there a gap now ? */ 2355 is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map, 2356 stcb->asoc.cumulative_tsn, MAX_TSN); 2357 2358 /* 2359 * CMT DAC algorithm: increase number of packets 2360 * received since last ack 2361 */ 2362 stcb->asoc.cmt_dac_pkts_rcvd++; 2363 2364 if ((stcb->asoc.first_ack_sent == 0) || /* First time we send a 2365 * sack */ 2366 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no 2367 * longer is one */ 2368 (stcb->asoc.numduptsns) || /* we have dup's */ 2369 (is_a_gap) || /* is still a gap */ 2370 (stcb->asoc.delayed_ack == 0) || 2371 (callout_pending(&stcb->asoc.dack_timer.timer)) /* timer was up . second 2372 * packet */ 2373 ) { 2374 2375 if ((sctp_cmt_on_off) && (sctp_cmt_use_dac) && 2376 (stcb->asoc.first_ack_sent == 1) && 2377 (stcb->asoc.numduptsns == 0) && 2378 (stcb->asoc.delayed_ack) && 2379 (!callout_pending(&stcb->asoc.dack_timer.timer))) { 2380 2381 /* 2382 * CMT DAC algorithm: With CMT, 2383 * delay acks even in the face of 2384 * 2385 * reordering. Therefore, if acks that 2386 * do not have to be sent because of 2387 * the above reasons, will be 2388 * delayed. That is, acks that would 2389 * have been sent due to gap reports 2390 * will be delayed with DAC. Start 2391 * the delayed ack timer. 2392 */ 2393 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2394 stcb->sctp_ep, stcb, NULL); 2395 } else { 2396 /* 2397 * Ok we must build a SACK since the 2398 * timer is pending, we got our 2399 * first packet OR there are gaps or 2400 * duplicates. 2401 */ 2402 stcb->asoc.first_ack_sent = 1; 2403 2404 sctp_send_sack(stcb); 2405 /* The sending will stop the timer */ 2406 } 2407 } else { 2408 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2409 stcb->sctp_ep, stcb, NULL); 2410 } 2411 } 2412 } 2413 } 2414 2415 void 2416 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc) 2417 { 2418 struct sctp_tmit_chunk *chk; 2419 uint32_t tsize; 2420 uint16_t nxt_todel; 2421 2422 if (asoc->fragmented_delivery_inprogress) { 2423 sctp_service_reassembly(stcb, asoc); 2424 } 2425 /* Can we proceed further, i.e. the PD-API is complete */ 2426 if (asoc->fragmented_delivery_inprogress) { 2427 /* no */ 2428 return; 2429 } 2430 /* 2431 * Now is there some other chunk I can deliver from the reassembly 2432 * queue. 2433 */ 2434 chk = TAILQ_FIRST(&asoc->reasmqueue); 2435 if (chk == NULL) { 2436 asoc->size_on_reasm_queue = 0; 2437 asoc->cnt_on_reasm_queue = 0; 2438 return; 2439 } 2440 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1; 2441 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) && 2442 ((nxt_todel == chk->rec.data.stream_seq) || 2443 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) { 2444 /* 2445 * Yep the first one is here. We setup to start reception, 2446 * by backing down the TSN just in case we can't deliver. 2447 */ 2448 2449 /* 2450 * Before we start though either all of the message should 2451 * be here or 1/4 the socket buffer max or nothing on the 2452 * delivery queue and something can be delivered. 2453 */ 2454 if ((sctp_is_all_msg_on_reasm(asoc, &tsize) || 2455 (tsize > stcb->sctp_ep->partial_delivery_point))) { 2456 asoc->fragmented_delivery_inprogress = 1; 2457 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1; 2458 asoc->str_of_pdapi = chk->rec.data.stream_number; 2459 asoc->ssn_of_pdapi = chk->rec.data.stream_seq; 2460 asoc->pdapi_ppid = chk->rec.data.payloadtype; 2461 asoc->fragment_flags = chk->rec.data.rcv_flags; 2462 sctp_service_reassembly(stcb, asoc); 2463 } 2464 } 2465 } 2466 2467 extern int sctp_strict_data_order; 2468 2469 int 2470 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, 2471 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2472 struct sctp_nets *net, uint32_t * high_tsn) 2473 { 2474 struct sctp_data_chunk *ch, chunk_buf; 2475 struct sctp_association *asoc; 2476 int num_chunks = 0; /* number of control chunks processed */ 2477 int stop_proc = 0; 2478 int chk_length, break_flag, last_chunk; 2479 int abort_flag = 0, was_a_gap = 0; 2480 struct mbuf *m; 2481 2482 /* set the rwnd */ 2483 sctp_set_rwnd(stcb, &stcb->asoc); 2484 2485 m = *mm; 2486 SCTP_TCB_LOCK_ASSERT(stcb); 2487 asoc = &stcb->asoc; 2488 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 2489 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 2490 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 2491 /* 2492 * wait a minute, this guy is gone, there is no longer a 2493 * receiver. Send peer an ABORT! 2494 */ 2495 struct mbuf *op_err; 2496 2497 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 2498 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err); 2499 return (2); 2500 } 2501 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map, 2502 stcb->asoc.cumulative_tsn, MAX_TSN)) { 2503 /* there was a gap before this data was processed */ 2504 was_a_gap = 1; 2505 } 2506 /* 2507 * setup where we got the last DATA packet from for any SACK that 2508 * may need to go out. Don't bump the net. This is done ONLY when a 2509 * chunk is assigned. 2510 */ 2511 asoc->last_data_chunk_from = net; 2512 2513 /* 2514 * Now before we proceed we must figure out if this is a wasted 2515 * cluster... i.e. it is a small packet sent in and yet the driver 2516 * underneath allocated a full cluster for it. If so we must copy it 2517 * to a smaller mbuf and free up the cluster mbuf. This will help 2518 * with cluster starvation. 2519 */ 2520 if (m->m_len < (long)MHLEN && m->m_next == NULL) { 2521 /* we only handle mbufs that are singletons.. not chains */ 2522 m = sctp_get_mbuf_for_msg(m->m_len, 1, M_DONTWAIT, 1, MT_DATA); 2523 if (m) { 2524 /* ok lets see if we can copy the data up */ 2525 caddr_t *from, *to; 2526 2527 if ((*mm)->m_flags & M_PKTHDR) { 2528 /* got to copy the header first */ 2529 M_MOVE_PKTHDR(m, (*mm)); 2530 } 2531 /* get the pointers and copy */ 2532 to = mtod(m, caddr_t *); 2533 from = mtod((*mm), caddr_t *); 2534 memcpy(to, from, (*mm)->m_len); 2535 /* copy the length and free up the old */ 2536 m->m_len = (*mm)->m_len; 2537 sctp_m_freem(*mm); 2538 /* sucess, back copy */ 2539 *mm = m; 2540 } else { 2541 /* We are in trouble in the mbuf world .. yikes */ 2542 m = *mm; 2543 } 2544 } 2545 /* get pointer to the first chunk header */ 2546 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset, 2547 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf); 2548 if (ch == NULL) { 2549 return (1); 2550 } 2551 /* 2552 * process all DATA chunks... 2553 */ 2554 *high_tsn = asoc->cumulative_tsn; 2555 break_flag = 0; 2556 while (stop_proc == 0) { 2557 /* validate chunk length */ 2558 chk_length = ntohs(ch->ch.chunk_length); 2559 if (length - *offset < chk_length) { 2560 /* all done, mutulated chunk */ 2561 stop_proc = 1; 2562 break; 2563 } 2564 if (ch->ch.chunk_type == SCTP_DATA) { 2565 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) { 2566 /* 2567 * Need to send an abort since we had a 2568 * invalid data chunk. 2569 */ 2570 struct mbuf *op_err; 2571 2572 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 2573 0, M_DONTWAIT, 1, MT_DATA); 2574 2575 if (op_err) { 2576 struct sctp_paramhdr *ph; 2577 uint32_t *ippp; 2578 2579 op_err->m_len = sizeof(struct sctp_paramhdr) + 2580 (2 * sizeof(uint32_t)); 2581 ph = mtod(op_err, struct sctp_paramhdr *); 2582 ph->param_type = 2583 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 2584 ph->param_length = htons(op_err->m_len); 2585 ippp = (uint32_t *) (ph + 1); 2586 *ippp = htonl(0x30000001); 2587 ippp++; 2588 *ippp = asoc->cumulative_tsn; 2589 2590 } 2591 sctp_abort_association(inp, stcb, m, iphlen, sh, 2592 op_err); 2593 return (2); 2594 } 2595 #ifdef SCTP_AUDITING_ENABLED 2596 sctp_audit_log(0xB1, 0); 2597 #endif 2598 if (SCTP_SIZE32(chk_length) == (length - *offset)) { 2599 last_chunk = 1; 2600 } else { 2601 last_chunk = 0; 2602 } 2603 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch, 2604 chk_length, net, high_tsn, &abort_flag, &break_flag, 2605 last_chunk)) { 2606 num_chunks++; 2607 } 2608 if (abort_flag) 2609 return (2); 2610 2611 if (break_flag) { 2612 /* 2613 * Set because of out of rwnd space and no 2614 * drop rep space left. 2615 */ 2616 stop_proc = 1; 2617 break; 2618 } 2619 } else { 2620 /* not a data chunk in the data region */ 2621 switch (ch->ch.chunk_type) { 2622 case SCTP_INITIATION: 2623 case SCTP_INITIATION_ACK: 2624 case SCTP_SELECTIVE_ACK: 2625 case SCTP_HEARTBEAT_REQUEST: 2626 case SCTP_HEARTBEAT_ACK: 2627 case SCTP_ABORT_ASSOCIATION: 2628 case SCTP_SHUTDOWN: 2629 case SCTP_SHUTDOWN_ACK: 2630 case SCTP_OPERATION_ERROR: 2631 case SCTP_COOKIE_ECHO: 2632 case SCTP_COOKIE_ACK: 2633 case SCTP_ECN_ECHO: 2634 case SCTP_ECN_CWR: 2635 case SCTP_SHUTDOWN_COMPLETE: 2636 case SCTP_AUTHENTICATION: 2637 case SCTP_ASCONF_ACK: 2638 case SCTP_PACKET_DROPPED: 2639 case SCTP_STREAM_RESET: 2640 case SCTP_FORWARD_CUM_TSN: 2641 case SCTP_ASCONF: 2642 /* 2643 * Now, what do we do with KNOWN chunks that 2644 * are NOT in the right place? 2645 * 2646 * For now, I do nothing but ignore them. We 2647 * may later want to add sysctl stuff to 2648 * switch out and do either an ABORT() or 2649 * possibly process them. 2650 */ 2651 if (sctp_strict_data_order) { 2652 struct mbuf *op_err; 2653 2654 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION); 2655 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err); 2656 return (2); 2657 } 2658 break; 2659 default: 2660 /* unknown chunk type, use bit rules */ 2661 if (ch->ch.chunk_type & 0x40) { 2662 /* Add a error report to the queue */ 2663 struct mbuf *mm; 2664 struct sctp_paramhdr *phd; 2665 2666 mm = sctp_get_mbuf_for_msg(sizeof(*phd), 1, M_DONTWAIT, 1, MT_DATA); 2667 if (mm) { 2668 phd = mtod(mm, struct sctp_paramhdr *); 2669 /* 2670 * We cheat and use param 2671 * type since we did not 2672 * bother to define a error 2673 * cause struct. They are 2674 * the same basic format 2675 * with different names. 2676 */ 2677 phd->param_type = 2678 htons(SCTP_CAUSE_UNRECOG_CHUNK); 2679 phd->param_length = 2680 htons(chk_length + sizeof(*phd)); 2681 mm->m_len = sizeof(*phd); 2682 mm->m_next = sctp_m_copym(m, *offset, 2683 SCTP_SIZE32(chk_length), 2684 M_DONTWAIT); 2685 if (mm->m_next) { 2686 mm->m_pkthdr.len = 2687 SCTP_SIZE32(chk_length) + 2688 sizeof(*phd); 2689 sctp_queue_op_err(stcb, mm); 2690 } else { 2691 sctp_m_freem(mm); 2692 } 2693 } 2694 } 2695 if ((ch->ch.chunk_type & 0x80) == 0) { 2696 /* discard the rest of this packet */ 2697 stop_proc = 1; 2698 } /* else skip this bad chunk and 2699 * continue... */ 2700 break; 2701 }; /* switch of chunk type */ 2702 } 2703 *offset += SCTP_SIZE32(chk_length); 2704 if ((*offset >= length) || stop_proc) { 2705 /* no more data left in the mbuf chain */ 2706 stop_proc = 1; 2707 continue; 2708 } 2709 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset, 2710 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf); 2711 if (ch == NULL) { 2712 *offset = length; 2713 stop_proc = 1; 2714 break; 2715 2716 } 2717 } /* while */ 2718 if (break_flag) { 2719 /* 2720 * we need to report rwnd overrun drops. 2721 */ 2722 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0); 2723 } 2724 if (num_chunks) { 2725 /* 2726 * Did we get data, if so update the time for auto-close and 2727 * give peer credit for being alive. 2728 */ 2729 SCTP_STAT_INCR(sctps_recvpktwithdata); 2730 stcb->asoc.overall_error_count = 0; 2731 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd); 2732 } 2733 /* now service all of the reassm queue if needed */ 2734 if (!(TAILQ_EMPTY(&asoc->reasmqueue))) 2735 sctp_service_queues(stcb, asoc); 2736 2737 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2738 /* 2739 * Assure that we ack right away by making sure that a d-ack 2740 * timer is running. So the sack_check will send a sack. 2741 */ 2742 sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, 2743 net); 2744 } 2745 /* Start a sack timer or QUEUE a SACK for sending */ 2746 if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) && 2747 (stcb->asoc.first_ack_sent)) { 2748 /* Everything is in order */ 2749 if (stcb->asoc.mapping_array[0] == 0xff) { 2750 /* need to do the slide */ 2751 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag); 2752 } else { 2753 if (callout_pending(&stcb->asoc.dack_timer.timer)) { 2754 stcb->asoc.first_ack_sent = 1; 2755 callout_stop(&stcb->asoc.dack_timer.timer); 2756 sctp_send_sack(stcb); 2757 } else { 2758 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2759 stcb->sctp_ep, stcb, NULL); 2760 } 2761 } 2762 } else { 2763 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag); 2764 } 2765 if (abort_flag) 2766 return (2); 2767 2768 return (0); 2769 } 2770 2771 static void 2772 sctp_handle_segments(struct sctp_tcb *stcb, struct sctp_association *asoc, 2773 struct sctp_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked, 2774 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack, int num_seg, int *ecn_seg_sums) 2775 { 2776 /************************************************/ 2777 /* process fragments and update sendqueue */ 2778 /************************************************/ 2779 struct sctp_sack *sack; 2780 struct sctp_gap_ack_block *frag; 2781 struct sctp_tmit_chunk *tp1; 2782 int i; 2783 unsigned int j; 2784 2785 #ifdef SCTP_FR_LOGGING 2786 int num_frs = 0; 2787 2788 #endif 2789 uint16_t frag_strt, frag_end, primary_flag_set; 2790 u_long last_frag_high; 2791 2792 /* 2793 * @@@ JRI : TODO: This flag is not used anywhere .. remove? 2794 */ 2795 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) { 2796 primary_flag_set = 1; 2797 } else { 2798 primary_flag_set = 0; 2799 } 2800 2801 sack = &ch->sack; 2802 frag = (struct sctp_gap_ack_block *)((caddr_t)sack + 2803 sizeof(struct sctp_sack)); 2804 tp1 = NULL; 2805 last_frag_high = 0; 2806 for (i = 0; i < num_seg; i++) { 2807 frag_strt = ntohs(frag->start); 2808 frag_end = ntohs(frag->end); 2809 /* some sanity checks on the fargment offsets */ 2810 if (frag_strt > frag_end) { 2811 /* this one is malformed, skip */ 2812 frag++; 2813 continue; 2814 } 2815 if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked, 2816 MAX_TSN)) 2817 *biggest_tsn_acked = frag_end + last_tsn; 2818 2819 /* mark acked dgs and find out the highestTSN being acked */ 2820 if (tp1 == NULL) { 2821 tp1 = TAILQ_FIRST(&asoc->sent_queue); 2822 2823 /* save the locations of the last frags */ 2824 last_frag_high = frag_end + last_tsn; 2825 } else { 2826 /* 2827 * now lets see if we need to reset the queue due to 2828 * a out-of-order SACK fragment 2829 */ 2830 if (compare_with_wrap(frag_strt + last_tsn, 2831 last_frag_high, MAX_TSN)) { 2832 /* 2833 * if the new frag starts after the last TSN 2834 * frag covered, we are ok and this one is 2835 * beyond the last one 2836 */ 2837 ; 2838 } else { 2839 /* 2840 * ok, they have reset us, so we need to 2841 * reset the queue this will cause extra 2842 * hunting but hey, they chose the 2843 * performance hit when they failed to order 2844 * there gaps.. 2845 */ 2846 tp1 = TAILQ_FIRST(&asoc->sent_queue); 2847 } 2848 last_frag_high = frag_end + last_tsn; 2849 } 2850 for (j = frag_strt + last_tsn; j <= frag_end + last_tsn; j++) { 2851 while (tp1) { 2852 #ifdef SCTP_FR_LOGGING 2853 if (tp1->rec.data.doing_fast_retransmit) 2854 num_frs++; 2855 #endif 2856 2857 /* 2858 * CMT: CUCv2 algorithm. For each TSN being 2859 * processed from the sent queue, track the 2860 * next expected pseudo-cumack, or 2861 * rtx_pseudo_cumack, if required. Separate 2862 * cumack trackers for first transmissions, 2863 * and retransmissions. 2864 */ 2865 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) && 2866 (tp1->snd_count == 1)) { 2867 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq; 2868 tp1->whoTo->find_pseudo_cumack = 0; 2869 } 2870 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) && 2871 (tp1->snd_count > 1)) { 2872 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq; 2873 tp1->whoTo->find_rtx_pseudo_cumack = 0; 2874 } 2875 if (tp1->rec.data.TSN_seq == j) { 2876 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 2877 /* 2878 * must be held until 2879 * cum-ack passes 2880 */ 2881 /* 2882 * ECN Nonce: Add the nonce 2883 * value to the sender's 2884 * nonce sum 2885 */ 2886 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 2887 /* 2888 * If it is less 2889 * than ACKED, it is 2890 * now no-longer in 2891 * flight. Higher 2892 * values may 2893 * already be set 2894 * via previous Gap 2895 * Ack Blocks... 2896 * i.e. ACKED or 2897 * MARKED. 2898 */ 2899 if (compare_with_wrap(tp1->rec.data.TSN_seq, 2900 *biggest_newly_acked_tsn, MAX_TSN)) { 2901 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq; 2902 } 2903 /* 2904 * CMT: SFR algo 2905 * (and HTNA) - set 2906 * saw_newack to 1 2907 * for dest being 2908 * newly acked. 2909 * update 2910 * this_sack_highest_ 2911 * n ewack if 2912 * appropriate. 2913 */ 2914 if (tp1->rec.data.chunk_was_revoked == 0) 2915 tp1->whoTo->saw_newack = 1; 2916 2917 if (compare_with_wrap(tp1->rec.data.TSN_seq, 2918 tp1->whoTo->this_sack_highest_newack, 2919 MAX_TSN)) { 2920 tp1->whoTo->this_sack_highest_newack = 2921 tp1->rec.data.TSN_seq; 2922 } 2923 /* 2924 * CMT DAC algo: 2925 * also update 2926 * this_sack_lowest_n 2927 * e wack 2928 */ 2929 if (*this_sack_lowest_newack == 0) { 2930 #ifdef SCTP_SACK_LOGGING 2931 sctp_log_sack(*this_sack_lowest_newack, 2932 last_tsn, 2933 tp1->rec.data.TSN_seq, 2934 0, 2935 0, 2936 SCTP_LOG_TSN_ACKED); 2937 #endif 2938 *this_sack_lowest_newack = tp1->rec.data.TSN_seq; 2939 } 2940 /* 2941 * CMT: CUCv2 2942 * algorithm. If 2943 * (rtx-)pseudo-cumac 2944 * k for corresp 2945 * dest is being 2946 * acked, then we 2947 * have a new 2948 * (rtx-)pseudo-cumac 2949 * k . Set 2950 * new_(rtx_)pseudo_c 2951 * u mack to TRUE so 2952 * that the cwnd for 2953 * this dest can be 2954 * updated. Also 2955 * trigger search 2956 * for the next 2957 * expected 2958 * (rtx-)pseudo-cumac 2959 * k . Separate 2960 * pseudo_cumack 2961 * trackers for 2962 * first 2963 * transmissions and 2964 * retransmissions. 2965 */ 2966 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) { 2967 if (tp1->rec.data.chunk_was_revoked == 0) { 2968 tp1->whoTo->new_pseudo_cumack = 1; 2969 } 2970 tp1->whoTo->find_pseudo_cumack = 1; 2971 } 2972 #ifdef SCTP_CWND_LOGGING 2973 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 2974 #endif 2975 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) { 2976 if (tp1->rec.data.chunk_was_revoked == 0) { 2977 tp1->whoTo->new_pseudo_cumack = 1; 2978 } 2979 tp1->whoTo->find_rtx_pseudo_cumack = 1; 2980 } 2981 #ifdef SCTP_SACK_LOGGING 2982 sctp_log_sack(*biggest_newly_acked_tsn, 2983 last_tsn, 2984 tp1->rec.data.TSN_seq, 2985 frag_strt, 2986 frag_end, 2987 SCTP_LOG_TSN_ACKED); 2988 #endif 2989 2990 if (tp1->rec.data.chunk_was_revoked == 0) { 2991 /* 2992 * Revoked 2993 * chunks 2994 * don't 2995 * count, 2996 * since we 2997 * previously 2998 * pulled 2999 * them from 3000 * the fs. 3001 */ 3002 if (tp1->whoTo->flight_size >= tp1->book_size) 3003 tp1->whoTo->flight_size -= tp1->book_size; 3004 else 3005 tp1->whoTo->flight_size = 0; 3006 if (asoc->total_flight >= tp1->book_size) { 3007 asoc->total_flight -= tp1->book_size; 3008 if (asoc->total_flight_count > 0) 3009 asoc->total_flight_count--; 3010 } else { 3011 asoc->total_flight = 0; 3012 asoc->total_flight_count = 0; 3013 } 3014 3015 tp1->whoTo->net_ack += tp1->send_size; 3016 3017 if (tp1->snd_count < 2) { 3018 /* 3019 * Tru 3020 * e 3021 * no 3022 * n 3023 * -r 3024 * e 3025 * tr 3026 * a 3027 * ns 3028 * m 3029 * it 3030 * e 3031 * d 3032 * ch 3033 * u 3034 * nk 3035 * */ 3036 tp1->whoTo->net_ack2 += tp1->send_size; 3037 3038 /* 3039 * upd 3040 * 3041 * ate 3042 * 3043 * RTO 3044 * 3045 * too 3046 * ? */ 3047 if (tp1->do_rtt) { 3048 tp1->whoTo->RTO = 3049 sctp_calculate_rto(stcb, 3050 asoc, 3051 tp1->whoTo, 3052 &tp1->sent_rcv_time); 3053 tp1->whoTo->rto_pending = 0; 3054 tp1->do_rtt = 0; 3055 } 3056 } 3057 } 3058 } 3059 if (tp1->sent <= SCTP_DATAGRAM_RESEND && 3060 tp1->sent != SCTP_DATAGRAM_UNSENT && 3061 compare_with_wrap(tp1->rec.data.TSN_seq, 3062 asoc->this_sack_highest_gap, 3063 MAX_TSN)) { 3064 asoc->this_sack_highest_gap = 3065 tp1->rec.data.TSN_seq; 3066 } 3067 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3068 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 3069 #ifdef SCTP_AUDITING_ENABLED 3070 sctp_audit_log(0xB2, 3071 (asoc->sent_queue_retran_cnt & 0x000000ff)); 3072 #endif 3073 3074 } 3075 (*ecn_seg_sums) += tp1->rec.data.ect_nonce; 3076 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM; 3077 3078 tp1->sent = SCTP_DATAGRAM_MARKED; 3079 } 3080 break; 3081 } /* if (tp1->TSN_seq == j) */ 3082 if (compare_with_wrap(tp1->rec.data.TSN_seq, j, 3083 MAX_TSN)) 3084 break; 3085 3086 tp1 = TAILQ_NEXT(tp1, sctp_next); 3087 } /* end while (tp1) */ 3088 } /* end for (j = fragStart */ 3089 frag++; /* next one */ 3090 } 3091 #ifdef SCTP_FR_LOGGING 3092 /* 3093 * if (num_frs) sctp_log_fr(*biggest_tsn_acked, 3094 * *biggest_newly_acked_tsn, last_tsn, SCTP_FR_LOG_BIGGEST_TSNS); 3095 */ 3096 #endif 3097 } 3098 3099 static void 3100 sctp_check_for_revoked(struct sctp_association *asoc, uint32_t cumack, 3101 u_long biggest_tsn_acked) 3102 { 3103 struct sctp_tmit_chunk *tp1; 3104 int tot_revoked = 0; 3105 3106 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3107 while (tp1) { 3108 if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack, 3109 MAX_TSN)) { 3110 /* 3111 * ok this guy is either ACK or MARKED. If it is 3112 * ACKED it has been previously acked but not this 3113 * time i.e. revoked. If it is MARKED it was ACK'ed 3114 * again. 3115 */ 3116 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 3117 /* it has been revoked */ 3118 /* 3119 * We do NOT add back to flight size here 3120 * since it is really NOT in flight. Resend 3121 * (when/if it occurs will add to flight 3122 * size 3123 */ 3124 tp1->sent = SCTP_DATAGRAM_SENT; 3125 tp1->rec.data.chunk_was_revoked = 1; 3126 tot_revoked++; 3127 #ifdef SCTP_SACK_LOGGING 3128 sctp_log_sack(asoc->last_acked_seq, 3129 cumack, 3130 tp1->rec.data.TSN_seq, 3131 0, 3132 0, 3133 SCTP_LOG_TSN_REVOKED); 3134 #endif 3135 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) { 3136 /* it has been re-acked in this SACK */ 3137 tp1->sent = SCTP_DATAGRAM_ACKED; 3138 } 3139 } 3140 if (tp1->sent == SCTP_DATAGRAM_UNSENT) 3141 break; 3142 tp1 = TAILQ_NEXT(tp1, sctp_next); 3143 } 3144 if (tot_revoked > 0) { 3145 /* 3146 * Setup the ecn nonce re-sync point. We do this since once 3147 * data is revoked we begin to retransmit things, which do 3148 * NOT have the ECN bits set. This means we are now out of 3149 * sync and must wait until we get back in sync with the 3150 * peer to check ECN bits. 3151 */ 3152 tp1 = TAILQ_FIRST(&asoc->send_queue); 3153 if (tp1 == NULL) { 3154 asoc->nonce_resync_tsn = asoc->sending_seq; 3155 } else { 3156 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq; 3157 } 3158 asoc->nonce_wait_for_ecne = 0; 3159 asoc->nonce_sum_check = 0; 3160 } 3161 } 3162 3163 extern int sctp_peer_chunk_oh; 3164 3165 static void 3166 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, 3167 u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved) 3168 { 3169 struct sctp_tmit_chunk *tp1; 3170 int strike_flag = 0; 3171 struct timeval now; 3172 int tot_retrans = 0; 3173 uint32_t sending_seq; 3174 struct sctp_nets *net; 3175 int num_dests_sacked = 0; 3176 3177 /* 3178 * select the sending_seq, this is either the next thing ready to be 3179 * sent but not transmitted, OR, the next seq we assign. 3180 */ 3181 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 3182 if (tp1 == NULL) { 3183 sending_seq = asoc->sending_seq; 3184 } else { 3185 sending_seq = tp1->rec.data.TSN_seq; 3186 } 3187 3188 /* CMT DAC algo: finding out if SACK is a mixed SACK */ 3189 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3190 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3191 if (net->saw_newack) 3192 num_dests_sacked++; 3193 } 3194 } 3195 if (stcb->asoc.peer_supports_prsctp) { 3196 SCTP_GETTIME_TIMEVAL(&now); 3197 } 3198 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3199 while (tp1) { 3200 strike_flag = 0; 3201 if (tp1->no_fr_allowed) { 3202 /* this one had a timeout or something */ 3203 tp1 = TAILQ_NEXT(tp1, sctp_next); 3204 continue; 3205 } 3206 #ifdef SCTP_FR_LOGGING 3207 if (tp1->sent < SCTP_DATAGRAM_RESEND) 3208 sctp_log_fr(biggest_tsn_newly_acked, 3209 tp1->rec.data.TSN_seq, 3210 tp1->sent, 3211 SCTP_FR_LOG_CHECK_STRIKE); 3212 #endif 3213 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked, 3214 MAX_TSN) || 3215 tp1->sent == SCTP_DATAGRAM_UNSENT) { 3216 /* done */ 3217 break; 3218 } 3219 if (stcb->asoc.peer_supports_prsctp) { 3220 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3221 /* Is it expired? */ 3222 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3223 /* Yes so drop it */ 3224 if (tp1->data != NULL) { 3225 sctp_release_pr_sctp_chunk(stcb, tp1, 3226 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3227 &asoc->sent_queue); 3228 } 3229 tp1 = TAILQ_NEXT(tp1, sctp_next); 3230 continue; 3231 } 3232 } 3233 if ((PR_SCTP_RTX_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3234 /* Has it been retransmitted tv_sec times? */ 3235 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) { 3236 /* Yes, so drop it */ 3237 if (tp1->data != NULL) { 3238 sctp_release_pr_sctp_chunk(stcb, tp1, 3239 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3240 &asoc->sent_queue); 3241 } 3242 tp1 = TAILQ_NEXT(tp1, sctp_next); 3243 continue; 3244 } 3245 } 3246 } 3247 if (compare_with_wrap(tp1->rec.data.TSN_seq, 3248 asoc->this_sack_highest_gap, MAX_TSN)) { 3249 /* we are beyond the tsn in the sack */ 3250 break; 3251 } 3252 if (tp1->sent >= SCTP_DATAGRAM_RESEND) { 3253 /* either a RESEND, ACKED, or MARKED */ 3254 /* skip */ 3255 tp1 = TAILQ_NEXT(tp1, sctp_next); 3256 continue; 3257 } 3258 /* 3259 * CMT : SFR algo (covers part of DAC and HTNA as well) 3260 */ 3261 if (tp1->whoTo->saw_newack == 0) { 3262 /* 3263 * No new acks were receieved for data sent to this 3264 * dest. Therefore, according to the SFR algo for 3265 * CMT, no data sent to this dest can be marked for 3266 * FR using this SACK. (iyengar@cis.udel.edu, 3267 * 2005/05/12) 3268 */ 3269 tp1 = TAILQ_NEXT(tp1, sctp_next); 3270 continue; 3271 } else if (compare_with_wrap(tp1->rec.data.TSN_seq, 3272 tp1->whoTo->this_sack_highest_newack, MAX_TSN)) { 3273 /* 3274 * CMT: New acks were receieved for data sent to 3275 * this dest. But no new acks were seen for data 3276 * sent after tp1. Therefore, according to the SFR 3277 * algo for CMT, tp1 cannot be marked for FR using 3278 * this SACK. This step covers part of the DAC algo 3279 * and the HTNA algo as well. 3280 */ 3281 tp1 = TAILQ_NEXT(tp1, sctp_next); 3282 continue; 3283 } 3284 /* 3285 * Here we check to see if we were have already done a FR 3286 * and if so we see if the biggest TSN we saw in the sack is 3287 * smaller than the recovery point. If so we don't strike 3288 * the tsn... otherwise we CAN strike the TSN. 3289 */ 3290 /* 3291 * @@@ JRI: Check for CMT 3292 */ 3293 if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) { 3294 /* 3295 * Strike the TSN if in fast-recovery and cum-ack 3296 * moved. 3297 */ 3298 #ifdef SCTP_FR_LOGGING 3299 sctp_log_fr(biggest_tsn_newly_acked, 3300 tp1->rec.data.TSN_seq, 3301 tp1->sent, 3302 SCTP_FR_LOG_STRIKE_CHUNK); 3303 #endif 3304 tp1->sent++; 3305 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3306 /* 3307 * CMT DAC algorithm: If SACK flag is set to 3308 * 0, then lowest_newack test will not pass 3309 * because it would have been set to the 3310 * cumack earlier. If not already to be 3311 * rtx'd, If not a mixed sack and if tp1 is 3312 * not between two sacked TSNs, then mark by 3313 * one more. 3314 */ 3315 if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3316 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) { 3317 #ifdef SCTP_FR_LOGGING 3318 sctp_log_fr(16 + num_dests_sacked, 3319 tp1->rec.data.TSN_seq, 3320 tp1->sent, 3321 SCTP_FR_LOG_STRIKE_CHUNK); 3322 #endif 3323 tp1->sent++; 3324 } 3325 } 3326 } else if (tp1->rec.data.doing_fast_retransmit) { 3327 /* 3328 * For those that have done a FR we must take 3329 * special consideration if we strike. I.e the 3330 * biggest_newly_acked must be higher than the 3331 * sending_seq at the time we did the FR. 3332 */ 3333 #ifdef SCTP_FR_TO_ALTERNATE 3334 /* 3335 * If FR's go to new networks, then we must only do 3336 * this for singly homed asoc's. However if the FR's 3337 * go to the same network (Armando's work) then its 3338 * ok to FR multiple times. 3339 */ 3340 if (asoc->numnets < 2) 3341 #else 3342 if (1) 3343 #endif 3344 { 3345 if ((compare_with_wrap(biggest_tsn_newly_acked, 3346 tp1->rec.data.fast_retran_tsn, MAX_TSN)) || 3347 (biggest_tsn_newly_acked == 3348 tp1->rec.data.fast_retran_tsn)) { 3349 /* 3350 * Strike the TSN, since this ack is 3351 * beyond where things were when we 3352 * did a FR. 3353 */ 3354 #ifdef SCTP_FR_LOGGING 3355 sctp_log_fr(biggest_tsn_newly_acked, 3356 tp1->rec.data.TSN_seq, 3357 tp1->sent, 3358 SCTP_FR_LOG_STRIKE_CHUNK); 3359 #endif 3360 tp1->sent++; 3361 strike_flag = 1; 3362 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3363 /* 3364 * CMT DAC algorithm: If 3365 * SACK flag is set to 0, 3366 * then lowest_newack test 3367 * will not pass because it 3368 * would have been set to 3369 * the cumack earlier. If 3370 * not already to be rtx'd, 3371 * If not a mixed sack and 3372 * if tp1 is not between two 3373 * sacked TSNs, then mark by 3374 * one more. 3375 */ 3376 if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3377 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) { 3378 #ifdef SCTP_FR_LOGGING 3379 sctp_log_fr(32 + num_dests_sacked, 3380 tp1->rec.data.TSN_seq, 3381 tp1->sent, 3382 SCTP_FR_LOG_STRIKE_CHUNK); 3383 #endif 3384 tp1->sent++; 3385 } 3386 } 3387 } 3388 } 3389 /* 3390 * @@@ JRI: TODO: remove code for HTNA algo. CMT's 3391 * SFR algo covers HTNA. 3392 */ 3393 } else if (compare_with_wrap(tp1->rec.data.TSN_seq, 3394 biggest_tsn_newly_acked, MAX_TSN)) { 3395 /* 3396 * We don't strike these: This is the HTNA 3397 * algorithm i.e. we don't strike If our TSN is 3398 * larger than the Highest TSN Newly Acked. 3399 */ 3400 ; 3401 } else { 3402 /* Strike the TSN */ 3403 #ifdef SCTP_FR_LOGGING 3404 sctp_log_fr(biggest_tsn_newly_acked, 3405 tp1->rec.data.TSN_seq, 3406 tp1->sent, 3407 SCTP_FR_LOG_STRIKE_CHUNK); 3408 #endif 3409 tp1->sent++; 3410 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3411 /* 3412 * CMT DAC algorithm: If SACK flag is set to 3413 * 0, then lowest_newack test will not pass 3414 * because it would have been set to the 3415 * cumack earlier. If not already to be 3416 * rtx'd, If not a mixed sack and if tp1 is 3417 * not between two sacked TSNs, then mark by 3418 * one more. 3419 */ 3420 if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3421 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) { 3422 #ifdef SCTP_FR_LOGGING 3423 sctp_log_fr(48 + num_dests_sacked, 3424 tp1->rec.data.TSN_seq, 3425 tp1->sent, 3426 SCTP_FR_LOG_STRIKE_CHUNK); 3427 #endif 3428 tp1->sent++; 3429 } 3430 } 3431 } 3432 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3433 /* Increment the count to resend */ 3434 struct sctp_nets *alt; 3435 3436 /* printf("OK, we are now ready to FR this guy\n"); */ 3437 #ifdef SCTP_FR_LOGGING 3438 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count, 3439 0, SCTP_FR_MARKED); 3440 #endif 3441 if (strike_flag) { 3442 /* This is a subsequent FR */ 3443 SCTP_STAT_INCR(sctps_sendmultfastretrans); 3444 } 3445 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 3446 3447 if (sctp_cmt_on_off) { 3448 /* 3449 * CMT: Using RTX_SSTHRESH policy for CMT. 3450 * If CMT is being used, then pick dest with 3451 * largest ssthresh for any retransmission. 3452 * (iyengar@cis.udel.edu, 2005/08/12) 3453 */ 3454 tp1->no_fr_allowed = 1; 3455 alt = tp1->whoTo; 3456 alt = sctp_find_alternate_net(stcb, alt, 1); 3457 /* 3458 * CUCv2: If a different dest is picked for 3459 * the retransmission, then new 3460 * (rtx-)pseudo_cumack needs to be tracked 3461 * for orig dest. Let CUCv2 track new (rtx-) 3462 * pseudo-cumack always. 3463 */ 3464 tp1->whoTo->find_pseudo_cumack = 1; 3465 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3466 3467 3468 } else {/* CMT is OFF */ 3469 3470 #ifdef SCTP_FR_TO_ALTERNATE 3471 /* Can we find an alternate? */ 3472 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); 3473 #else 3474 /* 3475 * default behavior is to NOT retransmit 3476 * FR's to an alternate. Armando Caro's 3477 * paper details why. 3478 */ 3479 alt = tp1->whoTo; 3480 #endif 3481 } 3482 3483 tp1->rec.data.doing_fast_retransmit = 1; 3484 tot_retrans++; 3485 /* mark the sending seq for possible subsequent FR's */ 3486 /* 3487 * printf("Marking TSN for FR new value %x\n", 3488 * (uint32_t)tpi->rec.data.TSN_seq); 3489 */ 3490 if (TAILQ_EMPTY(&asoc->send_queue)) { 3491 /* 3492 * If the queue of send is empty then its 3493 * the next sequence number that will be 3494 * assigned so we subtract one from this to 3495 * get the one we last sent. 3496 */ 3497 tp1->rec.data.fast_retran_tsn = sending_seq; 3498 } else { 3499 /* 3500 * If there are chunks on the send queue 3501 * (unsent data that has made it from the 3502 * stream queues but not out the door, we 3503 * take the first one (which will have the 3504 * lowest TSN) and subtract one to get the 3505 * one we last sent. 3506 */ 3507 struct sctp_tmit_chunk *ttt; 3508 3509 ttt = TAILQ_FIRST(&asoc->send_queue); 3510 tp1->rec.data.fast_retran_tsn = 3511 ttt->rec.data.TSN_seq; 3512 } 3513 3514 if (tp1->do_rtt) { 3515 /* 3516 * this guy had a RTO calculation pending on 3517 * it, cancel it 3518 */ 3519 tp1->whoTo->rto_pending = 0; 3520 tp1->do_rtt = 0; 3521 } 3522 /* fix counts and things */ 3523 3524 tp1->whoTo->net_ack++; 3525 if (tp1->whoTo->flight_size >= tp1->book_size) 3526 tp1->whoTo->flight_size -= tp1->book_size; 3527 else 3528 tp1->whoTo->flight_size = 0; 3529 3530 #ifdef SCTP_LOG_RWND 3531 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, 3532 asoc->peers_rwnd, tp1->send_size, sctp_peer_chunk_oh); 3533 #endif 3534 /* add back to the rwnd */ 3535 asoc->peers_rwnd += (tp1->send_size + sctp_peer_chunk_oh); 3536 3537 /* remove from the total flight */ 3538 if (asoc->total_flight >= tp1->book_size) { 3539 asoc->total_flight -= tp1->book_size; 3540 if (asoc->total_flight_count > 0) 3541 asoc->total_flight_count--; 3542 } else { 3543 asoc->total_flight = 0; 3544 asoc->total_flight_count = 0; 3545 } 3546 3547 3548 if (alt != tp1->whoTo) { 3549 /* yes, there is an alternate. */ 3550 sctp_free_remote_addr(tp1->whoTo); 3551 tp1->whoTo = alt; 3552 atomic_add_int(&alt->ref_count, 1); 3553 } 3554 } 3555 tp1 = TAILQ_NEXT(tp1, sctp_next); 3556 } /* while (tp1) */ 3557 3558 if (tot_retrans > 0) { 3559 /* 3560 * Setup the ecn nonce re-sync point. We do this since once 3561 * we go to FR something we introduce a Karn's rule scenario 3562 * and won't know the totals for the ECN bits. 3563 */ 3564 asoc->nonce_resync_tsn = sending_seq; 3565 asoc->nonce_wait_for_ecne = 0; 3566 asoc->nonce_sum_check = 0; 3567 } 3568 } 3569 3570 struct sctp_tmit_chunk * 3571 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb, 3572 struct sctp_association *asoc) 3573 { 3574 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL; 3575 struct timeval now; 3576 int now_filled = 0; 3577 3578 if (asoc->peer_supports_prsctp == 0) { 3579 return (NULL); 3580 } 3581 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3582 while (tp1) { 3583 if (tp1->sent != SCTP_FORWARD_TSN_SKIP && 3584 tp1->sent != SCTP_DATAGRAM_RESEND) { 3585 /* no chance to advance, out of here */ 3586 break; 3587 } 3588 if (!PR_SCTP_ENABLED(tp1->flags)) { 3589 /* 3590 * We can't fwd-tsn past any that are reliable aka 3591 * retransmitted until the asoc fails. 3592 */ 3593 break; 3594 } 3595 if (!now_filled) { 3596 SCTP_GETTIME_TIMEVAL(&now); 3597 now_filled = 1; 3598 } 3599 tp2 = TAILQ_NEXT(tp1, sctp_next); 3600 /* 3601 * now we got a chunk which is marked for another 3602 * retransmission to a PR-stream but has run out its chances 3603 * already maybe OR has been marked to skip now. Can we skip 3604 * it if its a resend? 3605 */ 3606 if (tp1->sent == SCTP_DATAGRAM_RESEND && 3607 (PR_SCTP_TTL_ENABLED(tp1->flags))) { 3608 /* 3609 * Now is this one marked for resend and its time is 3610 * now up? 3611 */ 3612 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3613 /* Yes so drop it */ 3614 if (tp1->data) { 3615 sctp_release_pr_sctp_chunk(stcb, tp1, 3616 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3617 &asoc->sent_queue); 3618 } 3619 } else { 3620 /* 3621 * No, we are done when hit one for resend 3622 * whos time as not expired. 3623 */ 3624 break; 3625 } 3626 } 3627 /* 3628 * Ok now if this chunk is marked to drop it we can clean up 3629 * the chunk, advance our peer ack point and we can check 3630 * the next chunk. 3631 */ 3632 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) { 3633 /* advance PeerAckPoint goes forward */ 3634 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq; 3635 a_adv = tp1; 3636 /* 3637 * we don't want to de-queue it here. Just wait for 3638 * the next peer SACK to come with a new cumTSN and 3639 * then the chunk will be droped in the normal 3640 * fashion. 3641 */ 3642 if (tp1->data) { 3643 sctp_free_bufspace(stcb, asoc, tp1, 1); 3644 /* 3645 * Maybe there should be another 3646 * notification type 3647 */ 3648 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, 3649 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3650 tp1); 3651 sctp_m_freem(tp1->data); 3652 tp1->data = NULL; 3653 if (stcb->sctp_socket) { 3654 sctp_sowwakeup(stcb->sctp_ep, 3655 stcb->sctp_socket); 3656 #ifdef SCTP_WAKE_LOGGING 3657 sctp_wakeup_log(stcb, tp1->rec.data.TSN_seq, 1, SCTP_WAKESND_FROM_FWDTSN); 3658 #endif 3659 } 3660 } 3661 } else { 3662 /* 3663 * If it is still in RESEND we can advance no 3664 * further 3665 */ 3666 break; 3667 } 3668 /* 3669 * If we hit here we just dumped tp1, move to next tsn on 3670 * sent queue. 3671 */ 3672 tp1 = tp2; 3673 } 3674 return (a_adv); 3675 } 3676 3677 #ifdef SCTP_HIGH_SPEED 3678 struct sctp_hs_raise_drop { 3679 int32_t cwnd; 3680 int32_t increase; 3681 int32_t drop_percent; 3682 }; 3683 3684 #define SCTP_HS_TABLE_SIZE 73 3685 3686 struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = { 3687 {38, 1, 50}, /* 0 */ 3688 {118, 2, 44}, /* 1 */ 3689 {221, 3, 41}, /* 2 */ 3690 {347, 4, 38}, /* 3 */ 3691 {495, 5, 37}, /* 4 */ 3692 {663, 6, 35}, /* 5 */ 3693 {851, 7, 34}, /* 6 */ 3694 {1058, 8, 33}, /* 7 */ 3695 {1284, 9, 32}, /* 8 */ 3696 {1529, 10, 31}, /* 9 */ 3697 {1793, 11, 30}, /* 10 */ 3698 {2076, 12, 29}, /* 11 */ 3699 {2378, 13, 28}, /* 12 */ 3700 {2699, 14, 28}, /* 13 */ 3701 {3039, 15, 27}, /* 14 */ 3702 {3399, 16, 27}, /* 15 */ 3703 {3778, 17, 26}, /* 16 */ 3704 {4177, 18, 26}, /* 17 */ 3705 {4596, 19, 25}, /* 18 */ 3706 {5036, 20, 25}, /* 19 */ 3707 {5497, 21, 24}, /* 20 */ 3708 {5979, 22, 24}, /* 21 */ 3709 {6483, 23, 23}, /* 22 */ 3710 {7009, 24, 23}, /* 23 */ 3711 {7558, 25, 22}, /* 24 */ 3712 {8130, 26, 22}, /* 25 */ 3713 {8726, 27, 22}, /* 26 */ 3714 {9346, 28, 21}, /* 27 */ 3715 {9991, 29, 21}, /* 28 */ 3716 {10661, 30, 21}, /* 29 */ 3717 {11358, 31, 20}, /* 30 */ 3718 {12082, 32, 20}, /* 31 */ 3719 {12834, 33, 20}, /* 32 */ 3720 {13614, 34, 19}, /* 33 */ 3721 {14424, 35, 19}, /* 34 */ 3722 {15265, 36, 19}, /* 35 */ 3723 {16137, 37, 19}, /* 36 */ 3724 {17042, 38, 18}, /* 37 */ 3725 {17981, 39, 18}, /* 38 */ 3726 {18955, 40, 18}, /* 39 */ 3727 {19965, 41, 17}, /* 40 */ 3728 {21013, 42, 17}, /* 41 */ 3729 {22101, 43, 17}, /* 42 */ 3730 {23230, 44, 17}, /* 43 */ 3731 {24402, 45, 16}, /* 44 */ 3732 {25618, 46, 16}, /* 45 */ 3733 {26881, 47, 16}, /* 46 */ 3734 {28193, 48, 16}, /* 47 */ 3735 {29557, 49, 15}, /* 48 */ 3736 {30975, 50, 15}, /* 49 */ 3737 {32450, 51, 15}, /* 50 */ 3738 {33986, 52, 15}, /* 51 */ 3739 {35586, 53, 14}, /* 52 */ 3740 {37253, 54, 14}, /* 53 */ 3741 {38992, 55, 14}, /* 54 */ 3742 {40808, 56, 14}, /* 55 */ 3743 {42707, 57, 13}, /* 56 */ 3744 {44694, 58, 13}, /* 57 */ 3745 {46776, 59, 13}, /* 58 */ 3746 {48961, 60, 13}, /* 59 */ 3747 {51258, 61, 13}, /* 60 */ 3748 {53677, 62, 12}, /* 61 */ 3749 {56230, 63, 12}, /* 62 */ 3750 {58932, 64, 12}, /* 63 */ 3751 {61799, 65, 12}, /* 64 */ 3752 {64851, 66, 11}, /* 65 */ 3753 {68113, 67, 11}, /* 66 */ 3754 {71617, 68, 11}, /* 67 */ 3755 {75401, 69, 10}, /* 68 */ 3756 {79517, 70, 10}, /* 69 */ 3757 {84035, 71, 10}, /* 70 */ 3758 {89053, 72, 10}, /* 71 */ 3759 {94717, 73, 9} /* 72 */ 3760 }; 3761 3762 static void 3763 sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net) 3764 { 3765 int cur_val, i, indx, incr; 3766 3767 cur_val = net->cwnd >> 10; 3768 indx = SCTP_HS_TABLE_SIZE - 1; 3769 3770 if (cur_val < sctp_cwnd_adjust[0].cwnd) { 3771 /* normal mode */ 3772 if (net->net_ack > net->mtu) { 3773 net->cwnd += net->mtu; 3774 #ifdef SCTP_CWND_MONITOR 3775 sctp_log_cwnd(stcb, net, net->mtu, SCTP_CWND_LOG_FROM_SS); 3776 #endif 3777 } else { 3778 net->cwnd += net->net_ack; 3779 #ifdef SCTP_CWND_MONITOR 3780 sctp_log_cwnd(stcb, net, net->net_ack, SCTP_CWND_LOG_FROM_SS); 3781 #endif 3782 } 3783 } else { 3784 for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) { 3785 if (cur_val < sctp_cwnd_adjust[i].cwnd) { 3786 indx = i; 3787 break; 3788 } 3789 } 3790 net->last_hs_used = indx; 3791 incr = ((sctp_cwnd_adjust[indx].increase) << 10); 3792 net->cwnd += incr; 3793 #ifdef SCTP_CWND_MONITOR 3794 sctp_log_cwnd(stcb, net, incr, SCTP_CWND_LOG_FROM_SS); 3795 #endif 3796 } 3797 } 3798 3799 static void 3800 sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net) 3801 { 3802 int cur_val, i, indx; 3803 3804 #ifdef SCTP_CWND_MONITOR 3805 int old_cwnd = net->cwnd; 3806 3807 #endif 3808 3809 cur_val = net->cwnd >> 10; 3810 indx = net->last_hs_used; 3811 if (cur_val < sctp_cwnd_adjust[0].cwnd) { 3812 /* normal mode */ 3813 net->ssthresh = net->cwnd / 2; 3814 if (net->ssthresh < (net->mtu * 2)) { 3815 net->ssthresh = 2 * net->mtu; 3816 } 3817 net->cwnd = net->ssthresh; 3818 } else { 3819 /* drop by the proper amount */ 3820 net->ssthresh = net->cwnd - (int)((net->cwnd / 100) * 3821 sctp_cwnd_adjust[net->last_hs_used].drop_percent); 3822 net->cwnd = net->ssthresh; 3823 /* now where are we */ 3824 indx = net->last_hs_used; 3825 cur_val = net->cwnd >> 10; 3826 /* reset where we are in the table */ 3827 if (cur_val < sctp_cwnd_adjust[0].cwnd) { 3828 /* feel out of hs */ 3829 net->last_hs_used = 0; 3830 } else { 3831 for (i = indx; i >= 1; i--) { 3832 if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) { 3833 break; 3834 } 3835 } 3836 net->last_hs_used = indx; 3837 } 3838 } 3839 #ifdef SCTP_CWND_MONITOR 3840 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR); 3841 #endif 3842 3843 } 3844 3845 #endif 3846 3847 extern int sctp_early_fr; 3848 extern int sctp_L2_abc_variable; 3849 3850 3851 static __inline void 3852 sctp_cwnd_update(struct sctp_tcb *stcb, 3853 struct sctp_association *asoc, 3854 int accum_moved, int reneged_all, int will_exit) 3855 { 3856 struct sctp_nets *net; 3857 3858 /******************************/ 3859 /* update cwnd and Early FR */ 3860 /******************************/ 3861 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3862 #ifdef JANA_CODE_WHY_THIS 3863 /* 3864 * CMT fast recovery code. Need to debug. 3865 */ 3866 if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) { 3867 if (compare_with_wrap(asoc->last_acked_seq, 3868 net->fast_recovery_tsn, MAX_TSN) || 3869 (asoc->last_acked_seq == net->fast_recovery_tsn) || 3870 compare_with_wrap(net->pseudo_cumack, net->fast_recovery_tsn, MAX_TSN) || 3871 (net->pseudo_cumack == net->fast_recovery_tsn)) { 3872 net->will_exit_fast_recovery = 1; 3873 } 3874 } 3875 #endif 3876 if (sctp_early_fr) { 3877 /* 3878 * So, first of all do we need to have a Early FR 3879 * timer running? 3880 */ 3881 if (((TAILQ_FIRST(&asoc->sent_queue)) && 3882 (net->ref_count > 1) && 3883 (net->flight_size < net->cwnd)) || 3884 (reneged_all)) { 3885 /* 3886 * yes, so in this case stop it if its 3887 * running, and then restart it. Reneging 3888 * all is a special case where we want to 3889 * run the Early FR timer and then force the 3890 * last few unacked to be sent, causing us 3891 * to illicit a sack with gaps to force out 3892 * the others. 3893 */ 3894 if (callout_pending(&net->fr_timer.timer)) { 3895 SCTP_STAT_INCR(sctps_earlyfrstpidsck2); 3896 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net); 3897 } 3898 SCTP_STAT_INCR(sctps_earlyfrstrid); 3899 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net); 3900 } else { 3901 /* No, stop it if its running */ 3902 if (callout_pending(&net->fr_timer.timer)) { 3903 SCTP_STAT_INCR(sctps_earlyfrstpidsck3); 3904 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net); 3905 } 3906 } 3907 } 3908 /* if nothing was acked on this destination skip it */ 3909 if (net->net_ack == 0) { 3910 #ifdef SCTP_CWND_LOGGING 3911 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK); 3912 #endif 3913 continue; 3914 } 3915 if (net->net_ack2 > 0) { 3916 /* 3917 * Karn's rule applies to clearing error count, this 3918 * is optional. 3919 */ 3920 net->error_count = 0; 3921 if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) == 3922 SCTP_ADDR_NOT_REACHABLE) { 3923 /* addr came good */ 3924 net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE; 3925 net->dest_state |= SCTP_ADDR_REACHABLE; 3926 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 3927 SCTP_RECEIVED_SACK, (void *)net); 3928 /* now was it the primary? if so restore */ 3929 if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) { 3930 sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net); 3931 } 3932 } 3933 } 3934 #ifdef JANA_CODE_WHY_THIS 3935 /* 3936 * Cannot skip for CMT. Need to come back and check these 3937 * variables for CMT. CMT fast recovery code. Need to debug. 3938 */ 3939 if (sctp_cmt_on_off == 1 && 3940 net->fast_retran_loss_recovery && 3941 net->will_exit_fast_recovery == 0) 3942 #endif 3943 if (sctp_cmt_on_off == 0 && asoc->fast_retran_loss_recovery && will_exit == 0) { 3944 /* 3945 * If we are in loss recovery we skip any 3946 * cwnd update 3947 */ 3948 goto skip_cwnd_update; 3949 } 3950 /* 3951 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has 3952 * moved. 3953 */ 3954 if (accum_moved || (sctp_cmt_on_off && net->new_pseudo_cumack)) { 3955 /* If the cumulative ack moved we can proceed */ 3956 if (net->cwnd <= net->ssthresh) { 3957 /* We are in slow start */ 3958 if (net->flight_size + net->net_ack >= 3959 net->cwnd) { 3960 #ifdef SCTP_HIGH_SPEED 3961 sctp_hs_cwnd_increase(stcb, net); 3962 #else 3963 if (net->net_ack > (net->mtu * sctp_L2_abc_variable)) { 3964 net->cwnd += (net->mtu * sctp_L2_abc_variable); 3965 #ifdef SCTP_CWND_MONITOR 3966 sctp_log_cwnd(stcb, net, net->mtu, 3967 SCTP_CWND_LOG_FROM_SS); 3968 #endif 3969 3970 } else { 3971 net->cwnd += net->net_ack; 3972 #ifdef SCTP_CWND_MONITOR 3973 sctp_log_cwnd(stcb, net, net->net_ack, 3974 SCTP_CWND_LOG_FROM_SS); 3975 #endif 3976 3977 } 3978 #endif 3979 } else { 3980 unsigned int dif; 3981 3982 dif = net->cwnd - (net->flight_size + 3983 net->net_ack); 3984 #ifdef SCTP_CWND_LOGGING 3985 sctp_log_cwnd(stcb, net, net->net_ack, 3986 SCTP_CWND_LOG_NOADV_SS); 3987 #endif 3988 } 3989 } else { 3990 /* We are in congestion avoidance */ 3991 if (net->flight_size + net->net_ack >= 3992 net->cwnd) { 3993 /* 3994 * add to pba only if we had a 3995 * cwnd's worth (or so) in flight OR 3996 * the burst limit was applied. 3997 */ 3998 net->partial_bytes_acked += 3999 net->net_ack; 4000 4001 /* 4002 * Do we need to increase (if pba is 4003 * > cwnd)? 4004 */ 4005 if (net->partial_bytes_acked >= 4006 net->cwnd) { 4007 if (net->cwnd < 4008 net->partial_bytes_acked) { 4009 net->partial_bytes_acked -= 4010 net->cwnd; 4011 } else { 4012 net->partial_bytes_acked = 4013 0; 4014 } 4015 net->cwnd += net->mtu; 4016 #ifdef SCTP_CWND_MONITOR 4017 sctp_log_cwnd(stcb, net, net->mtu, 4018 SCTP_CWND_LOG_FROM_CA); 4019 #endif 4020 } 4021 #ifdef SCTP_CWND_LOGGING 4022 else { 4023 sctp_log_cwnd(stcb, net, net->net_ack, 4024 SCTP_CWND_LOG_NOADV_CA); 4025 } 4026 #endif 4027 } else { 4028 unsigned int dif; 4029 4030 #ifdef SCTP_CWND_LOGGING 4031 sctp_log_cwnd(stcb, net, net->net_ack, 4032 SCTP_CWND_LOG_NOADV_CA); 4033 #endif 4034 dif = net->cwnd - (net->flight_size + 4035 net->net_ack); 4036 } 4037 } 4038 } else { 4039 #ifdef SCTP_CWND_LOGGING 4040 sctp_log_cwnd(stcb, net, net->mtu, 4041 SCTP_CWND_LOG_NO_CUMACK); 4042 #endif 4043 } 4044 skip_cwnd_update: 4045 /* 4046 * NOW, according to Karn's rule do we need to restore the 4047 * RTO timer back? Check our net_ack2. If not set then we 4048 * have a ambiguity.. i.e. all data ack'd was sent to more 4049 * than one place. 4050 */ 4051 if (net->net_ack2) { 4052 /* restore any doubled timers */ 4053 net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1; 4054 if (net->RTO < stcb->asoc.minrto) { 4055 net->RTO = stcb->asoc.minrto; 4056 } 4057 if (net->RTO > stcb->asoc.maxrto) { 4058 net->RTO = stcb->asoc.maxrto; 4059 } 4060 } 4061 } 4062 } 4063 4064 4065 void 4066 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, 4067 uint32_t rwnd, int nonce_sum_flag, int *abort_now) 4068 { 4069 struct sctp_nets *net; 4070 struct sctp_association *asoc; 4071 struct sctp_tmit_chunk *tp1, *tp2; 4072 4073 SCTP_TCB_LOCK_ASSERT(stcb); 4074 asoc = &stcb->asoc; 4075 /* First setup for CC stuff */ 4076 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4077 net->prev_cwnd = net->cwnd; 4078 net->net_ack = 0; 4079 net->net_ack2 = 0; 4080 } 4081 asoc->this_sack_highest_gap = cumack; 4082 stcb->asoc.overall_error_count = 0; 4083 /* process the new consecutive TSN first */ 4084 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4085 while (tp1) { 4086 tp2 = TAILQ_NEXT(tp1, sctp_next); 4087 if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq, 4088 MAX_TSN) || 4089 cumack == tp1->rec.data.TSN_seq) { 4090 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 4091 /* 4092 * ECN Nonce: Add the nonce to the sender's 4093 * nonce sum 4094 */ 4095 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce; 4096 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4097 /* 4098 * If it is less than ACKED, it is 4099 * now no-longer in flight. Higher 4100 * values may occur during marking 4101 */ 4102 if (tp1->rec.data.chunk_was_revoked == 1) { 4103 /* 4104 * If its been revoked, and 4105 * now ack'd we do NOT take 4106 * away fs etc. since when 4107 * it is retransmitted we 4108 * clear this flag. 4109 */ 4110 goto skip_fs_update; 4111 } 4112 if (tp1->whoTo->flight_size >= tp1->book_size) { 4113 tp1->whoTo->flight_size -= tp1->book_size; 4114 } else { 4115 tp1->whoTo->flight_size = 0; 4116 } 4117 if (asoc->total_flight >= tp1->book_size) { 4118 asoc->total_flight -= tp1->book_size; 4119 if (asoc->total_flight_count > 0) 4120 asoc->total_flight_count--; 4121 } else { 4122 asoc->total_flight = 0; 4123 asoc->total_flight_count = 0; 4124 } 4125 tp1->whoTo->net_ack += tp1->send_size; 4126 if (tp1->snd_count < 2) { 4127 /* 4128 * True non-retransmited 4129 * chunk 4130 */ 4131 tp1->whoTo->net_ack2 += 4132 tp1->send_size; 4133 4134 /* update RTO too? */ 4135 if ((tp1->do_rtt) && (tp1->whoTo->rto_pending)) { 4136 tp1->whoTo->RTO = 4137 sctp_calculate_rto(stcb, 4138 asoc, tp1->whoTo, 4139 &tp1->sent_rcv_time); 4140 tp1->whoTo->rto_pending = 0; 4141 tp1->do_rtt = 0; 4142 } 4143 } 4144 #ifdef SCTP_CWND_LOGGING 4145 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 4146 #endif 4147 } 4148 skip_fs_update: 4149 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4150 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4151 } 4152 tp1->sent = SCTP_DATAGRAM_ACKED; 4153 } 4154 } else { 4155 break; 4156 } 4157 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4158 if (tp1->data) { 4159 sctp_free_bufspace(stcb, asoc, tp1, 1); 4160 sctp_m_freem(tp1->data); 4161 } 4162 #ifdef SCTP_SACK_LOGGING 4163 sctp_log_sack(asoc->last_acked_seq, 4164 cumack, 4165 tp1->rec.data.TSN_seq, 4166 0, 4167 0, 4168 SCTP_LOG_FREE_SENT); 4169 #endif 4170 tp1->data = NULL; 4171 asoc->sent_queue_cnt--; 4172 sctp_free_remote_addr(tp1->whoTo); 4173 sctp_free_a_chunk(stcb, tp1); 4174 tp1 = tp2; 4175 } 4176 if (stcb->sctp_socket) { 4177 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4178 #ifdef SCTP_WAKE_LOGGING 4179 sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK); 4180 #endif 4181 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4182 #ifdef SCTP_WAKE_LOGGING 4183 } else { 4184 sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK); 4185 #endif 4186 } 4187 4188 if (asoc->last_acked_seq != cumack) 4189 sctp_cwnd_update(stcb, asoc, 1, 0, 0); 4190 asoc->last_acked_seq = cumack; 4191 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4192 /* nothing left in-flight */ 4193 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4194 net->flight_size = 0; 4195 net->partial_bytes_acked = 0; 4196 } 4197 asoc->total_flight = 0; 4198 asoc->total_flight_count = 0; 4199 } 4200 /* Fix up the a-p-a-p for future PR-SCTP sends */ 4201 if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) { 4202 asoc->advanced_peer_ack_point = cumack; 4203 } 4204 /* ECN Nonce updates */ 4205 if (asoc->ecn_nonce_allowed) { 4206 if (asoc->nonce_sum_check) { 4207 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) { 4208 if (asoc->nonce_wait_for_ecne == 0) { 4209 struct sctp_tmit_chunk *lchk; 4210 4211 lchk = TAILQ_FIRST(&asoc->send_queue); 4212 asoc->nonce_wait_for_ecne = 1; 4213 if (lchk) { 4214 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq; 4215 } else { 4216 asoc->nonce_wait_tsn = asoc->sending_seq; 4217 } 4218 } else { 4219 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) || 4220 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) { 4221 /* 4222 * Misbehaving peer. We need 4223 * to react to this guy 4224 */ 4225 asoc->ecn_allowed = 0; 4226 asoc->ecn_nonce_allowed = 0; 4227 } 4228 } 4229 } 4230 } else { 4231 /* See if Resynchronization Possible */ 4232 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) { 4233 asoc->nonce_sum_check = 1; 4234 /* 4235 * now we must calculate what the base is. 4236 * We do this based on two things, we know 4237 * the total's for all the segments 4238 * gap-acked in the SACK (none), We also 4239 * know the SACK's nonce sum, its in 4240 * nonce_sum_flag. So we can build a truth 4241 * table to back-calculate the new value of 4242 * asoc->nonce_sum_expect_base: 4243 * 4244 * SACK-flag-Value Seg-Sums Base 0 0 0 4245 * 1 0 1 0 1 1 1 4246 * 1 0 4247 */ 4248 asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM; 4249 } 4250 } 4251 } 4252 /* RWND update */ 4253 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 4254 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh))); 4255 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4256 /* SWS sender side engages */ 4257 asoc->peers_rwnd = 0; 4258 } 4259 /* Now assure a timer where data is queued at */ 4260 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4261 if (net->flight_size) { 4262 int to_ticks; 4263 4264 if (net->RTO == 0) { 4265 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 4266 } else { 4267 to_ticks = MSEC_TO_TICKS(net->RTO); 4268 } 4269 callout_reset(&net->rxt_timer.timer, to_ticks, 4270 sctp_timeout_handler, &net->rxt_timer); 4271 } else { 4272 if (callout_pending(&net->rxt_timer.timer)) { 4273 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4274 stcb, net); 4275 } 4276 if (sctp_early_fr) { 4277 if (callout_pending(&net->fr_timer.timer)) { 4278 SCTP_STAT_INCR(sctps_earlyfrstpidsck4); 4279 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net); 4280 } 4281 } 4282 } 4283 } 4284 4285 /**********************************/ 4286 /* Now what about shutdown issues */ 4287 /**********************************/ 4288 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4289 /* nothing left on sendqueue.. consider done */ 4290 /* clean up */ 4291 if ((asoc->stream_queue_cnt == 1) && 4292 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4293 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 4294 (asoc->locked_on_sending) 4295 ) { 4296 struct sctp_stream_queue_pending *sp; 4297 4298 /* 4299 * I may be in a state where we got all across.. but 4300 * cannot write more due to a shutdown... we abort 4301 * since the user did not indicate EOR in this case. 4302 * The sp will be cleaned during free of the asoc. 4303 */ 4304 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), 4305 sctp_streamhead); 4306 if ((sp) && (sp->length == 0) && (sp->msg_is_complete == 0)) { 4307 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4308 asoc->locked_on_sending = NULL; 4309 asoc->stream_queue_cnt--; 4310 } 4311 } 4312 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4313 (asoc->stream_queue_cnt == 0)) { 4314 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4315 /* Need to abort here */ 4316 struct mbuf *oper; 4317 4318 abort_out_now: 4319 *abort_now = 1; 4320 /* XXX */ 4321 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4322 0, M_DONTWAIT, 1, MT_DATA); 4323 if (oper) { 4324 struct sctp_paramhdr *ph; 4325 uint32_t *ippp; 4326 4327 oper->m_len = sizeof(struct sctp_paramhdr) + 4328 sizeof(uint32_t); 4329 ph = mtod(oper, struct sctp_paramhdr *); 4330 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 4331 ph->param_length = htons(oper->m_len); 4332 ippp = (uint32_t *) (ph + 1); 4333 *ippp = htonl(0x30000003); 4334 } 4335 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper); 4336 } else { 4337 asoc->state = SCTP_STATE_SHUTDOWN_SENT; 4338 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4339 sctp_stop_timers_for_shutdown(stcb); 4340 sctp_send_shutdown(stcb, 4341 stcb->asoc.primary_destination); 4342 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4343 stcb->sctp_ep, stcb, asoc->primary_destination); 4344 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4345 stcb->sctp_ep, stcb, asoc->primary_destination); 4346 } 4347 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4348 (asoc->stream_queue_cnt == 0)) { 4349 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4350 goto abort_out_now; 4351 } 4352 asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT; 4353 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4354 sctp_send_shutdown_ack(stcb, 4355 stcb->asoc.primary_destination); 4356 4357 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4358 stcb->sctp_ep, stcb, asoc->primary_destination); 4359 } 4360 } 4361 #ifdef SCTP_SACK_RWND_LOGGING 4362 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 4363 rwnd, 4364 stcb->asoc.peers_rwnd, 4365 stcb->asoc.total_flight, 4366 stcb->asoc.total_output_queue_size); 4367 4368 #endif 4369 } 4370 4371 4372 4373 void 4374 sctp_handle_sack(struct sctp_sack_chunk *ch, struct sctp_tcb *stcb, 4375 struct sctp_nets *net_from, int *abort_now) 4376 { 4377 struct sctp_association *asoc; 4378 struct sctp_sack *sack; 4379 struct sctp_tmit_chunk *tp1, *tp2; 4380 uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, 4381 this_sack_lowest_newack; 4382 uint16_t num_seg, num_dup; 4383 uint16_t wake_him = 0; 4384 unsigned int sack_length; 4385 uint32_t send_s; 4386 long j; 4387 int accum_moved = 0; 4388 int will_exit_fast_recovery = 0; 4389 uint32_t a_rwnd; 4390 struct sctp_nets *net = NULL; 4391 int nonce_sum_flag, ecn_seg_sums = 0; 4392 uint8_t reneged_all = 0; 4393 uint8_t cmt_dac_flag; 4394 4395 /* 4396 * we take any chance we can to service our queues since we cannot 4397 * get awoken when the socket is read from :< 4398 */ 4399 /* 4400 * Now perform the actual SACK handling: 1) Verify that it is not an 4401 * old sack, if so discard. 2) If there is nothing left in the send 4402 * queue (cum-ack is equal to last acked) then you have a duplicate 4403 * too, update any rwnd change and verify no timers are running. 4404 * then return. 3) Process any new consequtive data i.e. cum-ack 4405 * moved process these first and note that it moved. 4) Process any 4406 * sack blocks. 5) Drop any acked from the queue. 6) Check for any 4407 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left, 4408 * sync up flightsizes and things, stop all timers and also check 4409 * for shutdown_pending state. If so then go ahead and send off the 4410 * shutdown. If in shutdown recv, send off the shutdown-ack and 4411 * start that timer, Ret. 9) Strike any non-acked things and do FR 4412 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp 4413 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK 4414 * if in shutdown_recv state. 4415 */ 4416 SCTP_TCB_LOCK_ASSERT(stcb); 4417 sack = &ch->sack; 4418 /* CMT DAC algo */ 4419 this_sack_lowest_newack = 0; 4420 j = 0; 4421 sack_length = ntohs(ch->ch.chunk_length); 4422 if (sack_length < sizeof(struct sctp_sack_chunk)) { 4423 #ifdef SCTP_DEBUG 4424 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 4425 printf("Bad size on sack chunk .. to small\n"); 4426 } 4427 #endif 4428 return; 4429 } 4430 /* ECN Nonce */ 4431 SCTP_STAT_INCR(sctps_slowpath_sack); 4432 nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM; 4433 cum_ack = last_tsn = ntohl(sack->cum_tsn_ack); 4434 num_seg = ntohs(sack->num_gap_ack_blks); 4435 a_rwnd = (uint32_t) ntohl(sack->a_rwnd); 4436 4437 /* CMT DAC algo */ 4438 cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC; 4439 num_dup = ntohs(sack->num_dup_tsns); 4440 4441 4442 stcb->asoc.overall_error_count = 0; 4443 asoc = &stcb->asoc; 4444 #ifdef SCTP_SACK_LOGGING 4445 sctp_log_sack(asoc->last_acked_seq, 4446 cum_ack, 4447 0, 4448 num_seg, 4449 num_dup, 4450 SCTP_LOG_NEW_SACK); 4451 #endif 4452 #if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING) 4453 if (num_dup) { 4454 int off_to_dup, iii; 4455 uint32_t *dupdata; 4456 4457 off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk); 4458 if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= sack_length) { 4459 dupdata = (uint32_t *) ((caddr_t)ch + off_to_dup); 4460 for (iii = 0; iii < num_dup; iii++) { 4461 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); 4462 dupdata++; 4463 4464 } 4465 } else { 4466 printf("Size invalid offset to dups:%d number dups:%d sack_len:%d num gaps:%d\n", 4467 off_to_dup, num_dup, sack_length, num_seg); 4468 } 4469 } 4470 #endif 4471 /* reality check */ 4472 if (TAILQ_EMPTY(&asoc->send_queue)) { 4473 send_s = asoc->sending_seq; 4474 } else { 4475 tp1 = TAILQ_FIRST(&asoc->send_queue); 4476 send_s = tp1->rec.data.TSN_seq; 4477 } 4478 4479 if (sctp_strict_sacks) { 4480 if (cum_ack == send_s || 4481 compare_with_wrap(cum_ack, send_s, MAX_TSN)) { 4482 struct mbuf *oper; 4483 4484 /* 4485 * no way, we have not even sent this TSN out yet. 4486 * Peer is hopelessly messed up with us. 4487 */ 4488 hopeless_peer: 4489 *abort_now = 1; 4490 /* XXX */ 4491 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4492 0, M_DONTWAIT, 1, MT_DATA); 4493 if (oper) { 4494 struct sctp_paramhdr *ph; 4495 uint32_t *ippp; 4496 4497 oper->m_len = sizeof(struct sctp_paramhdr) + 4498 sizeof(uint32_t); 4499 ph = mtod(oper, struct sctp_paramhdr *); 4500 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 4501 ph->param_length = htons(oper->m_len); 4502 ippp = (uint32_t *) (ph + 1); 4503 *ippp = htonl(0x30000002); 4504 } 4505 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper); 4506 return; 4507 } 4508 } 4509 /**********************/ 4510 /* 1) check the range */ 4511 /**********************/ 4512 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) { 4513 /* acking something behind */ 4514 return; 4515 } 4516 /* update the Rwnd of the peer */ 4517 if (TAILQ_EMPTY(&asoc->sent_queue) && 4518 TAILQ_EMPTY(&asoc->send_queue) && 4519 (asoc->stream_queue_cnt == 0) 4520 ) { 4521 /* nothing left on send/sent and strmq */ 4522 #ifdef SCTP_LOG_RWND 4523 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4524 asoc->peers_rwnd, 0, 0, a_rwnd); 4525 #endif 4526 asoc->peers_rwnd = a_rwnd; 4527 if (asoc->sent_queue_retran_cnt) { 4528 asoc->sent_queue_retran_cnt = 0; 4529 } 4530 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4531 /* SWS sender side engages */ 4532 asoc->peers_rwnd = 0; 4533 } 4534 /* stop any timers */ 4535 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4536 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4537 stcb, net); 4538 if (sctp_early_fr) { 4539 if (callout_pending(&net->fr_timer.timer)) { 4540 SCTP_STAT_INCR(sctps_earlyfrstpidsck1); 4541 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net); 4542 } 4543 } 4544 net->partial_bytes_acked = 0; 4545 net->flight_size = 0; 4546 } 4547 asoc->total_flight = 0; 4548 asoc->total_flight_count = 0; 4549 return; 4550 } 4551 /* 4552 * We init netAckSz and netAckSz2 to 0. These are used to track 2 4553 * things. The total byte count acked is tracked in netAckSz AND 4554 * netAck2 is used to track the total bytes acked that are un- 4555 * amibguious and were never retransmitted. We track these on a per 4556 * destination address basis. 4557 */ 4558 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4559 net->prev_cwnd = net->cwnd; 4560 net->net_ack = 0; 4561 net->net_ack2 = 0; 4562 4563 /* 4564 * CMT: Reset CUC algo variable before SACK processing 4565 */ 4566 net->new_pseudo_cumack = 0; 4567 net->will_exit_fast_recovery = 0; 4568 } 4569 /* process the new consecutive TSN first */ 4570 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4571 while (tp1) { 4572 if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq, 4573 MAX_TSN) || 4574 last_tsn == tp1->rec.data.TSN_seq) { 4575 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 4576 /* 4577 * ECN Nonce: Add the nonce to the sender's 4578 * nonce sum 4579 */ 4580 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce; 4581 accum_moved = 1; 4582 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4583 /* 4584 * If it is less than ACKED, it is 4585 * now no-longer in flight. Higher 4586 * values may occur during marking 4587 */ 4588 if ((tp1->whoTo->dest_state & 4589 SCTP_ADDR_UNCONFIRMED) && 4590 (tp1->snd_count < 2)) { 4591 /* 4592 * If there was no retran 4593 * and the address is 4594 * un-confirmed and we sent 4595 * there and are now 4596 * sacked.. its confirmed, 4597 * mark it so. 4598 */ 4599 tp1->whoTo->dest_state &= 4600 ~SCTP_ADDR_UNCONFIRMED; 4601 } 4602 if (tp1->rec.data.chunk_was_revoked == 1) { 4603 /* 4604 * If its been revoked, and 4605 * now ack'd we do NOT take 4606 * away fs etc. since when 4607 * it is retransmitted we 4608 * clear this flag. 4609 */ 4610 goto skip_fs_update; 4611 } 4612 if (tp1->whoTo->flight_size >= tp1->book_size) { 4613 tp1->whoTo->flight_size -= tp1->book_size; 4614 } else { 4615 tp1->whoTo->flight_size = 0; 4616 } 4617 if (asoc->total_flight >= tp1->book_size) { 4618 asoc->total_flight -= tp1->book_size; 4619 if (asoc->total_flight_count > 0) 4620 asoc->total_flight_count--; 4621 } else { 4622 asoc->total_flight = 0; 4623 asoc->total_flight_count = 0; 4624 } 4625 tp1->whoTo->net_ack += tp1->send_size; 4626 4627 /* CMT SFR and DAC algos */ 4628 this_sack_lowest_newack = tp1->rec.data.TSN_seq; 4629 tp1->whoTo->saw_newack = 1; 4630 4631 if (tp1->snd_count < 2) { 4632 /* 4633 * True non-retransmited 4634 * chunk 4635 */ 4636 tp1->whoTo->net_ack2 += 4637 tp1->send_size; 4638 4639 /* update RTO too? */ 4640 if (tp1->do_rtt) { 4641 tp1->whoTo->RTO = 4642 sctp_calculate_rto(stcb, 4643 asoc, tp1->whoTo, 4644 &tp1->sent_rcv_time); 4645 tp1->whoTo->rto_pending = 0; 4646 tp1->do_rtt = 0; 4647 } 4648 } 4649 skip_fs_update: 4650 /* 4651 * CMT: CUCv2 algorithm. From the 4652 * cumack'd TSNs, for each TSN being 4653 * acked for the first time, set the 4654 * following variables for the 4655 * corresp destination. 4656 * new_pseudo_cumack will trigger a 4657 * cwnd update. 4658 * find_(rtx_)pseudo_cumack will 4659 * trigger search for the next 4660 * expected (rtx-)pseudo-cumack. 4661 */ 4662 tp1->whoTo->new_pseudo_cumack = 1; 4663 tp1->whoTo->find_pseudo_cumack = 1; 4664 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4665 4666 4667 #ifdef SCTP_SACK_LOGGING 4668 sctp_log_sack(asoc->last_acked_seq, 4669 cum_ack, 4670 tp1->rec.data.TSN_seq, 4671 0, 4672 0, 4673 SCTP_LOG_TSN_ACKED); 4674 #endif 4675 #ifdef SCTP_CWND_LOGGING 4676 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 4677 #endif 4678 } 4679 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4680 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4681 #ifdef SCTP_AUDITING_ENABLED 4682 sctp_audit_log(0xB3, 4683 (asoc->sent_queue_retran_cnt & 0x000000ff)); 4684 #endif 4685 } 4686 tp1->sent = SCTP_DATAGRAM_ACKED; 4687 } 4688 } else { 4689 break; 4690 } 4691 tp1 = TAILQ_NEXT(tp1, sctp_next); 4692 } 4693 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; 4694 /* always set this up to cum-ack */ 4695 asoc->this_sack_highest_gap = last_tsn; 4696 4697 if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) { 4698 4699 /* skip corrupt segments */ 4700 goto skip_segments; 4701 } 4702 if (num_seg > 0) { 4703 4704 /* 4705 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has 4706 * to be greater than the cumack. Also reset saw_newack to 0 4707 * for all dests. 4708 */ 4709 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4710 net->saw_newack = 0; 4711 net->this_sack_highest_newack = last_tsn; 4712 } 4713 4714 /* 4715 * thisSackHighestGap will increase while handling NEW 4716 * segments this_sack_highest_newack will increase while 4717 * handling NEWLY ACKED chunks. this_sack_lowest_newack is 4718 * used for CMT DAC algo. saw_newack will also change. 4719 */ 4720 sctp_handle_segments(stcb, asoc, ch, last_tsn, 4721 &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack, 4722 num_seg, &ecn_seg_sums); 4723 4724 if (sctp_strict_sacks) { 4725 /* 4726 * validate the biggest_tsn_acked in the gap acks if 4727 * strict adherence is wanted. 4728 */ 4729 if ((biggest_tsn_acked == send_s) || 4730 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) { 4731 /* 4732 * peer is either confused or we are under 4733 * attack. We must abort. 4734 */ 4735 goto hopeless_peer; 4736 } 4737 } 4738 } 4739 skip_segments: 4740 /*******************************************/ 4741 /* cancel ALL T3-send timer if accum moved */ 4742 /*******************************************/ 4743 if (sctp_cmt_on_off) { 4744 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4745 if (net->new_pseudo_cumack) 4746 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4747 stcb, net); 4748 4749 } 4750 } else { 4751 if (accum_moved) { 4752 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4753 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4754 stcb, net); 4755 } 4756 } 4757 } 4758 /********************************************/ 4759 /* drop the acked chunks from the sendqueue */ 4760 /********************************************/ 4761 asoc->last_acked_seq = cum_ack; 4762 4763 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4764 if (tp1 == NULL) 4765 goto done_with_it; 4766 do { 4767 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack, 4768 MAX_TSN)) { 4769 break; 4770 } 4771 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 4772 /* no more sent on list */ 4773 break; 4774 } 4775 tp2 = TAILQ_NEXT(tp1, sctp_next); 4776 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4777 /* 4778 * Friendlier printf in lieu of panic now that I think its 4779 * fixed 4780 */ 4781 4782 if (tp1->pr_sctp_on) { 4783 if (asoc->pr_sctp_cnt != 0) 4784 asoc->pr_sctp_cnt--; 4785 } 4786 if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) && 4787 (asoc->total_flight > 0)) { 4788 printf("Warning flight size incorrect should be 0 is %d\n", 4789 asoc->total_flight); 4790 asoc->total_flight = 0; 4791 } 4792 if (tp1->data) { 4793 sctp_free_bufspace(stcb, asoc, tp1, 1); 4794 sctp_m_freem(tp1->data); 4795 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 4796 asoc->sent_queue_cnt_removeable--; 4797 } 4798 } 4799 #ifdef SCTP_SACK_LOGGING 4800 sctp_log_sack(asoc->last_acked_seq, 4801 cum_ack, 4802 tp1->rec.data.TSN_seq, 4803 0, 4804 0, 4805 SCTP_LOG_FREE_SENT); 4806 #endif 4807 tp1->data = NULL; 4808 asoc->sent_queue_cnt--; 4809 sctp_free_remote_addr(tp1->whoTo); 4810 4811 sctp_free_a_chunk(stcb, tp1); 4812 wake_him++; 4813 tp1 = tp2; 4814 } while (tp1 != NULL); 4815 4816 done_with_it: 4817 if ((wake_him) && (stcb->sctp_socket)) { 4818 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4819 #ifdef SCTP_WAKE_LOGGING 4820 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK); 4821 #endif 4822 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4823 #ifdef SCTP_WAKE_LOGGING 4824 } else { 4825 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK); 4826 #endif 4827 } 4828 4829 if ((sctp_cmt_on_off == 0) && asoc->fast_retran_loss_recovery && accum_moved) { 4830 if (compare_with_wrap(asoc->last_acked_seq, 4831 asoc->fast_recovery_tsn, MAX_TSN) || 4832 asoc->last_acked_seq == asoc->fast_recovery_tsn) { 4833 /* Setup so we will exit RFC2582 fast recovery */ 4834 will_exit_fast_recovery = 1; 4835 } 4836 } 4837 /* 4838 * Check for revoked fragments: 4839 * 4840 * if Previous sack - Had no frags then we can't have any revoked if 4841 * Previous sack - Had frag's then - If we now have frags aka 4842 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked 4843 * some of them. else - The peer revoked all ACKED fragments, since 4844 * we had some before and now we have NONE. 4845 */ 4846 4847 if (sctp_cmt_on_off) { 4848 /* 4849 * Don't check for revoked if CMT is ON. CMT causes 4850 * reordering of data and acks (received on different 4851 * interfaces) can be persistently reordered. Acking 4852 * followed by apparent revoking and re-acking causes 4853 * unexpected weird behavior. So, at this time, CMT does not 4854 * respect renegs. Renegs will have to be recovered through 4855 * a timeout. Not a big deal for such a rare event. 4856 */ 4857 } else if (num_seg) 4858 sctp_check_for_revoked(asoc, cum_ack, biggest_tsn_acked); 4859 else if (asoc->saw_sack_with_frags) { 4860 int cnt_revoked = 0; 4861 4862 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4863 if (tp1 != NULL) { 4864 /* Peer revoked all dg's marked or acked */ 4865 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4866 if ((tp1->sent > SCTP_DATAGRAM_RESEND) && 4867 (tp1->sent < SCTP_FORWARD_TSN_SKIP)) { 4868 tp1->sent = SCTP_DATAGRAM_SENT; 4869 cnt_revoked++; 4870 } 4871 } 4872 if (cnt_revoked) { 4873 reneged_all = 1; 4874 } 4875 } 4876 asoc->saw_sack_with_frags = 0; 4877 } 4878 if (num_seg) 4879 asoc->saw_sack_with_frags = 1; 4880 else 4881 asoc->saw_sack_with_frags = 0; 4882 4883 4884 sctp_cwnd_update(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery); 4885 4886 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4887 /* nothing left in-flight */ 4888 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4889 /* stop all timers */ 4890 if (sctp_early_fr) { 4891 if (callout_pending(&net->fr_timer.timer)) { 4892 SCTP_STAT_INCR(sctps_earlyfrstpidsck4); 4893 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net); 4894 } 4895 } 4896 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4897 stcb, net); 4898 net->flight_size = 0; 4899 net->partial_bytes_acked = 0; 4900 } 4901 asoc->total_flight = 0; 4902 asoc->total_flight_count = 0; 4903 } 4904 /**********************************/ 4905 /* Now what about shutdown issues */ 4906 /**********************************/ 4907 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4908 /* nothing left on sendqueue.. consider done */ 4909 #ifdef SCTP_LOG_RWND 4910 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4911 asoc->peers_rwnd, 0, 0, a_rwnd); 4912 #endif 4913 asoc->peers_rwnd = a_rwnd; 4914 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4915 /* SWS sender side engages */ 4916 asoc->peers_rwnd = 0; 4917 } 4918 /* clean up */ 4919 if ((asoc->stream_queue_cnt == 1) && 4920 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4921 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 4922 (asoc->locked_on_sending) 4923 ) { 4924 struct sctp_stream_queue_pending *sp; 4925 4926 /* 4927 * I may be in a state where we got all across.. but 4928 * cannot write more due to a shutdown... we abort 4929 * since the user did not indicate EOR in this case. 4930 */ 4931 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), 4932 sctp_streamhead); 4933 if ((sp) && (sp->length == 0) && (sp->msg_is_complete == 0)) { 4934 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4935 asoc->locked_on_sending = NULL; 4936 asoc->stream_queue_cnt--; 4937 } 4938 } 4939 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4940 (asoc->stream_queue_cnt == 0)) { 4941 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4942 /* Need to abort here */ 4943 struct mbuf *oper; 4944 4945 abort_out_now: 4946 *abort_now = 1; 4947 /* XXX */ 4948 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4949 0, M_DONTWAIT, 1, MT_DATA); 4950 if (oper) { 4951 struct sctp_paramhdr *ph; 4952 uint32_t *ippp; 4953 4954 oper->m_len = sizeof(struct sctp_paramhdr) + 4955 sizeof(uint32_t); 4956 ph = mtod(oper, struct sctp_paramhdr *); 4957 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 4958 ph->param_length = htons(oper->m_len); 4959 ippp = (uint32_t *) (ph + 1); 4960 *ippp = htonl(0x30000003); 4961 } 4962 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper); 4963 return; 4964 } else { 4965 asoc->state = SCTP_STATE_SHUTDOWN_SENT; 4966 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4967 sctp_stop_timers_for_shutdown(stcb); 4968 sctp_send_shutdown(stcb, 4969 stcb->asoc.primary_destination); 4970 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4971 stcb->sctp_ep, stcb, asoc->primary_destination); 4972 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4973 stcb->sctp_ep, stcb, asoc->primary_destination); 4974 } 4975 return; 4976 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4977 (asoc->stream_queue_cnt == 0)) { 4978 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4979 goto abort_out_now; 4980 } 4981 asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT; 4982 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4983 sctp_send_shutdown_ack(stcb, 4984 stcb->asoc.primary_destination); 4985 4986 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4987 stcb->sctp_ep, stcb, asoc->primary_destination); 4988 return; 4989 } 4990 } 4991 /* 4992 * Now here we are going to recycle net_ack for a different use... 4993 * HEADS UP. 4994 */ 4995 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4996 net->net_ack = 0; 4997 } 4998 4999 /* 5000 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking 5001 * to be done. Setting this_sack_lowest_newack to the cum_ack will 5002 * automatically ensure that. 5003 */ 5004 if (sctp_cmt_on_off && sctp_cmt_use_dac && (cmt_dac_flag == 0)) { 5005 this_sack_lowest_newack = cum_ack; 5006 } 5007 if (num_seg > 0) { 5008 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, 5009 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved); 5010 } 5011 /*********************************************/ 5012 /* Here we perform PR-SCTP procedures */ 5013 /* (section 4.2) */ 5014 /*********************************************/ 5015 /* C1. update advancedPeerAckPoint */ 5016 if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) { 5017 asoc->advanced_peer_ack_point = cum_ack; 5018 } 5019 /* C2. try to further move advancedPeerAckPoint ahead */ 5020 5021 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) { 5022 struct sctp_tmit_chunk *lchk; 5023 5024 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 5025 /* C3. See if we need to send a Fwd-TSN */ 5026 if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack, 5027 MAX_TSN)) { 5028 /* 5029 * ISSUE with ECN, see FWD-TSN processing for notes 5030 * on issues that will occur when the ECN NONCE 5031 * stuff is put into SCTP for cross checking. 5032 */ 5033 send_forward_tsn(stcb, asoc); 5034 5035 /* 5036 * ECN Nonce: Disable Nonce Sum check when FWD TSN 5037 * is sent and store resync tsn 5038 */ 5039 asoc->nonce_sum_check = 0; 5040 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point; 5041 if (lchk) { 5042 /* Assure a timer is up */ 5043 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5044 stcb->sctp_ep, stcb, lchk->whoTo); 5045 } 5046 } 5047 } 5048 /* 5049 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off == 1) && 5050 * (net->fast_retran_loss_recovery == 0))) 5051 */ 5052 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5053 if ((asoc->fast_retran_loss_recovery == 0) || (sctp_cmt_on_off == 1)) { 5054 /* out of a RFC2582 Fast recovery window? */ 5055 if (net->net_ack > 0) { 5056 /* 5057 * per section 7.2.3, are there any 5058 * destinations that had a fast retransmit 5059 * to them. If so what we need to do is 5060 * adjust ssthresh and cwnd. 5061 */ 5062 struct sctp_tmit_chunk *lchk; 5063 5064 #ifdef SCTP_HIGH_SPEED 5065 sctp_hs_cwnd_decrease(stcb, net); 5066 #else 5067 #ifdef SCTP_CWND_MONITOR 5068 int old_cwnd = net->cwnd; 5069 5070 #endif 5071 net->ssthresh = net->cwnd / 2; 5072 if (net->ssthresh < (net->mtu * 2)) { 5073 net->ssthresh = 2 * net->mtu; 5074 } 5075 net->cwnd = net->ssthresh; 5076 #ifdef SCTP_CWND_MONITOR 5077 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), 5078 SCTP_CWND_LOG_FROM_FR); 5079 #endif 5080 #endif 5081 5082 lchk = TAILQ_FIRST(&asoc->send_queue); 5083 5084 net->partial_bytes_acked = 0; 5085 /* Turn on fast recovery window */ 5086 asoc->fast_retran_loss_recovery = 1; 5087 if (lchk == NULL) { 5088 /* Mark end of the window */ 5089 asoc->fast_recovery_tsn = asoc->sending_seq - 1; 5090 } else { 5091 asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1; 5092 } 5093 5094 /* 5095 * CMT fast recovery -- per destination 5096 * recovery variable. 5097 */ 5098 net->fast_retran_loss_recovery = 1; 5099 5100 if (lchk == NULL) { 5101 /* Mark end of the window */ 5102 net->fast_recovery_tsn = asoc->sending_seq - 1; 5103 } else { 5104 net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1; 5105 } 5106 5107 5108 5109 /* 5110 * Disable Nonce Sum Checking and store the 5111 * resync tsn 5112 */ 5113 asoc->nonce_sum_check = 0; 5114 asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1; 5115 5116 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, 5117 stcb->sctp_ep, stcb, net); 5118 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5119 stcb->sctp_ep, stcb, net); 5120 } 5121 } else if (net->net_ack > 0) { 5122 /* 5123 * Mark a peg that we WOULD have done a cwnd 5124 * reduction but RFC2582 prevented this action. 5125 */ 5126 SCTP_STAT_INCR(sctps_fastretransinrtt); 5127 } 5128 } 5129 5130 5131 /****************************************************************** 5132 * Here we do the stuff with ECN Nonce checking. 5133 * We basically check to see if the nonce sum flag was incorrect 5134 * or if resynchronization needs to be done. Also if we catch a 5135 * misbehaving receiver we give him the kick. 5136 ******************************************************************/ 5137 5138 if (asoc->ecn_nonce_allowed) { 5139 if (asoc->nonce_sum_check) { 5140 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) { 5141 if (asoc->nonce_wait_for_ecne == 0) { 5142 struct sctp_tmit_chunk *lchk; 5143 5144 lchk = TAILQ_FIRST(&asoc->send_queue); 5145 asoc->nonce_wait_for_ecne = 1; 5146 if (lchk) { 5147 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq; 5148 } else { 5149 asoc->nonce_wait_tsn = asoc->sending_seq; 5150 } 5151 } else { 5152 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) || 5153 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) { 5154 /* 5155 * Misbehaving peer. We need 5156 * to react to this guy 5157 */ 5158 asoc->ecn_allowed = 0; 5159 asoc->ecn_nonce_allowed = 0; 5160 } 5161 } 5162 } 5163 } else { 5164 /* See if Resynchronization Possible */ 5165 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) { 5166 asoc->nonce_sum_check = 1; 5167 /* 5168 * now we must calculate what the base is. 5169 * We do this based on two things, we know 5170 * the total's for all the segments 5171 * gap-acked in the SACK, its stored in 5172 * ecn_seg_sums. We also know the SACK's 5173 * nonce sum, its in nonce_sum_flag. So we 5174 * can build a truth table to back-calculate 5175 * the new value of 5176 * asoc->nonce_sum_expect_base: 5177 * 5178 * SACK-flag-Value Seg-Sums Base 0 0 0 5179 * 1 0 1 0 1 1 1 5180 * 1 0 5181 */ 5182 asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM; 5183 } 5184 } 5185 } 5186 /* Now are we exiting loss recovery ? */ 5187 if (will_exit_fast_recovery) { 5188 /* Ok, we must exit fast recovery */ 5189 asoc->fast_retran_loss_recovery = 0; 5190 } 5191 if ((asoc->sat_t3_loss_recovery) && 5192 ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn, 5193 MAX_TSN) || 5194 (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) { 5195 /* end satellite t3 loss recovery */ 5196 asoc->sat_t3_loss_recovery = 0; 5197 } 5198 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5199 if (net->will_exit_fast_recovery) { 5200 /* Ok, we must exit fast recovery */ 5201 net->fast_retran_loss_recovery = 0; 5202 } 5203 } 5204 5205 /* Adjust and set the new rwnd value */ 5206 #ifdef SCTP_LOG_RWND 5207 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 5208 asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * sctp_peer_chunk_oh), a_rwnd); 5209 #endif 5210 5211 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd, 5212 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh))); 5213 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 5214 /* SWS sender side engages */ 5215 asoc->peers_rwnd = 0; 5216 } 5217 /* 5218 * Now we must setup so we have a timer up for anyone with 5219 * outstanding data. 5220 */ 5221 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5222 if (net->flight_size) { 5223 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5224 stcb->sctp_ep, stcb, net); 5225 } 5226 } 5227 #ifdef SCTP_SACK_RWND_LOGGING 5228 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 5229 a_rwnd, 5230 stcb->asoc.peers_rwnd, 5231 stcb->asoc.total_flight, 5232 stcb->asoc.total_output_queue_size); 5233 5234 #endif 5235 5236 } 5237 5238 void 5239 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, 5240 struct sctp_nets *netp, int *abort_flag) 5241 { 5242 /* Copy cum-ack */ 5243 uint32_t cum_ack, a_rwnd; 5244 5245 cum_ack = ntohl(cp->cumulative_tsn_ack); 5246 /* Arrange so a_rwnd does NOT change */ 5247 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight; 5248 5249 /* Now call the express sack handling */ 5250 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag); 5251 } 5252 5253 static void 5254 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, 5255 struct sctp_stream_in *strmin) 5256 { 5257 struct sctp_queued_to_read *ctl, *nctl; 5258 struct sctp_association *asoc; 5259 int tt; 5260 5261 asoc = &stcb->asoc; 5262 tt = strmin->last_sequence_delivered; 5263 /* 5264 * First deliver anything prior to and including the stream no that 5265 * came in 5266 */ 5267 ctl = TAILQ_FIRST(&strmin->inqueue); 5268 while (ctl) { 5269 nctl = TAILQ_NEXT(ctl, next); 5270 if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) || 5271 (tt == ctl->sinfo_ssn)) { 5272 /* this is deliverable now */ 5273 TAILQ_REMOVE(&strmin->inqueue, ctl, next); 5274 /* subtract pending on streams */ 5275 asoc->size_on_all_streams -= ctl->length; 5276 sctp_ucount_decr(asoc->cnt_on_all_streams); 5277 /* deliver it to at least the delivery-q */ 5278 if (stcb->sctp_socket) { 5279 sctp_add_to_readq(stcb->sctp_ep, stcb, 5280 ctl, 5281 &stcb->sctp_socket->so_rcv, 1); 5282 } 5283 } else { 5284 /* no more delivery now. */ 5285 break; 5286 } 5287 ctl = nctl; 5288 } 5289 /* 5290 * now we must deliver things in queue the normal way if any are 5291 * now ready. 5292 */ 5293 tt = strmin->last_sequence_delivered + 1; 5294 ctl = TAILQ_FIRST(&strmin->inqueue); 5295 while (ctl) { 5296 nctl = TAILQ_NEXT(ctl, next); 5297 if (tt == ctl->sinfo_ssn) { 5298 /* this is deliverable now */ 5299 TAILQ_REMOVE(&strmin->inqueue, ctl, next); 5300 /* subtract pending on streams */ 5301 asoc->size_on_all_streams -= ctl->length; 5302 sctp_ucount_decr(asoc->cnt_on_all_streams); 5303 /* deliver it to at least the delivery-q */ 5304 strmin->last_sequence_delivered = ctl->sinfo_ssn; 5305 if (stcb->sctp_socket) { 5306 sctp_add_to_readq(stcb->sctp_ep, stcb, 5307 ctl, 5308 &stcb->sctp_socket->so_rcv, 1); 5309 } 5310 tt = strmin->last_sequence_delivered + 1; 5311 } else { 5312 break; 5313 } 5314 ctl = nctl; 5315 } 5316 } 5317 5318 void 5319 sctp_handle_forward_tsn(struct sctp_tcb *stcb, 5320 struct sctp_forward_tsn_chunk *fwd, int *abort_flag) 5321 { 5322 /* 5323 * ISSUES that MUST be fixed for ECN! When we are the sender of the 5324 * forward TSN, when the SACK comes back that acknowledges the 5325 * FWD-TSN we must reset the NONCE sum to match correctly. This will 5326 * get quite tricky since we may have sent more data interveneing 5327 * and must carefully account for what the SACK says on the nonce 5328 * and any gaps that are reported. This work will NOT be done here, 5329 * but I note it here since it is really related to PR-SCTP and 5330 * FWD-TSN's 5331 */ 5332 5333 /* The pr-sctp fwd tsn */ 5334 /* 5335 * here we will perform all the data receiver side steps for 5336 * processing FwdTSN, as required in by pr-sctp draft: 5337 * 5338 * Assume we get FwdTSN(x): 5339 * 5340 * 1) update local cumTSN to x 2) try to further advance cumTSN to x + 5341 * others we have 3) examine and update re-ordering queue on 5342 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to 5343 * report where we are. 5344 */ 5345 struct sctp_strseq *stseq; 5346 struct sctp_association *asoc; 5347 uint32_t new_cum_tsn, gap, back_out_htsn; 5348 unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size; 5349 struct sctp_stream_in *strm; 5350 struct sctp_tmit_chunk *chk, *at; 5351 5352 cumack_set_flag = 0; 5353 asoc = &stcb->asoc; 5354 cnt_gone = 0; 5355 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) { 5356 #ifdef SCTP_DEBUG 5357 if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 5358 printf("Bad size too small/big fwd-tsn\n"); 5359 } 5360 #endif 5361 return; 5362 } 5363 m_size = (stcb->asoc.mapping_array_size << 3); 5364 /*************************************************************/ 5365 /* 1. Here we update local cumTSN and shift the bitmap array */ 5366 /*************************************************************/ 5367 new_cum_tsn = ntohl(fwd->new_cumulative_tsn); 5368 5369 if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) || 5370 asoc->cumulative_tsn == new_cum_tsn) { 5371 /* Already got there ... */ 5372 return; 5373 } 5374 back_out_htsn = asoc->highest_tsn_inside_map; 5375 if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map, 5376 MAX_TSN)) { 5377 asoc->highest_tsn_inside_map = new_cum_tsn; 5378 #ifdef SCTP_MAP_LOGGING 5379 sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 5380 #endif 5381 } 5382 /* 5383 * now we know the new TSN is more advanced, let's find the actual 5384 * gap 5385 */ 5386 if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn, 5387 MAX_TSN)) || 5388 (new_cum_tsn == asoc->mapping_array_base_tsn)) { 5389 gap = new_cum_tsn - asoc->mapping_array_base_tsn; 5390 } else { 5391 /* try to prevent underflow here */ 5392 gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1; 5393 } 5394 5395 if (gap > m_size || gap < 0) { 5396 asoc->highest_tsn_inside_map = back_out_htsn; 5397 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) { 5398 /* 5399 * out of range (of single byte chunks in the rwnd I 5400 * give out) too questionable. better to drop it 5401 * silently 5402 */ 5403 return; 5404 } 5405 if (asoc->highest_tsn_inside_map > 5406 asoc->mapping_array_base_tsn) { 5407 gap = asoc->highest_tsn_inside_map - 5408 asoc->mapping_array_base_tsn; 5409 } else { 5410 gap = asoc->highest_tsn_inside_map + 5411 (MAX_TSN - asoc->mapping_array_base_tsn) + 1; 5412 } 5413 cumack_set_flag = 1; 5414 } 5415 for (i = 0; i <= gap; i++) { 5416 SCTP_SET_TSN_PRESENT(asoc->mapping_array, i); 5417 } 5418 /* 5419 * Now after marking all, slide thing forward but no sack please. 5420 */ 5421 sctp_sack_check(stcb, 0, 0, abort_flag); 5422 if (*abort_flag) 5423 return; 5424 5425 if (cumack_set_flag) { 5426 /* 5427 * fwd-tsn went outside my gap array - not a common 5428 * occurance. Do the same thing we do when a cookie-echo 5429 * arrives. 5430 */ 5431 asoc->highest_tsn_inside_map = new_cum_tsn - 1; 5432 asoc->mapping_array_base_tsn = new_cum_tsn; 5433 asoc->cumulative_tsn = asoc->highest_tsn_inside_map; 5434 #ifdef SCTP_MAP_LOGGING 5435 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 5436 #endif 5437 asoc->last_echo_tsn = asoc->highest_tsn_inside_map; 5438 } 5439 /*************************************************************/ 5440 /* 2. Clear up re-assembly queue */ 5441 /*************************************************************/ 5442 5443 /* 5444 * First service it if pd-api is up, just in case we can progress it 5445 * forward 5446 */ 5447 if (asoc->fragmented_delivery_inprogress) { 5448 sctp_service_reassembly(stcb, asoc); 5449 } 5450 if (!TAILQ_EMPTY(&asoc->reasmqueue)) { 5451 /* For each one on here see if we need to toss it */ 5452 /* 5453 * For now large messages held on the reasmqueue that are 5454 * complete will be tossed too. We could in theory do more 5455 * work to spin through and stop after dumping one msg aka 5456 * seeing the start of a new msg at the head, and call the 5457 * delivery function... to see if it can be delivered... But 5458 * for now we just dump everything on the queue. 5459 */ 5460 chk = TAILQ_FIRST(&asoc->reasmqueue); 5461 while (chk) { 5462 at = TAILQ_NEXT(chk, sctp_next); 5463 if (compare_with_wrap(asoc->cumulative_tsn, 5464 chk->rec.data.TSN_seq, MAX_TSN) || 5465 asoc->cumulative_tsn == chk->rec.data.TSN_seq) { 5466 /* It needs to be tossed */ 5467 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); 5468 if (compare_with_wrap(chk->rec.data.TSN_seq, 5469 asoc->tsn_last_delivered, MAX_TSN)) { 5470 asoc->tsn_last_delivered = 5471 chk->rec.data.TSN_seq; 5472 asoc->str_of_pdapi = 5473 chk->rec.data.stream_number; 5474 asoc->ssn_of_pdapi = 5475 chk->rec.data.stream_seq; 5476 asoc->fragment_flags = 5477 chk->rec.data.rcv_flags; 5478 } 5479 asoc->size_on_reasm_queue -= chk->send_size; 5480 sctp_ucount_decr(asoc->cnt_on_reasm_queue); 5481 cnt_gone++; 5482 5483 /* Clear up any stream problem */ 5484 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != 5485 SCTP_DATA_UNORDERED && 5486 (compare_with_wrap(chk->rec.data.stream_seq, 5487 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered, 5488 MAX_SEQ))) { 5489 /* 5490 * We must dump forward this streams 5491 * sequence number if the chunk is 5492 * not unordered that is being 5493 * skipped. There is a chance that 5494 * if the peer does not include the 5495 * last fragment in its FWD-TSN we 5496 * WILL have a problem here since 5497 * you would have a partial chunk in 5498 * queue that may not be 5499 * deliverable. Also if a Partial 5500 * delivery API as started the user 5501 * may get a partial chunk. The next 5502 * read returning a new chunk... 5503 * really ugly but I see no way 5504 * around it! Maybe a notify?? 5505 */ 5506 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = 5507 chk->rec.data.stream_seq; 5508 } 5509 if (chk->data) { 5510 sctp_m_freem(chk->data); 5511 chk->data = NULL; 5512 } 5513 sctp_free_remote_addr(chk->whoTo); 5514 sctp_free_a_chunk(stcb, chk); 5515 } else { 5516 /* 5517 * Ok we have gone beyond the end of the 5518 * fwd-tsn's mark. Some checks... 5519 */ 5520 if ((asoc->fragmented_delivery_inprogress) && 5521 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) { 5522 /* 5523 * Special case PD-API is up and 5524 * what we fwd-tsn' over includes 5525 * one that had the LAST_FRAG. We no 5526 * longer need to do the PD-API. 5527 */ 5528 asoc->fragmented_delivery_inprogress = 0; 5529 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION, 5530 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL); 5531 5532 } 5533 break; 5534 } 5535 chk = at; 5536 } 5537 } 5538 if (asoc->fragmented_delivery_inprogress) { 5539 /* 5540 * Ok we removed cnt_gone chunks in the PD-API queue that 5541 * were being delivered. So now we must turn off the flag. 5542 */ 5543 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION, 5544 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL); 5545 asoc->fragmented_delivery_inprogress = 0; 5546 } 5547 /*************************************************************/ 5548 /* 3. Update the PR-stream re-ordering queues */ 5549 /*************************************************************/ 5550 stseq = (struct sctp_strseq *)((caddr_t)fwd + sizeof(*fwd)); 5551 fwd_sz -= sizeof(*fwd); 5552 { 5553 /* New method. */ 5554 int num_str, i; 5555 5556 num_str = fwd_sz / sizeof(struct sctp_strseq); 5557 for (i = 0; i < num_str; i++) { 5558 uint16_t st; 5559 unsigned char *xx; 5560 5561 /* Convert */ 5562 xx = (unsigned char *)&stseq[i]; 5563 st = ntohs(stseq[i].stream); 5564 stseq[i].stream = st; 5565 st = ntohs(stseq[i].sequence); 5566 stseq[i].sequence = st; 5567 /* now process */ 5568 if (stseq[i].stream > asoc->streamincnt) { 5569 /* 5570 * It is arguable if we should continue. 5571 * Since the peer sent bogus stream info we 5572 * may be in deep trouble.. a return may be 5573 * a better choice? 5574 */ 5575 continue; 5576 } 5577 strm = &asoc->strmin[stseq[i].stream]; 5578 if (compare_with_wrap(stseq[i].sequence, 5579 strm->last_sequence_delivered, MAX_SEQ)) { 5580 /* Update the sequence number */ 5581 strm->last_sequence_delivered = 5582 stseq[i].sequence; 5583 } 5584 /* now kick the stream the new way */ 5585 sctp_kick_prsctp_reorder_queue(stcb, strm); 5586 } 5587 } 5588 } 5589