/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>


/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it ... for bundling).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    uint32_t calc, calc_w_oh;

    /*
     * This is really set wrong with respect to a 1-to-many socket, since
     * sb_cc is the count that everyone has put up. When we re-write
     * sctp_soreceive we will fix this so that ONLY this association's
     * data is taken into account.
     */
    if (stcb->sctp_socket == NULL)
        return;

    if (stcb->asoc.sb_cc == 0 &&
        asoc->size_on_reasm_queue == 0 &&
        asoc->size_on_all_streams == 0) {
        /* Full rwnd granted */
        asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
            SCTP_MINIMAL_RWND);
        return;
    }
    /* get actual space */
    calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

    /*
     * take out what has NOT been put on the socket queue and which we
     * still hold for putting up.
     */
    calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
    calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);

    if (calc == 0) {
        /* out of space */
        asoc->my_rwnd = 0;
        return;
    }
    /* what is the overhead of all these rwnd's */
    calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
    asoc->my_rwnd = calc;
    if (calc_w_oh == 0) {
        /*
         * If our overhead is greater than the advertised rwnd, we
         * clamp the rwnd to 1. This lets us still accept inbound
         * segments, but hopefully will shut the sender down when he
         * finally gets the message.
         */
        asoc->my_rwnd = 1;
    } else {
        /* SWS threshold */
        if (asoc->my_rwnd &&
            (asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
            /* SWS engaged, tell peer none left */
            asoc->my_rwnd = 1;
        }
    }
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    uint32_t calc = 0, calc_w_oh;

    /*
     * This is really set wrong with respect to a 1-to-many socket, since
     * sb_cc is the count that everyone has put up. When we re-write
     * sctp_soreceive we will fix this so that ONLY this association's
     * data is taken into account.
     */
    if (stcb->sctp_socket == NULL)
        return (calc);

    if (stcb->asoc.sb_cc == 0 &&
        asoc->size_on_reasm_queue == 0 &&
        asoc->size_on_all_streams == 0) {
        /* Full rwnd granted */
        calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
            SCTP_MINIMAL_RWND);
        return (calc);
    }
    /* get actual space */
    calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

    /*
     * take out what has NOT been put on the socket queue and which we
     * still hold for putting up.
     */
    calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
    calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);

    if (calc == 0) {
        /* out of space */
        return (calc);
    }
    /* what is the overhead of all these rwnd's */
    calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
    if (calc_w_oh == 0) {
        /*
         * If our overhead is greater than the advertised rwnd, we
         * clamp the rwnd to 1. This lets us still accept inbound
         * segments, but hopefully will shut the sender down when he
         * finally gets the message.
         */
        calc = 1;
    } else {
        /* SWS threshold */
        if (calc &&
            (calc < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
            /* SWS engaged, tell peer none left */
            calc = 1;
        }
    }
    return (calc);
}
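
/*
 * Worked example for the two routines above (hypothetical numbers): with
 * SCTP_SB_LIMIT_RCV() at 64000 and nothing held anywhere, the full 64000
 * bytes (or SCTP_MINIMAL_RWND, if that is larger) are advertised.  If
 * instead sctp_sbspace() reports 40000 bytes free while 30000 bytes sit on
 * the reassembly queue and 8000 bytes on the stream queues, only 2000 bytes
 * remain; should the control overhead (my_rwnd_control_len) consume even
 * that, or should 2000 fall below the receiver-side SWS threshold, the
 * advertised rwnd is clamped to 1 so the sender is throttled without the
 * window being reported as fully closed.
 */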


/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
    struct sctp_queued_to_read *read_queue_e = NULL;

    sctp_alloc_a_readq(stcb, read_queue_e);
    if (read_queue_e == NULL) {
        goto failed_build;
    }
    read_queue_e->sinfo_stream = stream_no;
    read_queue_e->sinfo_ssn = stream_seq;
    read_queue_e->sinfo_flags = (flags << 8);
    read_queue_e->sinfo_ppid = ppid;
    read_queue_e->sinfo_context = stcb->asoc.context;
    read_queue_e->sinfo_timetolive = 0;
    read_queue_e->sinfo_tsn = tsn;
    read_queue_e->sinfo_cumtsn = tsn;
    read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
    read_queue_e->whoFrom = net;
    read_queue_e->length = 0;
    atomic_add_int(&net->ref_count, 1);
    read_queue_e->data = dm;
    read_queue_e->spec_flags = 0;
    read_queue_e->tail_mbuf = NULL;
    read_queue_e->aux_data = NULL;
    read_queue_e->stcb = stcb;
    read_queue_e->port_from = stcb->rport;
    read_queue_e->do_not_ref_stcb = 0;
    read_queue_e->end_added = 0;
    read_queue_e->some_taken = 0;
    read_queue_e->pdapi_aborted = 0;
failed_build:
    return (read_queue_e);
}


/*
 * Build out our readq entry based on the incoming chunk.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
    struct sctp_queued_to_read *read_queue_e = NULL;

    sctp_alloc_a_readq(stcb, read_queue_e);
    if (read_queue_e == NULL) {
        goto failed_build;
    }
    read_queue_e->sinfo_stream = chk->rec.data.stream_number;
    read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
    read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
    read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
    read_queue_e->sinfo_context = stcb->asoc.context;
    read_queue_e->sinfo_timetolive = 0;
    read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
    read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
    read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
    read_queue_e->whoFrom = chk->whoTo;
    read_queue_e->aux_data = NULL;
    read_queue_e->length = 0;
    atomic_add_int(&chk->whoTo->ref_count, 1);
    read_queue_e->data = chk->data;
    read_queue_e->tail_mbuf = NULL;
    read_queue_e->stcb = stcb;
    read_queue_e->port_from = stcb->rport;
    read_queue_e->spec_flags = 0;
    read_queue_e->do_not_ref_stcb = 0;
    read_queue_e->end_added = 0;
    read_queue_e->some_taken = 0;
    read_queue_e->pdapi_aborted = 0;
failed_build:
    return (read_queue_e);
}


struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
    struct sctp_sndrcvinfo *sinfo)
{
    struct sctp_sndrcvinfo *outinfo;
    struct cmsghdr *cmh;
    struct mbuf *ret;
    int len;
    int use_extended = 0;

    if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
        /* user does not want the sndrcv ctl */
        return (NULL);
    }
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
        use_extended = 1;
        len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
    } else {
        len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
    }

    ret = sctp_get_mbuf_for_msg(len,
        0, M_DONTWAIT, 1, MT_DATA);

    if (ret == NULL) {
        /* No space */
        return (ret);
    }
    /* We need a CMSG header followed by the struct */
    cmh = mtod(ret, struct cmsghdr *);
    outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
    cmh->cmsg_level = IPPROTO_SCTP;
    if (use_extended) {
        cmh->cmsg_type = SCTP_EXTRCV;
        cmh->cmsg_len = len;
        memcpy(outinfo, sinfo, len);
    } else {
        cmh->cmsg_type = SCTP_SNDRCV;
        cmh->cmsg_len = len;
        *outinfo = *sinfo;
    }
    SCTP_BUF_LEN(ret) = cmh->cmsg_len;
    return (ret);
}


char *
sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
    int *control_len,
    struct sctp_sndrcvinfo *sinfo)
{
    struct sctp_sndrcvinfo *outinfo;
    struct cmsghdr *cmh;
    char *buf;
    int len;
    int use_extended = 0;

    if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
        /* user does not want the sndrcv ctl */
        return (NULL);
    }
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
        use_extended = 1;
        len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
    } else {
        len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
    }
    SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
    if (buf == NULL) {
        /* No space */
        return (buf);
    }
    /* We need a CMSG header followed by the struct */
    cmh = (struct cmsghdr *)buf;
    outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
    cmh->cmsg_level = IPPROTO_SCTP;
    if (use_extended) {
        cmh->cmsg_type = SCTP_EXTRCV;
        cmh->cmsg_len = len;
        memcpy(outinfo, sinfo, len);
    } else {
        cmh->cmsg_type = SCTP_SNDRCV;
        cmh->cmsg_len = len;
        *outinfo = *sinfo;
    }
    *control_len = len;
    return (buf);
}


/*
 * We are delivering currently from the reassembly queue. We must continue
 * to deliver until we either: 1) run out of space. 2) run out of sequential
 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    struct sctp_tmit_chunk *chk;
    uint16_t nxt_todel;
    uint16_t stream_no;
    int end = 0;
    int cntDel;
    struct sctp_queued_to_read *control, *ctl, *ctlat;

    if (stcb == NULL)
        return;

    cntDel = stream_no = 0;
    if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
        (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
        /* socket above is long gone */
        asoc->fragmented_delivery_inprogress = 0;
        chk = TAILQ_FIRST(&asoc->reasmqueue);
        while (chk) {
            TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
            asoc->size_on_reasm_queue -= chk->send_size;
            sctp_ucount_decr(asoc->cnt_on_reasm_queue);
            /*
             * Lose the data pointer, since its in the socket
             * buffer
             */
            if (chk->data) {
                sctp_m_freem(chk->data);
                chk->data = NULL;
            }
            /* Now free the address and data */
            sctp_free_a_chunk(stcb, chk);
            /* sa_ignore FREED_MEMORY */
            chk = TAILQ_FIRST(&asoc->reasmqueue);
        }
        return;
    }
    SCTP_TCB_LOCK_ASSERT(stcb);
    do {
        chk = TAILQ_FIRST(&asoc->reasmqueue);
        if (chk == NULL) {
            return;
        }
        if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
            /* Can't deliver more :< */
            return;
        }
        stream_no = chk->rec.data.stream_number;
        nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
        if (nxt_todel != chk->rec.data.stream_seq &&
            (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
            /*
             * Not the next sequence to deliver in its stream OR
             * unordered
             */
            return;
        }
        if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

            control = sctp_build_readq_entry_chk(stcb, chk);
            if (control == NULL) {
                /* out of memory? */
                return;
            }
            /* save it off for our future deliveries */
            stcb->asoc.control_pdapi = control;
            if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
                end = 1;
            else
                end = 0;
            sctp_add_to_readq(stcb->sctp_ep,
                stcb, control, &stcb->sctp_socket->so_rcv, end);
            cntDel++;
        } else {
            if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
                end = 1;
            else
                end = 0;
            if (sctp_append_to_readq(stcb->sctp_ep, stcb,
                stcb->asoc.control_pdapi,
                chk->data, end, chk->rec.data.TSN_seq,
                &stcb->sctp_socket->so_rcv)) {
                /*
                 * something is very wrong, either
                 * control_pdapi is NULL, or the tail_mbuf
                 * is corrupt, or there is an EOM already on
                 * the mbuf chain.
                 */
                if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
                    panic("This should not happen control_pdapi NULL?");
                }
                /* if we did not panic, it was an EOM */
                panic("Bad chunking ??");
                return;
            }
            cntDel++;
        }
        /* pull it we did it */
        TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
        if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
            asoc->fragmented_delivery_inprogress = 0;
            if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
                asoc->strmin[stream_no].last_sequence_delivered++;
            }
            if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
                SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
            }
        } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
            /*
             * turn the flag back on since we just delivered
             * yet another one.
             */
            asoc->fragmented_delivery_inprogress = 1;
        }
        asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
        asoc->last_flags_delivered = chk->rec.data.rcv_flags;
        asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
        asoc->last_strm_no_delivered = chk->rec.data.stream_number;

        asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
        asoc->size_on_reasm_queue -= chk->send_size;
        sctp_ucount_decr(asoc->cnt_on_reasm_queue);
        /* free up the chk */
        chk->data = NULL;
        sctp_free_a_chunk(stcb, chk);

        if (asoc->fragmented_delivery_inprogress == 0) {
            /*
             * Now lets see if we can deliver the next one on
             * the stream
             */
            struct sctp_stream_in *strm;

            strm = &asoc->strmin[stream_no];
            nxt_todel = strm->last_sequence_delivered + 1;
            ctl = TAILQ_FIRST(&strm->inqueue);
            if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
                while (ctl != NULL) {
                    /* Deliver more if we can. */
                    if (nxt_todel == ctl->sinfo_ssn) {
                        ctlat = TAILQ_NEXT(ctl, next);
                        TAILQ_REMOVE(&strm->inqueue, ctl, next);
                        asoc->size_on_all_streams -= ctl->length;
                        sctp_ucount_decr(asoc->cnt_on_all_streams);
                        strm->last_sequence_delivered++;
                        sctp_add_to_readq(stcb->sctp_ep, stcb,
                            ctl,
                            &stcb->sctp_socket->so_rcv, 1);
                        ctl = ctlat;
                    } else {
                        break;
                    }
                    nxt_todel = strm->last_sequence_delivered + 1;
                }
            }
            break;
        }
        /* sa_ignore FREED_MEMORY */
        chk = TAILQ_FIRST(&asoc->reasmqueue);
    } while (chk);
}
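
/*
 * Example of the flow above for a message arriving as three fragments with
 * consecutive TSNs t, t+1, t+2: the FIRST fragment creates a readq entry
 * (remembered in control_pdapi) and is pushed to the socket with end == 0;
 * the MIDDLE fragment is appended to that same entry via
 * sctp_append_to_readq(); the LAST fragment is appended with end == 1,
 * fragmented_delivery_inprogress is cleared and, for ordered data,
 * last_sequence_delivered is advanced so any complete messages queued on
 * that stream can be handed up right behind it.
 */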

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains, what to do when the socket buffer is FULL??
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
    /*
     * FIX-ME maybe? What happens when the ssn wraps? If we are getting
     * all the data in one stream this could happen quite rapidly. One
     * could use the TSN to keep track of things, but this scheme breaks
     * down in the other type of stream usage that could occur. Send a
     * single msg to stream 0, send 4Billion messages to stream 1, now
     * send a message to stream 0. You have a situation where the TSN
     * has wrapped but not in the stream. Is this worth worrying about,
     * or should we just change our queue sort at the bottom to be by
     * TSN?
     *
     * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
     * with TSN 1? If the peer is doing some sort of funky TSN/SSN
     * assignment this could happen... and I don't see how this would be
     * a violation. So for now I am undecided and will leave the sort by
     * SSN alone. Maybe a hybrid approach is the answer.
     */
    struct sctp_stream_in *strm;
    struct sctp_queued_to_read *at;
    int queue_needed;
    uint16_t nxt_todel;
    struct mbuf *oper;

    queue_needed = 1;
    asoc->size_on_all_streams += control->length;
    sctp_ucount_incr(asoc->cnt_on_all_streams);
    strm = &asoc->strmin[control->sinfo_stream];
    nxt_todel = strm->last_sequence_delivered + 1;
    if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
        sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
    }
    SCTPDBG(SCTP_DEBUG_INDATA1,
        "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
        (uint32_t) control->sinfo_stream,
        (uint32_t) strm->last_sequence_delivered,
        (uint32_t) nxt_todel);
    if (compare_with_wrap(strm->last_sequence_delivered,
        control->sinfo_ssn, MAX_SEQ) ||
        (strm->last_sequence_delivered == control->sinfo_ssn)) {
        /* The incoming sseq is behind where we last delivered? */
        SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
            control->sinfo_ssn, strm->last_sequence_delivered);
        /*
         * throw it in the stream so it gets cleaned up in
         * association destruction
         */
        TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
        oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
            0, M_DONTWAIT, 1, MT_DATA);
        if (oper) {
            struct sctp_paramhdr *ph;
            uint32_t *ippp;

            SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
                (sizeof(uint32_t) * 3);
            ph = mtod(oper, struct sctp_paramhdr *);
            ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
            ph->param_length = htons(SCTP_BUF_LEN(oper));
            ippp = (uint32_t *) (ph + 1);
            *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
            ippp++;
            *ippp = control->sinfo_tsn;
            ippp++;
            *ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
        }
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
        sctp_abort_an_association(stcb->sctp_ep, stcb,
            SCTP_PEER_FAULTY, oper);

        *abort_flag = 1;
        return;

    }
    if (nxt_todel == control->sinfo_ssn) {
        /* can be delivered right away? */
        if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
            sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
        }
        queue_needed = 0;
        asoc->size_on_all_streams -= control->length;
        sctp_ucount_decr(asoc->cnt_on_all_streams);
        strm->last_sequence_delivered++;
        sctp_add_to_readq(stcb->sctp_ep, stcb,
            control,
            &stcb->sctp_socket->so_rcv, 1);
        control = TAILQ_FIRST(&strm->inqueue);
        while (control != NULL) {
            /* all delivered */
            nxt_todel = strm->last_sequence_delivered + 1;
            if (nxt_todel == control->sinfo_ssn) {
                at = TAILQ_NEXT(control, next);
                TAILQ_REMOVE(&strm->inqueue, control, next);
                asoc->size_on_all_streams -= control->length;
                sctp_ucount_decr(asoc->cnt_on_all_streams);
                strm->last_sequence_delivered++;
                /*
                 * We ignore the return of deliver_data here
                 * since we always can hold the chunk on the
                 * d-queue. And we have a finite number that
                 * can be delivered from the strq.
                 */
                if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
                    sctp_log_strm_del(control, NULL,
                        SCTP_STR_LOG_FROM_IMMED_DEL);
                }
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, 1);
                control = at;
                continue;
            }
            break;
        }
    }
    if (queue_needed) {
        /*
         * Ok, we did not deliver this guy, find the correct place
         * to put it on the queue.
         */
        if (TAILQ_EMPTY(&strm->inqueue)) {
            /* Empty queue */
            if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
                sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
            }
            TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
        } else {
            TAILQ_FOREACH(at, &strm->inqueue, next) {
                if (compare_with_wrap(at->sinfo_ssn,
                    control->sinfo_ssn, MAX_SEQ)) {
                    /*
                     * one in queue is bigger than the
                     * new one, insert before this one
                     */
                    if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
                        sctp_log_strm_del(control, at,
                            SCTP_STR_LOG_FROM_INSERT_MD);
                    }
                    TAILQ_INSERT_BEFORE(at, control, next);
                    break;
                } else if (at->sinfo_ssn == control->sinfo_ssn) {
                    /*
                     * Gak, He sent me a duplicate str
                     * seq number
                     */
                    /*
                     * foo bar, I guess I will just free
                     * this new guy, should we abort
                     * too? FIX ME MAYBE? Or it COULD be
                     * that the SSN's have wrapped.
                     * Maybe I should compare to TSN
                     * somehow... sigh for now just blow
                     * away the chunk!
                     */
                    if (control->data)
                        sctp_m_freem(control->data);
                    control->data = NULL;
                    asoc->size_on_all_streams -= control->length;
                    sctp_ucount_decr(asoc->cnt_on_all_streams);
                    if (control->whoFrom)
                        sctp_free_remote_addr(control->whoFrom);
                    control->whoFrom = NULL;
                    sctp_free_a_readq(stcb, control);
                    return;
                } else {
                    if (TAILQ_NEXT(at, next) == NULL) {
                        /*
                         * We are at the end, insert
                         * it after this one
                         */
                        if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) {
                            sctp_log_strm_del(control, at,
                                SCTP_STR_LOG_FROM_INSERT_TL);
                        }
                        TAILQ_INSERT_AFTER(&strm->inqueue,
                            at, control, next);
                        break;
                    }
                }
            }
        }
    }
}
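
/*
 * Note on the SSN ordering used above: compare_with_wrap() is a
 * serial-number style comparison, so the "behind or equal" test works
 * across a 16-bit wrap.  For example, with last_sequence_delivered at
 * 65535 an arriving ssn of 0 is the next in-order message and is handed
 * up immediately, while an arriving ssn of 65534 is considered already
 * delivered and triggers the protocol-violation abort above.
 */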

/*
 * Returns two things: You get the total size of the deliverable parts of
 * the first fragmented message on the reassembly queue. And you get a 1
 * back if all of the message is ready, or a 0 back if the message is still
 * incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
{
    struct sctp_tmit_chunk *chk;
    uint32_t tsn;

    *t_size = 0;
    chk = TAILQ_FIRST(&asoc->reasmqueue);
    if (chk == NULL) {
        /* nothing on the queue */
        return (0);
    }
    if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
        /* Not a first on the queue */
        return (0);
    }
    tsn = chk->rec.data.TSN_seq;
    while (chk) {
        if (tsn != chk->rec.data.TSN_seq) {
            return (0);
        }
        *t_size += chk->send_size;
        if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
            return (1);
        }
        tsn++;
        chk = TAILQ_NEXT(chk, sctp_next);
    }
    return (0);
}

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    struct sctp_tmit_chunk *chk;
    uint16_t nxt_todel;
    uint32_t tsize;

doit_again:
    chk = TAILQ_FIRST(&asoc->reasmqueue);
    if (chk == NULL) {
        /* Huh? */
        asoc->size_on_reasm_queue = 0;
        asoc->cnt_on_reasm_queue = 0;
        return;
    }
    if (asoc->fragmented_delivery_inprogress == 0) {
        nxt_todel =
            asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
        if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
            (nxt_todel == chk->rec.data.stream_seq ||
            (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
            /*
             * Yep the first one is here and its ok to deliver
             * but should we?
             */
            if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
                (tsize > stcb->sctp_ep->partial_delivery_point))) {
                /*
                 * Yes, we set up to start reception, by
                 * backing down the TSN just in case we
                 * can't deliver.
                 */
                asoc->fragmented_delivery_inprogress = 1;
                asoc->tsn_last_delivered =
                    chk->rec.data.TSN_seq - 1;
                asoc->str_of_pdapi =
                    chk->rec.data.stream_number;
                asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
                asoc->pdapi_ppid = chk->rec.data.payloadtype;
                asoc->fragment_flags = chk->rec.data.rcv_flags;
                sctp_service_reassembly(stcb, asoc);
            }
        }
    } else {
        /*
         * Service re-assembly will deliver stream data queued at
         * the end of fragmented delivery... but it won't know to go
         * back and call itself again... we do that here with the
         * doit_again label.
         */
        sctp_service_reassembly(stcb, asoc);
        if (asoc->fragmented_delivery_inprogress == 0) {
            /*
             * finished our fragmented delivery, could be more
             * waiting?
             */
            goto doit_again;
        }
    }
}
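
/*
 * Example of the trigger above (hypothetical partial_delivery_point of
 * 4096): once the FIRST fragment of the next in-sequence message sits at
 * the head of the reassembly queue, delivery starts either when the whole
 * message is present (sctp_is_all_msg_on_reasm() returns 1) or as soon as
 * more than 4096 bytes of consecutive-TSN fragments have accumulated,
 * whichever comes first; tsn_last_delivered is backed up to one before the
 * FIRST fragment's TSN so sctp_service_reassembly() starts in the right
 * place.
 */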

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
    struct mbuf *oper;
    uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
    u_char last_flags;
    struct sctp_tmit_chunk *at, *prev, *next;

    prev = next = NULL;
    cum_ackp1 = asoc->tsn_last_delivered + 1;
    if (TAILQ_EMPTY(&asoc->reasmqueue)) {
        /* This is the first one on the queue */
        TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
        /*
         * we do not check for delivery of anything when only one
         * fragment is here
         */
        asoc->size_on_reasm_queue = chk->send_size;
        sctp_ucount_incr(asoc->cnt_on_reasm_queue);
        if (chk->rec.data.TSN_seq == cum_ackp1) {
            if (asoc->fragmented_delivery_inprogress == 0 &&
                (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
                SCTP_DATA_FIRST_FRAG) {
                /*
                 * An empty queue, no delivery inprogress,
                 * we hit the next one and it does NOT have
                 * a FIRST fragment mark.
                 */
                SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
                oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                    0, M_DONTWAIT, 1, MT_DATA);

                if (oper) {
                    struct sctp_paramhdr *ph;
                    uint32_t *ippp;

                    SCTP_BUF_LEN(oper) =
                        sizeof(struct sctp_paramhdr) +
                        (sizeof(uint32_t) * 3);
                    ph = mtod(oper, struct sctp_paramhdr *);
                    ph->param_type =
                        htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                    ph->param_length = htons(SCTP_BUF_LEN(oper));
                    ippp = (uint32_t *) (ph + 1);
                    *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
                    ippp++;
                    *ippp = chk->rec.data.TSN_seq;
                    ippp++;
                    *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

                }
                stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
                sctp_abort_an_association(stcb->sctp_ep, stcb,
                    SCTP_PEER_FAULTY, oper);
                *abort_flag = 1;
            } else if (asoc->fragmented_delivery_inprogress &&
                (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
                /*
                 * We are doing a partial delivery and the
                 * NEXT chunk MUST be either the LAST or
                 * MIDDLE fragment NOT a FIRST
                 */
                SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
                oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                    0, M_DONTWAIT, 1, MT_DATA);
                if (oper) {
                    struct sctp_paramhdr *ph;
                    uint32_t *ippp;

                    SCTP_BUF_LEN(oper) =
                        sizeof(struct sctp_paramhdr) +
                        (3 * sizeof(uint32_t));
                    ph = mtod(oper, struct sctp_paramhdr *);
                    ph->param_type =
                        htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                    ph->param_length = htons(SCTP_BUF_LEN(oper));
                    ippp = (uint32_t *) (ph + 1);
                    *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
                    ippp++;
                    *ippp = chk->rec.data.TSN_seq;
                    ippp++;
                    *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                }
                stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
                sctp_abort_an_association(stcb->sctp_ep, stcb,
                    SCTP_PEER_FAULTY, oper);
                *abort_flag = 1;
            } else if (asoc->fragmented_delivery_inprogress) {
                /*
                 * Here we are ok with a MIDDLE or LAST
                 * piece
                 */
                if (chk->rec.data.stream_number !=
                    asoc->str_of_pdapi) {
                    /* Got to be the right STR No */
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
                        chk->rec.data.stream_number,
                        asoc->str_of_pdapi);
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (sizeof(uint32_t) * 3);
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
                    *abort_flag = 1;
                } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
                    SCTP_DATA_UNORDERED &&
                    chk->rec.data.stream_seq !=
                    asoc->ssn_of_pdapi) {
                    /* Got to be the right STR Seq */
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
                        chk->rec.data.stream_seq,
                        asoc->ssn_of_pdapi);
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
                    *abort_flag = 1;
                }
            }
        }
        return;
    }
    /* Find its place */
    TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
        if (compare_with_wrap(at->rec.data.TSN_seq,
            chk->rec.data.TSN_seq, MAX_TSN)) {
            /*
             * one in queue is bigger than the new one, insert
             * before this one
             */
            /* A check */
            asoc->size_on_reasm_queue += chk->send_size;
            sctp_ucount_incr(asoc->cnt_on_reasm_queue);
            next = at;
            TAILQ_INSERT_BEFORE(at, chk, sctp_next);
            break;
        } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
            /* Gak, He sent me a duplicate str seq number */
            /*
             * foo bar, I guess I will just free this new guy,
             * should we abort too? FIX ME MAYBE? Or it COULD be
             * that the SSN's have wrapped. Maybe I should
             * compare to TSN somehow... sigh for now just blow
             * away the chunk!
             */
            if (chk->data) {
                sctp_m_freem(chk->data);
                chk->data = NULL;
            }
            sctp_free_a_chunk(stcb, chk);
            return;
        } else {
            last_flags = at->rec.data.rcv_flags;
            last_tsn = at->rec.data.TSN_seq;
            prev = at;
            if (TAILQ_NEXT(at, sctp_next) == NULL) {
                /*
                 * We are at the end, insert it after this
                 * one
                 */
                /* check it first */
                asoc->size_on_reasm_queue += chk->send_size;
                sctp_ucount_incr(asoc->cnt_on_reasm_queue);
                TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
                break;
            }
        }
    }
    /* Now the audits */
    if (prev) {
        prev_tsn = chk->rec.data.TSN_seq - 1;
        if (prev_tsn == prev->rec.data.TSN_seq) {
            /*
             * Ok the one I am dropping onto the end is the
             * NEXT. A bit of validation here.
             */
            if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_FIRST_FRAG ||
                (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_MIDDLE_FRAG) {
                /*
                 * Insert chk MUST be a MIDDLE or LAST
                 * fragment
                 */
                if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                    SCTP_DATA_FIRST_FRAG) {
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
                    *abort_flag = 1;
                    return;
                }
                if (chk->rec.data.stream_number !=
                    prev->rec.data.stream_number) {
                    /*
                     * Huh, need the correct STR here,
                     * they must be the same.
                     */
                    SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
                        chk->rec.data.stream_number,
                        prev->rec.data.stream_number);
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);

                    *abort_flag = 1;
                    return;
                }
                if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
                    chk->rec.data.stream_seq !=
                    prev->rec.data.stream_seq) {
                    /*
                     * Huh, need the correct STR here,
                     * they must be the same.
                     */
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
                        chk->rec.data.stream_seq,
                        prev->rec.data.stream_seq);
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);

                    *abort_flag = 1;
                    return;
                }
            } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_LAST_FRAG) {
                /* Insert chk MUST be a FIRST */
                if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
                    SCTP_DATA_FIRST_FRAG) {
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);

                    *abort_flag = 1;
                    return;
                }
            }
        }
    }
    if (next) {
        post_tsn = chk->rec.data.TSN_seq + 1;
        if (post_tsn == next->rec.data.TSN_seq) {
            /*
             * Ok the one I am inserting ahead of is my NEXT
             * one. A bit of validation here.
             */
            if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
                /* Insert chk MUST be a last fragment */
                if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
                    != SCTP_DATA_LAST_FRAG) {
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);

                    *abort_flag = 1;
                    return;
                }
            } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_MIDDLE_FRAG ||
                (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_LAST_FRAG) {
                /*
                 * Insert chk CAN be MIDDLE or FIRST NOT
                 * LAST
                 */
                if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                    SCTP_DATA_LAST_FRAG) {
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);

                    *abort_flag = 1;
                    return;
                }
                if (chk->rec.data.stream_number !=
                    next->rec.data.stream_number) {
                    /*
                     * Huh, need the correct STR here,
                     * they must be the same.
                     */
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
                        chk->rec.data.stream_number,
                        next->rec.data.stream_number);
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);

                    *abort_flag = 1;
                    return;
                }
                if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
                    chk->rec.data.stream_seq !=
                    next->rec.data.stream_seq) {
                    /*
                     * Huh, need the correct STR here,
                     * they must be the same.
                     */
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
                        chk->rec.data.stream_seq,
                        next->rec.data.stream_seq);
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);

                    *abort_flag = 1;
                    return;

                }
            }
        }
    }
    /* Do we need to do some delivery? check */
    sctp_deliver_reasm_check(stcb, asoc);
}
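
/*
 * The prev/next audits above reduce to an adjacency rule for consecutive
 * TSNs: a FIRST fragment may only be followed by a MIDDLE or LAST, a MIDDLE
 * by a MIDDLE or LAST, and a LAST only by the FIRST of the next message;
 * within one message the stream number must match across the pair and, for
 * ordered data, the stream sequence number as well.  Any chunk violating
 * these rules is answered with a SCTP_CAUSE_PROTOCOL_VIOLATION abort.
 */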

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do this
 * but this is doubtful. It is too bad I must worry about evil crackers sigh
 * :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
    struct sctp_tmit_chunk *at;
    uint32_t tsn_est;

    TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
        if (compare_with_wrap(TSN_seq,
            at->rec.data.TSN_seq, MAX_TSN)) {
            /* is it one bigger? */
            tsn_est = at->rec.data.TSN_seq + 1;
            if (tsn_est == TSN_seq) {
                /* yep. It better be a last then */
                if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
                    SCTP_DATA_LAST_FRAG) {
                    /*
                     * Ok this guy belongs next to a guy
                     * that is NOT last, it should be a
                     * middle/last, not a complete
                     * chunk.
                     */
                    return (1);
                } else {
                    /*
                     * This guy is ok since its a LAST
                     * and the new chunk is a fully
                     * self-contained one.
                     */
                    return (0);
                }
            }
        } else if (TSN_seq == at->rec.data.TSN_seq) {
            /* Software error since I have a dup? */
            return (1);
        } else {
            /*
             * Ok, 'at' is larger than new chunk but does it
             * need to be right before it.
             */
            tsn_est = TSN_seq + 1;
            if (tsn_est == at->rec.data.TSN_seq) {
                /* Yep, It better be a first */
                if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
                    SCTP_DATA_FIRST_FRAG) {
                    return (1);
                } else {
                    return (0);
                }
            }
        }
    }
    return (0);
}


static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
    /* Process a data chunk */
    /* struct sctp_tmit_chunk *chk; */
    struct sctp_tmit_chunk *chk;
    uint32_t tsn, gap;
    struct mbuf *dmbuf;
    int indx, the_len;
    int need_reasm_check = 0;
    uint16_t strmno, strmseq;
    struct mbuf *oper;
    struct sctp_queued_to_read *control;
    int ordered;
    uint32_t protocol_id;
    uint8_t chunk_flags;
    struct sctp_stream_reset_list *liste;

    chk = NULL;
    tsn = ntohl(ch->dp.tsn);
    chunk_flags = ch->ch.chunk_flags;
    protocol_id = ch->dp.protocol_id;
    ordered = ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0);
    if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
        sctp_log_map(0, tsn, asoc->cumulative_tsn, SCTP_MAP_PREPARE_SLIDE);
    }
    if (stcb == NULL) {
        return (0);
    }
    SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
    if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
        asoc->cumulative_tsn == tsn) {
        /* It is a duplicate */
        SCTP_STAT_INCR(sctps_recvdupdata);
        if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
            /* Record a dup for the next outbound sack */
            asoc->dup_tsns[asoc->numduptsns] = tsn;
            asoc->numduptsns++;
        }
        return (0);
    }
    /* Calculate the number of TSN's between the base and this TSN */
    if (tsn >= asoc->mapping_array_base_tsn) {
        gap = tsn - asoc->mapping_array_base_tsn;
    } else {
        gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1;
    }
    if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
        /* Can't hold the bit in the mapping at max array, toss it */
        return (0);
    }
    if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
        SCTP_TCB_LOCK_ASSERT(stcb);
        if (sctp_expand_mapping_array(asoc, gap)) {
            /* Can't expand, drop it */
            return (0);
        }
    }
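    /*
     * Example of the gap math above: with mapping_array_base_tsn == 1000
     * an arriving TSN of 1007 gives gap == 7; with a base of 0xfffffffa
     * and an arriving TSN of 5 the wrap branch gives gap ==
     * (MAX_TSN - 0xfffffffa) + 5 + 1 == 11.  The gap is a bit index into
     * the mapping array, so a TSN more than SCTP_MAPPING_ARRAY * 8
     * positions past the base is simply tossed above.
     */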
    if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
        *high_tsn = tsn;
    }
    /* See if we have received this one already */
    if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
        SCTP_STAT_INCR(sctps_recvdupdata);
        if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
            /* Record a dup for the next outbound sack */
            asoc->dup_tsns[asoc->numduptsns] = tsn;
            asoc->numduptsns++;
        }
        asoc->send_sack = 1;
        return (0);
    }
    /*
     * Check to see about the GONE flag, duplicates would cause a sack
     * to be sent up above
     */
    if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
        (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
        (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
        ) {
        /*
         * wait a minute, this guy is gone, there is no longer a
         * receiver. Send peer an ABORT!
         */
        struct mbuf *op_err;

        op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
        sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err);
        *abort_flag = 1;
        return (0);
    }
    /*
     * Now before going further we see if there is room. If NOT then we
     * MAY let one through only IF this TSN is the one we are waiting
     * for on a partial delivery API.
     */

    /* now do the tests */
    if (((asoc->cnt_on_all_streams +
        asoc->cnt_on_reasm_queue +
        asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) ||
        (((int)asoc->my_rwnd) <= 0)) {
        /*
         * When we have NO room in the rwnd we check to make sure
         * the reader is doing its job...
         */
        if (stcb->sctp_socket->so_rcv.sb_cc) {
            /* some to read, wake-up */
            sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
        }
        /* now is it in the mapping array of what we have accepted? */
        if (compare_with_wrap(tsn,
            asoc->highest_tsn_inside_map, MAX_TSN)) {

            /* Nope not in the valid range dump it */
            SCTPDBG(SCTP_DEBUG_INDATA1, "My rwnd overrun1:tsn:%lx rwnd %lu sbspace:%ld\n",
                (u_long)tsn, (u_long)asoc->my_rwnd,
                sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv));
            sctp_set_rwnd(stcb, asoc);
            if ((asoc->cnt_on_all_streams +
                asoc->cnt_on_reasm_queue +
                asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) {
                SCTP_STAT_INCR(sctps_datadropchklmt);
            } else {
                SCTP_STAT_INCR(sctps_datadroprwnd);
            }
            indx = *break_flag;
            *break_flag = 1;
            return (0);
        }
    }
    strmno = ntohs(ch->dp.stream_id);
    if (strmno >= asoc->streamincnt) {
        struct sctp_paramhdr *phdr;
        struct mbuf *mb;

        mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
            0, M_DONTWAIT, 1, MT_DATA);
        if (mb != NULL) {
            /* add some space up front so prepend will work well */
            SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
            phdr = mtod(mb, struct sctp_paramhdr *);
            /*
             * Error causes are just param's and this one has
             * two back to back phdr, one with the error type
             * and size, the other with the streamid and a rsvd
             */
            SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
            phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
            phdr->param_length =
                htons(sizeof(struct sctp_paramhdr) * 2);
            phdr++;
            /* We insert the stream in the type field */
            phdr->param_type = ch->dp.stream_id;
            /* And set the length to 0 for the rsvd field */
            phdr->param_length = 0;
            sctp_queue_op_err(stcb, mb);
        }
        SCTP_STAT_INCR(sctps_badsid);
        SCTP_TCB_LOCK_ASSERT(stcb);
        SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
        if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
            /* we have a new high score */
            asoc->highest_tsn_inside_map = tsn;
            if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
                sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
            }
        }
        if (tsn == (asoc->cumulative_tsn + 1)) {
            /* Update cum-ack */
            asoc->cumulative_tsn = tsn;
        }
        return (0);
    }
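    /*
     * The operation error queued above follows the RFC 4960 "Invalid
     * Stream Identifier" cause layout: cause code 1, cause length 8, then
     * the offending stream id and a reserved half-word.  For stream id 5
     * the cause would read 0x0001 0x0008 0x0005 0x0000 on the wire; the
     * stream id is copied straight from the chunk, already in network
     * byte order, which is why it is not passed through htons() here.
     */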
    /*
     * Before we continue let's validate that we are not being fooled by
     * an evil attacker. We can only have 4k chunks based on our TSN
     * spread allowed by the mapping array 512 * 8 bits, so there is no
     * way our stream sequence numbers could have wrapped. We of course
     * only validate the FIRST fragment so the bit must be set.
     */
    strmseq = ntohs(ch->dp.stream_sequence);
#ifdef SCTP_ASOCLOG_OF_TSNS
    SCTP_TCB_LOCK_ASSERT(stcb);
    if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
        asoc->tsn_in_at = 0;
        asoc->tsn_in_wrapped = 1;
    }
    asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
    asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
    asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
    asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
    asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
    asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
    asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
    asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
    asoc->tsn_in_at++;
#endif
    if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
        (TAILQ_EMPTY(&asoc->resetHead)) &&
        (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
        (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
        strmseq, MAX_SEQ) ||
        asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
        /* The incoming sseq is behind where we last delivered? */
        SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
            strmseq, asoc->strmin[strmno].last_sequence_delivered);
        oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
            0, M_DONTWAIT, 1, MT_DATA);
        if (oper) {
            struct sctp_paramhdr *ph;
            uint32_t *ippp;

            SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
                (3 * sizeof(uint32_t));
            ph = mtod(oper, struct sctp_paramhdr *);
            ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
            ph->param_length = htons(SCTP_BUF_LEN(oper));
            ippp = (uint32_t *) (ph + 1);
            *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
            ippp++;
            *ippp = tsn;
            ippp++;
            *ippp = ((strmno << 16) | strmseq);

        }
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
        sctp_abort_an_association(stcb->sctp_ep, stcb,
            SCTP_PEER_FAULTY, oper);
        *abort_flag = 1;
        return (0);
    }
    /************************************
     * From here down we may find ch-> invalid
     * so it's a good idea NOT to use it.
     *************************************/
1666 *************************************/ 1667 1668 the_len = (chk_length - sizeof(struct sctp_data_chunk)); 1669 if (last_chunk == 0) { 1670 dmbuf = SCTP_M_COPYM(*m, 1671 (offset + sizeof(struct sctp_data_chunk)), 1672 the_len, M_DONTWAIT); 1673 #ifdef SCTP_MBUF_LOGGING 1674 if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) { 1675 struct mbuf *mat; 1676 1677 mat = dmbuf; 1678 while (mat) { 1679 if (SCTP_BUF_IS_EXTENDED(mat)) { 1680 sctp_log_mb(mat, SCTP_MBUF_ICOPY); 1681 } 1682 mat = SCTP_BUF_NEXT(mat); 1683 } 1684 } 1685 #endif 1686 } else { 1687 /* We can steal the last chunk */ 1688 int l_len; 1689 1690 dmbuf = *m; 1691 /* lop off the top part */ 1692 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk))); 1693 if (SCTP_BUF_NEXT(dmbuf) == NULL) { 1694 l_len = SCTP_BUF_LEN(dmbuf); 1695 } else { 1696 /* 1697 * need to count up the size hopefully does not hit 1698 * this to often :-0 1699 */ 1700 struct mbuf *lat; 1701 1702 l_len = 0; 1703 lat = dmbuf; 1704 while (lat) { 1705 l_len += SCTP_BUF_LEN(lat); 1706 lat = SCTP_BUF_NEXT(lat); 1707 } 1708 } 1709 if (l_len > the_len) { 1710 /* Trim the end round bytes off too */ 1711 m_adj(dmbuf, -(l_len - the_len)); 1712 } 1713 } 1714 if (dmbuf == NULL) { 1715 SCTP_STAT_INCR(sctps_nomem); 1716 return (0); 1717 } 1718 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG && 1719 asoc->fragmented_delivery_inprogress == 0 && 1720 TAILQ_EMPTY(&asoc->resetHead) && 1721 ((ordered == 0) || 1722 ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq && 1723 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) { 1724 /* Candidate for express delivery */ 1725 /* 1726 * Its not fragmented, No PD-API is up, Nothing in the 1727 * delivery queue, Its un-ordered OR ordered and the next to 1728 * deliver AND nothing else is stuck on the stream queue, 1729 * And there is room for it in the socket buffer. Lets just 1730 * stuff it up the buffer.... 1731 */ 1732 1733 /* It would be nice to avoid this copy if we could :< */ 1734 sctp_alloc_a_readq(stcb, control); 1735 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 1736 protocol_id, 1737 stcb->asoc.context, 1738 strmno, strmseq, 1739 chunk_flags, 1740 dmbuf); 1741 if (control == NULL) { 1742 goto failed_express_del; 1743 } 1744 sctp_add_to_readq(stcb->sctp_ep, stcb, control, &stcb->sctp_socket->so_rcv, 1); 1745 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) { 1746 /* for ordered, bump what we delivered */ 1747 asoc->strmin[strmno].last_sequence_delivered++; 1748 } 1749 SCTP_STAT_INCR(sctps_recvexpress); 1750 if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) { 1751 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, 1752 SCTP_STR_LOG_FROM_EXPRS_DEL); 1753 } 1754 control = NULL; 1755 goto finish_express_del; 1756 } 1757 failed_express_del: 1758 /* If we reach here this is a new chunk */ 1759 chk = NULL; 1760 control = NULL; 1761 /* Express for fragmented delivery? */ 1762 if ((asoc->fragmented_delivery_inprogress) && 1763 (stcb->asoc.control_pdapi) && 1764 (asoc->str_of_pdapi == strmno) && 1765 (asoc->ssn_of_pdapi == strmseq) 1766 ) { 1767 control = stcb->asoc.control_pdapi; 1768 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) { 1769 /* Can't be another first? 
*/ 1770 goto failed_pdapi_express_del; 1771 } 1772 if (tsn == (control->sinfo_tsn + 1)) { 1773 /* Yep, we can add it on */ 1774 int end = 0; 1775 uint32_t cumack; 1776 1777 if (chunk_flags & SCTP_DATA_LAST_FRAG) { 1778 end = 1; 1779 } 1780 cumack = asoc->cumulative_tsn; 1781 if ((cumack + 1) == tsn) 1782 cumack = tsn; 1783 1784 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end, 1785 tsn, 1786 &stcb->sctp_socket->so_rcv)) { 1787 SCTP_PRINTF("Append fails end:%d\n", end); 1788 goto failed_pdapi_express_del; 1789 } 1790 SCTP_STAT_INCR(sctps_recvexpressm); 1791 control->sinfo_tsn = tsn; 1792 asoc->tsn_last_delivered = tsn; 1793 asoc->fragment_flags = chunk_flags; 1794 asoc->tsn_of_pdapi_last_delivered = tsn; 1795 asoc->last_flags_delivered = chunk_flags; 1796 asoc->last_strm_seq_delivered = strmseq; 1797 asoc->last_strm_no_delivered = strmno; 1798 if (end) { 1799 /* clean up the flags and such */ 1800 asoc->fragmented_delivery_inprogress = 0; 1801 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) { 1802 asoc->strmin[strmno].last_sequence_delivered++; 1803 } 1804 stcb->asoc.control_pdapi = NULL; 1805 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) { 1806 /* 1807 * There could be another message 1808 * ready 1809 */ 1810 need_reasm_check = 1; 1811 } 1812 } 1813 control = NULL; 1814 goto finish_express_del; 1815 } 1816 } 1817 failed_pdapi_express_del: 1818 control = NULL; 1819 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 1820 sctp_alloc_a_chunk(stcb, chk); 1821 if (chk == NULL) { 1822 /* No memory so we drop the chunk */ 1823 SCTP_STAT_INCR(sctps_nomem); 1824 if (last_chunk == 0) { 1825 /* we copied it, free the copy */ 1826 sctp_m_freem(dmbuf); 1827 } 1828 return (0); 1829 } 1830 chk->rec.data.TSN_seq = tsn; 1831 chk->no_fr_allowed = 0; 1832 chk->rec.data.stream_seq = strmseq; 1833 chk->rec.data.stream_number = strmno; 1834 chk->rec.data.payloadtype = protocol_id; 1835 chk->rec.data.context = stcb->asoc.context; 1836 chk->rec.data.doing_fast_retransmit = 0; 1837 chk->rec.data.rcv_flags = chunk_flags; 1838 chk->asoc = asoc; 1839 chk->send_size = the_len; 1840 chk->whoTo = net; 1841 atomic_add_int(&net->ref_count, 1); 1842 chk->data = dmbuf; 1843 } else { 1844 sctp_alloc_a_readq(stcb, control); 1845 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 1846 protocol_id, 1847 stcb->asoc.context, 1848 strmno, strmseq, 1849 chunk_flags, 1850 dmbuf); 1851 if (control == NULL) { 1852 /* No memory so we drop the chunk */ 1853 SCTP_STAT_INCR(sctps_nomem); 1854 if (last_chunk == 0) { 1855 /* we copied it, free the copy */ 1856 sctp_m_freem(dmbuf); 1857 } 1858 return (0); 1859 } 1860 control->length = the_len; 1861 } 1862 1863 /* Mark it as received */ 1864 /* Now queue it where it belongs */ 1865 if (control != NULL) { 1866 /* First a sanity check */ 1867 if (asoc->fragmented_delivery_inprogress) { 1868 /* 1869 * Ok, we have a fragmented delivery in progress if 1870 * this chunk is next to deliver OR belongs in our 1871 * view to the reassembly, the peer is evil or 1872 * broken. 
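	 * (an illustrative sketch of the express partial-delivery
	 * append rule used above follows)
	 */
	/*
	 * Illustrative, stand-alone sketch (not part of the original file):
	 * the express path above may glue a fragment onto the message that is
	 * currently being partially delivered only when it is not another
	 * FIRST fragment, it belongs to the same stream and stream sequence
	 * number as the in-progress message, and its TSN immediately follows
	 * the last TSN appended.  All names below are hypothetical; the block
	 * is deliberately kept out of compilation.
	 */
#if 0
#include <stdint.h>

static int
can_append_to_pdapi(uint32_t frag_tsn, uint32_t last_appended_tsn,
    int is_first_frag, uint16_t frag_strm, uint16_t pdapi_strm,
    uint16_t frag_ssn, uint16_t pdapi_ssn)
{
	if (is_first_frag)
		return (0);	/* a message cannot contain two FIRST fragments */
	if (frag_strm != pdapi_strm || frag_ssn != pdapi_ssn)
		return (0);	/* fragment of some other message */
	return (frag_tsn == last_appended_tsn + 1);	/* must be the very next TSN */
}
#endif
	/*
	 * (end of sketch; the original sanity check resumes below)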
1873 */ 1874 uint32_t estimate_tsn; 1875 1876 estimate_tsn = asoc->tsn_last_delivered + 1; 1877 if (TAILQ_EMPTY(&asoc->reasmqueue) && 1878 (estimate_tsn == control->sinfo_tsn)) { 1879 /* Evil/Broke peer */ 1880 sctp_m_freem(control->data); 1881 control->data = NULL; 1882 if (control->whoFrom) { 1883 sctp_free_remote_addr(control->whoFrom); 1884 control->whoFrom = NULL; 1885 } 1886 sctp_free_a_readq(stcb, control); 1887 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1888 0, M_DONTWAIT, 1, MT_DATA); 1889 if (oper) { 1890 struct sctp_paramhdr *ph; 1891 uint32_t *ippp; 1892 1893 SCTP_BUF_LEN(oper) = 1894 sizeof(struct sctp_paramhdr) + 1895 (3 * sizeof(uint32_t)); 1896 ph = mtod(oper, struct sctp_paramhdr *); 1897 ph->param_type = 1898 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1899 ph->param_length = htons(SCTP_BUF_LEN(oper)); 1900 ippp = (uint32_t *) (ph + 1); 1901 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15); 1902 ippp++; 1903 *ippp = tsn; 1904 ippp++; 1905 *ippp = ((strmno << 16) | strmseq); 1906 } 1907 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15; 1908 sctp_abort_an_association(stcb->sctp_ep, stcb, 1909 SCTP_PEER_FAULTY, oper); 1910 1911 *abort_flag = 1; 1912 return (0); 1913 } else { 1914 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) { 1915 sctp_m_freem(control->data); 1916 control->data = NULL; 1917 if (control->whoFrom) { 1918 sctp_free_remote_addr(control->whoFrom); 1919 control->whoFrom = NULL; 1920 } 1921 sctp_free_a_readq(stcb, control); 1922 1923 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1924 0, M_DONTWAIT, 1, MT_DATA); 1925 if (oper) { 1926 struct sctp_paramhdr *ph; 1927 uint32_t *ippp; 1928 1929 SCTP_BUF_LEN(oper) = 1930 sizeof(struct sctp_paramhdr) + 1931 (3 * sizeof(uint32_t)); 1932 ph = mtod(oper, 1933 struct sctp_paramhdr *); 1934 ph->param_type = 1935 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1936 ph->param_length = 1937 htons(SCTP_BUF_LEN(oper)); 1938 ippp = (uint32_t *) (ph + 1); 1939 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16); 1940 ippp++; 1941 *ippp = tsn; 1942 ippp++; 1943 *ippp = ((strmno << 16) | strmseq); 1944 } 1945 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16; 1946 sctp_abort_an_association(stcb->sctp_ep, 1947 stcb, SCTP_PEER_FAULTY, oper); 1948 1949 *abort_flag = 1; 1950 return (0); 1951 } 1952 } 1953 } else { 1954 /* No PDAPI running */ 1955 if (!TAILQ_EMPTY(&asoc->reasmqueue)) { 1956 /* 1957 * Reassembly queue is NOT empty validate 1958 * that this tsn does not need to be in 1959 * reasembly queue. If it does then our peer 1960 * is broken or evil. 
1961 */ 1962 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) { 1963 sctp_m_freem(control->data); 1964 control->data = NULL; 1965 if (control->whoFrom) { 1966 sctp_free_remote_addr(control->whoFrom); 1967 control->whoFrom = NULL; 1968 } 1969 sctp_free_a_readq(stcb, control); 1970 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1971 0, M_DONTWAIT, 1, MT_DATA); 1972 if (oper) { 1973 struct sctp_paramhdr *ph; 1974 uint32_t *ippp; 1975 1976 SCTP_BUF_LEN(oper) = 1977 sizeof(struct sctp_paramhdr) + 1978 (3 * sizeof(uint32_t)); 1979 ph = mtod(oper, 1980 struct sctp_paramhdr *); 1981 ph->param_type = 1982 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1983 ph->param_length = 1984 htons(SCTP_BUF_LEN(oper)); 1985 ippp = (uint32_t *) (ph + 1); 1986 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17); 1987 ippp++; 1988 *ippp = tsn; 1989 ippp++; 1990 *ippp = ((strmno << 16) | strmseq); 1991 } 1992 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17; 1993 sctp_abort_an_association(stcb->sctp_ep, 1994 stcb, SCTP_PEER_FAULTY, oper); 1995 1996 *abort_flag = 1; 1997 return (0); 1998 } 1999 } 2000 } 2001 /* ok, if we reach here we have passed the sanity checks */ 2002 if (chunk_flags & SCTP_DATA_UNORDERED) { 2003 /* queue directly into socket buffer */ 2004 sctp_add_to_readq(stcb->sctp_ep, stcb, 2005 control, 2006 &stcb->sctp_socket->so_rcv, 1); 2007 } else { 2008 /* 2009 * Special check for when streams are resetting. We 2010 * could be more smart about this and check the 2011 * actual stream to see if it is not being reset.. 2012 * that way we would not create a HOLB when amongst 2013 * streams being reset and those not being reset. 2014 * 2015 * We take complete messages that have a stream reset 2016 * intervening (aka the TSN is after where our 2017 * cum-ack needs to be) off and put them on a 2018 * pending_reply_queue. The reassembly ones we do 2019 * not have to worry about since they are all sorted 2020 * and proceessed by TSN order. It is only the 2021 * singletons I must worry about. 2022 */ 2023 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2024 ((compare_with_wrap(tsn, liste->tsn, MAX_TSN))) 2025 ) { 2026 /* 2027 * yep its past where we need to reset... go 2028 * ahead and queue it. 2029 */ 2030 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) { 2031 /* first one on */ 2032 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2033 } else { 2034 struct sctp_queued_to_read *ctlOn; 2035 unsigned char inserted = 0; 2036 2037 ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue); 2038 while (ctlOn) { 2039 if (compare_with_wrap(control->sinfo_tsn, 2040 ctlOn->sinfo_tsn, MAX_TSN)) { 2041 ctlOn = TAILQ_NEXT(ctlOn, next); 2042 } else { 2043 /* found it */ 2044 TAILQ_INSERT_BEFORE(ctlOn, control, next); 2045 inserted = 1; 2046 break; 2047 } 2048 } 2049 if (inserted == 0) { 2050 /* 2051 * must be put at end, use 2052 * prevP (all setup from 2053 * loop) to setup nextP. 2054 */ 2055 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2056 } 2057 } 2058 } else { 2059 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag); 2060 if (*abort_flag) { 2061 return (0); 2062 } 2063 } 2064 } 2065 } else { 2066 /* Into the re-assembly queue */ 2067 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag); 2068 if (*abort_flag) { 2069 /* 2070 * the assoc is now gone and chk was put onto the 2071 * reasm queue, which has all been freed. 
2072 */ 2073 *m = NULL; 2074 return (0); 2075 } 2076 } 2077 finish_express_del: 2078 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) { 2079 /* we have a new high score */ 2080 asoc->highest_tsn_inside_map = tsn; 2081 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 2082 sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 2083 } 2084 } 2085 if (tsn == (asoc->cumulative_tsn + 1)) { 2086 /* Update cum-ack */ 2087 asoc->cumulative_tsn = tsn; 2088 } 2089 if (last_chunk) { 2090 *m = NULL; 2091 } 2092 if (ordered) { 2093 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); 2094 } else { 2095 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); 2096 } 2097 SCTP_STAT_INCR(sctps_recvdata); 2098 /* Set it present please */ 2099 if (sctp_logging_level & SCTP_STR_LOGGING_ENABLE) { 2100 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN); 2101 } 2102 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 2103 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2104 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); 2105 } 2106 SCTP_TCB_LOCK_ASSERT(stcb); 2107 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); 2108 /* check the special flag for stream resets */ 2109 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2110 ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) || 2111 (asoc->cumulative_tsn == liste->tsn)) 2112 ) { 2113 /* 2114 * we have finished working through the backlogged TSN's now 2115 * time to reset streams. 1: call reset function. 2: free 2116 * pending_reply space 3: distribute any chunks in 2117 * pending_reply_queue. 2118 */ 2119 struct sctp_queued_to_read *ctl; 2120 2121 sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams); 2122 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); 2123 SCTP_FREE(liste, SCTP_M_STRESET); 2124 /* sa_ignore FREED_MEMORY */ 2125 liste = TAILQ_FIRST(&asoc->resetHead); 2126 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2127 if (ctl && (liste == NULL)) { 2128 /* All can be removed */ 2129 while (ctl) { 2130 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 2131 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); 2132 if (*abort_flag) { 2133 return (0); 2134 } 2135 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2136 } 2137 } else if (ctl) { 2138 /* more than one in queue */ 2139 while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) { 2140 /* 2141 * if ctl->sinfo_tsn is <= liste->tsn we can 2142 * process it which is the NOT of 2143 * ctl->sinfo_tsn > liste->tsn 2144 */ 2145 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 2146 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); 2147 if (*abort_flag) { 2148 return (0); 2149 } 2150 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2151 } 2152 } 2153 /* 2154 * Now service re-assembly to pick up anything that has been 2155 * held on reassembly queue? 2156 */ 2157 sctp_deliver_reasm_check(stcb, asoc); 2158 need_reasm_check = 0; 2159 } 2160 if (need_reasm_check) { 2161 /* Another one waits ? 
	 */
		sctp_deliver_reasm_check(stcb, asoc);
	}
	return (1);
}

int8_t sctp_map_lookup_tab[256] = {
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 4,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 5,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 4,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 6,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 4,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 5,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 4,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 3,
	-1, 0, -1, 1, -1, 0, -1, 2,
	-1, 0, -1, 1, -1, 0, -1, 7,
};


void
sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag)
{
	/*
	 * Now we also need to check the mapping array in a couple of ways.
	 * 1) Did we move the cum-ack point?
	 */
	struct sctp_association *asoc;
	int i, at;
	int all_ones, last_all_ones = 0;
	int slide_from, slide_end, lgap, distance;
	uint32_t old_cumack, old_base, old_highest;
	unsigned char aux_array[64];


	asoc = &stcb->asoc;
	at = 0;

	old_cumack = asoc->cumulative_tsn;
	old_base = asoc->mapping_array_base_tsn;
	old_highest = asoc->highest_tsn_inside_map;
	if (asoc->mapping_array_size < 64)
		memcpy(aux_array, asoc->mapping_array,
		    asoc->mapping_array_size);
	else
		memcpy(aux_array, asoc->mapping_array, 64);

	/*
	 * We could probably improve this a small bit by calculating the
	 * offset of the current cum-ack as the starting point
	 * (an illustrative sketch of the lookup table follows).
	 */
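	/*
	 * Illustrative, stand-alone sketch (not part of the original file):
	 * each entry of sctp_map_lookup_tab above is, for one byte of the
	 * mapping array, the zero-based index of the last bit in the run of
	 * consecutive 1-bits that starts at bit 0, or -1 when bit 0 itself is
	 * clear.  The scan below adds 8 for every 0xff byte and the table
	 * value (plus one, later) for the first partial byte.  A generator
	 * for such a table, under that reading, could be written as below;
	 * the helper name is hypothetical and the block is not compiled.
	 */
#if 0
#include <stdint.h>

static void
build_map_lookup_tab(int8_t tab[256])
{
	int v, bit;

	for (v = 0; v < 256; v++) {
		tab[v] = -1;			/* bit 0 clear: no initial run */
		for (bit = 0; bit < 8; bit++) {
			if (v & (1 << bit))
				tab[v] = (int8_t)bit;	/* run reaches this bit */
			else
				break;			/* first gap ends the run */
		}
	}
}

/*
 * e.g. tab[0x01] == 0, tab[0x0f] == 3, tab[0xff] == 7 and tab[0x02] == -1,
 * matching the hand-built table above.
 */
#endif
	/*
	 * (end of sketch; the original scan resumes below)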
2233 */ 2234 all_ones = 1; 2235 at = 0; 2236 for (i = 0; i < stcb->asoc.mapping_array_size; i++) { 2237 2238 if (asoc->mapping_array[i] == 0xff) { 2239 at += 8; 2240 last_all_ones = 1; 2241 } else { 2242 /* there is a 0 bit */ 2243 all_ones = 0; 2244 at += sctp_map_lookup_tab[asoc->mapping_array[i]]; 2245 last_all_ones = 0; 2246 break; 2247 } 2248 } 2249 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - last_all_ones); 2250 /* at is one off, since in the table a embedded -1 is present */ 2251 at++; 2252 2253 if (compare_with_wrap(asoc->cumulative_tsn, 2254 asoc->highest_tsn_inside_map, 2255 MAX_TSN)) { 2256 #ifdef INVARIANTS 2257 panic("huh, cumack greater than high-tsn in map"); 2258 #else 2259 SCTP_PRINTF("huh, cumack greater than high-tsn in map - should panic?\n"); 2260 asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2261 #endif 2262 } 2263 if (all_ones || 2264 (asoc->cumulative_tsn == asoc->highest_tsn_inside_map && at >= 8)) { 2265 /* The complete array was completed by a single FR */ 2266 /* higest becomes the cum-ack */ 2267 int clr; 2268 2269 asoc->cumulative_tsn = asoc->highest_tsn_inside_map; 2270 /* clear the array */ 2271 if (all_ones) 2272 clr = asoc->mapping_array_size; 2273 else { 2274 clr = (at >> 3) + 1; 2275 /* 2276 * this should be the allones case but just in case 2277 * :> 2278 */ 2279 if (clr > asoc->mapping_array_size) 2280 clr = asoc->mapping_array_size; 2281 } 2282 memset(asoc->mapping_array, 0, clr); 2283 /* base becomes one ahead of the cum-ack */ 2284 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; 2285 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 2286 sctp_log_map(old_base, old_cumack, old_highest, 2287 SCTP_MAP_PREPARE_SLIDE); 2288 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2289 asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED); 2290 } 2291 } else if (at >= 8) { 2292 /* we can slide the mapping array down */ 2293 /* Calculate the new byte postion we can move down */ 2294 slide_from = at >> 3; 2295 /* 2296 * now calculate the ceiling of the move using our highest 2297 * TSN value 2298 */ 2299 if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) { 2300 lgap = asoc->highest_tsn_inside_map - 2301 asoc->mapping_array_base_tsn; 2302 } else { 2303 lgap = (MAX_TSN - asoc->mapping_array_base_tsn) + 2304 asoc->highest_tsn_inside_map + 1; 2305 } 2306 slide_end = lgap >> 3; 2307 if (slide_end < slide_from) { 2308 panic("impossible slide"); 2309 } 2310 distance = (slide_end - slide_from) + 1; 2311 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 2312 sctp_log_map(old_base, old_cumack, old_highest, 2313 SCTP_MAP_PREPARE_SLIDE); 2314 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end, 2315 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM); 2316 } 2317 if (distance + slide_from > asoc->mapping_array_size || 2318 distance < 0) { 2319 /* 2320 * Here we do NOT slide forward the array so that 2321 * hopefully when more data comes in to fill it up 2322 * we will be able to slide it forward. 
Really I 2323 * don't think this should happen :-0 2324 */ 2325 2326 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 2327 sctp_log_map((uint32_t) distance, (uint32_t) slide_from, 2328 (uint32_t) asoc->mapping_array_size, 2329 SCTP_MAP_SLIDE_NONE); 2330 } 2331 } else { 2332 int ii; 2333 2334 for (ii = 0; ii < distance; ii++) { 2335 asoc->mapping_array[ii] = 2336 asoc->mapping_array[slide_from + ii]; 2337 } 2338 for (ii = distance; ii <= slide_end; ii++) { 2339 asoc->mapping_array[ii] = 0; 2340 } 2341 asoc->mapping_array_base_tsn += (slide_from << 3); 2342 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 2343 sctp_log_map(asoc->mapping_array_base_tsn, 2344 asoc->cumulative_tsn, asoc->highest_tsn_inside_map, 2345 SCTP_MAP_SLIDE_RESULT); 2346 } 2347 } 2348 } 2349 /* 2350 * Now we need to see if we need to queue a sack or just start the 2351 * timer (if allowed). 2352 */ 2353 if (ok_to_sack) { 2354 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2355 /* 2356 * Ok special case, in SHUTDOWN-SENT case. here we 2357 * maker sure SACK timer is off and instead send a 2358 * SHUTDOWN and a SACK 2359 */ 2360 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2361 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 2362 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18); 2363 } 2364 sctp_send_shutdown(stcb, stcb->asoc.primary_destination); 2365 sctp_send_sack(stcb); 2366 } else { 2367 int is_a_gap; 2368 2369 /* is there a gap now ? */ 2370 is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map, 2371 stcb->asoc.cumulative_tsn, MAX_TSN); 2372 2373 /* 2374 * CMT DAC algorithm: increase number of packets 2375 * received since last ack 2376 */ 2377 stcb->asoc.cmt_dac_pkts_rcvd++; 2378 2379 if ((stcb->asoc.send_sack == 1) || /* We need to send a 2380 * SACK */ 2381 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no 2382 * longer is one */ 2383 (stcb->asoc.numduptsns) || /* we have dup's */ 2384 (is_a_gap) || /* is still a gap */ 2385 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */ 2386 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */ 2387 ) { 2388 2389 if ((sctp_cmt_on_off) && (sctp_cmt_use_dac) && 2390 (stcb->asoc.send_sack == 0) && 2391 (stcb->asoc.numduptsns == 0) && 2392 (stcb->asoc.delayed_ack) && 2393 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) { 2394 2395 /* 2396 * CMT DAC algorithm: With CMT, 2397 * delay acks even in the face of 2398 * 2399 * reordering. Therefore, if acks that 2400 * do not have to be sent because of 2401 * the above reasons, will be 2402 * delayed. That is, acks that would 2403 * have been sent due to gap reports 2404 * will be delayed with DAC. Start 2405 * the delayed ack timer. 2406 */ 2407 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2408 stcb->sctp_ep, stcb, NULL); 2409 } else { 2410 /* 2411 * Ok we must build a SACK since the 2412 * timer is pending, we got our 2413 * first packet OR there are gaps or 2414 * duplicates. 
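			 * (an illustrative sketch of this decision follows)
			 */
	/*
	 * Illustrative, stand-alone sketch (not part of the original file):
	 * the branch above boils down to "SACK right away if anything
	 * interesting happened, otherwise (re)arm the delayed-ack timer".
	 * Below is a condensed predicate over hypothetical fields mirroring
	 * the ones tested above; the block is deliberately not compiled.
	 */
#if 0
struct sack_decision {
	int send_sack;		/* a SACK was explicitly requested */
	int was_a_gap;		/* there was a gap before this packet */
	int is_a_gap;		/* there still is a gap */
	int numduptsns;		/* duplicate TSNs need reporting */
	int delayed_ack;	/* delayed SACK enabled? */
	int data_pkts_seen;	/* packets since the last SACK */
	int sack_freq;		/* SACK at least every N packets */
};

static int
should_sack_now(const struct sack_decision *s)
{
	return (s->send_sack ||				/* explicit request */
	    (s->was_a_gap && !s->is_a_gap) ||		/* a gap just closed */
	    s->numduptsns ||				/* duplicates to report */
	    s->is_a_gap ||				/* a gap is still open */
	    (s->delayed_ack == 0) ||			/* delayed SACK disabled */
	    (s->data_pkts_seen >= s->sack_freq));	/* packet-count limit hit */
}
#endif
	/*
	 * (end of sketch; the original code resumes below)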
2415 */ 2416 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 2417 sctp_send_sack(stcb); 2418 } 2419 } else { 2420 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2421 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2422 stcb->sctp_ep, stcb, NULL); 2423 } 2424 } 2425 } 2426 } 2427 } 2428 2429 void 2430 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc) 2431 { 2432 struct sctp_tmit_chunk *chk; 2433 uint32_t tsize; 2434 uint16_t nxt_todel; 2435 2436 if (asoc->fragmented_delivery_inprogress) { 2437 sctp_service_reassembly(stcb, asoc); 2438 } 2439 /* Can we proceed further, i.e. the PD-API is complete */ 2440 if (asoc->fragmented_delivery_inprogress) { 2441 /* no */ 2442 return; 2443 } 2444 /* 2445 * Now is there some other chunk I can deliver from the reassembly 2446 * queue. 2447 */ 2448 doit_again: 2449 chk = TAILQ_FIRST(&asoc->reasmqueue); 2450 if (chk == NULL) { 2451 asoc->size_on_reasm_queue = 0; 2452 asoc->cnt_on_reasm_queue = 0; 2453 return; 2454 } 2455 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1; 2456 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) && 2457 ((nxt_todel == chk->rec.data.stream_seq) || 2458 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) { 2459 /* 2460 * Yep the first one is here. We setup to start reception, 2461 * by backing down the TSN just in case we can't deliver. 2462 */ 2463 2464 /* 2465 * Before we start though either all of the message should 2466 * be here or 1/4 the socket buffer max or nothing on the 2467 * delivery queue and something can be delivered. 2468 */ 2469 if ((sctp_is_all_msg_on_reasm(asoc, &tsize) || 2470 (tsize > stcb->sctp_ep->partial_delivery_point))) { 2471 asoc->fragmented_delivery_inprogress = 1; 2472 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1; 2473 asoc->str_of_pdapi = chk->rec.data.stream_number; 2474 asoc->ssn_of_pdapi = chk->rec.data.stream_seq; 2475 asoc->pdapi_ppid = chk->rec.data.payloadtype; 2476 asoc->fragment_flags = chk->rec.data.rcv_flags; 2477 sctp_service_reassembly(stcb, asoc); 2478 if (asoc->fragmented_delivery_inprogress == 0) { 2479 goto doit_again; 2480 } 2481 } 2482 } 2483 } 2484 2485 int 2486 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, 2487 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2488 struct sctp_nets *net, uint32_t * high_tsn) 2489 { 2490 struct sctp_data_chunk *ch, chunk_buf; 2491 struct sctp_association *asoc; 2492 int num_chunks = 0; /* number of control chunks processed */ 2493 int stop_proc = 0; 2494 int chk_length, break_flag, last_chunk; 2495 int abort_flag = 0, was_a_gap = 0; 2496 struct mbuf *m; 2497 2498 /* set the rwnd */ 2499 sctp_set_rwnd(stcb, &stcb->asoc); 2500 2501 m = *mm; 2502 SCTP_TCB_LOCK_ASSERT(stcb); 2503 asoc = &stcb->asoc; 2504 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map, 2505 stcb->asoc.cumulative_tsn, MAX_TSN)) { 2506 /* there was a gap before this data was processed */ 2507 was_a_gap = 1; 2508 } 2509 /* 2510 * setup where we got the last DATA packet from for any SACK that 2511 * may need to go out. Don't bump the net. This is done ONLY when a 2512 * chunk is assigned. 2513 */ 2514 asoc->last_data_chunk_from = net; 2515 2516 /*- 2517 * Now before we proceed we must figure out if this is a wasted 2518 * cluster... i.e. it is a small packet sent in and yet the driver 2519 * underneath allocated a full cluster for it. If so we must copy it 2520 * to a smaller mbuf and free up the cluster mbuf. This will help 2521 * with cluster starvation. 
Note for __Panda__ we don't do this 2522 * since it has clusters all the way down to 64 bytes. 2523 */ 2524 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) { 2525 /* we only handle mbufs that are singletons.. not chains */ 2526 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA); 2527 if (m) { 2528 /* ok lets see if we can copy the data up */ 2529 caddr_t *from, *to; 2530 2531 /* get the pointers and copy */ 2532 to = mtod(m, caddr_t *); 2533 from = mtod((*mm), caddr_t *); 2534 memcpy(to, from, SCTP_BUF_LEN((*mm))); 2535 /* copy the length and free up the old */ 2536 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm)); 2537 sctp_m_freem(*mm); 2538 /* sucess, back copy */ 2539 *mm = m; 2540 } else { 2541 /* We are in trouble in the mbuf world .. yikes */ 2542 m = *mm; 2543 } 2544 } 2545 /* get pointer to the first chunk header */ 2546 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset, 2547 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf); 2548 if (ch == NULL) { 2549 return (1); 2550 } 2551 /* 2552 * process all DATA chunks... 2553 */ 2554 *high_tsn = asoc->cumulative_tsn; 2555 break_flag = 0; 2556 asoc->data_pkts_seen++; 2557 while (stop_proc == 0) { 2558 /* validate chunk length */ 2559 chk_length = ntohs(ch->ch.chunk_length); 2560 if (length - *offset < chk_length) { 2561 /* all done, mutulated chunk */ 2562 stop_proc = 1; 2563 break; 2564 } 2565 if (ch->ch.chunk_type == SCTP_DATA) { 2566 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) { 2567 /* 2568 * Need to send an abort since we had a 2569 * invalid data chunk. 2570 */ 2571 struct mbuf *op_err; 2572 2573 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)), 2574 0, M_DONTWAIT, 1, MT_DATA); 2575 2576 if (op_err) { 2577 struct sctp_paramhdr *ph; 2578 uint32_t *ippp; 2579 2580 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) + 2581 (2 * sizeof(uint32_t)); 2582 ph = mtod(op_err, struct sctp_paramhdr *); 2583 ph->param_type = 2584 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 2585 ph->param_length = htons(SCTP_BUF_LEN(op_err)); 2586 ippp = (uint32_t *) (ph + 1); 2587 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19); 2588 ippp++; 2589 *ippp = asoc->cumulative_tsn; 2590 2591 } 2592 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19; 2593 sctp_abort_association(inp, stcb, m, iphlen, sh, 2594 op_err, 0); 2595 return (2); 2596 } 2597 #ifdef SCTP_AUDITING_ENABLED 2598 sctp_audit_log(0xB1, 0); 2599 #endif 2600 if (SCTP_SIZE32(chk_length) == (length - *offset)) { 2601 last_chunk = 1; 2602 } else { 2603 last_chunk = 0; 2604 } 2605 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch, 2606 chk_length, net, high_tsn, &abort_flag, &break_flag, 2607 last_chunk)) { 2608 num_chunks++; 2609 } 2610 if (abort_flag) 2611 return (2); 2612 2613 if (break_flag) { 2614 /* 2615 * Set because of out of rwnd space and no 2616 * drop rep space left. 
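			 * (an illustrative sketch of the chunk walk follows)
			 */
	/*
	 * Illustrative, stand-alone sketch (not part of the original file):
	 * the loop above and below treats the packet as a sequence of
	 * (type, flags, length) chunk headers, each occupying its declared
	 * length rounded up to a 4-byte boundary (SCTP_SIZE32).  A
	 * stand-alone walker over a flat buffer, with hypothetical names,
	 * deliberately kept out of compilation:
	 */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct tlv_chunk_hdr {
	uint8_t type;
	uint8_t flags;
	uint16_t length;	/* network order, includes this header */
};

#define PAD4(x) (((x) + 3) & ~((size_t)3))

static int
walk_chunks(const uint8_t *pkt, size_t len,
    void (*cb)(uint8_t type, const uint8_t *chunk, uint16_t chunk_len))
{
	size_t off = 0;

	while (off + sizeof(struct tlv_chunk_hdr) <= len) {
		struct tlv_chunk_hdr ch;
		uint16_t clen;

		memcpy(&ch, pkt + off, sizeof(ch));
		clen = ntohs(ch.length);
		if (clen < sizeof(ch) || off + clen > len)
			return (-1);	/* mutilated chunk: stop processing */
		cb(ch.type, pkt + off, clen);
		off += PAD4(clen);	/* next chunk starts on a 4-byte boundary */
	}
	return (0);
}
#endif
	/*
	 * (end of sketch; the original code resumes below)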
2617 */ 2618 stop_proc = 1; 2619 break; 2620 } 2621 } else { 2622 /* not a data chunk in the data region */ 2623 switch (ch->ch.chunk_type) { 2624 case SCTP_INITIATION: 2625 case SCTP_INITIATION_ACK: 2626 case SCTP_SELECTIVE_ACK: 2627 case SCTP_HEARTBEAT_REQUEST: 2628 case SCTP_HEARTBEAT_ACK: 2629 case SCTP_ABORT_ASSOCIATION: 2630 case SCTP_SHUTDOWN: 2631 case SCTP_SHUTDOWN_ACK: 2632 case SCTP_OPERATION_ERROR: 2633 case SCTP_COOKIE_ECHO: 2634 case SCTP_COOKIE_ACK: 2635 case SCTP_ECN_ECHO: 2636 case SCTP_ECN_CWR: 2637 case SCTP_SHUTDOWN_COMPLETE: 2638 case SCTP_AUTHENTICATION: 2639 case SCTP_ASCONF_ACK: 2640 case SCTP_PACKET_DROPPED: 2641 case SCTP_STREAM_RESET: 2642 case SCTP_FORWARD_CUM_TSN: 2643 case SCTP_ASCONF: 2644 /* 2645 * Now, what do we do with KNOWN chunks that 2646 * are NOT in the right place? 2647 * 2648 * For now, I do nothing but ignore them. We 2649 * may later want to add sysctl stuff to 2650 * switch out and do either an ABORT() or 2651 * possibly process them. 2652 */ 2653 if (sctp_strict_data_order) { 2654 struct mbuf *op_err; 2655 2656 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION); 2657 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0); 2658 return (2); 2659 } 2660 break; 2661 default: 2662 /* unknown chunk type, use bit rules */ 2663 if (ch->ch.chunk_type & 0x40) { 2664 /* Add a error report to the queue */ 2665 struct mbuf *merr; 2666 struct sctp_paramhdr *phd; 2667 2668 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA); 2669 if (merr) { 2670 phd = mtod(merr, struct sctp_paramhdr *); 2671 /* 2672 * We cheat and use param 2673 * type since we did not 2674 * bother to define a error 2675 * cause struct. They are 2676 * the same basic format 2677 * with different names. 2678 */ 2679 phd->param_type = 2680 htons(SCTP_CAUSE_UNRECOG_CHUNK); 2681 phd->param_length = 2682 htons(chk_length + sizeof(*phd)); 2683 SCTP_BUF_LEN(merr) = sizeof(*phd); 2684 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, 2685 SCTP_SIZE32(chk_length), 2686 M_DONTWAIT); 2687 if (SCTP_BUF_NEXT(merr)) { 2688 sctp_queue_op_err(stcb, merr); 2689 } else { 2690 sctp_m_freem(merr); 2691 } 2692 } 2693 } 2694 if ((ch->ch.chunk_type & 0x80) == 0) { 2695 /* discard the rest of this packet */ 2696 stop_proc = 1; 2697 } /* else skip this bad chunk and 2698 * continue... */ 2699 break; 2700 }; /* switch of chunk type */ 2701 } 2702 *offset += SCTP_SIZE32(chk_length); 2703 if ((*offset >= length) || stop_proc) { 2704 /* no more data left in the mbuf chain */ 2705 stop_proc = 1; 2706 continue; 2707 } 2708 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset, 2709 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf); 2710 if (ch == NULL) { 2711 *offset = length; 2712 stop_proc = 1; 2713 break; 2714 2715 } 2716 } /* while */ 2717 if (break_flag) { 2718 /* 2719 * we need to report rwnd overrun drops. 2720 */ 2721 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0); 2722 } 2723 if (num_chunks) { 2724 /* 2725 * Did we get data, if so update the time for auto-close and 2726 * give peer credit for being alive. 
2727 */ 2728 SCTP_STAT_INCR(sctps_recvpktwithdata); 2729 stcb->asoc.overall_error_count = 0; 2730 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd); 2731 } 2732 /* now service all of the reassm queue if needed */ 2733 if (!(TAILQ_EMPTY(&asoc->reasmqueue))) 2734 sctp_service_queues(stcb, asoc); 2735 2736 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2737 /* Assure that we ack right away */ 2738 stcb->asoc.send_sack = 1; 2739 } 2740 /* Start a sack timer or QUEUE a SACK for sending */ 2741 if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) && 2742 (stcb->asoc.mapping_array[0] != 0xff)) { 2743 if ((stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) || 2744 (stcb->asoc.delayed_ack == 0) || 2745 (stcb->asoc.send_sack == 1)) { 2746 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2747 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 2748 } 2749 sctp_send_sack(stcb); 2750 } else { 2751 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2752 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2753 stcb->sctp_ep, stcb, NULL); 2754 } 2755 } 2756 } else { 2757 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag); 2758 } 2759 if (abort_flag) 2760 return (2); 2761 2762 return (0); 2763 } 2764 2765 static void 2766 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc, 2767 struct sctp_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked, 2768 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack, 2769 int num_seg, int *ecn_seg_sums) 2770 { 2771 /************************************************/ 2772 /* process fragments and update sendqueue */ 2773 /************************************************/ 2774 struct sctp_sack *sack; 2775 struct sctp_gap_ack_block *frag, block; 2776 struct sctp_tmit_chunk *tp1; 2777 int i; 2778 unsigned int j; 2779 int num_frs = 0; 2780 2781 uint16_t frag_strt, frag_end, primary_flag_set; 2782 u_long last_frag_high; 2783 2784 /* 2785 * @@@ JRI : TODO: This flag is not used anywhere .. remove? 
2786 */ 2787 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) { 2788 primary_flag_set = 1; 2789 } else { 2790 primary_flag_set = 0; 2791 } 2792 sack = &ch->sack; 2793 2794 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset, 2795 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block); 2796 *offset += sizeof(block); 2797 if (frag == NULL) { 2798 return; 2799 } 2800 tp1 = NULL; 2801 last_frag_high = 0; 2802 for (i = 0; i < num_seg; i++) { 2803 frag_strt = ntohs(frag->start); 2804 frag_end = ntohs(frag->end); 2805 /* some sanity checks on the fargment offsets */ 2806 if (frag_strt > frag_end) { 2807 /* this one is malformed, skip */ 2808 frag++; 2809 continue; 2810 } 2811 if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked, 2812 MAX_TSN)) 2813 *biggest_tsn_acked = frag_end + last_tsn; 2814 2815 /* mark acked dgs and find out the highestTSN being acked */ 2816 if (tp1 == NULL) { 2817 tp1 = TAILQ_FIRST(&asoc->sent_queue); 2818 2819 /* save the locations of the last frags */ 2820 last_frag_high = frag_end + last_tsn; 2821 } else { 2822 /* 2823 * now lets see if we need to reset the queue due to 2824 * a out-of-order SACK fragment 2825 */ 2826 if (compare_with_wrap(frag_strt + last_tsn, 2827 last_frag_high, MAX_TSN)) { 2828 /* 2829 * if the new frag starts after the last TSN 2830 * frag covered, we are ok and this one is 2831 * beyond the last one 2832 */ 2833 ; 2834 } else { 2835 /* 2836 * ok, they have reset us, so we need to 2837 * reset the queue this will cause extra 2838 * hunting but hey, they chose the 2839 * performance hit when they failed to order 2840 * there gaps.. 2841 */ 2842 tp1 = TAILQ_FIRST(&asoc->sent_queue); 2843 } 2844 last_frag_high = frag_end + last_tsn; 2845 } 2846 for (j = frag_strt + last_tsn; j <= frag_end + last_tsn; j++) { 2847 while (tp1) { 2848 if (tp1->rec.data.doing_fast_retransmit) 2849 num_frs++; 2850 2851 /* 2852 * CMT: CUCv2 algorithm. For each TSN being 2853 * processed from the sent queue, track the 2854 * next expected pseudo-cumack, or 2855 * rtx_pseudo_cumack, if required. Separate 2856 * cumack trackers for first transmissions, 2857 * and retransmissions. 2858 */ 2859 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) && 2860 (tp1->snd_count == 1)) { 2861 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq; 2862 tp1->whoTo->find_pseudo_cumack = 0; 2863 } 2864 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) && 2865 (tp1->snd_count > 1)) { 2866 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq; 2867 tp1->whoTo->find_rtx_pseudo_cumack = 0; 2868 } 2869 if (tp1->rec.data.TSN_seq == j) { 2870 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 2871 /* 2872 * must be held until 2873 * cum-ack passes 2874 */ 2875 /* 2876 * ECN Nonce: Add the nonce 2877 * value to the sender's 2878 * nonce sum 2879 */ 2880 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 2881 /*- 2882 * If it is less than RESEND, it is 2883 * now no-longer in flight. 2884 * Higher values may already be set 2885 * via previous Gap Ack Blocks... 2886 * i.e. ACKED or RESEND. 2887 */ 2888 if (compare_with_wrap(tp1->rec.data.TSN_seq, 2889 *biggest_newly_acked_tsn, MAX_TSN)) { 2890 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq; 2891 } 2892 /* 2893 * CMT: SFR algo 2894 * (and HTNA) - set 2895 * saw_newack to 1 2896 * for dest being 2897 * newly acked. 2898 * update 2899 * this_sack_highest_ 2900 * newack if 2901 * appropriate. 
2902 */ 2903 if (tp1->rec.data.chunk_was_revoked == 0) 2904 tp1->whoTo->saw_newack = 1; 2905 2906 if (compare_with_wrap(tp1->rec.data.TSN_seq, 2907 tp1->whoTo->this_sack_highest_newack, 2908 MAX_TSN)) { 2909 tp1->whoTo->this_sack_highest_newack = 2910 tp1->rec.data.TSN_seq; 2911 } 2912 /* 2913 * CMT DAC algo: 2914 * also update 2915 * this_sack_lowest_n 2916 * ewack 2917 */ 2918 if (*this_sack_lowest_newack == 0) { 2919 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) { 2920 sctp_log_sack(*this_sack_lowest_newack, 2921 last_tsn, 2922 tp1->rec.data.TSN_seq, 2923 0, 2924 0, 2925 SCTP_LOG_TSN_ACKED); 2926 } 2927 *this_sack_lowest_newack = tp1->rec.data.TSN_seq; 2928 } 2929 /* 2930 * CMT: CUCv2 2931 * algorithm. If 2932 * (rtx-)pseudo-cumac 2933 * k for corresp 2934 * dest is being 2935 * acked, then we 2936 * have a new 2937 * (rtx-)pseudo-cumac 2938 * k. Set 2939 * new_(rtx_)pseudo_c 2940 * umack to TRUE so 2941 * that the cwnd for 2942 * this dest can be 2943 * updated. Also 2944 * trigger search 2945 * for the next 2946 * expected 2947 * (rtx-)pseudo-cumac 2948 * k. Separate 2949 * pseudo_cumack 2950 * trackers for 2951 * first 2952 * transmissions and 2953 * retransmissions. 2954 */ 2955 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) { 2956 if (tp1->rec.data.chunk_was_revoked == 0) { 2957 tp1->whoTo->new_pseudo_cumack = 1; 2958 } 2959 tp1->whoTo->find_pseudo_cumack = 1; 2960 } 2961 if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) { 2962 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 2963 } 2964 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) { 2965 if (tp1->rec.data.chunk_was_revoked == 0) { 2966 tp1->whoTo->new_pseudo_cumack = 1; 2967 } 2968 tp1->whoTo->find_rtx_pseudo_cumack = 1; 2969 } 2970 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) { 2971 sctp_log_sack(*biggest_newly_acked_tsn, 2972 last_tsn, 2973 tp1->rec.data.TSN_seq, 2974 frag_strt, 2975 frag_end, 2976 SCTP_LOG_TSN_ACKED); 2977 } 2978 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) { 2979 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP, 2980 tp1->whoTo->flight_size, 2981 tp1->book_size, 2982 (uintptr_t) tp1->whoTo, 2983 tp1->rec.data.TSN_seq); 2984 } 2985 sctp_flight_size_decrease(tp1); 2986 sctp_total_flight_decrease(stcb, tp1); 2987 2988 tp1->whoTo->net_ack += tp1->send_size; 2989 if (tp1->snd_count < 2) { 2990 /* 2991 * True 2992 * non-retran 2993 * smited 2994 * chunk */ 2995 tp1->whoTo->net_ack2 += tp1->send_size; 2996 2997 /* 2998 * update RTO 2999 * too ? 
*/ 3000 if (tp1->do_rtt) { 3001 tp1->whoTo->RTO = 3002 sctp_calculate_rto(stcb, 3003 asoc, 3004 tp1->whoTo, 3005 &tp1->sent_rcv_time, 3006 sctp_align_safe_nocopy); 3007 tp1->do_rtt = 0; 3008 } 3009 } 3010 } 3011 if (tp1->sent <= SCTP_DATAGRAM_RESEND) { 3012 (*ecn_seg_sums) += tp1->rec.data.ect_nonce; 3013 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM; 3014 if (compare_with_wrap(tp1->rec.data.TSN_seq, 3015 asoc->this_sack_highest_gap, 3016 MAX_TSN)) { 3017 asoc->this_sack_highest_gap = 3018 tp1->rec.data.TSN_seq; 3019 } 3020 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3021 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 3022 #ifdef SCTP_AUDITING_ENABLED 3023 sctp_audit_log(0xB2, 3024 (asoc->sent_queue_retran_cnt & 0x000000ff)); 3025 #endif 3026 } 3027 } 3028 /* 3029 * All chunks NOT UNSENT 3030 * fall through here and are 3031 * marked 3032 */ 3033 tp1->sent = SCTP_DATAGRAM_MARKED; 3034 if (tp1->rec.data.chunk_was_revoked) { 3035 /* deflate the cwnd */ 3036 tp1->whoTo->cwnd -= tp1->book_size; 3037 tp1->rec.data.chunk_was_revoked = 0; 3038 } 3039 } 3040 break; 3041 } /* if (tp1->TSN_seq == j) */ 3042 if (compare_with_wrap(tp1->rec.data.TSN_seq, j, 3043 MAX_TSN)) 3044 break; 3045 3046 tp1 = TAILQ_NEXT(tp1, sctp_next); 3047 } /* end while (tp1) */ 3048 } /* end for (j = fragStart */ 3049 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset, 3050 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block); 3051 *offset += sizeof(block); 3052 if (frag == NULL) { 3053 break; 3054 } 3055 } 3056 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3057 if (num_frs) 3058 sctp_log_fr(*biggest_tsn_acked, 3059 *biggest_newly_acked_tsn, 3060 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS); 3061 } 3062 } 3063 3064 static void 3065 sctp_check_for_revoked(struct sctp_tcb *stcb, 3066 struct sctp_association *asoc, uint32_t cumack, 3067 u_long biggest_tsn_acked) 3068 { 3069 struct sctp_tmit_chunk *tp1; 3070 int tot_revoked = 0; 3071 3072 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3073 while (tp1) { 3074 if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack, 3075 MAX_TSN)) { 3076 /* 3077 * ok this guy is either ACK or MARKED. If it is 3078 * ACKED it has been previously acked but not this 3079 * time i.e. revoked. If it is MARKED it was ACK'ed 3080 * again. 3081 */ 3082 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked, 3083 MAX_TSN)) 3084 break; 3085 3086 3087 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 3088 /* it has been revoked */ 3089 tp1->sent = SCTP_DATAGRAM_SENT; 3090 tp1->rec.data.chunk_was_revoked = 1; 3091 /* 3092 * We must add this stuff back in to assure 3093 * timers and such get started. 3094 */ 3095 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) { 3096 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 3097 tp1->whoTo->flight_size, 3098 tp1->book_size, 3099 (uintptr_t) tp1->whoTo, 3100 tp1->rec.data.TSN_seq); 3101 } 3102 sctp_flight_size_increase(tp1); 3103 sctp_total_flight_increase(stcb, tp1); 3104 /* 3105 * We inflate the cwnd to compensate for our 3106 * artificial inflation of the flight_size. 
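	 * (an illustrative sketch of the revocation rule follows)
	 */
	/*
	 * Illustrative, stand-alone sketch (not part of the original file):
	 * "revoking" means a TSN that an earlier SACK reported in a gap block
	 * is no longer covered by the current SACK, so it has to count as
	 * outstanding again.  Over a simplified array of per-chunk states
	 * (TSN wrap-around ignored; the real code uses compare_with_wrap()),
	 * the rule reads roughly as below.  Names are hypothetical and the
	 * block is deliberately not compiled.
	 */
#if 0
enum ex_chunk_state { EX_SENT, EX_ACKED, EX_MARKED };

struct ex_chunk {
	unsigned int tsn;
	enum ex_chunk_state state;
};

static void
check_for_revoked_example(struct ex_chunk *chks, int nchks,
    unsigned int cumack, unsigned int biggest_tsn_acked)
{
	int i;

	for (i = 0; i < nchks; i++) {
		if (chks[i].tsn <= cumack)
			continue;		/* covered by the cum-ack */
		if (chks[i].tsn > biggest_tsn_acked)
			break;			/* beyond what this SACK reports */
		if (chks[i].state == EX_ACKED)
			chks[i].state = EX_SENT;	/* revoked: back in flight */
		else if (chks[i].state == EX_MARKED)
			chks[i].state = EX_ACKED;	/* re-acked by this SACK */
	}
}
#endif
	/*
	 * (end of sketch; the original code resumes below)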
3107 */ 3108 tp1->whoTo->cwnd += tp1->book_size; 3109 tot_revoked++; 3110 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) { 3111 sctp_log_sack(asoc->last_acked_seq, 3112 cumack, 3113 tp1->rec.data.TSN_seq, 3114 0, 3115 0, 3116 SCTP_LOG_TSN_REVOKED); 3117 } 3118 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) { 3119 /* it has been re-acked in this SACK */ 3120 tp1->sent = SCTP_DATAGRAM_ACKED; 3121 } 3122 } 3123 if (tp1->sent == SCTP_DATAGRAM_UNSENT) 3124 break; 3125 tp1 = TAILQ_NEXT(tp1, sctp_next); 3126 } 3127 if (tot_revoked > 0) { 3128 /* 3129 * Setup the ecn nonce re-sync point. We do this since once 3130 * data is revoked we begin to retransmit things, which do 3131 * NOT have the ECN bits set. This means we are now out of 3132 * sync and must wait until we get back in sync with the 3133 * peer to check ECN bits. 3134 */ 3135 tp1 = TAILQ_FIRST(&asoc->send_queue); 3136 if (tp1 == NULL) { 3137 asoc->nonce_resync_tsn = asoc->sending_seq; 3138 } else { 3139 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq; 3140 } 3141 asoc->nonce_wait_for_ecne = 0; 3142 asoc->nonce_sum_check = 0; 3143 } 3144 } 3145 3146 static void 3147 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, 3148 u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved) 3149 { 3150 struct sctp_tmit_chunk *tp1; 3151 int strike_flag = 0; 3152 struct timeval now; 3153 int tot_retrans = 0; 3154 uint32_t sending_seq; 3155 struct sctp_nets *net; 3156 int num_dests_sacked = 0; 3157 3158 /* 3159 * select the sending_seq, this is either the next thing ready to be 3160 * sent but not transmitted, OR, the next seq we assign. 3161 */ 3162 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 3163 if (tp1 == NULL) { 3164 sending_seq = asoc->sending_seq; 3165 } else { 3166 sending_seq = tp1->rec.data.TSN_seq; 3167 } 3168 3169 /* CMT DAC algo: finding out if SACK is a mixed SACK */ 3170 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3171 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3172 if (net->saw_newack) 3173 num_dests_sacked++; 3174 } 3175 } 3176 if (stcb->asoc.peer_supports_prsctp) { 3177 (void)SCTP_GETTIME_TIMEVAL(&now); 3178 } 3179 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3180 while (tp1) { 3181 strike_flag = 0; 3182 if (tp1->no_fr_allowed) { 3183 /* this one had a timeout or something */ 3184 tp1 = TAILQ_NEXT(tp1, sctp_next); 3185 continue; 3186 } 3187 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3188 if (tp1->sent < SCTP_DATAGRAM_RESEND) 3189 sctp_log_fr(biggest_tsn_newly_acked, 3190 tp1->rec.data.TSN_seq, 3191 tp1->sent, 3192 SCTP_FR_LOG_CHECK_STRIKE); 3193 } 3194 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked, 3195 MAX_TSN) || 3196 tp1->sent == SCTP_DATAGRAM_UNSENT) { 3197 /* done */ 3198 break; 3199 } 3200 if (stcb->asoc.peer_supports_prsctp) { 3201 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3202 /* Is it expired? */ 3203 if ( 3204 (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) 3205 ) { 3206 /* Yes so drop it */ 3207 if (tp1->data != NULL) { 3208 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 3209 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3210 &asoc->sent_queue); 3211 } 3212 tp1 = TAILQ_NEXT(tp1, sctp_next); 3213 continue; 3214 } 3215 } 3216 if ((PR_SCTP_RTX_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3217 /* Has it been retransmitted tv_sec times? 
*/ 3218 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) { 3219 /* Yes, so drop it */ 3220 if (tp1->data != NULL) { 3221 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 3222 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3223 &asoc->sent_queue); 3224 } 3225 tp1 = TAILQ_NEXT(tp1, sctp_next); 3226 continue; 3227 } 3228 } 3229 } 3230 if (compare_with_wrap(tp1->rec.data.TSN_seq, 3231 asoc->this_sack_highest_gap, MAX_TSN)) { 3232 /* we are beyond the tsn in the sack */ 3233 break; 3234 } 3235 if (tp1->sent >= SCTP_DATAGRAM_RESEND) { 3236 /* either a RESEND, ACKED, or MARKED */ 3237 /* skip */ 3238 tp1 = TAILQ_NEXT(tp1, sctp_next); 3239 continue; 3240 } 3241 /* 3242 * CMT : SFR algo (covers part of DAC and HTNA as well) 3243 */ 3244 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) { 3245 /* 3246 * No new acks were receieved for data sent to this 3247 * dest. Therefore, according to the SFR algo for 3248 * CMT, no data sent to this dest can be marked for 3249 * FR using this SACK. 3250 */ 3251 tp1 = TAILQ_NEXT(tp1, sctp_next); 3252 continue; 3253 } else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq, 3254 tp1->whoTo->this_sack_highest_newack, MAX_TSN)) { 3255 /* 3256 * CMT: New acks were receieved for data sent to 3257 * this dest. But no new acks were seen for data 3258 * sent after tp1. Therefore, according to the SFR 3259 * algo for CMT, tp1 cannot be marked for FR using 3260 * this SACK. This step covers part of the DAC algo 3261 * and the HTNA algo as well. 3262 */ 3263 tp1 = TAILQ_NEXT(tp1, sctp_next); 3264 continue; 3265 } 3266 /* 3267 * Here we check to see if we were have already done a FR 3268 * and if so we see if the biggest TSN we saw in the sack is 3269 * smaller than the recovery point. If so we don't strike 3270 * the tsn... otherwise we CAN strike the TSN. 3271 */ 3272 /* 3273 * @@@ JRI: Check for CMT if (accum_moved && 3274 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 3275 * 0)) { 3276 */ 3277 if (accum_moved && asoc->fast_retran_loss_recovery) { 3278 /* 3279 * Strike the TSN if in fast-recovery and cum-ack 3280 * moved. 3281 */ 3282 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3283 sctp_log_fr(biggest_tsn_newly_acked, 3284 tp1->rec.data.TSN_seq, 3285 tp1->sent, 3286 SCTP_FR_LOG_STRIKE_CHUNK); 3287 } 3288 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3289 tp1->sent++; 3290 } 3291 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3292 /* 3293 * CMT DAC algorithm: If SACK flag is set to 3294 * 0, then lowest_newack test will not pass 3295 * because it would have been set to the 3296 * cumack earlier. If not already to be 3297 * rtx'd, If not a mixed sack and if tp1 is 3298 * not between two sacked TSNs, then mark by 3299 * one more. NOTE that we are marking by one 3300 * additional time since the SACK DAC flag 3301 * indicates that two packets have been 3302 * received after this missing TSN. 3303 */ 3304 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3305 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) { 3306 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3307 sctp_log_fr(16 + num_dests_sacked, 3308 tp1->rec.data.TSN_seq, 3309 tp1->sent, 3310 SCTP_FR_LOG_STRIKE_CHUNK); 3311 } 3312 tp1->sent++; 3313 } 3314 } 3315 } else if (tp1->rec.data.doing_fast_retransmit) { 3316 /* 3317 * For those that have done a FR we must take 3318 * special consideration if we strike. I.e the 3319 * biggest_newly_acked must be higher than the 3320 * sending_seq at the time we did the FR. 
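			 * (an illustrative sketch of the abandonment rules follows)
			 */
	/*
	 * Illustrative, stand-alone sketch (not part of the original file):
	 * PR-SCTP lets the sender abandon an unacked chunk either because its
	 * lifetime has expired (TTL policy) or because it has already been
	 * retransmitted as often as allowed (RTX policy, which reuses the
	 * timetodrop field as a counter above).  A condensed predicate with
	 * hypothetical names, deliberately kept out of compilation:
	 */
#if 0
struct ex_pr_chunk {
	int ttl_policy;			/* timed-reliability policy in use */
	int rtx_policy;			/* limited-retransmission policy in use */
	long expire_sec, expire_usec;	/* absolute expiry (TTL policy) */
	long max_retrans;		/* allowed retransmissions (RTX policy) */
	long snd_count;			/* transmissions performed so far */
	int acked;			/* already acknowledged? */
};

static int
pr_chunk_should_abandon(const struct ex_pr_chunk *c, long now_sec, long now_usec)
{
	if (c->acked)
		return (0);		/* delivered: nothing to abandon */
	if (c->ttl_policy &&
	    (now_sec > c->expire_sec ||
	    (now_sec == c->expire_sec && now_usec > c->expire_usec)))
		return (1);		/* lifetime exceeded */
	if (c->rtx_policy && c->snd_count > c->max_retrans)
		return (1);		/* retransmission budget exhausted */
	return (0);
}
#endif
	/*
	 * (end of sketch; the original code resumes below)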
3321 */ 3322 if ( 3323 #ifdef SCTP_FR_TO_ALTERNATE 3324 /* 3325 * If FR's go to new networks, then we must only do 3326 * this for singly homed asoc's. However if the FR's 3327 * go to the same network (Armando's work) then its 3328 * ok to FR multiple times. 3329 */ 3330 (asoc->numnets < 2) 3331 #else 3332 (1) 3333 #endif 3334 ) { 3335 3336 if ((compare_with_wrap(biggest_tsn_newly_acked, 3337 tp1->rec.data.fast_retran_tsn, MAX_TSN)) || 3338 (biggest_tsn_newly_acked == 3339 tp1->rec.data.fast_retran_tsn)) { 3340 /* 3341 * Strike the TSN, since this ack is 3342 * beyond where things were when we 3343 * did a FR. 3344 */ 3345 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3346 sctp_log_fr(biggest_tsn_newly_acked, 3347 tp1->rec.data.TSN_seq, 3348 tp1->sent, 3349 SCTP_FR_LOG_STRIKE_CHUNK); 3350 } 3351 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3352 tp1->sent++; 3353 } 3354 strike_flag = 1; 3355 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3356 /* 3357 * CMT DAC algorithm: If 3358 * SACK flag is set to 0, 3359 * then lowest_newack test 3360 * will not pass because it 3361 * would have been set to 3362 * the cumack earlier. If 3363 * not already to be rtx'd, 3364 * If not a mixed sack and 3365 * if tp1 is not between two 3366 * sacked TSNs, then mark by 3367 * one more. NOTE that we 3368 * are marking by one 3369 * additional time since the 3370 * SACK DAC flag indicates 3371 * that two packets have 3372 * been received after this 3373 * missing TSN. 3374 */ 3375 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 3376 (num_dests_sacked == 1) && 3377 compare_with_wrap(this_sack_lowest_newack, 3378 tp1->rec.data.TSN_seq, MAX_TSN)) { 3379 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3380 sctp_log_fr(32 + num_dests_sacked, 3381 tp1->rec.data.TSN_seq, 3382 tp1->sent, 3383 SCTP_FR_LOG_STRIKE_CHUNK); 3384 } 3385 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3386 tp1->sent++; 3387 3388 } 3389 } 3390 } 3391 } 3392 } 3393 /* 3394 * JRI: TODO: remove code for HTNA algo. CMT's SFR 3395 * algo covers HTNA. 3396 */ 3397 } else if (compare_with_wrap(tp1->rec.data.TSN_seq, 3398 biggest_tsn_newly_acked, MAX_TSN)) { 3399 /* 3400 * We don't strike these: This is the HTNA 3401 * algorithm i.e. we don't strike If our TSN is 3402 * larger than the Highest TSN Newly Acked. 3403 */ 3404 ; 3405 } else { 3406 /* Strike the TSN */ 3407 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3408 sctp_log_fr(biggest_tsn_newly_acked, 3409 tp1->rec.data.TSN_seq, 3410 tp1->sent, 3411 SCTP_FR_LOG_STRIKE_CHUNK); 3412 } 3413 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3414 tp1->sent++; 3415 } 3416 if (sctp_cmt_on_off && sctp_cmt_use_dac) { 3417 /* 3418 * CMT DAC algorithm: If SACK flag is set to 3419 * 0, then lowest_newack test will not pass 3420 * because it would have been set to the 3421 * cumack earlier. If not already to be 3422 * rtx'd, If not a mixed sack and if tp1 is 3423 * not between two sacked TSNs, then mark by 3424 * one more. NOTE that we are marking by one 3425 * additional time since the SACK DAC flag 3426 * indicates that two packets have been 3427 * received after this missing TSN. 
3428 */ 3429 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3430 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) { 3431 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3432 sctp_log_fr(48 + num_dests_sacked, 3433 tp1->rec.data.TSN_seq, 3434 tp1->sent, 3435 SCTP_FR_LOG_STRIKE_CHUNK); 3436 } 3437 tp1->sent++; 3438 } 3439 } 3440 } 3441 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3442 /* Increment the count to resend */ 3443 struct sctp_nets *alt; 3444 3445 /* printf("OK, we are now ready to FR this guy\n"); */ 3446 if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) { 3447 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count, 3448 0, SCTP_FR_MARKED); 3449 } 3450 if (strike_flag) { 3451 /* This is a subsequent FR */ 3452 SCTP_STAT_INCR(sctps_sendmultfastretrans); 3453 } 3454 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3455 if (sctp_cmt_on_off) { 3456 /* 3457 * CMT: Using RTX_SSTHRESH policy for CMT. 3458 * If CMT is being used, then pick dest with 3459 * largest ssthresh for any retransmission. 3460 */ 3461 tp1->no_fr_allowed = 1; 3462 alt = tp1->whoTo; 3463 /* sa_ignore NO_NULL_CHK */ 3464 if (sctp_cmt_on_off && sctp_cmt_pf) { 3465 /* 3466 * JRS 5/18/07 - If CMT PF is on, 3467 * use the PF version of 3468 * find_alt_net() 3469 */ 3470 alt = sctp_find_alternate_net(stcb, alt, 2); 3471 } else { 3472 /* 3473 * JRS 5/18/07 - If only CMT is on, 3474 * use the CMT version of 3475 * find_alt_net() 3476 */ 3477 /* sa_ignore NO_NULL_CHK */ 3478 alt = sctp_find_alternate_net(stcb, alt, 1); 3479 } 3480 if (alt == NULL) { 3481 alt = tp1->whoTo; 3482 } 3483 /* 3484 * CUCv2: If a different dest is picked for 3485 * the retransmission, then new 3486 * (rtx-)pseudo_cumack needs to be tracked 3487 * for orig dest. Let CUCv2 track new (rtx-) 3488 * pseudo-cumack always. 3489 */ 3490 if (tp1->whoTo) { 3491 tp1->whoTo->find_pseudo_cumack = 1; 3492 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3493 } 3494 } else {/* CMT is OFF */ 3495 3496 #ifdef SCTP_FR_TO_ALTERNATE 3497 /* Can we find an alternate? */ 3498 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); 3499 #else 3500 /* 3501 * default behavior is to NOT retransmit 3502 * FR's to an alternate. Armando Caro's 3503 * paper details why. 3504 */ 3505 alt = tp1->whoTo; 3506 #endif 3507 } 3508 3509 tp1->rec.data.doing_fast_retransmit = 1; 3510 tot_retrans++; 3511 /* mark the sending seq for possible subsequent FR's */ 3512 /* 3513 * printf("Marking TSN for FR new value %x\n", 3514 * (uint32_t)tpi->rec.data.TSN_seq); 3515 */ 3516 if (TAILQ_EMPTY(&asoc->send_queue)) { 3517 /* 3518 * If the queue of send is empty then its 3519 * the next sequence number that will be 3520 * assigned so we subtract one from this to 3521 * get the one we last sent. 3522 */ 3523 tp1->rec.data.fast_retran_tsn = sending_seq; 3524 } else { 3525 /* 3526 * If there are chunks on the send queue 3527 * (unsent data that has made it from the 3528 * stream queues but not out the door, we 3529 * take the first one (which will have the 3530 * lowest TSN) and subtract one to get the 3531 * one we last sent. 
3532 */ 3533 struct sctp_tmit_chunk *ttt; 3534 3535 ttt = TAILQ_FIRST(&asoc->send_queue); 3536 tp1->rec.data.fast_retran_tsn = 3537 ttt->rec.data.TSN_seq; 3538 } 3539 3540 if (tp1->do_rtt) { 3541 /* 3542 * this guy had a RTO calculation pending on 3543 * it, cancel it 3544 */ 3545 tp1->do_rtt = 0; 3546 } 3547 /* fix counts and things */ 3548 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) { 3549 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND, 3550 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0), 3551 tp1->book_size, 3552 (uintptr_t) tp1->whoTo, 3553 tp1->rec.data.TSN_seq); 3554 } 3555 if (tp1->whoTo) { 3556 tp1->whoTo->net_ack++; 3557 sctp_flight_size_decrease(tp1); 3558 } 3559 if (sctp_logging_level & SCTP_LOG_RWND_ENABLE) { 3560 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, 3561 asoc->peers_rwnd, tp1->send_size, sctp_peer_chunk_oh); 3562 } 3563 /* add back to the rwnd */ 3564 asoc->peers_rwnd += (tp1->send_size + sctp_peer_chunk_oh); 3565 3566 /* remove from the total flight */ 3567 sctp_total_flight_decrease(stcb, tp1); 3568 if (alt != tp1->whoTo) { 3569 /* yes, there is an alternate. */ 3570 sctp_free_remote_addr(tp1->whoTo); 3571 /* sa_ignore FREED_MEMORY */ 3572 tp1->whoTo = alt; 3573 atomic_add_int(&alt->ref_count, 1); 3574 } 3575 } 3576 tp1 = TAILQ_NEXT(tp1, sctp_next); 3577 } /* while (tp1) */ 3578 3579 if (tot_retrans > 0) { 3580 /* 3581 * Setup the ecn nonce re-sync point. We do this since once 3582 * we go to FR something we introduce a Karn's rule scenario 3583 * and won't know the totals for the ECN bits. 3584 */ 3585 asoc->nonce_resync_tsn = sending_seq; 3586 asoc->nonce_wait_for_ecne = 0; 3587 asoc->nonce_sum_check = 0; 3588 } 3589 } 3590 3591 struct sctp_tmit_chunk * 3592 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb, 3593 struct sctp_association *asoc) 3594 { 3595 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL; 3596 struct timeval now; 3597 int now_filled = 0; 3598 3599 if (asoc->peer_supports_prsctp == 0) { 3600 return (NULL); 3601 } 3602 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3603 while (tp1) { 3604 if (tp1->sent != SCTP_FORWARD_TSN_SKIP && 3605 tp1->sent != SCTP_DATAGRAM_RESEND) { 3606 /* no chance to advance, out of here */ 3607 break; 3608 } 3609 if (!PR_SCTP_ENABLED(tp1->flags)) { 3610 /* 3611 * We can't fwd-tsn past any that are reliable aka 3612 * retransmitted until the asoc fails. 3613 */ 3614 break; 3615 } 3616 if (!now_filled) { 3617 (void)SCTP_GETTIME_TIMEVAL(&now); 3618 now_filled = 1; 3619 } 3620 tp2 = TAILQ_NEXT(tp1, sctp_next); 3621 /* 3622 * now we got a chunk which is marked for another 3623 * retransmission to a PR-stream but has run out its chances 3624 * already maybe OR has been marked to skip now. Can we skip 3625 * it if its a resend? 3626 */ 3627 if (tp1->sent == SCTP_DATAGRAM_RESEND && 3628 (PR_SCTP_TTL_ENABLED(tp1->flags))) { 3629 /* 3630 * Now is this one marked for resend and its time is 3631 * now up? 3632 */ 3633 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3634 /* Yes so drop it */ 3635 if (tp1->data) { 3636 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 3637 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3638 &asoc->sent_queue); 3639 } 3640 } else { 3641 /* 3642 * No, we are done when hit one for resend 3643 * whos time as not expired. 3644 */ 3645 break; 3646 } 3647 } 3648 /* 3649 * Ok now if this chunk is marked to drop it we can clean up 3650 * the chunk, advance our peer ack point and we can check 3651 * the next chunk. 
3652 */ 3653 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) { 3654 /* advance PeerAckPoint goes forward */ 3655 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq; 3656 a_adv = tp1; 3657 /* 3658 * we don't want to de-queue it here. Just wait for 3659 * the next peer SACK to come with a new cumTSN and 3660 * then the chunk will be droped in the normal 3661 * fashion. 3662 */ 3663 if (tp1->data) { 3664 sctp_free_bufspace(stcb, asoc, tp1, 1); 3665 /* 3666 * Maybe there should be another 3667 * notification type 3668 */ 3669 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, 3670 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3671 tp1); 3672 sctp_m_freem(tp1->data); 3673 tp1->data = NULL; 3674 if (stcb->sctp_socket) { 3675 sctp_sowwakeup(stcb->sctp_ep, 3676 stcb->sctp_socket); 3677 if (sctp_logging_level & SCTP_WAKE_LOGGING_ENABLE) { 3678 sctp_wakeup_log(stcb, tp1->rec.data.TSN_seq, 1, SCTP_WAKESND_FROM_FWDTSN); 3679 } 3680 } 3681 } 3682 } else { 3683 /* 3684 * If it is still in RESEND we can advance no 3685 * further 3686 */ 3687 break; 3688 } 3689 /* 3690 * If we hit here we just dumped tp1, move to next tsn on 3691 * sent queue. 3692 */ 3693 tp1 = tp2; 3694 } 3695 return (a_adv); 3696 } 3697 3698 static void 3699 sctp_fs_audit(struct sctp_association *asoc) 3700 { 3701 struct sctp_tmit_chunk *chk; 3702 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0; 3703 3704 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 3705 if (chk->sent < SCTP_DATAGRAM_RESEND) { 3706 inflight++; 3707 } else if (chk->sent == SCTP_DATAGRAM_RESEND) { 3708 resend++; 3709 } else if (chk->sent < SCTP_DATAGRAM_ACKED) { 3710 inbetween++; 3711 } else if (chk->sent > SCTP_DATAGRAM_ACKED) { 3712 above++; 3713 } else { 3714 acked++; 3715 } 3716 } 3717 3718 if ((inflight > 0) || (inbetween > 0)) { 3719 #ifdef INVARIANTS 3720 panic("Flight size-express incorrect? \n"); 3721 #else 3722 SCTP_PRINTF("Flight size-express incorrect inflight:%d inbetween:%d\n", 3723 inflight, inbetween); 3724 #endif 3725 } 3726 } 3727 3728 3729 static void 3730 sctp_window_probe_recovery(struct sctp_tcb *stcb, 3731 struct sctp_association *asoc, 3732 struct sctp_nets *net, 3733 struct sctp_tmit_chunk *tp1) 3734 { 3735 struct sctp_tmit_chunk *chk; 3736 3737 /* First setup this one and get it moved back */ 3738 tp1->sent = SCTP_DATAGRAM_UNSENT; 3739 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) { 3740 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP, 3741 tp1->whoTo->flight_size, 3742 tp1->book_size, 3743 (uintptr_t) tp1->whoTo, 3744 tp1->rec.data.TSN_seq); 3745 } 3746 sctp_flight_size_decrease(tp1); 3747 sctp_total_flight_decrease(stcb, tp1); 3748 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 3749 TAILQ_INSERT_HEAD(&asoc->send_queue, tp1, sctp_next); 3750 asoc->sent_queue_cnt--; 3751 asoc->send_queue_cnt++; 3752 /* 3753 * Now all guys marked for RESEND on the sent_queue must be moved 3754 * back too. 
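* Each one is linked back in right behind tp1 on the send_queue and
* sent_queue_retran_cnt is decremented for it, since it will now go
* out as a fresh send rather than as a marked retransmission.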
3755 */ 3756 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 3757 if (chk->sent == SCTP_DATAGRAM_RESEND) { 3758 /* Another chunk to move */ 3759 chk->sent = SCTP_DATAGRAM_UNSENT; 3760 /* It should not be in flight */ 3761 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 3762 TAILQ_INSERT_AFTER(&asoc->send_queue, tp1, chk, sctp_next); 3763 asoc->sent_queue_cnt--; 3764 asoc->send_queue_cnt++; 3765 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 3766 } 3767 } 3768 } 3769 3770 void 3771 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, 3772 uint32_t rwnd, int nonce_sum_flag, int *abort_now) 3773 { 3774 struct sctp_nets *net; 3775 struct sctp_association *asoc; 3776 struct sctp_tmit_chunk *tp1, *tp2; 3777 uint32_t old_rwnd; 3778 int win_probe_recovery = 0; 3779 int win_probe_recovered = 0; 3780 int j, done_once = 0; 3781 3782 3783 if (sctp_logging_level & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 3784 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack, 3785 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 3786 } 3787 SCTP_TCB_LOCK_ASSERT(stcb); 3788 #ifdef SCTP_ASOCLOG_OF_TSNS 3789 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack; 3790 stcb->asoc.cumack_log_at++; 3791 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 3792 stcb->asoc.cumack_log_at = 0; 3793 } 3794 #endif 3795 asoc = &stcb->asoc; 3796 old_rwnd = asoc->peers_rwnd; 3797 if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) { 3798 /* old ack */ 3799 return; 3800 } else if (asoc->last_acked_seq == cumack) { 3801 /* Window update sack */ 3802 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 3803 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh))); 3804 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 3805 /* SWS sender side engages */ 3806 asoc->peers_rwnd = 0; 3807 } 3808 if (asoc->peers_rwnd > old_rwnd) { 3809 goto again; 3810 } 3811 return; 3812 3813 } 3814 /* First setup for CC stuff */ 3815 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3816 net->prev_cwnd = net->cwnd; 3817 net->net_ack = 0; 3818 net->net_ack2 = 0; 3819 3820 /* 3821 * CMT: Reset CUC and Fast recovery algo variables before 3822 * SACK processing 3823 */ 3824 net->new_pseudo_cumack = 0; 3825 net->will_exit_fast_recovery = 0; 3826 } 3827 if (sctp_strict_sacks) { 3828 uint32_t send_s; 3829 3830 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 3831 tp1 = TAILQ_LAST(&asoc->sent_queue, 3832 sctpchunk_listhead); 3833 send_s = tp1->rec.data.TSN_seq + 1; 3834 } else { 3835 send_s = asoc->sending_seq; 3836 } 3837 if ((cumack == send_s) || 3838 compare_with_wrap(cumack, send_s, MAX_TSN)) { 3839 #ifndef INVARIANTS 3840 struct mbuf *oper; 3841 3842 #endif 3843 #ifdef INVARIANTS 3844 panic("Impossible sack 1"); 3845 #else 3846 *abort_now = 1; 3847 /* XXX */ 3848 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 3849 0, M_DONTWAIT, 1, MT_DATA); 3850 if (oper) { 3851 struct sctp_paramhdr *ph; 3852 uint32_t *ippp; 3853 3854 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 3855 sizeof(uint32_t); 3856 ph = mtod(oper, struct sctp_paramhdr *); 3857 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 3858 ph->param_length = htons(SCTP_BUF_LEN(oper)); 3859 ippp = (uint32_t *) (ph + 1); 3860 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25); 3861 } 3862 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 3863 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper); 3864 return; 3865 #endif 3866 } 3867 } 3868 asoc->this_sack_highest_gap = cumack; 3869 
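/*
 * A cum-ack that moved forward is proof of life from the peer, so the
 * overall error count is cleared and everything at or below the
 * cum-ack can be freed off the sent_queue below.
 */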
stcb->asoc.overall_error_count = 0; 3870 if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) { 3871 /* process the new consecutive TSN first */ 3872 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3873 while (tp1) { 3874 tp2 = TAILQ_NEXT(tp1, sctp_next); 3875 if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq, 3876 MAX_TSN) || 3877 cumack == tp1->rec.data.TSN_seq) { 3878 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 3879 printf("Warning, an unsent is now acked?\n"); 3880 } 3881 /* 3882 * ECN Nonce: Add the nonce to the sender's 3883 * nonce sum 3884 */ 3885 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce; 3886 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 3887 /* 3888 * If it is less than ACKED, it is 3889 * now no-longer in flight. Higher 3890 * values may occur during marking 3891 */ 3892 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3893 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) { 3894 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 3895 tp1->whoTo->flight_size, 3896 tp1->book_size, 3897 (uintptr_t) tp1->whoTo, 3898 tp1->rec.data.TSN_seq); 3899 } 3900 sctp_flight_size_decrease(tp1); 3901 sctp_total_flight_decrease(stcb, tp1); 3902 } 3903 tp1->whoTo->net_ack += tp1->send_size; 3904 if (tp1->snd_count < 2) { 3905 /* 3906 * True non-retransmited 3907 * chunk 3908 */ 3909 tp1->whoTo->net_ack2 += 3910 tp1->send_size; 3911 3912 /* update RTO too? */ 3913 if (tp1->do_rtt) { 3914 tp1->whoTo->RTO = 3915 sctp_calculate_rto(stcb, 3916 asoc, tp1->whoTo, 3917 &tp1->sent_rcv_time, 3918 sctp_align_safe_nocopy); 3919 tp1->do_rtt = 0; 3920 } 3921 } 3922 /* 3923 * CMT: CUCv2 algorithm. From the 3924 * cumack'd TSNs, for each TSN being 3925 * acked for the first time, set the 3926 * following variables for the 3927 * corresp destination. 3928 * new_pseudo_cumack will trigger a 3929 * cwnd update. 3930 * find_(rtx_)pseudo_cumack will 3931 * trigger search for the next 3932 * expected (rtx-)pseudo-cumack. 
3933 */ 3934 tp1->whoTo->new_pseudo_cumack = 1; 3935 tp1->whoTo->find_pseudo_cumack = 1; 3936 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3937 3938 if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) { 3939 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 3940 } 3941 } 3942 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3943 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 3944 } 3945 if (tp1->rec.data.chunk_was_revoked) { 3946 /* deflate the cwnd */ 3947 tp1->whoTo->cwnd -= tp1->book_size; 3948 tp1->rec.data.chunk_was_revoked = 0; 3949 } 3950 tp1->sent = SCTP_DATAGRAM_ACKED; 3951 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 3952 if (tp1->data) { 3953 sctp_free_bufspace(stcb, asoc, tp1, 1); 3954 sctp_m_freem(tp1->data); 3955 } 3956 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) { 3957 sctp_log_sack(asoc->last_acked_seq, 3958 cumack, 3959 tp1->rec.data.TSN_seq, 3960 0, 3961 0, 3962 SCTP_LOG_FREE_SENT); 3963 } 3964 tp1->data = NULL; 3965 asoc->sent_queue_cnt--; 3966 sctp_free_a_chunk(stcb, tp1); 3967 tp1 = tp2; 3968 } else { 3969 break; 3970 } 3971 } 3972 3973 } 3974 if (stcb->sctp_socket) { 3975 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 3976 if (sctp_logging_level & SCTP_WAKE_LOGGING_ENABLE) { 3977 sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK); 3978 } 3979 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 3980 } else { 3981 if (sctp_logging_level & SCTP_WAKE_LOGGING_ENABLE) { 3982 sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK); 3983 } 3984 } 3985 3986 /* JRS - Use the congestion control given in the CC module */ 3987 if (asoc->last_acked_seq != cumack) 3988 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0); 3989 3990 asoc->last_acked_seq = cumack; 3991 3992 if (TAILQ_EMPTY(&asoc->sent_queue)) { 3993 /* nothing left in-flight */ 3994 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3995 net->flight_size = 0; 3996 net->partial_bytes_acked = 0; 3997 } 3998 asoc->total_flight = 0; 3999 asoc->total_flight_count = 0; 4000 } 4001 /* Fix up the a-p-a-p for future PR-SCTP sends */ 4002 if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) { 4003 asoc->advanced_peer_ack_point = cumack; 4004 } 4005 /* ECN Nonce updates */ 4006 if (asoc->ecn_nonce_allowed) { 4007 if (asoc->nonce_sum_check) { 4008 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) { 4009 if (asoc->nonce_wait_for_ecne == 0) { 4010 struct sctp_tmit_chunk *lchk; 4011 4012 lchk = TAILQ_FIRST(&asoc->send_queue); 4013 asoc->nonce_wait_for_ecne = 1; 4014 if (lchk) { 4015 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq; 4016 } else { 4017 asoc->nonce_wait_tsn = asoc->sending_seq; 4018 } 4019 } else { 4020 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) || 4021 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) { 4022 /* 4023 * Misbehaving peer. We need 4024 * to react to this guy 4025 */ 4026 asoc->ecn_allowed = 0; 4027 asoc->ecn_nonce_allowed = 0; 4028 } 4029 } 4030 } 4031 } else { 4032 /* See if Resynchronization Possible */ 4033 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) { 4034 asoc->nonce_sum_check = 1; 4035 /* 4036 * now we must calculate what the base is. 4037 * We do this based on two things, we know 4038 * the total's for all the segments 4039 * gap-acked in the SACK (none), We also 4040 * know the SACK's nonce sum, its in 4041 * nonce_sum_flag. 
So we can build a truth 4042 * table to back-calculate the new value of 4043 * asoc->nonce_sum_expect_base: 4044 * 4045 * SACK-flag-Value Seg-Sums Base 0 0 0 4046 * 1 0 1 0 1 1 1 4047 * 1 0 4048 */ 4049 asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM; 4050 } 4051 } 4052 } 4053 /* RWND update */ 4054 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 4055 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh))); 4056 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4057 /* SWS sender side engages */ 4058 asoc->peers_rwnd = 0; 4059 } 4060 if (asoc->peers_rwnd > old_rwnd) { 4061 win_probe_recovery = 1; 4062 } 4063 /* Now assure a timer where data is queued at */ 4064 again: 4065 j = 0; 4066 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4067 if (win_probe_recovery && (net->window_probe)) { 4068 net->window_probe = 0; 4069 win_probe_recovered = 1; 4070 /* 4071 * Find first chunk that was used with window probe 4072 * and clear the sent 4073 */ 4074 /* sa_ignore FREED_MEMORY */ 4075 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4076 if (tp1->window_probe) { 4077 /* move back to data send queue */ 4078 sctp_window_probe_recovery(stcb, asoc, net, tp1); 4079 break; 4080 } 4081 } 4082 } 4083 if (net->flight_size) { 4084 int to_ticks; 4085 4086 if (net->RTO == 0) { 4087 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 4088 } else { 4089 to_ticks = MSEC_TO_TICKS(net->RTO); 4090 } 4091 j++; 4092 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks, 4093 sctp_timeout_handler, &net->rxt_timer); 4094 } else { 4095 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4096 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4097 stcb, net, 4098 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22); 4099 } 4100 if (sctp_early_fr) { 4101 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 4102 SCTP_STAT_INCR(sctps_earlyfrstpidsck4); 4103 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 4104 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23); 4105 } 4106 } 4107 } 4108 } 4109 if ((j == 0) && 4110 (!TAILQ_EMPTY(&asoc->sent_queue)) && 4111 (asoc->sent_queue_retran_cnt == 0) && 4112 (win_probe_recovered == 0) && 4113 (done_once == 0)) { 4114 /* huh, this should not happen */ 4115 sctp_fs_audit(asoc); 4116 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4117 net->flight_size = 0; 4118 } 4119 asoc->total_flight = 0; 4120 asoc->total_flight_count = 0; 4121 asoc->sent_queue_retran_cnt = 0; 4122 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4123 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4124 sctp_flight_size_increase(tp1); 4125 sctp_total_flight_increase(stcb, tp1); 4126 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4127 asoc->sent_queue_retran_cnt++; 4128 } 4129 } 4130 done_once = 1; 4131 goto again; 4132 } 4133 /**********************************/ 4134 /* Now what about shutdown issues */ 4135 /**********************************/ 4136 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4137 /* nothing left on sendqueue.. consider done */ 4138 /* clean up */ 4139 if ((asoc->stream_queue_cnt == 1) && 4140 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4141 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 4142 (asoc->locked_on_sending) 4143 ) { 4144 struct sctp_stream_queue_pending *sp; 4145 4146 /* 4147 * I may be in a state where we got all across.. but 4148 * cannot write more due to a shutdown... we abort 4149 * since the user did not indicate EOR in this case. 4150 * The sp will be cleaned during free of the asoc. 
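* (A pending sp with length 0 and msg_is_complete 0 is exactly that
* case: a message was started but the user never wrote its end, so we
* flag PARTIAL_MSG_LEFT and take the abort path below.)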
4151 */ 4152 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), 4153 sctp_streamhead); 4154 if ((sp) && (sp->length == 0) && (sp->msg_is_complete == 0)) { 4155 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4156 asoc->locked_on_sending = NULL; 4157 asoc->stream_queue_cnt--; 4158 } 4159 } 4160 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4161 (asoc->stream_queue_cnt == 0)) { 4162 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4163 /* Need to abort here */ 4164 struct mbuf *oper; 4165 4166 abort_out_now: 4167 *abort_now = 1; 4168 /* XXX */ 4169 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4170 0, M_DONTWAIT, 1, MT_DATA); 4171 if (oper) { 4172 struct sctp_paramhdr *ph; 4173 uint32_t *ippp; 4174 4175 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 4176 sizeof(uint32_t); 4177 ph = mtod(oper, struct sctp_paramhdr *); 4178 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 4179 ph->param_length = htons(SCTP_BUF_LEN(oper)); 4180 ippp = (uint32_t *) (ph + 1); 4181 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24); 4182 } 4183 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; 4184 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper); 4185 } else { 4186 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 4187 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4188 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4189 } 4190 asoc->state = SCTP_STATE_SHUTDOWN_SENT; 4191 sctp_stop_timers_for_shutdown(stcb); 4192 sctp_send_shutdown(stcb, 4193 stcb->asoc.primary_destination); 4194 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4195 stcb->sctp_ep, stcb, asoc->primary_destination); 4196 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4197 stcb->sctp_ep, stcb, asoc->primary_destination); 4198 } 4199 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4200 (asoc->stream_queue_cnt == 0)) { 4201 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4202 goto abort_out_now; 4203 } 4204 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4205 asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT; 4206 sctp_send_shutdown_ack(stcb, 4207 stcb->asoc.primary_destination); 4208 4209 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4210 stcb->sctp_ep, stcb, asoc->primary_destination); 4211 } 4212 } 4213 if (sctp_logging_level & SCTP_SACK_RWND_LOGGING_ENABLE) { 4214 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 4215 rwnd, 4216 stcb->asoc.peers_rwnd, 4217 stcb->asoc.total_flight, 4218 stcb->asoc.total_output_queue_size); 4219 } 4220 } 4221 4222 void 4223 sctp_handle_sack(struct mbuf *m, int offset, 4224 struct sctp_sack_chunk *ch, struct sctp_tcb *stcb, 4225 struct sctp_nets *net_from, int *abort_now, int sack_len, uint32_t rwnd) 4226 { 4227 struct sctp_association *asoc; 4228 struct sctp_sack *sack; 4229 struct sctp_tmit_chunk *tp1, *tp2; 4230 uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, 4231 this_sack_lowest_newack; 4232 uint32_t sav_cum_ack; 4233 uint16_t num_seg, num_dup; 4234 uint16_t wake_him = 0; 4235 unsigned int sack_length; 4236 uint32_t send_s = 0; 4237 long j; 4238 int accum_moved = 0; 4239 int will_exit_fast_recovery = 0; 4240 uint32_t a_rwnd, old_rwnd; 4241 int win_probe_recovery = 0; 4242 int win_probe_recovered = 0; 4243 struct sctp_nets *net = NULL; 4244 int nonce_sum_flag, ecn_seg_sums = 0; 4245 int done_once; 4246 uint8_t reneged_all = 0; 4247 uint8_t cmt_dac_flag; 4248 4249 /* 4250 * we take any chance we can to service our queues since we cannot 4251 * get awoken when the socket is read from :< 4252 */ 
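/*
 * This is the slow-path SACK handler: unlike
 * sctp_express_handle_sack() it also has to walk the gap-ack blocks,
 * the duplicate TSN reports and the revoke/strike machinery further
 * down.
 */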
4253 /* 4254 * Now perform the actual SACK handling: 1) Verify that it is not an 4255 * old sack, if so discard. 2) If there is nothing left in the send 4256 * queue (cum-ack is equal to last acked) then you have a duplicate 4257 * too, update any rwnd change and verify no timers are running. 4258 * then return. 3) Process any new consequtive data i.e. cum-ack 4259 * moved process these first and note that it moved. 4) Process any 4260 * sack blocks. 5) Drop any acked from the queue. 6) Check for any 4261 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left, 4262 * sync up flightsizes and things, stop all timers and also check 4263 * for shutdown_pending state. If so then go ahead and send off the 4264 * shutdown. If in shutdown recv, send off the shutdown-ack and 4265 * start that timer, Ret. 9) Strike any non-acked things and do FR 4266 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp 4267 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK 4268 * if in shutdown_recv state. 4269 */ 4270 SCTP_TCB_LOCK_ASSERT(stcb); 4271 sack = &ch->sack; 4272 /* CMT DAC algo */ 4273 this_sack_lowest_newack = 0; 4274 j = 0; 4275 sack_length = (unsigned int)sack_len; 4276 /* ECN Nonce */ 4277 SCTP_STAT_INCR(sctps_slowpath_sack); 4278 nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM; 4279 cum_ack = last_tsn = ntohl(sack->cum_tsn_ack); 4280 #ifdef SCTP_ASOCLOG_OF_TSNS 4281 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack; 4282 stcb->asoc.cumack_log_at++; 4283 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 4284 stcb->asoc.cumack_log_at = 0; 4285 } 4286 #endif 4287 num_seg = ntohs(sack->num_gap_ack_blks); 4288 a_rwnd = rwnd; 4289 4290 if (sctp_logging_level & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 4291 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack, 4292 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 4293 } 4294 /* CMT DAC algo */ 4295 cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC; 4296 num_dup = ntohs(sack->num_dup_tsns); 4297 4298 old_rwnd = stcb->asoc.peers_rwnd; 4299 stcb->asoc.overall_error_count = 0; 4300 asoc = &stcb->asoc; 4301 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) { 4302 sctp_log_sack(asoc->last_acked_seq, 4303 cum_ack, 4304 0, 4305 num_seg, 4306 num_dup, 4307 SCTP_LOG_NEW_SACK); 4308 } 4309 if ((num_dup) && (sctp_logging_level & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) { 4310 int off_to_dup, iii; 4311 uint32_t *dupdata, dblock; 4312 4313 off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk); 4314 if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= sack_length) { 4315 dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup, 4316 sizeof(uint32_t), (uint8_t *) & dblock); 4317 off_to_dup += sizeof(uint32_t); 4318 if (dupdata) { 4319 for (iii = 0; iii < num_dup; iii++) { 4320 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); 4321 dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup, 4322 sizeof(uint32_t), (uint8_t *) & dblock); 4323 if (dupdata == NULL) 4324 break; 4325 off_to_dup += sizeof(uint32_t); 4326 4327 4328 } 4329 } 4330 } else { 4331 SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d sack_len:%d num gaps:%d\n", 4332 off_to_dup, num_dup, sack_length, num_seg); 4333 } 4334 } 4335 if (sctp_strict_sacks) { 4336 /* reality check */ 4337 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 4338 tp1 = TAILQ_LAST(&asoc->sent_queue, 4339 sctpchunk_listhead); 4340 send_s = tp1->rec.data.TSN_seq + 1; 4341 } else { 4342 send_s = asoc->sending_seq; 4343 } 4344 if (cum_ack == send_s || 
4345 compare_with_wrap(cum_ack, send_s, MAX_TSN)) { 4346 #ifndef INVARIANTS 4347 struct mbuf *oper; 4348 4349 #endif 4350 #ifdef INVARIANTS 4351 hopeless_peer: 4352 panic("Impossible sack 1"); 4353 #else 4354 4355 4356 /* 4357 * no way, we have not even sent this TSN out yet. 4358 * Peer is hopelessly messed up with us. 4359 */ 4360 hopeless_peer: 4361 *abort_now = 1; 4362 /* XXX */ 4363 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4364 0, M_DONTWAIT, 1, MT_DATA); 4365 if (oper) { 4366 struct sctp_paramhdr *ph; 4367 uint32_t *ippp; 4368 4369 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 4370 sizeof(uint32_t); 4371 ph = mtod(oper, struct sctp_paramhdr *); 4372 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 4373 ph->param_length = htons(SCTP_BUF_LEN(oper)); 4374 ippp = (uint32_t *) (ph + 1); 4375 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25); 4376 } 4377 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 4378 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper); 4379 return; 4380 #endif 4381 } 4382 } 4383 /**********************/ 4384 /* 1) check the range */ 4385 /**********************/ 4386 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) { 4387 /* acking something behind */ 4388 return; 4389 } 4390 sav_cum_ack = asoc->last_acked_seq; 4391 4392 /* update the Rwnd of the peer */ 4393 if (TAILQ_EMPTY(&asoc->sent_queue) && 4394 TAILQ_EMPTY(&asoc->send_queue) && 4395 (asoc->stream_queue_cnt == 0) 4396 ) { 4397 /* nothing left on send/sent and strmq */ 4398 if (sctp_logging_level & SCTP_LOG_RWND_ENABLE) { 4399 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4400 asoc->peers_rwnd, 0, 0, a_rwnd); 4401 } 4402 asoc->peers_rwnd = a_rwnd; 4403 if (asoc->sent_queue_retran_cnt) { 4404 asoc->sent_queue_retran_cnt = 0; 4405 } 4406 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4407 /* SWS sender side engages */ 4408 asoc->peers_rwnd = 0; 4409 } 4410 /* stop any timers */ 4411 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4412 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4413 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); 4414 if (sctp_early_fr) { 4415 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 4416 SCTP_STAT_INCR(sctps_earlyfrstpidsck1); 4417 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 4418 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); 4419 } 4420 } 4421 net->partial_bytes_acked = 0; 4422 net->flight_size = 0; 4423 } 4424 asoc->total_flight = 0; 4425 asoc->total_flight_count = 0; 4426 return; 4427 } 4428 /* 4429 * We init netAckSz and netAckSz2 to 0. These are used to track 2 4430 * things. The total byte count acked is tracked in netAckSz AND 4431 * netAck2 is used to track the total bytes acked that are un- 4432 * amibguious and were never retransmitted. We track these on a per 4433 * destination address basis. 
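* net_ack is what the CC module's cwnd update works from, while
* net_ack2 only counts chunks that were never retransmitted; that is
* also why the RTO sample below is taken only when snd_count is less
* than 2 (Karn's rule).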
4434 */ 4435 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4436 net->prev_cwnd = net->cwnd; 4437 net->net_ack = 0; 4438 net->net_ack2 = 0; 4439 4440 /* 4441 * CMT: Reset CUC and Fast recovery algo variables before 4442 * SACK processing 4443 */ 4444 net->new_pseudo_cumack = 0; 4445 net->will_exit_fast_recovery = 0; 4446 } 4447 /* process the new consecutive TSN first */ 4448 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4449 while (tp1) { 4450 if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq, 4451 MAX_TSN) || 4452 last_tsn == tp1->rec.data.TSN_seq) { 4453 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 4454 /* 4455 * ECN Nonce: Add the nonce to the sender's 4456 * nonce sum 4457 */ 4458 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce; 4459 accum_moved = 1; 4460 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4461 /* 4462 * If it is less than ACKED, it is 4463 * now no-longer in flight. Higher 4464 * values may occur during marking 4465 */ 4466 if ((tp1->whoTo->dest_state & 4467 SCTP_ADDR_UNCONFIRMED) && 4468 (tp1->snd_count < 2)) { 4469 /* 4470 * If there was no retran 4471 * and the address is 4472 * un-confirmed and we sent 4473 * there and are now 4474 * sacked.. its confirmed, 4475 * mark it so. 4476 */ 4477 tp1->whoTo->dest_state &= 4478 ~SCTP_ADDR_UNCONFIRMED; 4479 } 4480 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4481 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) { 4482 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4483 tp1->whoTo->flight_size, 4484 tp1->book_size, 4485 (uintptr_t) tp1->whoTo, 4486 tp1->rec.data.TSN_seq); 4487 } 4488 sctp_flight_size_decrease(tp1); 4489 sctp_total_flight_decrease(stcb, tp1); 4490 } 4491 tp1->whoTo->net_ack += tp1->send_size; 4492 4493 /* CMT SFR and DAC algos */ 4494 this_sack_lowest_newack = tp1->rec.data.TSN_seq; 4495 tp1->whoTo->saw_newack = 1; 4496 4497 if (tp1->snd_count < 2) { 4498 /* 4499 * True non-retransmited 4500 * chunk 4501 */ 4502 tp1->whoTo->net_ack2 += 4503 tp1->send_size; 4504 4505 /* update RTO too? */ 4506 if (tp1->do_rtt) { 4507 tp1->whoTo->RTO = 4508 sctp_calculate_rto(stcb, 4509 asoc, tp1->whoTo, 4510 &tp1->sent_rcv_time, 4511 sctp_align_safe_nocopy); 4512 tp1->do_rtt = 0; 4513 } 4514 } 4515 /* 4516 * CMT: CUCv2 algorithm. From the 4517 * cumack'd TSNs, for each TSN being 4518 * acked for the first time, set the 4519 * following variables for the 4520 * corresp destination. 4521 * new_pseudo_cumack will trigger a 4522 * cwnd update. 4523 * find_(rtx_)pseudo_cumack will 4524 * trigger search for the next 4525 * expected (rtx-)pseudo-cumack. 
4526 */ 4527 tp1->whoTo->new_pseudo_cumack = 1; 4528 tp1->whoTo->find_pseudo_cumack = 1; 4529 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4530 4531 4532 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) { 4533 sctp_log_sack(asoc->last_acked_seq, 4534 cum_ack, 4535 tp1->rec.data.TSN_seq, 4536 0, 4537 0, 4538 SCTP_LOG_TSN_ACKED); 4539 } 4540 if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) { 4541 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 4542 } 4543 } 4544 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4545 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4546 #ifdef SCTP_AUDITING_ENABLED 4547 sctp_audit_log(0xB3, 4548 (asoc->sent_queue_retran_cnt & 0x000000ff)); 4549 #endif 4550 } 4551 if (tp1->rec.data.chunk_was_revoked) { 4552 /* deflate the cwnd */ 4553 tp1->whoTo->cwnd -= tp1->book_size; 4554 tp1->rec.data.chunk_was_revoked = 0; 4555 } 4556 tp1->sent = SCTP_DATAGRAM_ACKED; 4557 } 4558 } else { 4559 break; 4560 } 4561 tp1 = TAILQ_NEXT(tp1, sctp_next); 4562 } 4563 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; 4564 /* always set this up to cum-ack */ 4565 asoc->this_sack_highest_gap = last_tsn; 4566 4567 /* Move offset up to point to gaps/dups */ 4568 offset += sizeof(struct sctp_sack_chunk); 4569 if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) { 4570 4571 /* skip corrupt segments */ 4572 goto skip_segments; 4573 } 4574 if (num_seg > 0) { 4575 4576 /* 4577 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has 4578 * to be greater than the cumack. Also reset saw_newack to 0 4579 * for all dests. 4580 */ 4581 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4582 net->saw_newack = 0; 4583 net->this_sack_highest_newack = last_tsn; 4584 } 4585 4586 /* 4587 * thisSackHighestGap will increase while handling NEW 4588 * segments this_sack_highest_newack will increase while 4589 * handling NEWLY ACKED chunks. this_sack_lowest_newack is 4590 * used for CMT DAC algo. saw_newack will also change. 4591 */ 4592 sctp_handle_segments(m, &offset, stcb, asoc, ch, last_tsn, 4593 &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack, 4594 num_seg, &ecn_seg_sums); 4595 4596 if (sctp_strict_sacks) { 4597 /* 4598 * validate the biggest_tsn_acked in the gap acks if 4599 * strict adherence is wanted. 4600 */ 4601 if ((biggest_tsn_acked == send_s) || 4602 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) { 4603 /* 4604 * peer is either confused or we are under 4605 * attack. We must abort. 
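* (Same sanity rule as the cum-ack check earlier: no gap block may
* claim a TSN at or beyond send_s, the next TSN we would assign.)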
4606 */ 4607 goto hopeless_peer; 4608 } 4609 } 4610 } 4611 skip_segments: 4612 /*******************************************/ 4613 /* cancel ALL T3-send timer if accum moved */ 4614 /*******************************************/ 4615 if (sctp_cmt_on_off) { 4616 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4617 if (net->new_pseudo_cumack) 4618 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4619 stcb, net, 4620 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27); 4621 4622 } 4623 } else { 4624 if (accum_moved) { 4625 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4626 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4627 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28); 4628 } 4629 } 4630 } 4631 /********************************************/ 4632 /* drop the acked chunks from the sendqueue */ 4633 /********************************************/ 4634 asoc->last_acked_seq = cum_ack; 4635 4636 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4637 if (tp1 == NULL) 4638 goto done_with_it; 4639 do { 4640 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack, 4641 MAX_TSN)) { 4642 break; 4643 } 4644 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 4645 /* no more sent on list */ 4646 printf("Warning, tp1->sent == %d and its now acked?\n", 4647 tp1->sent); 4648 } 4649 tp2 = TAILQ_NEXT(tp1, sctp_next); 4650 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4651 if (tp1->pr_sctp_on) { 4652 if (asoc->pr_sctp_cnt != 0) 4653 asoc->pr_sctp_cnt--; 4654 } 4655 if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) && 4656 (asoc->total_flight > 0)) { 4657 #ifdef INVARIANTS 4658 panic("Warning flight size is postive and should be 0"); 4659 #else 4660 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n", 4661 asoc->total_flight); 4662 #endif 4663 asoc->total_flight = 0; 4664 } 4665 if (tp1->data) { 4666 sctp_free_bufspace(stcb, asoc, tp1, 1); 4667 sctp_m_freem(tp1->data); 4668 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 4669 asoc->sent_queue_cnt_removeable--; 4670 } 4671 } 4672 if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) { 4673 sctp_log_sack(asoc->last_acked_seq, 4674 cum_ack, 4675 tp1->rec.data.TSN_seq, 4676 0, 4677 0, 4678 SCTP_LOG_FREE_SENT); 4679 } 4680 tp1->data = NULL; 4681 asoc->sent_queue_cnt--; 4682 sctp_free_a_chunk(stcb, tp1); 4683 wake_him++; 4684 tp1 = tp2; 4685 } while (tp1 != NULL); 4686 4687 done_with_it: 4688 if ((wake_him) && (stcb->sctp_socket)) { 4689 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4690 if (sctp_logging_level & SCTP_WAKE_LOGGING_ENABLE) { 4691 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK); 4692 } 4693 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4694 } else { 4695 if (sctp_logging_level & SCTP_WAKE_LOGGING_ENABLE) { 4696 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK); 4697 } 4698 } 4699 4700 if (asoc->fast_retran_loss_recovery && accum_moved) { 4701 if (compare_with_wrap(asoc->last_acked_seq, 4702 asoc->fast_recovery_tsn, MAX_TSN) || 4703 asoc->last_acked_seq == asoc->fast_recovery_tsn) { 4704 /* Setup so we will exit RFC2582 fast recovery */ 4705 will_exit_fast_recovery = 1; 4706 } 4707 } 4708 /* 4709 * Check for revoked fragments: 4710 * 4711 * if Previous sack - Had no frags then we can't have any revoked if 4712 * Previous sack - Had frag's then - If we now have frags aka 4713 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked 4714 * some of them. else - The peer revoked all ACKED fragments, since 4715 * we had some before and now we have NONE. 
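* In the revoke-all case each such chunk goes back to
* SCTP_DATAGRAM_SENT, gets counted back into the flight, and its
* net's cwnd is bumped by book_size so the artificial jump in flight
* size does not throttle us.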
4716 */ 4717 4718 if (num_seg) 4719 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked); 4720 else if (asoc->saw_sack_with_frags) { 4721 int cnt_revoked = 0; 4722 4723 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4724 if (tp1 != NULL) { 4725 /* Peer revoked all dg's marked or acked */ 4726 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4727 if ((tp1->sent > SCTP_DATAGRAM_RESEND) && 4728 (tp1->sent < SCTP_FORWARD_TSN_SKIP)) { 4729 tp1->sent = SCTP_DATAGRAM_SENT; 4730 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) { 4731 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 4732 tp1->whoTo->flight_size, 4733 tp1->book_size, 4734 (uintptr_t) tp1->whoTo, 4735 tp1->rec.data.TSN_seq); 4736 } 4737 sctp_flight_size_increase(tp1); 4738 sctp_total_flight_increase(stcb, tp1); 4739 tp1->rec.data.chunk_was_revoked = 1; 4740 /* 4741 * To ensure that this increase in 4742 * flightsize, which is artificial, 4743 * does not throttle the sender, we 4744 * also increase the cwnd 4745 * artificially. 4746 */ 4747 tp1->whoTo->cwnd += tp1->book_size; 4748 cnt_revoked++; 4749 } 4750 } 4751 if (cnt_revoked) { 4752 reneged_all = 1; 4753 } 4754 } 4755 asoc->saw_sack_with_frags = 0; 4756 } 4757 if (num_seg) 4758 asoc->saw_sack_with_frags = 1; 4759 else 4760 asoc->saw_sack_with_frags = 0; 4761 4762 /* JRS - Use the congestion control given in the CC module */ 4763 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery); 4764 4765 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4766 /* nothing left in-flight */ 4767 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4768 /* stop all timers */ 4769 if (sctp_early_fr) { 4770 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 4771 SCTP_STAT_INCR(sctps_earlyfrstpidsck4); 4772 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 4773 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29); 4774 } 4775 } 4776 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4777 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30); 4778 net->flight_size = 0; 4779 net->partial_bytes_acked = 0; 4780 } 4781 asoc->total_flight = 0; 4782 asoc->total_flight_count = 0; 4783 } 4784 /**********************************/ 4785 /* Now what about shutdown issues */ 4786 /**********************************/ 4787 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4788 /* nothing left on sendqueue.. consider done */ 4789 if (sctp_logging_level & SCTP_LOG_RWND_ENABLE) { 4790 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4791 asoc->peers_rwnd, 0, 0, a_rwnd); 4792 } 4793 asoc->peers_rwnd = a_rwnd; 4794 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4795 /* SWS sender side engages */ 4796 asoc->peers_rwnd = 0; 4797 } 4798 /* clean up */ 4799 if ((asoc->stream_queue_cnt == 1) && 4800 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4801 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 4802 (asoc->locked_on_sending) 4803 ) { 4804 struct sctp_stream_queue_pending *sp; 4805 4806 /* 4807 * I may be in a state where we got all across.. but 4808 * cannot write more due to a shutdown... we abort 4809 * since the user did not indicate EOR in this case. 
4810 */ 4811 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), 4812 sctp_streamhead); 4813 if ((sp) && (sp->length == 0) && (sp->msg_is_complete == 0)) { 4814 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4815 asoc->locked_on_sending = NULL; 4816 asoc->stream_queue_cnt--; 4817 } 4818 } 4819 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4820 (asoc->stream_queue_cnt == 0)) { 4821 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4822 /* Need to abort here */ 4823 struct mbuf *oper; 4824 4825 abort_out_now: 4826 *abort_now = 1; 4827 /* XXX */ 4828 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4829 0, M_DONTWAIT, 1, MT_DATA); 4830 if (oper) { 4831 struct sctp_paramhdr *ph; 4832 uint32_t *ippp; 4833 4834 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 4835 sizeof(uint32_t); 4836 ph = mtod(oper, struct sctp_paramhdr *); 4837 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 4838 ph->param_length = htons(SCTP_BUF_LEN(oper)); 4839 ippp = (uint32_t *) (ph + 1); 4840 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31); 4841 } 4842 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31; 4843 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper); 4844 return; 4845 } else { 4846 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 4847 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4848 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4849 } 4850 asoc->state = SCTP_STATE_SHUTDOWN_SENT; 4851 sctp_stop_timers_for_shutdown(stcb); 4852 sctp_send_shutdown(stcb, 4853 stcb->asoc.primary_destination); 4854 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4855 stcb->sctp_ep, stcb, asoc->primary_destination); 4856 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4857 stcb->sctp_ep, stcb, asoc->primary_destination); 4858 } 4859 return; 4860 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4861 (asoc->stream_queue_cnt == 0)) { 4862 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4863 goto abort_out_now; 4864 } 4865 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4866 asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT; 4867 sctp_send_shutdown_ack(stcb, 4868 stcb->asoc.primary_destination); 4869 4870 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4871 stcb->sctp_ep, stcb, asoc->primary_destination); 4872 return; 4873 } 4874 } 4875 /* 4876 * Now here we are going to recycle net_ack for a different use... 4877 * HEADS UP. 4878 */ 4879 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4880 net->net_ack = 0; 4881 } 4882 4883 /* 4884 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking 4885 * to be done. Setting this_sack_lowest_newack to the cum_ack will 4886 * automatically ensure that. 4887 */ 4888 if (sctp_cmt_on_off && sctp_cmt_use_dac && (cmt_dac_flag == 0)) { 4889 this_sack_lowest_newack = cum_ack; 4890 } 4891 if (num_seg > 0) { 4892 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, 4893 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved); 4894 } 4895 /*********************************************/ 4896 /* Here we perform PR-SCTP procedures */ 4897 /* (section 4.2) */ 4898 /*********************************************/ 4899 /* C1. update advancedPeerAckPoint */ 4900 if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) { 4901 asoc->advanced_peer_ack_point = cum_ack; 4902 } 4903 /* C2. 
try to further move advancedPeerAckPoint ahead */ 4904 4905 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) { 4906 struct sctp_tmit_chunk *lchk; 4907 4908 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 4909 /* C3. See if we need to send a Fwd-TSN */ 4910 if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack, 4911 MAX_TSN)) { 4912 /* 4913 * ISSUE with ECN, see FWD-TSN processing for notes 4914 * on issues that will occur when the ECN NONCE 4915 * stuff is put into SCTP for cross checking. 4916 */ 4917 send_forward_tsn(stcb, asoc); 4918 4919 /* 4920 * ECN Nonce: Disable Nonce Sum check when FWD TSN 4921 * is sent and store resync tsn 4922 */ 4923 asoc->nonce_sum_check = 0; 4924 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point; 4925 if (lchk) { 4926 /* Assure a timer is up */ 4927 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4928 stcb->sctp_ep, stcb, lchk->whoTo); 4929 } 4930 } 4931 } 4932 /* JRS - Use the congestion control given in the CC module */ 4933 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc); 4934 4935 /****************************************************************** 4936 * Here we do the stuff with ECN Nonce checking. 4937 * We basically check to see if the nonce sum flag was incorrect 4938 * or if resynchronization needs to be done. Also if we catch a 4939 * misbehaving receiver we give him the kick. 4940 ******************************************************************/ 4941 4942 if (asoc->ecn_nonce_allowed) { 4943 if (asoc->nonce_sum_check) { 4944 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) { 4945 if (asoc->nonce_wait_for_ecne == 0) { 4946 struct sctp_tmit_chunk *lchk; 4947 4948 lchk = TAILQ_FIRST(&asoc->send_queue); 4949 asoc->nonce_wait_for_ecne = 1; 4950 if (lchk) { 4951 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq; 4952 } else { 4953 asoc->nonce_wait_tsn = asoc->sending_seq; 4954 } 4955 } else { 4956 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) || 4957 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) { 4958 /* 4959 * Misbehaving peer. We need 4960 * to react to this guy 4961 */ 4962 asoc->ecn_allowed = 0; 4963 asoc->ecn_nonce_allowed = 0; 4964 } 4965 } 4966 } 4967 } else { 4968 /* See if Resynchronization Possible */ 4969 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) { 4970 asoc->nonce_sum_check = 1; 4971 /* 4972 * now we must calculate what the base is. 4973 * We do this based on two things, we know 4974 * the total's for all the segments 4975 * gap-acked in the SACK, its stored in 4976 * ecn_seg_sums. We also know the SACK's 4977 * nonce sum, its in nonce_sum_flag. So we 4978 * can build a truth table to back-calculate 4979 * the new value of 4980 * asoc->nonce_sum_expect_base: 4981 * 4982 * SACK-flag-Value Seg-Sums Base 0 0 0 4983 * 1 0 1 0 1 1 1 4984 * 1 0 4985 */ 4986 asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM; 4987 } 4988 } 4989 } 4990 /* Now are we exiting loss recovery ? 
*/ 4991 if (will_exit_fast_recovery) { 4992 /* Ok, we must exit fast recovery */ 4993 asoc->fast_retran_loss_recovery = 0; 4994 } 4995 if ((asoc->sat_t3_loss_recovery) && 4996 ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn, 4997 MAX_TSN) || 4998 (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) { 4999 /* end satellite t3 loss recovery */ 5000 asoc->sat_t3_loss_recovery = 0; 5001 } 5002 /* 5003 * CMT Fast recovery 5004 */ 5005 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5006 if (net->will_exit_fast_recovery) { 5007 /* Ok, we must exit fast recovery */ 5008 net->fast_retran_loss_recovery = 0; 5009 } 5010 } 5011 5012 /* Adjust and set the new rwnd value */ 5013 if (sctp_logging_level & SCTP_LOG_RWND_ENABLE) { 5014 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 5015 asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * sctp_peer_chunk_oh), a_rwnd); 5016 } 5017 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd, 5018 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh))); 5019 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 5020 /* SWS sender side engages */ 5021 asoc->peers_rwnd = 0; 5022 } 5023 if (asoc->peers_rwnd > old_rwnd) { 5024 win_probe_recovery = 1; 5025 } 5026 /* 5027 * Now we must setup so we have a timer up for anyone with 5028 * outstanding data. 5029 */ 5030 done_once = 0; 5031 again: 5032 j = 0; 5033 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5034 if (win_probe_recovery && (net->window_probe)) { 5035 net->window_probe = 0; 5036 win_probe_recovered = 1; 5037 /*- 5038 * Find first chunk that was used with 5039 * window probe and clear the event. Put 5040 * it back into the send queue as if has 5041 * not been sent. 5042 */ 5043 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5044 if (tp1->window_probe) { 5045 sctp_window_probe_recovery(stcb, asoc, net, tp1); 5046 break; 5047 } 5048 } 5049 } 5050 if (net->flight_size) { 5051 j++; 5052 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5053 stcb->sctp_ep, stcb, net); 5054 } else { 5055 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5056 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 5057 stcb, net, 5058 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22); 5059 } 5060 if (sctp_early_fr) { 5061 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 5062 SCTP_STAT_INCR(sctps_earlyfrstpidsck4); 5063 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 5064 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23); 5065 } 5066 } 5067 } 5068 } 5069 if ((j == 0) && 5070 (!TAILQ_EMPTY(&asoc->sent_queue)) && 5071 (asoc->sent_queue_retran_cnt == 0) && 5072 (win_probe_recovered == 0) && 5073 (done_once == 0)) { 5074 /* huh, this should not happen */ 5075 sctp_fs_audit(asoc); 5076 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5077 net->flight_size = 0; 5078 } 5079 asoc->total_flight = 0; 5080 asoc->total_flight_count = 0; 5081 asoc->sent_queue_retran_cnt = 0; 5082 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5083 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5084 sctp_flight_size_increase(tp1); 5085 sctp_total_flight_increase(stcb, tp1); 5086 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 5087 asoc->sent_queue_retran_cnt++; 5088 } 5089 } 5090 done_once = 1; 5091 goto again; 5092 } 5093 if (sctp_logging_level & SCTP_SACK_RWND_LOGGING_ENABLE) { 5094 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 5095 a_rwnd, 5096 stcb->asoc.peers_rwnd, 5097 stcb->asoc.total_flight, 5098 stcb->asoc.total_output_queue_size); 5099 } 5100 } 5101 5102 void 5103 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk 
*cp, 5104 struct sctp_nets *netp, int *abort_flag) 5105 { 5106 /* Copy cum-ack */ 5107 uint32_t cum_ack, a_rwnd; 5108 5109 cum_ack = ntohl(cp->cumulative_tsn_ack); 5110 /* Arrange so a_rwnd does NOT change */ 5111 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight; 5112 5113 /* Now call the express sack handling */ 5114 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag); 5115 } 5116 5117 static void 5118 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, 5119 struct sctp_stream_in *strmin) 5120 { 5121 struct sctp_queued_to_read *ctl, *nctl; 5122 struct sctp_association *asoc; 5123 int tt; 5124 5125 asoc = &stcb->asoc; 5126 tt = strmin->last_sequence_delivered; 5127 /* 5128 * First deliver anything prior to and including the stream no that 5129 * came in 5130 */ 5131 ctl = TAILQ_FIRST(&strmin->inqueue); 5132 while (ctl) { 5133 nctl = TAILQ_NEXT(ctl, next); 5134 if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) || 5135 (tt == ctl->sinfo_ssn)) { 5136 /* this is deliverable now */ 5137 TAILQ_REMOVE(&strmin->inqueue, ctl, next); 5138 /* subtract pending on streams */ 5139 asoc->size_on_all_streams -= ctl->length; 5140 sctp_ucount_decr(asoc->cnt_on_all_streams); 5141 /* deliver it to at least the delivery-q */ 5142 if (stcb->sctp_socket) { 5143 sctp_add_to_readq(stcb->sctp_ep, stcb, 5144 ctl, 5145 &stcb->sctp_socket->so_rcv, 1); 5146 } 5147 } else { 5148 /* no more delivery now. */ 5149 break; 5150 } 5151 ctl = nctl; 5152 } 5153 /* 5154 * now we must deliver things in queue the normal way if any are 5155 * now ready. 5156 */ 5157 tt = strmin->last_sequence_delivered + 1; 5158 ctl = TAILQ_FIRST(&strmin->inqueue); 5159 while (ctl) { 5160 nctl = TAILQ_NEXT(ctl, next); 5161 if (tt == ctl->sinfo_ssn) { 5162 /* this is deliverable now */ 5163 TAILQ_REMOVE(&strmin->inqueue, ctl, next); 5164 /* subtract pending on streams */ 5165 asoc->size_on_all_streams -= ctl->length; 5166 sctp_ucount_decr(asoc->cnt_on_all_streams); 5167 /* deliver it to at least the delivery-q */ 5168 strmin->last_sequence_delivered = ctl->sinfo_ssn; 5169 if (stcb->sctp_socket) { 5170 sctp_add_to_readq(stcb->sctp_ep, stcb, 5171 ctl, 5172 &stcb->sctp_socket->so_rcv, 1); 5173 } 5174 tt = strmin->last_sequence_delivered + 1; 5175 } else { 5176 break; 5177 } 5178 ctl = nctl; 5179 } 5180 } 5181 5182 void 5183 sctp_handle_forward_tsn(struct sctp_tcb *stcb, 5184 struct sctp_forward_tsn_chunk *fwd, int *abort_flag, struct mbuf *m, int offset) 5185 { 5186 /* 5187 * ISSUES that MUST be fixed for ECN! When we are the sender of the 5188 * forward TSN, when the SACK comes back that acknowledges the 5189 * FWD-TSN we must reset the NONCE sum to match correctly. This will 5190 * get quite tricky since we may have sent more data interveneing 5191 * and must carefully account for what the SACK says on the nonce 5192 * and any gaps that are reported. This work will NOT be done here, 5193 * but I note it here since it is really related to PR-SCTP and 5194 * FWD-TSN's 5195 */ 5196 5197 /* The pr-sctp fwd tsn */ 5198 /* 5199 * here we will perform all the data receiver side steps for 5200 * processing FwdTSN, as required in by pr-sctp draft: 5201 * 5202 * Assume we get FwdTSN(x): 5203 * 5204 * 1) update local cumTSN to x 2) try to further advance cumTSN to x + 5205 * others we have 3) examine and update re-ordering queue on 5206 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to 5207 * report where we are. 
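* The abort case to watch for below: a new_cum_tsn that jumps further
* than our advertised rwnd could ever cover is treated as coming from
* an attacker and aborts the association.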
5208 */ 5209 struct sctp_association *asoc; 5210 uint32_t new_cum_tsn, gap, back_out_htsn; 5211 unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size; 5212 struct sctp_stream_in *strm; 5213 struct sctp_tmit_chunk *chk, *at; 5214 5215 cumack_set_flag = 0; 5216 asoc = &stcb->asoc; 5217 cnt_gone = 0; 5218 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) { 5219 SCTPDBG(SCTP_DEBUG_INDATA1, 5220 "Bad size too small/big fwd-tsn\n"); 5221 return; 5222 } 5223 m_size = (stcb->asoc.mapping_array_size << 3); 5224 /*************************************************************/ 5225 /* 1. Here we update local cumTSN and shift the bitmap array */ 5226 /*************************************************************/ 5227 new_cum_tsn = ntohl(fwd->new_cumulative_tsn); 5228 5229 if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) || 5230 asoc->cumulative_tsn == new_cum_tsn) { 5231 /* Already got there ... */ 5232 return; 5233 } 5234 back_out_htsn = asoc->highest_tsn_inside_map; 5235 if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map, 5236 MAX_TSN)) { 5237 asoc->highest_tsn_inside_map = new_cum_tsn; 5238 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 5239 sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 5240 } 5241 } 5242 /* 5243 * now we know the new TSN is more advanced, let's find the actual 5244 * gap 5245 */ 5246 if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn, 5247 MAX_TSN)) || 5248 (new_cum_tsn == asoc->mapping_array_base_tsn)) { 5249 gap = new_cum_tsn - asoc->mapping_array_base_tsn; 5250 } else { 5251 /* try to prevent underflow here */ 5252 gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1; 5253 } 5254 5255 if (gap > m_size) { 5256 asoc->highest_tsn_inside_map = back_out_htsn; 5257 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) { 5258 struct mbuf *oper; 5259 5260 /* 5261 * out of range (of single byte chunks in the rwnd I 5262 * give out). This must be an attacker. 5263 */ 5264 *abort_flag = 1; 5265 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 5266 0, M_DONTWAIT, 1, MT_DATA); 5267 if (oper) { 5268 struct sctp_paramhdr *ph; 5269 uint32_t *ippp; 5270 5271 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 5272 (sizeof(uint32_t) * 3); 5273 ph = mtod(oper, struct sctp_paramhdr *); 5274 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 5275 ph->param_length = htons(SCTP_BUF_LEN(oper)); 5276 ippp = (uint32_t *) (ph + 1); 5277 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33); 5278 ippp++; 5279 *ippp = asoc->highest_tsn_inside_map; 5280 ippp++; 5281 *ippp = new_cum_tsn; 5282 } 5283 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33; 5284 sctp_abort_an_association(stcb->sctp_ep, stcb, 5285 SCTP_PEER_FAULTY, oper); 5286 return; 5287 } 5288 if (asoc->highest_tsn_inside_map > 5289 asoc->mapping_array_base_tsn) { 5290 gap = asoc->highest_tsn_inside_map - 5291 asoc->mapping_array_base_tsn; 5292 } else { 5293 gap = asoc->highest_tsn_inside_map + 5294 (MAX_TSN - asoc->mapping_array_base_tsn) + 1; 5295 } 5296 SCTP_STAT_INCR(sctps_fwdtsn_map_over); 5297 cumack_set_flag = 1; 5298 } 5299 SCTP_TCB_LOCK_ASSERT(stcb); 5300 for (i = 0; i <= gap; i++) { 5301 SCTP_SET_TSN_PRESENT(asoc->mapping_array, i); 5302 } 5303 /* 5304 * Now after marking all, slide thing forward but no sack please. 
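* (i.e. sctp_sack_check() just slides the mapping array forward and
* runs its abort checks without emitting a SACK here; reporting the
* new position is left to the normal sack machinery.)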
5305 */ 5306 sctp_sack_check(stcb, 0, 0, abort_flag); 5307 if (*abort_flag) 5308 return; 5309 5310 if (cumack_set_flag) { 5311 /* 5312 * fwd-tsn went outside my gap array - not a common 5313 * occurance. Do the same thing we do when a cookie-echo 5314 * arrives. 5315 */ 5316 asoc->highest_tsn_inside_map = new_cum_tsn - 1; 5317 asoc->mapping_array_base_tsn = new_cum_tsn; 5318 asoc->cumulative_tsn = asoc->highest_tsn_inside_map; 5319 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 5320 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 5321 } 5322 asoc->last_echo_tsn = asoc->highest_tsn_inside_map; 5323 } 5324 /*************************************************************/ 5325 /* 2. Clear up re-assembly queue */ 5326 /*************************************************************/ 5327 5328 /* 5329 * First service it if pd-api is up, just in case we can progress it 5330 * forward 5331 */ 5332 if (asoc->fragmented_delivery_inprogress) { 5333 sctp_service_reassembly(stcb, asoc); 5334 } 5335 if (!TAILQ_EMPTY(&asoc->reasmqueue)) { 5336 /* For each one on here see if we need to toss it */ 5337 /* 5338 * For now large messages held on the reasmqueue that are 5339 * complete will be tossed too. We could in theory do more 5340 * work to spin through and stop after dumping one msg aka 5341 * seeing the start of a new msg at the head, and call the 5342 * delivery function... to see if it can be delivered... But 5343 * for now we just dump everything on the queue. 5344 */ 5345 chk = TAILQ_FIRST(&asoc->reasmqueue); 5346 while (chk) { 5347 at = TAILQ_NEXT(chk, sctp_next); 5348 if (compare_with_wrap(asoc->cumulative_tsn, 5349 chk->rec.data.TSN_seq, MAX_TSN) || 5350 asoc->cumulative_tsn == chk->rec.data.TSN_seq) { 5351 /* It needs to be tossed */ 5352 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); 5353 if (compare_with_wrap(chk->rec.data.TSN_seq, 5354 asoc->tsn_last_delivered, MAX_TSN)) { 5355 asoc->tsn_last_delivered = 5356 chk->rec.data.TSN_seq; 5357 asoc->str_of_pdapi = 5358 chk->rec.data.stream_number; 5359 asoc->ssn_of_pdapi = 5360 chk->rec.data.stream_seq; 5361 asoc->fragment_flags = 5362 chk->rec.data.rcv_flags; 5363 } 5364 asoc->size_on_reasm_queue -= chk->send_size; 5365 sctp_ucount_decr(asoc->cnt_on_reasm_queue); 5366 cnt_gone++; 5367 5368 /* Clear up any stream problem */ 5369 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != 5370 SCTP_DATA_UNORDERED && 5371 (compare_with_wrap(chk->rec.data.stream_seq, 5372 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered, 5373 MAX_SEQ))) { 5374 /* 5375 * We must dump forward this streams 5376 * sequence number if the chunk is 5377 * not unordered that is being 5378 * skipped. There is a chance that 5379 * if the peer does not include the 5380 * last fragment in its FWD-TSN we 5381 * WILL have a problem here since 5382 * you would have a partial chunk in 5383 * queue that may not be 5384 * deliverable. Also if a Partial 5385 * delivery API as started the user 5386 * may get a partial chunk. The next 5387 * read returning a new chunk... 5388 * really ugly but I see no way 5389 * around it! Maybe a notify?? 5390 */ 5391 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = 5392 chk->rec.data.stream_seq; 5393 } 5394 if (chk->data) { 5395 sctp_m_freem(chk->data); 5396 chk->data = NULL; 5397 } 5398 sctp_free_a_chunk(stcb, chk); 5399 } else { 5400 /* 5401 * Ok we have gone beyond the end of the 5402 * fwd-tsn's mark. Some checks... 
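* The main one: if a PD-API delivery was in progress and what we
* fwd-tsn'd over included its LAST_FRAG, the partial delivery is torn
* down and the user gets a PARTIAL_DELIVERY_ABORTED notification.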
5403 */ 5404 if ((asoc->fragmented_delivery_inprogress) && 5405 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) { 5406 uint32_t str_seq; 5407 5408 /* 5409 * Special case PD-API is up and 5410 * what we fwd-tsn' over includes 5411 * one that had the LAST_FRAG. We no 5412 * longer need to do the PD-API. 5413 */ 5414 asoc->fragmented_delivery_inprogress = 0; 5415 5416 str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi; 5417 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION, 5418 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq); 5419 5420 } 5421 break; 5422 } 5423 chk = at; 5424 } 5425 } 5426 if (asoc->fragmented_delivery_inprogress) { 5427 /* 5428 * Ok we removed cnt_gone chunks in the PD-API queue that 5429 * were being delivered. So now we must turn off the flag. 5430 */ 5431 uint32_t str_seq; 5432 5433 str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi; 5434 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION, 5435 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq); 5436 asoc->fragmented_delivery_inprogress = 0; 5437 } 5438 /*************************************************************/ 5439 /* 3. Update the PR-stream re-ordering queues */ 5440 /*************************************************************/ 5441 fwd_sz -= sizeof(*fwd); 5442 if (m && fwd_sz) { 5443 /* New method. */ 5444 unsigned int num_str; 5445 struct sctp_strseq *stseq, strseqbuf; 5446 5447 offset += sizeof(*fwd); 5448 5449 num_str = fwd_sz / sizeof(struct sctp_strseq); 5450 for (i = 0; i < num_str; i++) { 5451 uint16_t st; 5452 unsigned char *xx; 5453 5454 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset, 5455 sizeof(struct sctp_strseq), 5456 (uint8_t *) & strseqbuf); 5457 offset += sizeof(struct sctp_strseq); 5458 if (stseq == NULL) 5459 break; 5460 /* Convert */ 5461 xx = (unsigned char *)&stseq[i]; 5462 st = ntohs(stseq[i].stream); 5463 stseq[i].stream = st; 5464 st = ntohs(stseq[i].sequence); 5465 stseq[i].sequence = st; 5466 /* now process */ 5467 if (stseq[i].stream >= asoc->streamincnt) { 5468 /* 5469 * It is arguable if we should continue. 5470 * Since the peer sent bogus stream info we 5471 * may be in deep trouble.. a return may be 5472 * a better choice? 5473 */ 5474 continue; 5475 } 5476 strm = &asoc->strmin[stseq[i].stream]; 5477 if (compare_with_wrap(stseq[i].sequence, 5478 strm->last_sequence_delivered, MAX_SEQ)) { 5479 /* Update the sequence number */ 5480 strm->last_sequence_delivered = 5481 stseq[i].sequence; 5482 } 5483 /* now kick the stream the new way */ 5484 sctp_kick_prsctp_reorder_queue(stcb, strm); 5485 } 5486 } 5487 if (TAILQ_FIRST(&asoc->reasmqueue)) { 5488 /* now lets kick out and check for more fragmented delivery */ 5489 sctp_deliver_reasm_check(stcb, &stcb->asoc); 5490 } 5491 } 5492