/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>


/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it) ... for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a one-to-many socket,
	 * since the sb_cc is the count that everyone has put up. When we
	 * re-write sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * Take out what has NOT been put on the socket queue and that we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0 (SWS avoidance engaged).
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
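/*
 * Note on sctp_calc_rwnd() above (a summary of the code, not new
 * behavior): the advertised window is roughly
 *
 *	rwnd = sbspace(so_rcv) - size_on_reasm_queue
 *	       - size_on_all_streams - my_rwnd_control_len
 *
 * i.e. socket-buffer space minus everything we have accepted but not yet
 * pushed up to the reader, minus the control/bookkeeping overhead, with
 * the result pinned to 1 when that overhead dominates.
 */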


/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


/*
 * Build out our readq entry based on the incoming chunk.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		/* copy only the payload, not another cmsghdr's worth */
		memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	SCTP_BUF_LEN(ret) = cmh->cmsg_len;
	return (ret);
}


char *
sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
    int *control_len,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	char *buf;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}
	SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
	if (buf == NULL) {
		/* No space */
		return (buf);
	}
	/* We need a CMSG header followed by the struct */
	cmh = (struct cmsghdr *)buf;
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		/* copy only the payload, not another cmsghdr's worth */
		memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	*control_len = len;
	return (buf);
}
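/*
 * Consumer-side sketch (userland, illustrative only; not part of this
 * file): the control data built above parses with the standard CMSG
 * macros after a recvmsg() that fills a struct msghdr named msg:
 *
 *	struct cmsghdr *cmsg;
 *
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
 *	    cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if (cmsg->cmsg_level == IPPROTO_SCTP &&
 *		    cmsg->cmsg_type == SCTP_SNDRCV) {
 *			struct sctp_sndrcvinfo *info;
 *
 *			info = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
 *			(then read info->sinfo_stream, info->sinfo_ssn, ...)
 *		}
 *	}
 */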
385 */ 386 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 387 goto abandon; 388 } else { 389 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) { 390 panic("This should not happen control_pdapi NULL?"); 391 } 392 /* if we did not panic, it was a EOM */ 393 panic("Bad chunking ??"); 394 return; 395 } 396 } 397 cntDel++; 398 } 399 /* pull it we did it */ 400 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); 401 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 402 asoc->fragmented_delivery_inprogress = 0; 403 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) { 404 asoc->strmin[stream_no].last_sequence_delivered++; 405 } 406 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) { 407 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); 408 } 409 } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 410 /* 411 * turn the flag back on since we just delivered 412 * yet another one. 413 */ 414 asoc->fragmented_delivery_inprogress = 1; 415 } 416 asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq; 417 asoc->last_flags_delivered = chk->rec.data.rcv_flags; 418 asoc->last_strm_seq_delivered = chk->rec.data.stream_seq; 419 asoc->last_strm_no_delivered = chk->rec.data.stream_number; 420 421 asoc->tsn_last_delivered = chk->rec.data.TSN_seq; 422 asoc->size_on_reasm_queue -= chk->send_size; 423 sctp_ucount_decr(asoc->cnt_on_reasm_queue); 424 /* free up the chk */ 425 chk->data = NULL; 426 sctp_free_a_chunk(stcb, chk); 427 428 if (asoc->fragmented_delivery_inprogress == 0) { 429 /* 430 * Now lets see if we can deliver the next one on 431 * the stream 432 */ 433 struct sctp_stream_in *strm; 434 435 strm = &asoc->strmin[stream_no]; 436 nxt_todel = strm->last_sequence_delivered + 1; 437 ctl = TAILQ_FIRST(&strm->inqueue); 438 if (ctl && (nxt_todel == ctl->sinfo_ssn)) { 439 while (ctl != NULL) { 440 /* Deliver more if we can. */ 441 if (nxt_todel == ctl->sinfo_ssn) { 442 ctlat = TAILQ_NEXT(ctl, next); 443 TAILQ_REMOVE(&strm->inqueue, ctl, next); 444 asoc->size_on_all_streams -= ctl->length; 445 sctp_ucount_decr(asoc->cnt_on_all_streams); 446 strm->last_sequence_delivered++; 447 sctp_add_to_readq(stcb->sctp_ep, stcb, 448 ctl, 449 &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED); 450 ctl = ctlat; 451 } else { 452 break; 453 } 454 nxt_todel = strm->last_sequence_delivered + 1; 455 } 456 } 457 break; 458 } 459 /* sa_ignore FREED_MEMORY */ 460 chk = TAILQ_FIRST(&asoc->reasmqueue); 461 } while (chk); 462 } 463 464 /* 465 * Queue the chunk either right into the socket buffer if it is the next one 466 * to go OR put it in the correct place in the delivery queue. If we do 467 * append to the so_buf, keep doing so until we are out of order. One big 468 * question still remains, what to do when the socket buffer is FULL?? 469 */ 470 static void 471 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc, 472 struct sctp_queued_to_read *control, int *abort_flag) 473 { 474 /* 475 * FIX-ME maybe? What happens when the ssn wraps? If we are getting 476 * all the data in one stream this could happen quite rapidly. One 477 * could use the TSN to keep track of things, but this scheme breaks 478 * down in the other type of stream useage that could occur. Send a 479 * single msg to stream 0, send 4Billion messages to stream 1, now 480 * send a message to stream 0. You have a situation where the TSN 481 * has wrapped but not in the stream. 

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains: what to do when the socket buffer is FULL??
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn
	 * 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
	if (compare_with_wrap(strm->last_sequence_delivered,
	    control->sinfo_ssn, MAX_SEQ) ||
	    (strm->last_sequence_delivered == control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (3 * sizeof(uint32_t));
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

		*abort_flag = 1;
		return;
	}
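	/*
	 * Aside on the abort payload built above (this pattern repeats
	 * throughout this file): the PROTOCOL_VIOLATION error cause carries
	 * three 32-bit diagnostic words -- a file/location marker
	 * (SCTP_FROM_SCTP_INDATA + SCTP_LOC_n), the offending TSN, and the
	 * stream id packed with the stream sequence ((sid << 16) | ssn) --
	 * so the ABORT seen by the peer can be traced back to the exact
	 * check that fired.
	 */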
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
		control = TAILQ_FIRST(&strm->inqueue);
		while (control != NULL) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				at = TAILQ_NEXT(control, next);
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
				control = at;
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if ((compare_with_wrap(asoc->cumulative_tsn,
		    control->sinfo_tsn, MAX_TSN)) ||
		    (control->sinfo_tsn == asoc->cumulative_tsn)) {
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (compare_with_wrap(at->sinfo_ssn,
				    control->sinfo_ssn, MAX_SEQ)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, he sent me a duplicate str
					 * seq number.
					 *
					 * foo bar, I guess I will just free
					 * this new guy, should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSN's have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh, for now just blow
					 * away the chunk!
					 */
					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom)
						sctp_free_remote_addr(control->whoFrom);
					control->whoFrom = NULL;
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}
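/*
 * Note on compare_with_wrap(a, b, MAX_SEQ) as used above: it is serial-
 * number comparison in the spirit of RFC 1982, true when 'a' is logically
 * newer than 'b' even across a wrap of the 16-bit SSN space, so e.g. SSN 1
 * sorts "after" SSN 65535. This keeps the per-stream inqueue ordered by
 * SSN, with exact duplicates dropped rather than queued twice.
 */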
633 */ 634 635 if (control->data) 636 sctp_m_freem(control->data); 637 control->data = NULL; 638 asoc->size_on_all_streams -= control->length; 639 sctp_ucount_decr(asoc->cnt_on_all_streams); 640 if (control->whoFrom) 641 sctp_free_remote_addr(control->whoFrom); 642 control->whoFrom = NULL; 643 sctp_free_a_readq(stcb, control); 644 return; 645 } else { 646 if (TAILQ_NEXT(at, next) == NULL) { 647 /* 648 * We are at the end, insert 649 * it after this one 650 */ 651 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 652 sctp_log_strm_del(control, at, 653 SCTP_STR_LOG_FROM_INSERT_TL); 654 } 655 TAILQ_INSERT_AFTER(&strm->inqueue, 656 at, control, next); 657 break; 658 } 659 } 660 } 661 } 662 } 663 } 664 665 /* 666 * Returns two things: You get the total size of the deliverable parts of the 667 * first fragmented message on the reassembly queue. And you get a 1 back if 668 * all of the message is ready or a 0 back if the message is still incomplete 669 */ 670 static int 671 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size) 672 { 673 struct sctp_tmit_chunk *chk; 674 uint32_t tsn; 675 676 *t_size = 0; 677 chk = TAILQ_FIRST(&asoc->reasmqueue); 678 if (chk == NULL) { 679 /* nothing on the queue */ 680 return (0); 681 } 682 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) { 683 /* Not a first on the queue */ 684 return (0); 685 } 686 tsn = chk->rec.data.TSN_seq; 687 while (chk) { 688 if (tsn != chk->rec.data.TSN_seq) { 689 return (0); 690 } 691 *t_size += chk->send_size; 692 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 693 return (1); 694 } 695 tsn++; 696 chk = TAILQ_NEXT(chk, sctp_next); 697 } 698 return (0); 699 } 700 701 static void 702 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc) 703 { 704 struct sctp_tmit_chunk *chk; 705 uint16_t nxt_todel; 706 uint32_t tsize; 707 708 doit_again: 709 chk = TAILQ_FIRST(&asoc->reasmqueue); 710 if (chk == NULL) { 711 /* Huh? */ 712 asoc->size_on_reasm_queue = 0; 713 asoc->cnt_on_reasm_queue = 0; 714 return; 715 } 716 if (asoc->fragmented_delivery_inprogress == 0) { 717 nxt_todel = 718 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1; 719 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) && 720 (nxt_todel == chk->rec.data.stream_seq || 721 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) { 722 /* 723 * Yep the first one is here and its ok to deliver 724 * but should we? 725 */ 726 if ((sctp_is_all_msg_on_reasm(asoc, &tsize) || 727 (tsize >= stcb->sctp_ep->partial_delivery_point))) { 728 729 /* 730 * Yes, we setup to start reception, by 731 * backing down the TSN just in case we 732 * can't deliver. If we 733 */ 734 asoc->fragmented_delivery_inprogress = 1; 735 asoc->tsn_last_delivered = 736 chk->rec.data.TSN_seq - 1; 737 asoc->str_of_pdapi = 738 chk->rec.data.stream_number; 739 asoc->ssn_of_pdapi = chk->rec.data.stream_seq; 740 asoc->pdapi_ppid = chk->rec.data.payloadtype; 741 asoc->fragment_flags = chk->rec.data.rcv_flags; 742 sctp_service_reassembly(stcb, asoc); 743 } 744 } 745 } else { 746 /* 747 * Service re-assembly will deliver stream data queued at 748 * the end of fragmented delivery.. but it wont know to go 749 * back and call itself again... we do that here with the 750 * got doit_again 751 */ 752 sctp_service_reassembly(stcb, asoc); 753 if (asoc->fragmented_delivery_inprogress == 0) { 754 /* 755 * finished our Fragmented delivery, could be more 756 * waiting? 
757 */ 758 goto doit_again; 759 } 760 } 761 } 762 763 /* 764 * Dump onto the re-assembly queue, in its proper place. After dumping on the 765 * queue, see if anthing can be delivered. If so pull it off (or as much as 766 * we can. If we run out of space then we must dump what we can and set the 767 * appropriate flag to say we queued what we could. 768 */ 769 static void 770 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc, 771 struct sctp_tmit_chunk *chk, int *abort_flag) 772 { 773 struct mbuf *oper; 774 uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn; 775 u_char last_flags; 776 struct sctp_tmit_chunk *at, *prev, *next; 777 778 prev = next = NULL; 779 cum_ackp1 = asoc->tsn_last_delivered + 1; 780 if (TAILQ_EMPTY(&asoc->reasmqueue)) { 781 /* This is the first one on the queue */ 782 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next); 783 /* 784 * we do not check for delivery of anything when only one 785 * fragment is here 786 */ 787 asoc->size_on_reasm_queue = chk->send_size; 788 sctp_ucount_incr(asoc->cnt_on_reasm_queue); 789 if (chk->rec.data.TSN_seq == cum_ackp1) { 790 if (asoc->fragmented_delivery_inprogress == 0 && 791 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) != 792 SCTP_DATA_FIRST_FRAG) { 793 /* 794 * An empty queue, no delivery inprogress, 795 * we hit the next one and it does NOT have 796 * a FIRST fragment mark. 797 */ 798 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n"); 799 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 800 0, M_DONTWAIT, 1, MT_DATA); 801 802 if (oper) { 803 struct sctp_paramhdr *ph; 804 uint32_t *ippp; 805 806 SCTP_BUF_LEN(oper) = 807 sizeof(struct sctp_paramhdr) + 808 (sizeof(uint32_t) * 3); 809 ph = mtod(oper, struct sctp_paramhdr *); 810 ph->param_type = 811 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 812 ph->param_length = htons(SCTP_BUF_LEN(oper)); 813 ippp = (uint32_t *) (ph + 1); 814 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2); 815 ippp++; 816 *ippp = chk->rec.data.TSN_seq; 817 ippp++; 818 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 819 820 } 821 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2; 822 sctp_abort_an_association(stcb->sctp_ep, stcb, 823 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED); 824 *abort_flag = 1; 825 } else if (asoc->fragmented_delivery_inprogress && 826 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) { 827 /* 828 * We are doing a partial delivery and the 829 * NEXT chunk MUST be either the LAST or 830 * MIDDLE fragment NOT a FIRST 831 */ 832 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n"); 833 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 834 0, M_DONTWAIT, 1, MT_DATA); 835 if (oper) { 836 struct sctp_paramhdr *ph; 837 uint32_t *ippp; 838 839 SCTP_BUF_LEN(oper) = 840 sizeof(struct sctp_paramhdr) + 841 (3 * sizeof(uint32_t)); 842 ph = mtod(oper, struct sctp_paramhdr *); 843 ph->param_type = 844 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 845 ph->param_length = htons(SCTP_BUF_LEN(oper)); 846 ippp = (uint32_t *) (ph + 1); 847 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3); 848 ippp++; 849 *ippp = chk->rec.data.TSN_seq; 850 ippp++; 851 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 852 } 853 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3; 854 sctp_abort_an_association(stcb->sctp_ep, stcb, 855 
SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED); 856 *abort_flag = 1; 857 } else if (asoc->fragmented_delivery_inprogress) { 858 /* 859 * Here we are ok with a MIDDLE or LAST 860 * piece 861 */ 862 if (chk->rec.data.stream_number != 863 asoc->str_of_pdapi) { 864 /* Got to be the right STR No */ 865 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n", 866 chk->rec.data.stream_number, 867 asoc->str_of_pdapi); 868 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 869 0, M_DONTWAIT, 1, MT_DATA); 870 if (oper) { 871 struct sctp_paramhdr *ph; 872 uint32_t *ippp; 873 874 SCTP_BUF_LEN(oper) = 875 sizeof(struct sctp_paramhdr) + 876 (sizeof(uint32_t) * 3); 877 ph = mtod(oper, 878 struct sctp_paramhdr *); 879 ph->param_type = 880 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 881 ph->param_length = 882 htons(SCTP_BUF_LEN(oper)); 883 ippp = (uint32_t *) (ph + 1); 884 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4); 885 ippp++; 886 *ippp = chk->rec.data.TSN_seq; 887 ippp++; 888 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 889 } 890 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4; 891 sctp_abort_an_association(stcb->sctp_ep, 892 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED); 893 *abort_flag = 1; 894 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) != 895 SCTP_DATA_UNORDERED && 896 chk->rec.data.stream_seq != 897 asoc->ssn_of_pdapi) { 898 /* Got to be the right STR Seq */ 899 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n", 900 chk->rec.data.stream_seq, 901 asoc->ssn_of_pdapi); 902 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 903 0, M_DONTWAIT, 1, MT_DATA); 904 if (oper) { 905 struct sctp_paramhdr *ph; 906 uint32_t *ippp; 907 908 SCTP_BUF_LEN(oper) = 909 sizeof(struct sctp_paramhdr) + 910 (3 * sizeof(uint32_t)); 911 ph = mtod(oper, 912 struct sctp_paramhdr *); 913 ph->param_type = 914 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 915 ph->param_length = 916 htons(SCTP_BUF_LEN(oper)); 917 ippp = (uint32_t *) (ph + 1); 918 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5); 919 ippp++; 920 *ippp = chk->rec.data.TSN_seq; 921 ippp++; 922 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 923 924 } 925 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5; 926 sctp_abort_an_association(stcb->sctp_ep, 927 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED); 928 *abort_flag = 1; 929 } 930 } 931 } 932 return; 933 } 934 /* Find its place */ 935 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) { 936 if (compare_with_wrap(at->rec.data.TSN_seq, 937 chk->rec.data.TSN_seq, MAX_TSN)) { 938 /* 939 * one in queue is bigger than the new one, insert 940 * before this one 941 */ 942 /* A check */ 943 asoc->size_on_reasm_queue += chk->send_size; 944 sctp_ucount_incr(asoc->cnt_on_reasm_queue); 945 next = at; 946 TAILQ_INSERT_BEFORE(at, chk, sctp_next); 947 break; 948 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) { 949 /* Gak, He sent me a duplicate str seq number */ 950 /* 951 * foo bar, I guess I will just free this new guy, 952 * should we abort too? FIX ME MAYBE? Or it COULD be 953 * that the SSN's have wrapped. Maybe I should 954 * compare to TSN somehow... sigh for now just blow 955 * away the chunk! 
956 */ 957 if (chk->data) { 958 sctp_m_freem(chk->data); 959 chk->data = NULL; 960 } 961 sctp_free_a_chunk(stcb, chk); 962 return; 963 } else { 964 last_flags = at->rec.data.rcv_flags; 965 last_tsn = at->rec.data.TSN_seq; 966 prev = at; 967 if (TAILQ_NEXT(at, sctp_next) == NULL) { 968 /* 969 * We are at the end, insert it after this 970 * one 971 */ 972 /* check it first */ 973 asoc->size_on_reasm_queue += chk->send_size; 974 sctp_ucount_incr(asoc->cnt_on_reasm_queue); 975 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next); 976 break; 977 } 978 } 979 } 980 /* Now the audits */ 981 if (prev) { 982 prev_tsn = chk->rec.data.TSN_seq - 1; 983 if (prev_tsn == prev->rec.data.TSN_seq) { 984 /* 985 * Ok the one I am dropping onto the end is the 986 * NEXT. A bit of valdiation here. 987 */ 988 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 989 SCTP_DATA_FIRST_FRAG || 990 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 991 SCTP_DATA_MIDDLE_FRAG) { 992 /* 993 * Insert chk MUST be a MIDDLE or LAST 994 * fragment 995 */ 996 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 997 SCTP_DATA_FIRST_FRAG) { 998 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n"); 999 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n"); 1000 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1001 0, M_DONTWAIT, 1, MT_DATA); 1002 if (oper) { 1003 struct sctp_paramhdr *ph; 1004 uint32_t *ippp; 1005 1006 SCTP_BUF_LEN(oper) = 1007 sizeof(struct sctp_paramhdr) + 1008 (3 * sizeof(uint32_t)); 1009 ph = mtod(oper, 1010 struct sctp_paramhdr *); 1011 ph->param_type = 1012 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1013 ph->param_length = 1014 htons(SCTP_BUF_LEN(oper)); 1015 ippp = (uint32_t *) (ph + 1); 1016 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6); 1017 ippp++; 1018 *ippp = chk->rec.data.TSN_seq; 1019 ippp++; 1020 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1021 1022 } 1023 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6; 1024 sctp_abort_an_association(stcb->sctp_ep, 1025 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED); 1026 *abort_flag = 1; 1027 return; 1028 } 1029 if (chk->rec.data.stream_number != 1030 prev->rec.data.stream_number) { 1031 /* 1032 * Huh, need the correct STR here, 1033 * they must be the same. 
1034 */ 1035 SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n", 1036 chk->rec.data.stream_number, 1037 prev->rec.data.stream_number); 1038 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1039 0, M_DONTWAIT, 1, MT_DATA); 1040 if (oper) { 1041 struct sctp_paramhdr *ph; 1042 uint32_t *ippp; 1043 1044 SCTP_BUF_LEN(oper) = 1045 sizeof(struct sctp_paramhdr) + 1046 (3 * sizeof(uint32_t)); 1047 ph = mtod(oper, 1048 struct sctp_paramhdr *); 1049 ph->param_type = 1050 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1051 ph->param_length = 1052 htons(SCTP_BUF_LEN(oper)); 1053 ippp = (uint32_t *) (ph + 1); 1054 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7); 1055 ippp++; 1056 *ippp = chk->rec.data.TSN_seq; 1057 ippp++; 1058 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1059 } 1060 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7; 1061 sctp_abort_an_association(stcb->sctp_ep, 1062 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED); 1063 1064 *abort_flag = 1; 1065 return; 1066 } 1067 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 && 1068 chk->rec.data.stream_seq != 1069 prev->rec.data.stream_seq) { 1070 /* 1071 * Huh, need the correct STR here, 1072 * they must be the same. 1073 */ 1074 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n", 1075 chk->rec.data.stream_seq, 1076 prev->rec.data.stream_seq); 1077 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1078 0, M_DONTWAIT, 1, MT_DATA); 1079 if (oper) { 1080 struct sctp_paramhdr *ph; 1081 uint32_t *ippp; 1082 1083 SCTP_BUF_LEN(oper) = 1084 sizeof(struct sctp_paramhdr) + 1085 (3 * sizeof(uint32_t)); 1086 ph = mtod(oper, 1087 struct sctp_paramhdr *); 1088 ph->param_type = 1089 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1090 ph->param_length = 1091 htons(SCTP_BUF_LEN(oper)); 1092 ippp = (uint32_t *) (ph + 1); 1093 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8); 1094 ippp++; 1095 *ippp = chk->rec.data.TSN_seq; 1096 ippp++; 1097 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1098 } 1099 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8; 1100 sctp_abort_an_association(stcb->sctp_ep, 1101 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED); 1102 1103 *abort_flag = 1; 1104 return; 1105 } 1106 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 1107 SCTP_DATA_LAST_FRAG) { 1108 /* Insert chk MUST be a FIRST */ 1109 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) != 1110 SCTP_DATA_FIRST_FRAG) { 1111 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n"); 1112 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1113 0, M_DONTWAIT, 1, MT_DATA); 1114 if (oper) { 1115 struct sctp_paramhdr *ph; 1116 uint32_t *ippp; 1117 1118 SCTP_BUF_LEN(oper) = 1119 sizeof(struct sctp_paramhdr) + 1120 (3 * sizeof(uint32_t)); 1121 ph = mtod(oper, 1122 struct sctp_paramhdr *); 1123 ph->param_type = 1124 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1125 ph->param_length = 1126 htons(SCTP_BUF_LEN(oper)); 1127 ippp = (uint32_t *) (ph + 1); 1128 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9); 1129 ippp++; 1130 *ippp = chk->rec.data.TSN_seq; 1131 ippp++; 1132 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1133 1134 } 1135 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9; 1136 sctp_abort_an_association(stcb->sctp_ep, 1137 stcb, SCTP_PEER_FAULTY, 
oper, SCTP_SO_NOT_LOCKED); 1138 1139 *abort_flag = 1; 1140 return; 1141 } 1142 } 1143 } 1144 } 1145 if (next) { 1146 post_tsn = chk->rec.data.TSN_seq + 1; 1147 if (post_tsn == next->rec.data.TSN_seq) { 1148 /* 1149 * Ok the one I am inserting ahead of is my NEXT 1150 * one. A bit of valdiation here. 1151 */ 1152 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 1153 /* Insert chk MUST be a last fragment */ 1154 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) 1155 != SCTP_DATA_LAST_FRAG) { 1156 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n"); 1157 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n"); 1158 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1159 0, M_DONTWAIT, 1, MT_DATA); 1160 if (oper) { 1161 struct sctp_paramhdr *ph; 1162 uint32_t *ippp; 1163 1164 SCTP_BUF_LEN(oper) = 1165 sizeof(struct sctp_paramhdr) + 1166 (3 * sizeof(uint32_t)); 1167 ph = mtod(oper, 1168 struct sctp_paramhdr *); 1169 ph->param_type = 1170 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1171 ph->param_length = 1172 htons(SCTP_BUF_LEN(oper)); 1173 ippp = (uint32_t *) (ph + 1); 1174 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10); 1175 ippp++; 1176 *ippp = chk->rec.data.TSN_seq; 1177 ippp++; 1178 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1179 } 1180 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10; 1181 sctp_abort_an_association(stcb->sctp_ep, 1182 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED); 1183 1184 *abort_flag = 1; 1185 return; 1186 } 1187 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 1188 SCTP_DATA_MIDDLE_FRAG || 1189 (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 1190 SCTP_DATA_LAST_FRAG) { 1191 /* 1192 * Insert chk CAN be MIDDLE or FIRST NOT 1193 * LAST 1194 */ 1195 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == 1196 SCTP_DATA_LAST_FRAG) { 1197 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n"); 1198 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n"); 1199 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1200 0, M_DONTWAIT, 1, MT_DATA); 1201 if (oper) { 1202 struct sctp_paramhdr *ph; 1203 uint32_t *ippp; 1204 1205 SCTP_BUF_LEN(oper) = 1206 sizeof(struct sctp_paramhdr) + 1207 (3 * sizeof(uint32_t)); 1208 ph = mtod(oper, 1209 struct sctp_paramhdr *); 1210 ph->param_type = 1211 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1212 ph->param_length = 1213 htons(SCTP_BUF_LEN(oper)); 1214 ippp = (uint32_t *) (ph + 1); 1215 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11); 1216 ippp++; 1217 *ippp = chk->rec.data.TSN_seq; 1218 ippp++; 1219 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1220 1221 } 1222 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11; 1223 sctp_abort_an_association(stcb->sctp_ep, 1224 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED); 1225 1226 *abort_flag = 1; 1227 return; 1228 } 1229 if (chk->rec.data.stream_number != 1230 next->rec.data.stream_number) { 1231 /* 1232 * Huh, need the correct STR here, 1233 * they must be the same. 
1234 */ 1235 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n", 1236 chk->rec.data.stream_number, 1237 next->rec.data.stream_number); 1238 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1239 0, M_DONTWAIT, 1, MT_DATA); 1240 if (oper) { 1241 struct sctp_paramhdr *ph; 1242 uint32_t *ippp; 1243 1244 SCTP_BUF_LEN(oper) = 1245 sizeof(struct sctp_paramhdr) + 1246 (3 * sizeof(uint32_t)); 1247 ph = mtod(oper, 1248 struct sctp_paramhdr *); 1249 ph->param_type = 1250 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1251 ph->param_length = 1252 htons(SCTP_BUF_LEN(oper)); 1253 ippp = (uint32_t *) (ph + 1); 1254 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12); 1255 ippp++; 1256 *ippp = chk->rec.data.TSN_seq; 1257 ippp++; 1258 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1259 1260 } 1261 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12; 1262 sctp_abort_an_association(stcb->sctp_ep, 1263 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED); 1264 1265 *abort_flag = 1; 1266 return; 1267 } 1268 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 && 1269 chk->rec.data.stream_seq != 1270 next->rec.data.stream_seq) { 1271 /* 1272 * Huh, need the correct STR here, 1273 * they must be the same. 1274 */ 1275 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n", 1276 chk->rec.data.stream_seq, 1277 next->rec.data.stream_seq); 1278 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1279 0, M_DONTWAIT, 1, MT_DATA); 1280 if (oper) { 1281 struct sctp_paramhdr *ph; 1282 uint32_t *ippp; 1283 1284 SCTP_BUF_LEN(oper) = 1285 sizeof(struct sctp_paramhdr) + 1286 (3 * sizeof(uint32_t)); 1287 ph = mtod(oper, 1288 struct sctp_paramhdr *); 1289 ph->param_type = 1290 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1291 ph->param_length = 1292 htons(SCTP_BUF_LEN(oper)); 1293 ippp = (uint32_t *) (ph + 1); 1294 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13); 1295 ippp++; 1296 *ippp = chk->rec.data.TSN_seq; 1297 ippp++; 1298 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); 1299 } 1300 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13; 1301 sctp_abort_an_association(stcb->sctp_ep, 1302 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED); 1303 1304 *abort_flag = 1; 1305 return; 1306 } 1307 } 1308 } 1309 } 1310 /* Do we need to do some delivery? check */ 1311 sctp_deliver_reasm_check(stcb, asoc); 1312 } 1313 1314 /* 1315 * This is an unfortunate routine. It checks to make sure a evil guy is not 1316 * stuffing us full of bad packet fragments. A broken peer could also do this 1317 * but this is doubtful. It is to bad I must worry about evil crackers sigh 1318 * :< more cycles. 1319 */ 1320 static int 1321 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc, 1322 uint32_t TSN_seq) 1323 { 1324 struct sctp_tmit_chunk *at; 1325 uint32_t tsn_est; 1326 1327 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) { 1328 if (compare_with_wrap(TSN_seq, 1329 at->rec.data.TSN_seq, MAX_TSN)) { 1330 /* is it one bigger? */ 1331 tsn_est = at->rec.data.TSN_seq + 1; 1332 if (tsn_est == TSN_seq) { 1333 /* yep. It better be a last then */ 1334 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) != 1335 SCTP_DATA_LAST_FRAG) { 1336 /* 1337 * Ok this guy belongs next to a guy 1338 * that is NOT last, it should be a 1339 * middle/last, not a complete 1340 * chunk. 
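/*
 * Summary of the audits in sctp_queue_data_for_reasm() above: once the
 * chunk is positioned by TSN, the neighbor holding TSN-1 must be a FIRST
 * or MIDDLE of the same message (same stream number, and same SSN when
 * ordered) for the new chunk to be a MIDDLE/LAST, while a neighbor that
 * is a LAST forces the new chunk to be a FIRST; the neighbor holding
 * TSN+1 constrains it symmetrically. Any mismatch is treated as a
 * protocol violation and aborts the association.
 */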

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is
 * not stuffing us full of bad packet fragments. A broken peer could also
 * do this, but this is doubtful. It is too bad I must worry about evil
 * crackers. Sigh, :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(TSN_seq,
		    at->rec.data.TSN_seq, MAX_TSN)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok, this guy belongs next to a
					 * guy that is NOT last; it should
					 * be a middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since it's a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than the new chunk, but does
			 * it need to be right before it?
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, it better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}
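/*
 * Example of the adjacency rule above: if the reassembly queue holds TSN
 * 5 (FIRST) and TSN 6 (MIDDLE), an arriving chunk with TSN 7 can only be
 * the continuation of that message, so a fully self-contained DATA chunk
 * carrying TSN 7 makes this routine return 1 and the sender is judged
 * broken or evil.
 */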


static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
	/* Process a data chunk */
	struct sctp_tmit_chunk *chk;
	uint32_t tsn, gap;
	struct mbuf *dmbuf;
	int indx, the_len;
	int need_reasm_check = 0;
	uint16_t strmno, strmseq;
	struct mbuf *oper;
	struct sctp_queued_to_read *control;
	int ordered;
	uint32_t protocol_id;
	uint8_t chunk_flags;
	struct sctp_stream_reset_list *liste;

	chk = NULL;
	tsn = ntohl(ch->dp.tsn);
	chunk_flags = ch->ch.chunk_flags;
	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
		asoc->send_sack = 1;
	}
	protocol_id = ch->dp.protocol_id;
	ordered = ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
	}
	if (stcb == NULL) {
		return (0);
	}
	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
	if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
	    asoc->cumulative_tsn == tsn) {
		/* It is a duplicate */
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/* Calculate the number of TSN's between the base and this TSN */
	if (tsn >= asoc->mapping_array_base_tsn) {
		gap = tsn - asoc->mapping_array_base_tsn;
	} else {
		gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1;
	}
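	/*
	 * Worked example for the gap math above: with
	 * mapping_array_base_tsn = 100 and tsn = 103, gap = 3. Across a
	 * wrap, with base = 0xFFFFFFFE and tsn = 1, gap = (0xFFFFFFFF -
	 * 0xFFFFFFFE) + 1 + 1 = 3 as well, since the bit index counts TSNs
	 * 0xFFFFFFFE, 0xFFFFFFFF, 0x0, 0x1.
	 */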
	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
		/* Can't hold the bit in the mapping at max array, toss it */
		return (0);
	}
	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_expand_mapping_array(asoc, gap)) {
			/* Can't expand, drop it */
			return (0);
		}
	}
	if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
		*high_tsn = tsn;
	}
	/* See if we have received this one already */
	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/*
	 * Check to see about the GONE flag; duplicates would cause a sack
	 * to be sent up above
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
		/*
		 * wait a minute, this guy is gone, there is no longer a
		 * receiver. Send peer an ABORT!
		 */
		struct mbuf *op_err;

		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
	/*
	 * Now before going further we see if there is room. If NOT then we
	 * MAY let one through only IF this TSN is the one we are waiting
	 * for on a partial delivery API.
	 */

	/* now do the tests */
	if (((asoc->cnt_on_all_streams +
	    asoc->cnt_on_reasm_queue +
	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
	    (((int)asoc->my_rwnd) <= 0)) {
		/*
		 * When we have NO room in the rwnd we check to make sure
		 * the reader is doing its job...
		 */
		if (stcb->sctp_socket->so_rcv.sb_cc) {
			/* some to read, wake-up */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		/* now is it in the mapping array of what we have accepted? */
		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
			/* Nope, not in the valid range, dump it */
			sctp_set_rwnd(stcb, asoc);
			if ((asoc->cnt_on_all_streams +
			    asoc->cnt_on_reasm_queue +
			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
				SCTP_STAT_INCR(sctps_datadropchklmt);
			} else {
				SCTP_STAT_INCR(sctps_datadroprwnd);
			}
			indx = *break_flag;
			*break_flag = 1;
			return (0);
		}
	}
	strmno = ntohs(ch->dp.stream_id);
	if (strmno >= asoc->streamincnt) {
		struct sctp_paramhdr *phdr;
		struct mbuf *mb;

		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (mb != NULL) {
			/* add some space up front so prepend will work well */
			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
			phdr = mtod(mb, struct sctp_paramhdr *);
			/*
			 * Error causes are just param's and this one has
			 * two back to back phdr, one with the error type
			 * and size, the other with the streamid and a rsvd
			 */
			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
			phdr->param_length =
			    htons(sizeof(struct sctp_paramhdr) * 2);
			phdr++;
			/* We insert the stream in the type field */
			phdr->param_type = ch->dp.stream_id;
			/* And set the length to 0 for the rsvd field */
			phdr->param_length = 0;
			sctp_queue_op_err(stcb, mb);
		}
		SCTP_STAT_INCR(sctps_badsid);
		SCTP_TCB_LOCK_ASSERT(stcb);
		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
			/* we have a new high score */
			asoc->highest_tsn_inside_map = tsn;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
			}
		}
		if (tsn == (asoc->cumulative_tsn + 1)) {
			/* Update cum-ack */
			asoc->cumulative_tsn = tsn;
		}
		return (0);
	}
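	/*
	 * Note on the invalid-stream case above: the TSN is still marked
	 * present in the mapping array and still allowed to advance the
	 * cumulative ack, so the peer will not keep retransmitting a chunk
	 * we can never deliver; only the payload is discarded, with an
	 * "Invalid Stream Identifier" error cause queued back.
	 */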
	/*
	 * Before we continue let's validate that we are not being fooled by
	 * an evil attacker. We can only have 4k chunks based on our TSN
	 * spread allowed by the mapping array (512 * 8 bits), so there is
	 * no way our stream sequence numbers could have wrapped. We of
	 * course only validate the FIRST fragment, so the bit must be set.
	 */
	strmseq = ntohs(ch->dp.stream_sequence);
#ifdef SCTP_ASOCLOG_OF_TSNS
	SCTP_TCB_LOCK_ASSERT(stcb);
	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
		asoc->tsn_in_at = 0;
		asoc->tsn_in_wrapped = 1;
	}
	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
	asoc->tsn_in_at++;
#endif
	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
	    (TAILQ_EMPTY(&asoc->resetHead)) &&
	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
	    (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
	    strmseq, MAX_SEQ) ||
	    asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (3 * sizeof(uint32_t));
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
			ippp++;
			*ippp = tsn;
			ippp++;
			*ippp = ((strmno << 16) | strmseq);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
	/************************************
	 * From here down we may find ch-> invalid
	 * so it's a good idea NOT to use it.
	 *************************************/

	the_len = (chk_length - sizeof(struct sctp_data_chunk));
	if (last_chunk == 0) {
		dmbuf = SCTP_M_COPYM(*m,
		    (offset + sizeof(struct sctp_data_chunk)),
		    the_len, M_DONTWAIT);
#ifdef SCTP_MBUF_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
			struct mbuf *mat;

			mat = dmbuf;
			while (mat) {
				if (SCTP_BUF_IS_EXTENDED(mat)) {
					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
				}
				mat = SCTP_BUF_NEXT(mat);
			}
		}
#endif
	} else {
		/* We can steal the last chunk */
		int l_len;

		dmbuf = *m;
		/* lop off the top part */
		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
			l_len = SCTP_BUF_LEN(dmbuf);
		} else {
			/*
			 * need to count up the size; hopefully we do not
			 * hit this too often :-0
			 */
			struct mbuf *lat;

			l_len = 0;
			lat = dmbuf;
			while (lat) {
				l_len += SCTP_BUF_LEN(lat);
				lat = SCTP_BUF_NEXT(lat);
			}
		}
		if (l_len > the_len) {
			/* Trim the excess bytes off the end */
			m_adj(dmbuf, -(l_len - the_len));
		}
	}
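	/*
	 * Why two paths above: when more chunks follow in this packet the
	 * payload must be copied out with SCTP_M_COPYM, but for the last
	 * chunk we can steal the packet's mbuf chain itself and just
	 * m_adj() the chunk header off the front and any padding off the
	 * tail, saving a copy.
	 */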
	if (dmbuf == NULL) {
		SCTP_STAT_INCR(sctps_nomem);
		return (0);
	}
	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
	    asoc->fragmented_delivery_inprogress == 0 &&
	    TAILQ_EMPTY(&asoc->resetHead) &&
	    ((ordered == 0) ||
	    ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
		/* Candidate for express delivery */
		/*
		 * It's not fragmented, no PD-API is up, nothing is in the
		 * delivery queue, it's un-ordered OR ordered and the next
		 * to deliver AND nothing else is stuck on the stream
		 * queue, and there is room for it in the socket buffer.
		 * Let's just stuff it up the buffer....
		 */

		/* It would be nice to avoid this copy if we could :< */
		sctp_alloc_a_readq(stcb, control);
		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
		    protocol_id,
		    stcb->asoc.context,
		    strmno, strmseq,
		    chunk_flags,
		    dmbuf);
		if (control == NULL) {
			goto failed_express_del;
		}
		sctp_add_to_readq(stcb->sctp_ep, stcb, control, &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
			/* for ordered, bump what we delivered */
			asoc->strmin[strmno].last_sequence_delivered++;
		}
		SCTP_STAT_INCR(sctps_recvexpress);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
			    SCTP_STR_LOG_FROM_EXPRS_DEL);
		}
		control = NULL;
		goto finish_express_del;
	}
failed_express_del:
	/* If we reach here this is a new chunk */
	chk = NULL;
	control = NULL;
	/* Express for fragmented delivery? */
	if ((asoc->fragmented_delivery_inprogress) &&
	    (stcb->asoc.control_pdapi) &&
	    (asoc->str_of_pdapi == strmno) &&
	    (asoc->ssn_of_pdapi == strmseq)) {
		control = stcb->asoc.control_pdapi;
		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
			/* Can't be another first? */
			goto failed_pdapi_express_del;
		}
		if (tsn == (control->sinfo_tsn + 1)) {
			/* Yep, we can add it on */
			int end = 0;
			uint32_t cumack;

			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
				end = 1;
			}
			cumack = asoc->cumulative_tsn;
			if ((cumack + 1) == tsn)
				cumack = tsn;

			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
			    tsn,
			    &stcb->sctp_socket->so_rcv)) {
				SCTP_PRINTF("Append fails end:%d\n", end);
				goto failed_pdapi_express_del;
			}
			SCTP_STAT_INCR(sctps_recvexpressm);
			control->sinfo_tsn = tsn;
			asoc->tsn_last_delivered = tsn;
			asoc->fragment_flags = chunk_flags;
			asoc->tsn_of_pdapi_last_delivered = tsn;
			asoc->last_flags_delivered = chunk_flags;
			asoc->last_strm_seq_delivered = strmseq;
			asoc->last_strm_no_delivered = strmno;
			if (end) {
				/* clean up the flags and such */
				asoc->fragmented_delivery_inprogress = 0;
				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
					asoc->strmin[strmno].last_sequence_delivered++;
				}
				stcb->asoc.control_pdapi = NULL;
				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
					/*
					 * There could be another message
					 * ready
					 */
					need_reasm_check = 1;
				}
			}
			control = NULL;
			goto finish_express_del;
		}
	}
failed_pdapi_express_del:
	control = NULL;
	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
		sctp_alloc_a_chunk(stcb, chk);
		if (chk == NULL) {
			/* No memory so we drop the chunk */
			SCTP_STAT_INCR(sctps_nomem);
			if (last_chunk == 0) {
				/* we copied it, free the copy */
				sctp_m_freem(dmbuf);
			}
			return (0);
		}
		chk->rec.data.TSN_seq = tsn;
		chk->no_fr_allowed = 0;
		chk->rec.data.stream_seq = strmseq;
		chk->rec.data.stream_number = strmno;
		chk->rec.data.payloadtype = protocol_id;
		chk->rec.data.context = stcb->asoc.context;
		chk->rec.data.doing_fast_retransmit = 0;
		chk->rec.data.rcv_flags = chunk_flags;
		chk->asoc = asoc;
		chk->send_size = the_len;
		chk->whoTo = net;
		atomic_add_int(&net->ref_count, 1);
		chk->data = dmbuf;
	} else {
		sctp_alloc_a_readq(stcb, control);
		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
		    protocol_id,
		    stcb->asoc.context,
		    strmno, strmseq,
		    chunk_flags,
		    dmbuf);
		if (control == NULL) {
			/* No memory so we drop the chunk */
			SCTP_STAT_INCR(sctps_nomem);
			if (last_chunk == 0) {
				/* we copied it, free the copy */
				sctp_m_freem(dmbuf);
			}
			return (0);
		}
		control->length = the_len;
	}
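	/*
	 * Container choice above: a fragment is wrapped in a
	 * sctp_tmit_chunk (chk) destined for the reassembly queue, while a
	 * complete message goes straight into a sctp_queued_to_read
	 * (control) that can be handed to the socket buffer or a stream
	 * inqueue; exactly one of chk/control is non-NULL from here on.
	 */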
1808 } else { 1809 sctp_alloc_a_readq(stcb, control); 1810 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 1811 protocol_id, 1812 stcb->asoc.context, 1813 strmno, strmseq, 1814 chunk_flags, 1815 dmbuf); 1816 if (control == NULL) { 1817 /* No memory so we drop the chunk */ 1818 SCTP_STAT_INCR(sctps_nomem); 1819 if (last_chunk == 0) { 1820 /* we copied it, free the copy */ 1821 sctp_m_freem(dmbuf); 1822 } 1823 return (0); 1824 } 1825 control->length = the_len; 1826 } 1827 1828 /* Mark it as received */ 1829 /* Now queue it where it belongs */ 1830 if (control != NULL) { 1831 /* First a sanity check */ 1832 if (asoc->fragmented_delivery_inprogress) { 1833 /* 1834 * Ok, we have a fragmented delivery in progress if 1835 * this chunk is next to deliver OR belongs in our 1836 * view to the reassembly, the peer is evil or 1837 * broken. 1838 */ 1839 uint32_t estimate_tsn; 1840 1841 estimate_tsn = asoc->tsn_last_delivered + 1; 1842 if (TAILQ_EMPTY(&asoc->reasmqueue) && 1843 (estimate_tsn == control->sinfo_tsn)) { 1844 /* Evil/Broke peer */ 1845 sctp_m_freem(control->data); 1846 control->data = NULL; 1847 if (control->whoFrom) { 1848 sctp_free_remote_addr(control->whoFrom); 1849 control->whoFrom = NULL; 1850 } 1851 sctp_free_a_readq(stcb, control); 1852 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1853 0, M_DONTWAIT, 1, MT_DATA); 1854 if (oper) { 1855 struct sctp_paramhdr *ph; 1856 uint32_t *ippp; 1857 1858 SCTP_BUF_LEN(oper) = 1859 sizeof(struct sctp_paramhdr) + 1860 (3 * sizeof(uint32_t)); 1861 ph = mtod(oper, struct sctp_paramhdr *); 1862 ph->param_type = 1863 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1864 ph->param_length = htons(SCTP_BUF_LEN(oper)); 1865 ippp = (uint32_t *) (ph + 1); 1866 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15); 1867 ippp++; 1868 *ippp = tsn; 1869 ippp++; 1870 *ippp = ((strmno << 16) | strmseq); 1871 } 1872 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15; 1873 sctp_abort_an_association(stcb->sctp_ep, stcb, 1874 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED); 1875 1876 *abort_flag = 1; 1877 return (0); 1878 } else { 1879 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) { 1880 sctp_m_freem(control->data); 1881 control->data = NULL; 1882 if (control->whoFrom) { 1883 sctp_free_remote_addr(control->whoFrom); 1884 control->whoFrom = NULL; 1885 } 1886 sctp_free_a_readq(stcb, control); 1887 1888 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1889 0, M_DONTWAIT, 1, MT_DATA); 1890 if (oper) { 1891 struct sctp_paramhdr *ph; 1892 uint32_t *ippp; 1893 1894 SCTP_BUF_LEN(oper) = 1895 sizeof(struct sctp_paramhdr) + 1896 (3 * sizeof(uint32_t)); 1897 ph = mtod(oper, 1898 struct sctp_paramhdr *); 1899 ph->param_type = 1900 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1901 ph->param_length = 1902 htons(SCTP_BUF_LEN(oper)); 1903 ippp = (uint32_t *) (ph + 1); 1904 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16); 1905 ippp++; 1906 *ippp = tsn; 1907 ippp++; 1908 *ippp = ((strmno << 16) | strmseq); 1909 } 1910 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16; 1911 sctp_abort_an_association(stcb->sctp_ep, 1912 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED); 1913 1914 *abort_flag = 1; 1915 return (0); 1916 } 1917 } 1918 } else { 1919 /* No PDAPI running */ 1920 if (!TAILQ_EMPTY(&asoc->reasmqueue)) { 1921 /* 1922 * Reassembly queue is NOT empty validate 1923 * that this tsn does not need to be in 1924 * reasembly queue. 
If it does then our peer
1925 * is broken or evil.
1926 */
1927 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1928 sctp_m_freem(control->data);
1929 control->data = NULL;
1930 if (control->whoFrom) {
1931 sctp_free_remote_addr(control->whoFrom);
1932 control->whoFrom = NULL;
1933 }
1934 sctp_free_a_readq(stcb, control);
1935 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1936 0, M_DONTWAIT, 1, MT_DATA);
1937 if (oper) {
1938 struct sctp_paramhdr *ph;
1939 uint32_t *ippp;
1940
1941 SCTP_BUF_LEN(oper) =
1942 sizeof(struct sctp_paramhdr) +
1943 (3 * sizeof(uint32_t));
1944 ph = mtod(oper,
1945 struct sctp_paramhdr *);
1946 ph->param_type =
1947 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1948 ph->param_length =
1949 htons(SCTP_BUF_LEN(oper));
1950 ippp = (uint32_t *) (ph + 1);
1951 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
1952 ippp++;
1953 *ippp = tsn;
1954 ippp++;
1955 *ippp = ((strmno << 16) | strmseq);
1956 }
1957 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1958 sctp_abort_an_association(stcb->sctp_ep,
1959 stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1960
1961 *abort_flag = 1;
1962 return (0);
1963 }
1964 }
1965 }
1966 /* ok, if we reach here we have passed the sanity checks */
1967 if (chunk_flags & SCTP_DATA_UNORDERED) {
1968 /* queue directly into socket buffer */
1969 sctp_add_to_readq(stcb->sctp_ep, stcb,
1970 control,
1971 &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
1972 } else {
1973 /*
1974 * Special check for when streams are resetting. We
1975 * could be smarter about this and check the
1976 * actual stream to see if it is being reset; that
1977 * way we would not create head-of-line blocking
1978 * between streams being reset and those not being reset.
1979 *
1980 * We take complete messages that have a stream reset
1981 * intervening (aka the TSN is after where our
1982 * cum-ack needs to be) off and put them on a
1983 * pending_reply_queue. The reassembly ones we do
1984 * not have to worry about since they are all sorted
1985 * and processed by TSN order. It is only the
1986 * singletons I must worry about.
1987 */
1988 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1989 ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)))
1990 ) {
1991 /*
1992 * yep, it's past where we need to reset... go
1993 * ahead and queue it.
1994 */
1995 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
1996 /* first one on */
1997 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1998 } else {
1999 struct sctp_queued_to_read *ctlOn;
2000 unsigned char inserted = 0;
2001
2002 ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
2003 while (ctlOn) {
2004 if (compare_with_wrap(control->sinfo_tsn,
2005 ctlOn->sinfo_tsn, MAX_TSN)) {
2006 ctlOn = TAILQ_NEXT(ctlOn, next);
2007 } else {
2008 /* found it */
2009 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2010 inserted = 1;
2011 break;
2012 }
2013 }
2014 if (inserted == 0) {
2015 /*
2016 * Our TSN is larger than everything
2017 * queued, so it must be put at
2018 * the end.
2019 */
2020 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2021 }
2022 }
2023 } else {
2024 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2025 if (*abort_flag) {
2026 return (0);
2027 }
2028 }
2029 }
2030 } else {
2031 /* Into the re-assembly queue */
2032 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2033 if (*abort_flag) {
2034 /*
2035 * the assoc is now gone and chk was put onto the
2036 * reasm queue, which has all been freed.
2037 */ 2038 *m = NULL; 2039 return (0); 2040 } 2041 } 2042 finish_express_del: 2043 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) { 2044 /* we have a new high score */ 2045 asoc->highest_tsn_inside_map = tsn; 2046 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2047 sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 2048 } 2049 } 2050 if (tsn == (asoc->cumulative_tsn + 1)) { 2051 /* Update cum-ack */ 2052 asoc->cumulative_tsn = tsn; 2053 } 2054 if (last_chunk) { 2055 *m = NULL; 2056 } 2057 if (ordered) { 2058 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); 2059 } else { 2060 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); 2061 } 2062 SCTP_STAT_INCR(sctps_recvdata); 2063 /* Set it present please */ 2064 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 2065 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN); 2066 } 2067 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2068 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2069 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); 2070 } 2071 SCTP_TCB_LOCK_ASSERT(stcb); 2072 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); 2073 /* check the special flag for stream resets */ 2074 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2075 ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) || 2076 (asoc->cumulative_tsn == liste->tsn)) 2077 ) { 2078 /* 2079 * we have finished working through the backlogged TSN's now 2080 * time to reset streams. 1: call reset function. 2: free 2081 * pending_reply space 3: distribute any chunks in 2082 * pending_reply_queue. 2083 */ 2084 struct sctp_queued_to_read *ctl; 2085 2086 sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams); 2087 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); 2088 SCTP_FREE(liste, SCTP_M_STRESET); 2089 /* sa_ignore FREED_MEMORY */ 2090 liste = TAILQ_FIRST(&asoc->resetHead); 2091 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2092 if (ctl && (liste == NULL)) { 2093 /* All can be removed */ 2094 while (ctl) { 2095 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 2096 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); 2097 if (*abort_flag) { 2098 return (0); 2099 } 2100 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2101 } 2102 } else if (ctl) { 2103 /* more than one in queue */ 2104 while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) { 2105 /* 2106 * if ctl->sinfo_tsn is <= liste->tsn we can 2107 * process it which is the NOT of 2108 * ctl->sinfo_tsn > liste->tsn 2109 */ 2110 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 2111 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); 2112 if (*abort_flag) { 2113 return (0); 2114 } 2115 ctl = TAILQ_FIRST(&asoc->pending_reply_queue); 2116 } 2117 } 2118 /* 2119 * Now service re-assembly to pick up anything that has been 2120 * held on reassembly queue? 2121 */ 2122 sctp_deliver_reasm_check(stcb, asoc); 2123 need_reasm_check = 0; 2124 } 2125 if (need_reasm_check) { 2126 /* Another one waits ? 
*/ 2127 sctp_deliver_reasm_check(stcb, asoc); 2128 } 2129 return (1); 2130 } 2131 2132 int8_t sctp_map_lookup_tab[256] = { 2133 -1, 0, -1, 1, -1, 0, -1, 2, 2134 -1, 0, -1, 1, -1, 0, -1, 3, 2135 -1, 0, -1, 1, -1, 0, -1, 2, 2136 -1, 0, -1, 1, -1, 0, -1, 4, 2137 -1, 0, -1, 1, -1, 0, -1, 2, 2138 -1, 0, -1, 1, -1, 0, -1, 3, 2139 -1, 0, -1, 1, -1, 0, -1, 2, 2140 -1, 0, -1, 1, -1, 0, -1, 5, 2141 -1, 0, -1, 1, -1, 0, -1, 2, 2142 -1, 0, -1, 1, -1, 0, -1, 3, 2143 -1, 0, -1, 1, -1, 0, -1, 2, 2144 -1, 0, -1, 1, -1, 0, -1, 4, 2145 -1, 0, -1, 1, -1, 0, -1, 2, 2146 -1, 0, -1, 1, -1, 0, -1, 3, 2147 -1, 0, -1, 1, -1, 0, -1, 2, 2148 -1, 0, -1, 1, -1, 0, -1, 6, 2149 -1, 0, -1, 1, -1, 0, -1, 2, 2150 -1, 0, -1, 1, -1, 0, -1, 3, 2151 -1, 0, -1, 1, -1, 0, -1, 2, 2152 -1, 0, -1, 1, -1, 0, -1, 4, 2153 -1, 0, -1, 1, -1, 0, -1, 2, 2154 -1, 0, -1, 1, -1, 0, -1, 3, 2155 -1, 0, -1, 1, -1, 0, -1, 2, 2156 -1, 0, -1, 1, -1, 0, -1, 5, 2157 -1, 0, -1, 1, -1, 0, -1, 2, 2158 -1, 0, -1, 1, -1, 0, -1, 3, 2159 -1, 0, -1, 1, -1, 0, -1, 2, 2160 -1, 0, -1, 1, -1, 0, -1, 4, 2161 -1, 0, -1, 1, -1, 0, -1, 2, 2162 -1, 0, -1, 1, -1, 0, -1, 3, 2163 -1, 0, -1, 1, -1, 0, -1, 2, 2164 -1, 0, -1, 1, -1, 0, -1, 7, 2165 }; 2166 2167 2168 void 2169 sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag) 2170 { 2171 /* 2172 * Now we also need to check the mapping array in a couple of ways. 2173 * 1) Did we move the cum-ack point? 2174 */ 2175 struct sctp_association *asoc; 2176 int at; 2177 int last_all_ones = 0; 2178 int slide_from, slide_end, lgap, distance; 2179 uint32_t old_cumack, old_base, old_highest; 2180 unsigned char aux_array[64]; 2181 2182 2183 asoc = &stcb->asoc; 2184 at = 0; 2185 2186 old_cumack = asoc->cumulative_tsn; 2187 old_base = asoc->mapping_array_base_tsn; 2188 old_highest = asoc->highest_tsn_inside_map; 2189 if (asoc->mapping_array_size < 64) 2190 memcpy(aux_array, asoc->mapping_array, 2191 asoc->mapping_array_size); 2192 else 2193 memcpy(aux_array, asoc->mapping_array, 64); 2194 2195 /* 2196 * We could probably improve this a small bit by calculating the 2197 * offset of the current cum-ack as the starting point. 
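 *
 * The scan below walks the mapping array a byte at a time: 0xff
 * means eight TSNs in a row are present, and the first byte with a
 * 0 bit ends the run. sctp_map_lookup_tab[b] is the index of the
 * last bit in the run of consecutive 1-bits starting at bit 0 of b,
 * or -1 if bit 0 is clear; e.g. table[0x0f] == 3 and
 * table[0x17] == 2 (0x17 = 00010111, bits 0-2 set, bit 3 clear).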
2198 */ 2199 at = 0; 2200 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) { 2201 2202 if (asoc->mapping_array[slide_from] == 0xff) { 2203 at += 8; 2204 last_all_ones = 1; 2205 } else { 2206 /* there is a 0 bit */ 2207 at += sctp_map_lookup_tab[asoc->mapping_array[slide_from]]; 2208 last_all_ones = 0; 2209 break; 2210 } 2211 } 2212 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - last_all_ones); 2213 /* at is one off, since in the table a embedded -1 is present */ 2214 at++; 2215 2216 if (compare_with_wrap(asoc->cumulative_tsn, 2217 asoc->highest_tsn_inside_map, 2218 MAX_TSN)) { 2219 #ifdef INVARIANTS 2220 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map", 2221 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2222 #else 2223 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n", 2224 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2225 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2226 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 2227 } 2228 asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2229 #endif 2230 } 2231 if ((asoc->cumulative_tsn == asoc->highest_tsn_inside_map) && (at >= 8)) { 2232 /* The complete array was completed by a single FR */ 2233 /* higest becomes the cum-ack */ 2234 int clr; 2235 2236 asoc->cumulative_tsn = asoc->highest_tsn_inside_map; 2237 /* clear the array */ 2238 clr = (at >> 3) + 1; 2239 if (clr > asoc->mapping_array_size) { 2240 clr = asoc->mapping_array_size; 2241 } 2242 memset(asoc->mapping_array, 0, clr); 2243 /* base becomes one ahead of the cum-ack */ 2244 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; 2245 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2246 sctp_log_map(old_base, old_cumack, old_highest, 2247 SCTP_MAP_PREPARE_SLIDE); 2248 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2249 asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED); 2250 } 2251 } else if (at >= 8) { 2252 /* we can slide the mapping array down */ 2253 /* slide_from holds where we hit the first NON 0xff byte */ 2254 2255 /* 2256 * now calculate the ceiling of the move using our highest 2257 * TSN value 2258 */ 2259 if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) { 2260 lgap = asoc->highest_tsn_inside_map - 2261 asoc->mapping_array_base_tsn; 2262 } else { 2263 lgap = (MAX_TSN - asoc->mapping_array_base_tsn) + 2264 asoc->highest_tsn_inside_map + 1; 2265 } 2266 slide_end = lgap >> 3; 2267 if (slide_end < slide_from) { 2268 #ifdef INVARIANTS 2269 panic("impossible slide"); 2270 #else 2271 printf("impossible slide?\n"); 2272 return; 2273 #endif 2274 } 2275 if (slide_end > asoc->mapping_array_size) { 2276 #ifdef INVARIANTS 2277 panic("would overrun buffer"); 2278 #else 2279 printf("Gak, would have overrun map end:%d slide_end:%d\n", 2280 asoc->mapping_array_size, slide_end); 2281 slide_end = asoc->mapping_array_size; 2282 #endif 2283 } 2284 distance = (slide_end - slide_from) + 1; 2285 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2286 sctp_log_map(old_base, old_cumack, old_highest, 2287 SCTP_MAP_PREPARE_SLIDE); 2288 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end, 2289 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM); 2290 } 2291 if (distance + slide_from > asoc->mapping_array_size || 2292 distance < 0) { 2293 /* 2294 * Here we do NOT slide forward the array so that 2295 * hopefully when more data comes in to fill it up 2296 * we will be able to slide it 
forward. Really I 2297 * don't think this should happen :-0 2298 */ 2299 2300 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2301 sctp_log_map((uint32_t) distance, (uint32_t) slide_from, 2302 (uint32_t) asoc->mapping_array_size, 2303 SCTP_MAP_SLIDE_NONE); 2304 } 2305 } else { 2306 int ii; 2307 2308 for (ii = 0; ii < distance; ii++) { 2309 asoc->mapping_array[ii] = 2310 asoc->mapping_array[slide_from + ii]; 2311 } 2312 for (ii = distance; ii <= slide_end; ii++) { 2313 asoc->mapping_array[ii] = 0; 2314 } 2315 asoc->mapping_array_base_tsn += (slide_from << 3); 2316 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2317 sctp_log_map(asoc->mapping_array_base_tsn, 2318 asoc->cumulative_tsn, asoc->highest_tsn_inside_map, 2319 SCTP_MAP_SLIDE_RESULT); 2320 } 2321 } 2322 } 2323 /* 2324 * Now we need to see if we need to queue a sack or just start the 2325 * timer (if allowed). 2326 */ 2327 if (ok_to_sack) { 2328 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2329 /* 2330 * Ok special case, in SHUTDOWN-SENT case. here we 2331 * maker sure SACK timer is off and instead send a 2332 * SHUTDOWN and a SACK 2333 */ 2334 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2335 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 2336 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18); 2337 } 2338 sctp_send_shutdown(stcb, stcb->asoc.primary_destination); 2339 sctp_send_sack(stcb); 2340 } else { 2341 int is_a_gap; 2342 2343 /* is there a gap now ? */ 2344 is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map, 2345 stcb->asoc.cumulative_tsn, MAX_TSN); 2346 2347 /* 2348 * CMT DAC algorithm: increase number of packets 2349 * received since last ack 2350 */ 2351 stcb->asoc.cmt_dac_pkts_rcvd++; 2352 2353 if ((stcb->asoc.send_sack == 1) || /* We need to send a 2354 * SACK */ 2355 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no 2356 * longer is one */ 2357 (stcb->asoc.numduptsns) || /* we have dup's */ 2358 (is_a_gap) || /* is still a gap */ 2359 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */ 2360 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */ 2361 ) { 2362 2363 if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off)) && 2364 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) && 2365 (stcb->asoc.send_sack == 0) && 2366 (stcb->asoc.numduptsns == 0) && 2367 (stcb->asoc.delayed_ack) && 2368 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) { 2369 2370 /* 2371 * CMT DAC algorithm: With CMT, 2372 * delay acks even in the face of 2373 * 2374 * reordering. Therefore, if acks that 2375 * do not have to be sent because of 2376 * the above reasons, will be 2377 * delayed. That is, acks that would 2378 * have been sent due to gap reports 2379 * will be delayed with DAC. Start 2380 * the delayed ack timer. 2381 */ 2382 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2383 stcb->sctp_ep, stcb, NULL); 2384 } else { 2385 /* 2386 * Ok we must build a SACK since the 2387 * timer is pending, we got our 2388 * first packet OR there are gaps or 2389 * duplicates. 
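 * We reach this arm when one of the triggers above held (send_sack
 * forced, a gap just closed, duplicate TSNs queued, a gap still
 * open, delayed ack disabled, or sack_freq packets seen) and the
 * CMT/DAC delayed-ack exception did not apply.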
2390 */ 2391 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 2392 sctp_send_sack(stcb); 2393 } 2394 } else { 2395 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2396 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2397 stcb->sctp_ep, stcb, NULL); 2398 } 2399 } 2400 } 2401 } 2402 } 2403 2404 void 2405 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc) 2406 { 2407 struct sctp_tmit_chunk *chk; 2408 uint32_t tsize; 2409 uint16_t nxt_todel; 2410 2411 if (asoc->fragmented_delivery_inprogress) { 2412 sctp_service_reassembly(stcb, asoc); 2413 } 2414 /* Can we proceed further, i.e. the PD-API is complete */ 2415 if (asoc->fragmented_delivery_inprogress) { 2416 /* no */ 2417 return; 2418 } 2419 /* 2420 * Now is there some other chunk I can deliver from the reassembly 2421 * queue. 2422 */ 2423 doit_again: 2424 chk = TAILQ_FIRST(&asoc->reasmqueue); 2425 if (chk == NULL) { 2426 asoc->size_on_reasm_queue = 0; 2427 asoc->cnt_on_reasm_queue = 0; 2428 return; 2429 } 2430 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1; 2431 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) && 2432 ((nxt_todel == chk->rec.data.stream_seq) || 2433 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) { 2434 /* 2435 * Yep the first one is here. We setup to start reception, 2436 * by backing down the TSN just in case we can't deliver. 2437 */ 2438 2439 /* 2440 * Before we start though either all of the message should 2441 * be here or 1/4 the socket buffer max or nothing on the 2442 * delivery queue and something can be delivered. 2443 */ 2444 if ((sctp_is_all_msg_on_reasm(asoc, &tsize) || 2445 (tsize >= stcb->sctp_ep->partial_delivery_point))) { 2446 asoc->fragmented_delivery_inprogress = 1; 2447 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1; 2448 asoc->str_of_pdapi = chk->rec.data.stream_number; 2449 asoc->ssn_of_pdapi = chk->rec.data.stream_seq; 2450 asoc->pdapi_ppid = chk->rec.data.payloadtype; 2451 asoc->fragment_flags = chk->rec.data.rcv_flags; 2452 sctp_service_reassembly(stcb, asoc); 2453 if (asoc->fragmented_delivery_inprogress == 0) { 2454 goto doit_again; 2455 } 2456 } 2457 } 2458 } 2459 2460 int 2461 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, 2462 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2463 struct sctp_nets *net, uint32_t * high_tsn) 2464 { 2465 struct sctp_data_chunk *ch, chunk_buf; 2466 struct sctp_association *asoc; 2467 int num_chunks = 0; /* number of control chunks processed */ 2468 int stop_proc = 0; 2469 int chk_length, break_flag, last_chunk; 2470 int abort_flag = 0, was_a_gap = 0; 2471 struct mbuf *m; 2472 2473 /* set the rwnd */ 2474 sctp_set_rwnd(stcb, &stcb->asoc); 2475 2476 m = *mm; 2477 SCTP_TCB_LOCK_ASSERT(stcb); 2478 asoc = &stcb->asoc; 2479 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map, 2480 stcb->asoc.cumulative_tsn, MAX_TSN)) { 2481 /* there was a gap before this data was processed */ 2482 was_a_gap = 1; 2483 } 2484 /* 2485 * setup where we got the last DATA packet from for any SACK that 2486 * may need to go out. Don't bump the net. This is done ONLY when a 2487 * chunk is assigned. 2488 */ 2489 asoc->last_data_chunk_from = net; 2490 2491 /*- 2492 * Now before we proceed we must figure out if this is a wasted 2493 * cluster... i.e. it is a small packet sent in and yet the driver 2494 * underneath allocated a full cluster for it. If so we must copy it 2495 * to a smaller mbuf and free up the cluster mbuf. This will help 2496 * with cluster starvation. 
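 * (For example, a DATA packet of a few dozen bytes can otherwise pin
 * a whole receive cluster, commonly 2k bytes, for as long as the
 * data sits in the socket buffer.)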
Note for __Panda__ we don't do this
2497 * since it has clusters all the way down to 64 bytes.
2498 */
2499 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2500 /* we only handle mbufs that are singletons, not chains */
2501 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2502 if (m) {
2503 /* ok, let's see if we can copy the data up */
2504 caddr_t *from, *to;
2505
2506 /* get the pointers and copy */
2507 to = mtod(m, caddr_t *);
2508 from = mtod((*mm), caddr_t *);
2509 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2510 /* copy the length and free up the old */
2511 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2512 sctp_m_freem(*mm);
2513 /* success, copy it back */
2514 *mm = m;
2515 } else {
2516 /* We are in trouble in the mbuf world .. yikes */
2517 m = *mm;
2518 }
2519 }
2520 /* get pointer to the first chunk header */
2521 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2522 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2523 if (ch == NULL) {
2524 return (1);
2525 }
2526 /*
2527 * process all DATA chunks...
2528 */
2529 *high_tsn = asoc->cumulative_tsn;
2530 break_flag = 0;
2531 asoc->data_pkts_seen++;
2532 while (stop_proc == 0) {
2533 /* validate chunk length */
2534 chk_length = ntohs(ch->ch.chunk_length);
2535 if (length - *offset < chk_length) {
2536 /* all done, mutilated chunk */
2537 stop_proc = 1;
2538 break;
2539 }
2540 if (ch->ch.chunk_type == SCTP_DATA) {
2541 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2542 /*
2543 * Need to send an abort since we had an
2544 * invalid data chunk.
2545 */
2546 struct mbuf *op_err;
2547
2548 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2549 0, M_DONTWAIT, 1, MT_DATA);
2550
2551 if (op_err) {
2552 struct sctp_paramhdr *ph;
2553 uint32_t *ippp;
2554
2555 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2556 (2 * sizeof(uint32_t));
2557 ph = mtod(op_err, struct sctp_paramhdr *);
2558 ph->param_type =
2559 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2560 ph->param_length = htons(SCTP_BUF_LEN(op_err));
2561 ippp = (uint32_t *) (ph + 1);
2562 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2563 ippp++;
2564 *ippp = asoc->cumulative_tsn;
2565
2566 }
2567 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2568 sctp_abort_association(inp, stcb, m, iphlen, sh,
2569 op_err, 0, net->port);
2570 return (2);
2571 }
2572 #ifdef SCTP_AUDITING_ENABLED
2573 sctp_audit_log(0xB1, 0);
2574 #endif
2575 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2576 last_chunk = 1;
2577 } else {
2578 last_chunk = 0;
2579 }
2580 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2581 chk_length, net, high_tsn, &abort_flag, &break_flag,
2582 last_chunk)) {
2583 num_chunks++;
2584 }
2585 if (abort_flag)
2586 return (2);
2587
2588 if (break_flag) {
2589 /*
2590 * Set because of out of rwnd space and no
2591 * drop report space left.
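 * Processing stops here; the overrun is reported to the peer via
 * sctp_send_packet_dropped() once the chunk loop ends.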
2592 */ 2593 stop_proc = 1; 2594 break; 2595 } 2596 } else { 2597 /* not a data chunk in the data region */ 2598 switch (ch->ch.chunk_type) { 2599 case SCTP_INITIATION: 2600 case SCTP_INITIATION_ACK: 2601 case SCTP_SELECTIVE_ACK: 2602 case SCTP_HEARTBEAT_REQUEST: 2603 case SCTP_HEARTBEAT_ACK: 2604 case SCTP_ABORT_ASSOCIATION: 2605 case SCTP_SHUTDOWN: 2606 case SCTP_SHUTDOWN_ACK: 2607 case SCTP_OPERATION_ERROR: 2608 case SCTP_COOKIE_ECHO: 2609 case SCTP_COOKIE_ACK: 2610 case SCTP_ECN_ECHO: 2611 case SCTP_ECN_CWR: 2612 case SCTP_SHUTDOWN_COMPLETE: 2613 case SCTP_AUTHENTICATION: 2614 case SCTP_ASCONF_ACK: 2615 case SCTP_PACKET_DROPPED: 2616 case SCTP_STREAM_RESET: 2617 case SCTP_FORWARD_CUM_TSN: 2618 case SCTP_ASCONF: 2619 /* 2620 * Now, what do we do with KNOWN chunks that 2621 * are NOT in the right place? 2622 * 2623 * For now, I do nothing but ignore them. We 2624 * may later want to add sysctl stuff to 2625 * switch out and do either an ABORT() or 2626 * possibly process them. 2627 */ 2628 if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) { 2629 struct mbuf *op_err; 2630 2631 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION); 2632 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port); 2633 return (2); 2634 } 2635 break; 2636 default: 2637 /* unknown chunk type, use bit rules */ 2638 if (ch->ch.chunk_type & 0x40) { 2639 /* Add a error report to the queue */ 2640 struct mbuf *merr; 2641 struct sctp_paramhdr *phd; 2642 2643 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA); 2644 if (merr) { 2645 phd = mtod(merr, struct sctp_paramhdr *); 2646 /* 2647 * We cheat and use param 2648 * type since we did not 2649 * bother to define a error 2650 * cause struct. They are 2651 * the same basic format 2652 * with different names. 2653 */ 2654 phd->param_type = 2655 htons(SCTP_CAUSE_UNRECOG_CHUNK); 2656 phd->param_length = 2657 htons(chk_length + sizeof(*phd)); 2658 SCTP_BUF_LEN(merr) = sizeof(*phd); 2659 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, 2660 SCTP_SIZE32(chk_length), 2661 M_DONTWAIT); 2662 if (SCTP_BUF_NEXT(merr)) { 2663 sctp_queue_op_err(stcb, merr); 2664 } else { 2665 sctp_m_freem(merr); 2666 } 2667 } 2668 } 2669 if ((ch->ch.chunk_type & 0x80) == 0) { 2670 /* discard the rest of this packet */ 2671 stop_proc = 1; 2672 } /* else skip this bad chunk and 2673 * continue... */ 2674 break; 2675 }; /* switch of chunk type */ 2676 } 2677 *offset += SCTP_SIZE32(chk_length); 2678 if ((*offset >= length) || stop_proc) { 2679 /* no more data left in the mbuf chain */ 2680 stop_proc = 1; 2681 continue; 2682 } 2683 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset, 2684 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf); 2685 if (ch == NULL) { 2686 *offset = length; 2687 stop_proc = 1; 2688 break; 2689 2690 } 2691 } /* while */ 2692 if (break_flag) { 2693 /* 2694 * we need to report rwnd overrun drops. 2695 */ 2696 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0); 2697 } 2698 if (num_chunks) { 2699 /* 2700 * Did we get data, if so update the time for auto-close and 2701 * give peer credit for being alive. 
2702 */ 2703 SCTP_STAT_INCR(sctps_recvpktwithdata); 2704 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 2705 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 2706 stcb->asoc.overall_error_count, 2707 0, 2708 SCTP_FROM_SCTP_INDATA, 2709 __LINE__); 2710 } 2711 stcb->asoc.overall_error_count = 0; 2712 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd); 2713 } 2714 /* now service all of the reassm queue if needed */ 2715 if (!(TAILQ_EMPTY(&asoc->reasmqueue))) 2716 sctp_service_queues(stcb, asoc); 2717 2718 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2719 /* Assure that we ack right away */ 2720 stcb->asoc.send_sack = 1; 2721 } 2722 /* Start a sack timer or QUEUE a SACK for sending */ 2723 if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) && 2724 (stcb->asoc.mapping_array[0] != 0xff)) { 2725 if ((stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) || 2726 (stcb->asoc.delayed_ack == 0) || 2727 (stcb->asoc.numduptsns) || 2728 (stcb->asoc.send_sack == 1)) { 2729 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2730 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 2731 } 2732 sctp_send_sack(stcb); 2733 } else { 2734 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2735 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2736 stcb->sctp_ep, stcb, NULL); 2737 } 2738 } 2739 } else { 2740 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag); 2741 } 2742 if (abort_flag) 2743 return (2); 2744 2745 return (0); 2746 } 2747 2748 static void 2749 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc, 2750 struct sctp_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked, 2751 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack, 2752 int num_seg, int *ecn_seg_sums) 2753 { 2754 /************************************************/ 2755 /* process fragments and update sendqueue */ 2756 /************************************************/ 2757 struct sctp_sack *sack; 2758 struct sctp_gap_ack_block *frag, block; 2759 struct sctp_tmit_chunk *tp1; 2760 int i; 2761 unsigned int j; 2762 int num_frs = 0; 2763 2764 uint16_t frag_strt, frag_end, primary_flag_set; 2765 u_long last_frag_high; 2766 2767 /* 2768 * @@@ JRI : TODO: This flag is not used anywhere .. remove? 
2769 */ 2770 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) { 2771 primary_flag_set = 1; 2772 } else { 2773 primary_flag_set = 0; 2774 } 2775 sack = &ch->sack; 2776 2777 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset, 2778 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block); 2779 *offset += sizeof(block); 2780 if (frag == NULL) { 2781 return; 2782 } 2783 tp1 = NULL; 2784 last_frag_high = 0; 2785 for (i = 0; i < num_seg; i++) { 2786 frag_strt = ntohs(frag->start); 2787 frag_end = ntohs(frag->end); 2788 /* some sanity checks on the fargment offsets */ 2789 if (frag_strt > frag_end) { 2790 /* this one is malformed, skip */ 2791 frag++; 2792 continue; 2793 } 2794 if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked, 2795 MAX_TSN)) 2796 *biggest_tsn_acked = frag_end + last_tsn; 2797 2798 /* mark acked dgs and find out the highestTSN being acked */ 2799 if (tp1 == NULL) { 2800 tp1 = TAILQ_FIRST(&asoc->sent_queue); 2801 2802 /* save the locations of the last frags */ 2803 last_frag_high = frag_end + last_tsn; 2804 } else { 2805 /* 2806 * now lets see if we need to reset the queue due to 2807 * a out-of-order SACK fragment 2808 */ 2809 if (compare_with_wrap(frag_strt + last_tsn, 2810 last_frag_high, MAX_TSN)) { 2811 /* 2812 * if the new frag starts after the last TSN 2813 * frag covered, we are ok and this one is 2814 * beyond the last one 2815 */ 2816 ; 2817 } else { 2818 /* 2819 * ok, they have reset us, so we need to 2820 * reset the queue this will cause extra 2821 * hunting but hey, they chose the 2822 * performance hit when they failed to order 2823 * there gaps.. 2824 */ 2825 tp1 = TAILQ_FIRST(&asoc->sent_queue); 2826 } 2827 last_frag_high = frag_end + last_tsn; 2828 } 2829 for (j = frag_strt + last_tsn; (compare_with_wrap((frag_end + last_tsn), j, MAX_TSN)); j++) { 2830 while (tp1) { 2831 if (tp1->rec.data.doing_fast_retransmit) 2832 num_frs++; 2833 2834 /* 2835 * CMT: CUCv2 algorithm. For each TSN being 2836 * processed from the sent queue, track the 2837 * next expected pseudo-cumack, or 2838 * rtx_pseudo_cumack, if required. Separate 2839 * cumack trackers for first transmissions, 2840 * and retransmissions. 2841 */ 2842 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) && 2843 (tp1->snd_count == 1)) { 2844 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq; 2845 tp1->whoTo->find_pseudo_cumack = 0; 2846 } 2847 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) && 2848 (tp1->snd_count > 1)) { 2849 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq; 2850 tp1->whoTo->find_rtx_pseudo_cumack = 0; 2851 } 2852 if (tp1->rec.data.TSN_seq == j) { 2853 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 2854 /* 2855 * must be held until 2856 * cum-ack passes 2857 */ 2858 /* 2859 * ECN Nonce: Add the nonce 2860 * value to the sender's 2861 * nonce sum 2862 */ 2863 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 2864 /*- 2865 * If it is less than RESEND, it is 2866 * now no-longer in flight. 2867 * Higher values may already be set 2868 * via previous Gap Ack Blocks... 2869 * i.e. ACKED or RESEND. 2870 */ 2871 if (compare_with_wrap(tp1->rec.data.TSN_seq, 2872 *biggest_newly_acked_tsn, MAX_TSN)) { 2873 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq; 2874 } 2875 /* 2876 * CMT: SFR algo 2877 * (and HTNA) - set 2878 * saw_newack to 1 2879 * for dest being 2880 * newly acked. 2881 * update 2882 * this_sack_highest_ 2883 * newack if 2884 * appropriate. 
2885 */ 2886 if (tp1->rec.data.chunk_was_revoked == 0) 2887 tp1->whoTo->saw_newack = 1; 2888 2889 if (compare_with_wrap(tp1->rec.data.TSN_seq, 2890 tp1->whoTo->this_sack_highest_newack, 2891 MAX_TSN)) { 2892 tp1->whoTo->this_sack_highest_newack = 2893 tp1->rec.data.TSN_seq; 2894 } 2895 /* 2896 * CMT DAC algo: 2897 * also update 2898 * this_sack_lowest_n 2899 * ewack 2900 */ 2901 if (*this_sack_lowest_newack == 0) { 2902 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 2903 sctp_log_sack(*this_sack_lowest_newack, 2904 last_tsn, 2905 tp1->rec.data.TSN_seq, 2906 0, 2907 0, 2908 SCTP_LOG_TSN_ACKED); 2909 } 2910 *this_sack_lowest_newack = tp1->rec.data.TSN_seq; 2911 } 2912 /* 2913 * CMT: CUCv2 2914 * algorithm. If 2915 * (rtx-)pseudo-cumac 2916 * k for corresp 2917 * dest is being 2918 * acked, then we 2919 * have a new 2920 * (rtx-)pseudo-cumac 2921 * k. Set 2922 * new_(rtx_)pseudo_c 2923 * umack to TRUE so 2924 * that the cwnd for 2925 * this dest can be 2926 * updated. Also 2927 * trigger search 2928 * for the next 2929 * expected 2930 * (rtx-)pseudo-cumac 2931 * k. Separate 2932 * pseudo_cumack 2933 * trackers for 2934 * first 2935 * transmissions and 2936 * retransmissions. 2937 */ 2938 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) { 2939 if (tp1->rec.data.chunk_was_revoked == 0) { 2940 tp1->whoTo->new_pseudo_cumack = 1; 2941 } 2942 tp1->whoTo->find_pseudo_cumack = 1; 2943 } 2944 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 2945 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 2946 } 2947 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) { 2948 if (tp1->rec.data.chunk_was_revoked == 0) { 2949 tp1->whoTo->new_pseudo_cumack = 1; 2950 } 2951 tp1->whoTo->find_rtx_pseudo_cumack = 1; 2952 } 2953 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 2954 sctp_log_sack(*biggest_newly_acked_tsn, 2955 last_tsn, 2956 tp1->rec.data.TSN_seq, 2957 frag_strt, 2958 frag_end, 2959 SCTP_LOG_TSN_ACKED); 2960 } 2961 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 2962 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP, 2963 tp1->whoTo->flight_size, 2964 tp1->book_size, 2965 (uintptr_t) tp1->whoTo, 2966 tp1->rec.data.TSN_seq); 2967 } 2968 sctp_flight_size_decrease(tp1); 2969 sctp_total_flight_decrease(stcb, tp1); 2970 2971 tp1->whoTo->net_ack += tp1->send_size; 2972 if (tp1->snd_count < 2) { 2973 /* 2974 * True 2975 * non-retran 2976 * smited 2977 * chunk */ 2978 tp1->whoTo->net_ack2 += tp1->send_size; 2979 2980 /* 2981 * update RTO 2982 * too ? 
*/ 2983 if (tp1->do_rtt) { 2984 tp1->whoTo->RTO = 2985 sctp_calculate_rto(stcb, 2986 asoc, 2987 tp1->whoTo, 2988 &tp1->sent_rcv_time, 2989 sctp_align_safe_nocopy); 2990 tp1->do_rtt = 0; 2991 } 2992 } 2993 } 2994 if (tp1->sent <= SCTP_DATAGRAM_RESEND) { 2995 (*ecn_seg_sums) += tp1->rec.data.ect_nonce; 2996 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM; 2997 if (compare_with_wrap(tp1->rec.data.TSN_seq, 2998 asoc->this_sack_highest_gap, 2999 MAX_TSN)) { 3000 asoc->this_sack_highest_gap = 3001 tp1->rec.data.TSN_seq; 3002 } 3003 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3004 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 3005 #ifdef SCTP_AUDITING_ENABLED 3006 sctp_audit_log(0xB2, 3007 (asoc->sent_queue_retran_cnt & 0x000000ff)); 3008 #endif 3009 } 3010 } 3011 /* 3012 * All chunks NOT UNSENT 3013 * fall through here and are 3014 * marked 3015 */ 3016 tp1->sent = SCTP_DATAGRAM_MARKED; 3017 if (tp1->rec.data.chunk_was_revoked) { 3018 /* deflate the cwnd */ 3019 tp1->whoTo->cwnd -= tp1->book_size; 3020 tp1->rec.data.chunk_was_revoked = 0; 3021 } 3022 } 3023 break; 3024 } /* if (tp1->TSN_seq == j) */ 3025 if (compare_with_wrap(tp1->rec.data.TSN_seq, j, 3026 MAX_TSN)) 3027 break; 3028 3029 tp1 = TAILQ_NEXT(tp1, sctp_next); 3030 } /* end while (tp1) */ 3031 } /* end for (j = fragStart */ 3032 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset, 3033 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block); 3034 *offset += sizeof(block); 3035 if (frag == NULL) { 3036 break; 3037 } 3038 } 3039 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3040 if (num_frs) 3041 sctp_log_fr(*biggest_tsn_acked, 3042 *biggest_newly_acked_tsn, 3043 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS); 3044 } 3045 } 3046 3047 static void 3048 sctp_check_for_revoked(struct sctp_tcb *stcb, 3049 struct sctp_association *asoc, uint32_t cumack, 3050 u_long biggest_tsn_acked) 3051 { 3052 struct sctp_tmit_chunk *tp1; 3053 int tot_revoked = 0; 3054 3055 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3056 while (tp1) { 3057 if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack, 3058 MAX_TSN)) { 3059 /* 3060 * ok this guy is either ACK or MARKED. If it is 3061 * ACKED it has been previously acked but not this 3062 * time i.e. revoked. If it is MARKED it was ACK'ed 3063 * again. 3064 */ 3065 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked, 3066 MAX_TSN)) 3067 break; 3068 3069 3070 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 3071 /* it has been revoked */ 3072 tp1->sent = SCTP_DATAGRAM_SENT; 3073 tp1->rec.data.chunk_was_revoked = 1; 3074 /* 3075 * We must add this stuff back in to assure 3076 * timers and such get started. 3077 */ 3078 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3079 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 3080 tp1->whoTo->flight_size, 3081 tp1->book_size, 3082 (uintptr_t) tp1->whoTo, 3083 tp1->rec.data.TSN_seq); 3084 } 3085 sctp_flight_size_increase(tp1); 3086 sctp_total_flight_increase(stcb, tp1); 3087 /* 3088 * We inflate the cwnd to compensate for our 3089 * artificial inflation of the flight_size. 
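 * The revoked chunk is back in flight, so flight_size just grew by
 * book_size; growing cwnd by the same amount keeps the usable send
 * window unchanged. The bump is undone (the cwnd is deflated) once
 * the chunk is acked or marked again.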
3090 */ 3091 tp1->whoTo->cwnd += tp1->book_size; 3092 tot_revoked++; 3093 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3094 sctp_log_sack(asoc->last_acked_seq, 3095 cumack, 3096 tp1->rec.data.TSN_seq, 3097 0, 3098 0, 3099 SCTP_LOG_TSN_REVOKED); 3100 } 3101 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) { 3102 /* it has been re-acked in this SACK */ 3103 tp1->sent = SCTP_DATAGRAM_ACKED; 3104 } 3105 } 3106 if (tp1->sent == SCTP_DATAGRAM_UNSENT) 3107 break; 3108 tp1 = TAILQ_NEXT(tp1, sctp_next); 3109 } 3110 if (tot_revoked > 0) { 3111 /* 3112 * Setup the ecn nonce re-sync point. We do this since once 3113 * data is revoked we begin to retransmit things, which do 3114 * NOT have the ECN bits set. This means we are now out of 3115 * sync and must wait until we get back in sync with the 3116 * peer to check ECN bits. 3117 */ 3118 tp1 = TAILQ_FIRST(&asoc->send_queue); 3119 if (tp1 == NULL) { 3120 asoc->nonce_resync_tsn = asoc->sending_seq; 3121 } else { 3122 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq; 3123 } 3124 asoc->nonce_wait_for_ecne = 0; 3125 asoc->nonce_sum_check = 0; 3126 } 3127 } 3128 3129 static void 3130 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, 3131 u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved) 3132 { 3133 struct sctp_tmit_chunk *tp1; 3134 int strike_flag = 0; 3135 struct timeval now; 3136 int tot_retrans = 0; 3137 uint32_t sending_seq; 3138 struct sctp_nets *net; 3139 int num_dests_sacked = 0; 3140 3141 /* 3142 * select the sending_seq, this is either the next thing ready to be 3143 * sent but not transmitted, OR, the next seq we assign. 3144 */ 3145 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 3146 if (tp1 == NULL) { 3147 sending_seq = asoc->sending_seq; 3148 } else { 3149 sending_seq = tp1->rec.data.TSN_seq; 3150 } 3151 3152 /* CMT DAC algo: finding out if SACK is a mixed SACK */ 3153 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3154 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3155 if (net->saw_newack) 3156 num_dests_sacked++; 3157 } 3158 } 3159 if (stcb->asoc.peer_supports_prsctp) { 3160 (void)SCTP_GETTIME_TIMEVAL(&now); 3161 } 3162 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3163 while (tp1) { 3164 strike_flag = 0; 3165 if (tp1->no_fr_allowed) { 3166 /* this one had a timeout or something */ 3167 tp1 = TAILQ_NEXT(tp1, sctp_next); 3168 continue; 3169 } 3170 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3171 if (tp1->sent < SCTP_DATAGRAM_RESEND) 3172 sctp_log_fr(biggest_tsn_newly_acked, 3173 tp1->rec.data.TSN_seq, 3174 tp1->sent, 3175 SCTP_FR_LOG_CHECK_STRIKE); 3176 } 3177 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked, 3178 MAX_TSN) || 3179 tp1->sent == SCTP_DATAGRAM_UNSENT) { 3180 /* done */ 3181 break; 3182 } 3183 if (stcb->asoc.peer_supports_prsctp) { 3184 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3185 /* Is it expired? */ 3186 if ( 3187 /* 3188 * TODO sctp_constants.h needs alternative 3189 * time macros when _KERNEL is undefined. 
3190 */ 3191 (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) 3192 ) { 3193 /* Yes so drop it */ 3194 if (tp1->data != NULL) { 3195 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 3196 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3197 &asoc->sent_queue, SCTP_SO_NOT_LOCKED); 3198 } 3199 tp1 = TAILQ_NEXT(tp1, sctp_next); 3200 continue; 3201 } 3202 } 3203 if ((PR_SCTP_RTX_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3204 /* Has it been retransmitted tv_sec times? */ 3205 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) { 3206 /* Yes, so drop it */ 3207 if (tp1->data != NULL) { 3208 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 3209 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3210 &asoc->sent_queue, SCTP_SO_NOT_LOCKED); 3211 } 3212 tp1 = TAILQ_NEXT(tp1, sctp_next); 3213 continue; 3214 } 3215 } 3216 } 3217 if (compare_with_wrap(tp1->rec.data.TSN_seq, 3218 asoc->this_sack_highest_gap, MAX_TSN)) { 3219 /* we are beyond the tsn in the sack */ 3220 break; 3221 } 3222 if (tp1->sent >= SCTP_DATAGRAM_RESEND) { 3223 /* either a RESEND, ACKED, or MARKED */ 3224 /* skip */ 3225 tp1 = TAILQ_NEXT(tp1, sctp_next); 3226 continue; 3227 } 3228 /* 3229 * CMT : SFR algo (covers part of DAC and HTNA as well) 3230 */ 3231 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) { 3232 /* 3233 * No new acks were receieved for data sent to this 3234 * dest. Therefore, according to the SFR algo for 3235 * CMT, no data sent to this dest can be marked for 3236 * FR using this SACK. 3237 */ 3238 tp1 = TAILQ_NEXT(tp1, sctp_next); 3239 continue; 3240 } else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq, 3241 tp1->whoTo->this_sack_highest_newack, MAX_TSN)) { 3242 /* 3243 * CMT: New acks were receieved for data sent to 3244 * this dest. But no new acks were seen for data 3245 * sent after tp1. Therefore, according to the SFR 3246 * algo for CMT, tp1 cannot be marked for FR using 3247 * this SACK. This step covers part of the DAC algo 3248 * and the HTNA algo as well. 3249 */ 3250 tp1 = TAILQ_NEXT(tp1, sctp_next); 3251 continue; 3252 } 3253 /* 3254 * Here we check to see if we were have already done a FR 3255 * and if so we see if the biggest TSN we saw in the sack is 3256 * smaller than the recovery point. If so we don't strike 3257 * the tsn... otherwise we CAN strike the TSN. 3258 */ 3259 /* 3260 * @@@ JRI: Check for CMT if (accum_moved && 3261 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 3262 * 0)) { 3263 */ 3264 if (accum_moved && asoc->fast_retran_loss_recovery) { 3265 /* 3266 * Strike the TSN if in fast-recovery and cum-ack 3267 * moved. 3268 */ 3269 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3270 sctp_log_fr(biggest_tsn_newly_acked, 3271 tp1->rec.data.TSN_seq, 3272 tp1->sent, 3273 SCTP_FR_LOG_STRIKE_CHUNK); 3274 } 3275 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3276 tp1->sent++; 3277 } 3278 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3279 /* 3280 * CMT DAC algorithm: If SACK flag is set to 3281 * 0, then lowest_newack test will not pass 3282 * because it would have been set to the 3283 * cumack earlier. If not already to be 3284 * rtx'd, If not a mixed sack and if tp1 is 3285 * not between two sacked TSNs, then mark by 3286 * one more. NOTE that we are marking by one 3287 * additional time since the SACK DAC flag 3288 * indicates that two packets have been 3289 * received after this missing TSN. 
3290 */ 3291 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3292 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) { 3293 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3294 sctp_log_fr(16 + num_dests_sacked, 3295 tp1->rec.data.TSN_seq, 3296 tp1->sent, 3297 SCTP_FR_LOG_STRIKE_CHUNK); 3298 } 3299 tp1->sent++; 3300 } 3301 } 3302 } else if ((tp1->rec.data.doing_fast_retransmit) && (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) { 3303 /* 3304 * For those that have done a FR we must take 3305 * special consideration if we strike. I.e the 3306 * biggest_newly_acked must be higher than the 3307 * sending_seq at the time we did the FR. 3308 */ 3309 if ( 3310 #ifdef SCTP_FR_TO_ALTERNATE 3311 /* 3312 * If FR's go to new networks, then we must only do 3313 * this for singly homed asoc's. However if the FR's 3314 * go to the same network (Armando's work) then its 3315 * ok to FR multiple times. 3316 */ 3317 (asoc->numnets < 2) 3318 #else 3319 (1) 3320 #endif 3321 ) { 3322 3323 if ((compare_with_wrap(biggest_tsn_newly_acked, 3324 tp1->rec.data.fast_retran_tsn, MAX_TSN)) || 3325 (biggest_tsn_newly_acked == 3326 tp1->rec.data.fast_retran_tsn)) { 3327 /* 3328 * Strike the TSN, since this ack is 3329 * beyond where things were when we 3330 * did a FR. 3331 */ 3332 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3333 sctp_log_fr(biggest_tsn_newly_acked, 3334 tp1->rec.data.TSN_seq, 3335 tp1->sent, 3336 SCTP_FR_LOG_STRIKE_CHUNK); 3337 } 3338 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3339 tp1->sent++; 3340 } 3341 strike_flag = 1; 3342 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3343 /* 3344 * CMT DAC algorithm: If 3345 * SACK flag is set to 0, 3346 * then lowest_newack test 3347 * will not pass because it 3348 * would have been set to 3349 * the cumack earlier. If 3350 * not already to be rtx'd, 3351 * If not a mixed sack and 3352 * if tp1 is not between two 3353 * sacked TSNs, then mark by 3354 * one more. NOTE that we 3355 * are marking by one 3356 * additional time since the 3357 * SACK DAC flag indicates 3358 * that two packets have 3359 * been received after this 3360 * missing TSN. 3361 */ 3362 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 3363 (num_dests_sacked == 1) && 3364 compare_with_wrap(this_sack_lowest_newack, 3365 tp1->rec.data.TSN_seq, MAX_TSN)) { 3366 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3367 sctp_log_fr(32 + num_dests_sacked, 3368 tp1->rec.data.TSN_seq, 3369 tp1->sent, 3370 SCTP_FR_LOG_STRIKE_CHUNK); 3371 } 3372 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3373 tp1->sent++; 3374 } 3375 } 3376 } 3377 } 3378 } 3379 /* 3380 * JRI: TODO: remove code for HTNA algo. CMT's SFR 3381 * algo covers HTNA. 3382 */ 3383 } else if (compare_with_wrap(tp1->rec.data.TSN_seq, 3384 biggest_tsn_newly_acked, MAX_TSN)) { 3385 /* 3386 * We don't strike these: This is the HTNA 3387 * algorithm i.e. we don't strike If our TSN is 3388 * larger than the Highest TSN Newly Acked. 
3389 */ 3390 ; 3391 } else { 3392 /* Strike the TSN */ 3393 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3394 sctp_log_fr(biggest_tsn_newly_acked, 3395 tp1->rec.data.TSN_seq, 3396 tp1->sent, 3397 SCTP_FR_LOG_STRIKE_CHUNK); 3398 } 3399 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3400 tp1->sent++; 3401 } 3402 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3403 /* 3404 * CMT DAC algorithm: If SACK flag is set to 3405 * 0, then lowest_newack test will not pass 3406 * because it would have been set to the 3407 * cumack earlier. If not already to be 3408 * rtx'd, If not a mixed sack and if tp1 is 3409 * not between two sacked TSNs, then mark by 3410 * one more. NOTE that we are marking by one 3411 * additional time since the SACK DAC flag 3412 * indicates that two packets have been 3413 * received after this missing TSN. 3414 */ 3415 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3416 compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) { 3417 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3418 sctp_log_fr(48 + num_dests_sacked, 3419 tp1->rec.data.TSN_seq, 3420 tp1->sent, 3421 SCTP_FR_LOG_STRIKE_CHUNK); 3422 } 3423 tp1->sent++; 3424 } 3425 } 3426 } 3427 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3428 /* Increment the count to resend */ 3429 struct sctp_nets *alt; 3430 3431 /* printf("OK, we are now ready to FR this guy\n"); */ 3432 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3433 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count, 3434 0, SCTP_FR_MARKED); 3435 } 3436 if (strike_flag) { 3437 /* This is a subsequent FR */ 3438 SCTP_STAT_INCR(sctps_sendmultfastretrans); 3439 } 3440 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3441 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) { 3442 /* 3443 * CMT: Using RTX_SSTHRESH policy for CMT. 3444 * If CMT is being used, then pick dest with 3445 * largest ssthresh for any retransmission. 3446 */ 3447 tp1->no_fr_allowed = 1; 3448 alt = tp1->whoTo; 3449 /* sa_ignore NO_NULL_CHK */ 3450 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) { 3451 /* 3452 * JRS 5/18/07 - If CMT PF is on, 3453 * use the PF version of 3454 * find_alt_net() 3455 */ 3456 alt = sctp_find_alternate_net(stcb, alt, 2); 3457 } else { 3458 /* 3459 * JRS 5/18/07 - If only CMT is on, 3460 * use the CMT version of 3461 * find_alt_net() 3462 */ 3463 /* sa_ignore NO_NULL_CHK */ 3464 alt = sctp_find_alternate_net(stcb, alt, 1); 3465 } 3466 if (alt == NULL) { 3467 alt = tp1->whoTo; 3468 } 3469 /* 3470 * CUCv2: If a different dest is picked for 3471 * the retransmission, then new 3472 * (rtx-)pseudo_cumack needs to be tracked 3473 * for orig dest. Let CUCv2 track new (rtx-) 3474 * pseudo-cumack always. 3475 */ 3476 if (tp1->whoTo) { 3477 tp1->whoTo->find_pseudo_cumack = 1; 3478 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3479 } 3480 } else {/* CMT is OFF */ 3481 3482 #ifdef SCTP_FR_TO_ALTERNATE 3483 /* Can we find an alternate? */ 3484 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); 3485 #else 3486 /* 3487 * default behavior is to NOT retransmit 3488 * FR's to an alternate. Armando Caro's 3489 * paper details why. 
3490 */ 3491 alt = tp1->whoTo; 3492 #endif 3493 } 3494 3495 tp1->rec.data.doing_fast_retransmit = 1; 3496 tot_retrans++; 3497 /* mark the sending seq for possible subsequent FR's */ 3498 /* 3499 * printf("Marking TSN for FR new value %x\n", 3500 * (uint32_t)tpi->rec.data.TSN_seq); 3501 */ 3502 if (TAILQ_EMPTY(&asoc->send_queue)) { 3503 /* 3504 * If the queue of send is empty then its 3505 * the next sequence number that will be 3506 * assigned so we subtract one from this to 3507 * get the one we last sent. 3508 */ 3509 tp1->rec.data.fast_retran_tsn = sending_seq; 3510 } else { 3511 /* 3512 * If there are chunks on the send queue 3513 * (unsent data that has made it from the 3514 * stream queues but not out the door, we 3515 * take the first one (which will have the 3516 * lowest TSN) and subtract one to get the 3517 * one we last sent. 3518 */ 3519 struct sctp_tmit_chunk *ttt; 3520 3521 ttt = TAILQ_FIRST(&asoc->send_queue); 3522 tp1->rec.data.fast_retran_tsn = 3523 ttt->rec.data.TSN_seq; 3524 } 3525 3526 if (tp1->do_rtt) { 3527 /* 3528 * this guy had a RTO calculation pending on 3529 * it, cancel it 3530 */ 3531 tp1->do_rtt = 0; 3532 } 3533 /* fix counts and things */ 3534 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3535 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND, 3536 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0), 3537 tp1->book_size, 3538 (uintptr_t) tp1->whoTo, 3539 tp1->rec.data.TSN_seq); 3540 } 3541 if (tp1->whoTo) { 3542 tp1->whoTo->net_ack++; 3543 sctp_flight_size_decrease(tp1); 3544 } 3545 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 3546 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, 3547 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3548 } 3549 /* add back to the rwnd */ 3550 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3551 3552 /* remove from the total flight */ 3553 sctp_total_flight_decrease(stcb, tp1); 3554 if (alt != tp1->whoTo) { 3555 /* yes, there is an alternate. */ 3556 sctp_free_remote_addr(tp1->whoTo); 3557 /* sa_ignore FREED_MEMORY */ 3558 tp1->whoTo = alt; 3559 atomic_add_int(&alt->ref_count, 1); 3560 } 3561 } 3562 tp1 = TAILQ_NEXT(tp1, sctp_next); 3563 } /* while (tp1) */ 3564 3565 if (tot_retrans > 0) { 3566 /* 3567 * Setup the ecn nonce re-sync point. We do this since once 3568 * we go to FR something we introduce a Karn's rule scenario 3569 * and won't know the totals for the ECN bits. 3570 */ 3571 asoc->nonce_resync_tsn = sending_seq; 3572 asoc->nonce_wait_for_ecne = 0; 3573 asoc->nonce_sum_check = 0; 3574 } 3575 } 3576 3577 struct sctp_tmit_chunk * 3578 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb, 3579 struct sctp_association *asoc) 3580 { 3581 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL; 3582 struct timeval now; 3583 int now_filled = 0; 3584 3585 if (asoc->peer_supports_prsctp == 0) { 3586 return (NULL); 3587 } 3588 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3589 while (tp1) { 3590 if (tp1->sent != SCTP_FORWARD_TSN_SKIP && 3591 tp1->sent != SCTP_DATAGRAM_RESEND) { 3592 /* no chance to advance, out of here */ 3593 break; 3594 } 3595 if (!PR_SCTP_ENABLED(tp1->flags)) { 3596 /* 3597 * We can't fwd-tsn past any that are reliable aka 3598 * retransmitted until the asoc fails. 
3599 */ 3600 break; 3601 } 3602 if (!now_filled) { 3603 (void)SCTP_GETTIME_TIMEVAL(&now); 3604 now_filled = 1; 3605 } 3606 tp2 = TAILQ_NEXT(tp1, sctp_next); 3607 /* 3608 * now we got a chunk which is marked for another 3609 * retransmission to a PR-stream but has run out its chances 3610 * already maybe OR has been marked to skip now. Can we skip 3611 * it if its a resend? 3612 */ 3613 if (tp1->sent == SCTP_DATAGRAM_RESEND && 3614 (PR_SCTP_TTL_ENABLED(tp1->flags))) { 3615 /* 3616 * Now is this one marked for resend and its time is 3617 * now up? 3618 */ 3619 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3620 /* Yes so drop it */ 3621 if (tp1->data) { 3622 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 3623 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3624 &asoc->sent_queue, SCTP_SO_NOT_LOCKED); 3625 } 3626 } else { 3627 /* 3628 * No, we are done when hit one for resend 3629 * whos time as not expired. 3630 */ 3631 break; 3632 } 3633 } 3634 /* 3635 * Ok now if this chunk is marked to drop it we can clean up 3636 * the chunk, advance our peer ack point and we can check 3637 * the next chunk. 3638 */ 3639 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) { 3640 /* advance PeerAckPoint goes forward */ 3641 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq; 3642 a_adv = tp1; 3643 /* 3644 * we don't want to de-queue it here. Just wait for 3645 * the next peer SACK to come with a new cumTSN and 3646 * then the chunk will be droped in the normal 3647 * fashion. 3648 */ 3649 if (tp1->data) { 3650 sctp_free_bufspace(stcb, asoc, tp1, 1); 3651 /* 3652 * Maybe there should be another 3653 * notification type 3654 */ 3655 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, 3656 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 3657 tp1, SCTP_SO_NOT_LOCKED); 3658 sctp_m_freem(tp1->data); 3659 tp1->data = NULL; 3660 if (stcb->sctp_socket) { 3661 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3662 struct socket *so; 3663 3664 so = SCTP_INP_SO(stcb->sctp_ep); 3665 atomic_add_int(&stcb->asoc.refcnt, 1); 3666 SCTP_TCB_UNLOCK(stcb); 3667 SCTP_SOCKET_LOCK(so, 1); 3668 SCTP_TCB_LOCK(stcb); 3669 atomic_subtract_int(&stcb->asoc.refcnt, 1); 3670 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 3671 /* 3672 * assoc was freed while we 3673 * were unlocked 3674 */ 3675 SCTP_SOCKET_UNLOCK(so, 1); 3676 return (NULL); 3677 } 3678 #endif 3679 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 3680 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3681 SCTP_SOCKET_UNLOCK(so, 1); 3682 #endif 3683 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 3684 sctp_wakeup_log(stcb, tp1->rec.data.TSN_seq, 1, SCTP_WAKESND_FROM_FWDTSN); 3685 } 3686 } 3687 } 3688 } else { 3689 /* 3690 * If it is still in RESEND we can advance no 3691 * further 3692 */ 3693 break; 3694 } 3695 /* 3696 * If we hit here we just dumped tp1, move to next tsn on 3697 * sent queue. 
3698 */ 3699 tp1 = tp2; 3700 } 3701 return (a_adv); 3702 } 3703 3704 static void 3705 sctp_fs_audit(struct sctp_association *asoc) 3706 { 3707 struct sctp_tmit_chunk *chk; 3708 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0; 3709 3710 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 3711 if (chk->sent < SCTP_DATAGRAM_RESEND) { 3712 inflight++; 3713 } else if (chk->sent == SCTP_DATAGRAM_RESEND) { 3714 resend++; 3715 } else if (chk->sent < SCTP_DATAGRAM_ACKED) { 3716 inbetween++; 3717 } else if (chk->sent > SCTP_DATAGRAM_ACKED) { 3718 above++; 3719 } else { 3720 acked++; 3721 } 3722 } 3723 3724 if ((inflight > 0) || (inbetween > 0)) { 3725 #ifdef INVARIANTS 3726 panic("Flight size-express incorrect? \n"); 3727 #else 3728 SCTP_PRINTF("Flight size-express incorrect inflight:%d inbetween:%d\n", 3729 inflight, inbetween); 3730 #endif 3731 } 3732 } 3733 3734 3735 static void 3736 sctp_window_probe_recovery(struct sctp_tcb *stcb, 3737 struct sctp_association *asoc, 3738 struct sctp_nets *net, 3739 struct sctp_tmit_chunk *tp1) 3740 { 3741 struct sctp_tmit_chunk *chk; 3742 3743 /* First setup this one and get it moved back */ 3744 tp1->sent = SCTP_DATAGRAM_UNSENT; 3745 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3746 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP, 3747 tp1->whoTo->flight_size, 3748 tp1->book_size, 3749 (uintptr_t) tp1->whoTo, 3750 tp1->rec.data.TSN_seq); 3751 } 3752 sctp_flight_size_decrease(tp1); 3753 sctp_total_flight_decrease(stcb, tp1); 3754 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 3755 TAILQ_INSERT_HEAD(&asoc->send_queue, tp1, sctp_next); 3756 asoc->sent_queue_cnt--; 3757 asoc->send_queue_cnt++; 3758 /* 3759 * Now all guys marked for RESEND on the sent_queue must be moved 3760 * back too. 
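	 * Each such chunk is re-inserted right behind tp1 on the send
	 * queue, and sent_queue_retran_cnt is decremented since it no
	 * longer counts as a pending retransmission.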
3761 */ 3762 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 3763 if (chk->sent == SCTP_DATAGRAM_RESEND) { 3764 /* Another chunk to move */ 3765 chk->sent = SCTP_DATAGRAM_UNSENT; 3766 /* It should not be in flight */ 3767 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 3768 TAILQ_INSERT_AFTER(&asoc->send_queue, tp1, chk, sctp_next); 3769 asoc->sent_queue_cnt--; 3770 asoc->send_queue_cnt++; 3771 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 3772 } 3773 } 3774 } 3775 3776 void 3777 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, 3778 uint32_t rwnd, int nonce_sum_flag, int *abort_now) 3779 { 3780 struct sctp_nets *net; 3781 struct sctp_association *asoc; 3782 struct sctp_tmit_chunk *tp1, *tp2; 3783 uint32_t old_rwnd; 3784 int win_probe_recovery = 0; 3785 int win_probe_recovered = 0; 3786 int j, done_once = 0; 3787 3788 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 3789 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack, 3790 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 3791 } 3792 SCTP_TCB_LOCK_ASSERT(stcb); 3793 #ifdef SCTP_ASOCLOG_OF_TSNS 3794 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack; 3795 stcb->asoc.cumack_log_at++; 3796 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 3797 stcb->asoc.cumack_log_at = 0; 3798 } 3799 #endif 3800 asoc = &stcb->asoc; 3801 old_rwnd = asoc->peers_rwnd; 3802 if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) { 3803 /* old ack */ 3804 return; 3805 } else if (asoc->last_acked_seq == cumack) { 3806 /* Window update sack */ 3807 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 3808 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 3809 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 3810 /* SWS sender side engages */ 3811 asoc->peers_rwnd = 0; 3812 } 3813 if (asoc->peers_rwnd > old_rwnd) { 3814 goto again; 3815 } 3816 return; 3817 } 3818 /* First setup for CC stuff */ 3819 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3820 net->prev_cwnd = net->cwnd; 3821 net->net_ack = 0; 3822 net->net_ack2 = 0; 3823 3824 /* 3825 * CMT: Reset CUC and Fast recovery algo variables before 3826 * SACK processing 3827 */ 3828 net->new_pseudo_cumack = 0; 3829 net->will_exit_fast_recovery = 0; 3830 } 3831 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) { 3832 uint32_t send_s; 3833 3834 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 3835 tp1 = TAILQ_LAST(&asoc->sent_queue, 3836 sctpchunk_listhead); 3837 send_s = tp1->rec.data.TSN_seq + 1; 3838 } else { 3839 send_s = asoc->sending_seq; 3840 } 3841 if ((cumack == send_s) || 3842 compare_with_wrap(cumack, send_s, MAX_TSN)) { 3843 #ifndef INVARIANTS 3844 struct mbuf *oper; 3845 3846 #endif 3847 #ifdef INVARIANTS 3848 panic("Impossible sack 1"); 3849 #else 3850 *abort_now = 1; 3851 /* XXX */ 3852 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 3853 0, M_DONTWAIT, 1, MT_DATA); 3854 if (oper) { 3855 struct sctp_paramhdr *ph; 3856 uint32_t *ippp; 3857 3858 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 3859 sizeof(uint32_t); 3860 ph = mtod(oper, struct sctp_paramhdr *); 3861 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 3862 ph->param_length = htons(SCTP_BUF_LEN(oper)); 3863 ippp = (uint32_t *) (ph + 1); 3864 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25); 3865 } 3866 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 3867 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED); 3868 return; 3869 #endif 3870 } 3871 } 
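	/*
	 * A note on the TSN comparisons used throughout (illustrative
	 * sketch only): compare_with_wrap() is serial-number arithmetic
	 * on 32-bit TSNs, roughly
	 *
	 *   newer(a, b) == (a > b && a - b < 0x80000000) ||
	 *                  (a < b && b - a > 0x80000000)
	 *
	 * so a cumack of 0x00000001 counts as being ahead of a send_s of
	 * 0xfffffffe across the wrap, and the sanity check above still
	 * fires for acks of unsent data.
	 */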
3872 asoc->this_sack_highest_gap = cumack; 3873 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 3874 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 3875 stcb->asoc.overall_error_count, 3876 0, 3877 SCTP_FROM_SCTP_INDATA, 3878 __LINE__); 3879 } 3880 stcb->asoc.overall_error_count = 0; 3881 if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) { 3882 /* process the new consecutive TSN first */ 3883 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3884 while (tp1) { 3885 tp2 = TAILQ_NEXT(tp1, sctp_next); 3886 if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq, 3887 MAX_TSN) || 3888 cumack == tp1->rec.data.TSN_seq) { 3889 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 3890 printf("Warning, an unsent is now acked?\n"); 3891 } 3892 /* 3893 * ECN Nonce: Add the nonce to the sender's 3894 * nonce sum 3895 */ 3896 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce; 3897 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 3898 /* 3899 * If it is less than ACKED, it is 3900 * now no-longer in flight. Higher 3901 * values may occur during marking 3902 */ 3903 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3904 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3905 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 3906 tp1->whoTo->flight_size, 3907 tp1->book_size, 3908 (uintptr_t) tp1->whoTo, 3909 tp1->rec.data.TSN_seq); 3910 } 3911 sctp_flight_size_decrease(tp1); 3912 /* sa_ignore NO_NULL_CHK */ 3913 sctp_total_flight_decrease(stcb, tp1); 3914 } 3915 tp1->whoTo->net_ack += tp1->send_size; 3916 if (tp1->snd_count < 2) { 3917 /* 3918 * True non-retransmited 3919 * chunk 3920 */ 3921 tp1->whoTo->net_ack2 += 3922 tp1->send_size; 3923 3924 /* update RTO too? */ 3925 if (tp1->do_rtt) { 3926 tp1->whoTo->RTO = 3927 /* 3928 * sa_ignore 3929 * NO_NULL_CHK 3930 */ 3931 sctp_calculate_rto(stcb, 3932 asoc, tp1->whoTo, 3933 &tp1->sent_rcv_time, 3934 sctp_align_safe_nocopy); 3935 tp1->do_rtt = 0; 3936 } 3937 } 3938 /* 3939 * CMT: CUCv2 algorithm. From the 3940 * cumack'd TSNs, for each TSN being 3941 * acked for the first time, set the 3942 * following variables for the 3943 * corresp destination. 3944 * new_pseudo_cumack will trigger a 3945 * cwnd update. 3946 * find_(rtx_)pseudo_cumack will 3947 * trigger search for the next 3948 * expected (rtx-)pseudo-cumack. 
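				 * For illustration (hypothetical): with
				 * TSNs 1 and 3 outstanding to destination
				 * A and 2 and 4 to B, a cumack covering
				 * 1-2 is a new pseudo-cumack for both A
				 * and B, so each may take a cwnd update
				 * even though neither saw every TSN.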
3949 */ 3950 tp1->whoTo->new_pseudo_cumack = 1; 3951 tp1->whoTo->find_pseudo_cumack = 1; 3952 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3953 3954 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 3955 /* sa_ignore NO_NULL_CHK */ 3956 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 3957 } 3958 } 3959 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3960 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 3961 } 3962 if (tp1->rec.data.chunk_was_revoked) { 3963 /* deflate the cwnd */ 3964 tp1->whoTo->cwnd -= tp1->book_size; 3965 tp1->rec.data.chunk_was_revoked = 0; 3966 } 3967 tp1->sent = SCTP_DATAGRAM_ACKED; 3968 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 3969 if (tp1->data) { 3970 /* sa_ignore NO_NULL_CHK */ 3971 sctp_free_bufspace(stcb, asoc, tp1, 1); 3972 sctp_m_freem(tp1->data); 3973 } 3974 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3975 sctp_log_sack(asoc->last_acked_seq, 3976 cumack, 3977 tp1->rec.data.TSN_seq, 3978 0, 3979 0, 3980 SCTP_LOG_FREE_SENT); 3981 } 3982 tp1->data = NULL; 3983 asoc->sent_queue_cnt--; 3984 sctp_free_a_chunk(stcb, tp1); 3985 tp1 = tp2; 3986 } else { 3987 break; 3988 } 3989 } 3990 3991 } 3992 /* sa_ignore NO_NULL_CHK */ 3993 if (stcb->sctp_socket) { 3994 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3995 struct socket *so; 3996 3997 #endif 3998 3999 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4000 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4001 /* sa_ignore NO_NULL_CHK */ 4002 sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK); 4003 } 4004 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4005 so = SCTP_INP_SO(stcb->sctp_ep); 4006 atomic_add_int(&stcb->asoc.refcnt, 1); 4007 SCTP_TCB_UNLOCK(stcb); 4008 SCTP_SOCKET_LOCK(so, 1); 4009 SCTP_TCB_LOCK(stcb); 4010 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4011 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4012 /* assoc was freed while we were unlocked */ 4013 SCTP_SOCKET_UNLOCK(so, 1); 4014 return; 4015 } 4016 #endif 4017 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4018 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4019 SCTP_SOCKET_UNLOCK(so, 1); 4020 #endif 4021 } else { 4022 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4023 sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK); 4024 } 4025 } 4026 4027 /* JRS - Use the congestion control given in the CC module */ 4028 if (asoc->last_acked_seq != cumack) 4029 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0); 4030 4031 asoc->last_acked_seq = cumack; 4032 4033 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4034 /* nothing left in-flight */ 4035 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4036 net->flight_size = 0; 4037 net->partial_bytes_acked = 0; 4038 } 4039 asoc->total_flight = 0; 4040 asoc->total_flight_count = 0; 4041 } 4042 /* Fix up the a-p-a-p for future PR-SCTP sends */ 4043 if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) { 4044 asoc->advanced_peer_ack_point = cumack; 4045 } 4046 /* ECN Nonce updates */ 4047 if (asoc->ecn_nonce_allowed) { 4048 if (asoc->nonce_sum_check) { 4049 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) { 4050 if (asoc->nonce_wait_for_ecne == 0) { 4051 struct sctp_tmit_chunk *lchk; 4052 4053 lchk = TAILQ_FIRST(&asoc->send_queue); 4054 asoc->nonce_wait_for_ecne = 1; 4055 if (lchk) { 4056 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq; 4057 } else { 4058 asoc->nonce_wait_tsn = asoc->sending_seq; 4059 } 
				} else {
					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
						/*
						 * Misbehaving peer. We need
						 * to react to this guy.
						 */
						asoc->ecn_allowed = 0;
						asoc->ecn_nonce_allowed = 0;
					}
				}
			}
		} else {
			/* See if Resynchronization is Possible */
			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
				asoc->nonce_sum_check = 1;
				/*
				 * Now we must calculate what the base is.
				 * We do this based on two things: we know
				 * the totals for all the segments gap-acked
				 * in the SACK (none here), and we know the
				 * SACK's nonce sum, which is in
				 * nonce_sum_flag. So we can build a truth
				 * table to back-calculate the new value of
				 * asoc->nonce_sum_expect_base:
				 *
				 * SACK-flag-Value  Seg-Sums  Base
				 *        0            0       0
				 *        1            0       1
				 *        0            1       1
				 *        1            1       0
				 */
				asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
			}
		}
	}
	/* RWND update */
	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
		/* SWS sender side engages */
		asoc->peers_rwnd = 0;
	}
	if (asoc->peers_rwnd > old_rwnd) {
		win_probe_recovery = 1;
	}
	/* Now make sure a timer is running wherever data is still outstanding */
again:
	j = 0;
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (win_probe_recovery && (net->window_probe)) {
			net->window_probe = 0;
			win_probe_recovered = 1;
			/*
			 * Find the first chunk that was used as a window
			 * probe and move it back to the data send queue.
			 */
			/* sa_ignore FREED_MEMORY */
			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
				if (tp1->window_probe) {
					/* move back to data send queue */
					sctp_window_probe_recovery(stcb, asoc, net, tp1);
					break;
				}
			}
		}
		if (net->flight_size) {
			int to_ticks;

			if (net->RTO == 0) {
				to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
			} else {
				to_ticks = MSEC_TO_TICKS(net->RTO);
			}
			j++;
			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
			    sctp_timeout_handler, &net->rxt_timer);
		} else {
			if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, net,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
			}
			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
				}
			}
		}
	}
	if ((j == 0) &&
	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
	    (asoc->sent_queue_retran_cnt == 0) &&
	    (win_probe_recovered == 0) &&
	    (done_once == 0)) {
		/* huh, this should not happen */
		sctp_fs_audit(asoc);
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			net->flight_size = 0;
		}
		asoc->total_flight = 0;
		asoc->total_flight_count = 0;
		asoc->sent_queue_retran_cnt = 0;
		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				sctp_flight_size_increase(tp1);
				sctp_total_flight_increase(stcb, tp1);
			} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
				asoc->sent_queue_retran_cnt++;
			}
		}
		done_once = 1;
		goto again;
	}
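	/*
	 * A sketch of the invariant audited above: every chunk on the
	 * sent queue with sent < SCTP_DATAGRAM_RESEND should be counted
	 * in flight_size/total_flight, and every chunk with sent ==
	 * SCTP_DATAGRAM_RESEND in sent_queue_retran_cnt. If j == 0 but
	 * the sent queue is non-empty, the counters have drifted, so
	 * they are rebuilt from scratch and the timer loop is retried
	 * once (guarded by done_once).
	 */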
4174 /**********************************/ 4175 /* Now what about shutdown issues */ 4176 /**********************************/ 4177 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4178 /* nothing left on sendqueue.. consider done */ 4179 /* clean up */ 4180 if ((asoc->stream_queue_cnt == 1) && 4181 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4182 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 4183 (asoc->locked_on_sending) 4184 ) { 4185 struct sctp_stream_queue_pending *sp; 4186 4187 /* 4188 * I may be in a state where we got all across.. but 4189 * cannot write more due to a shutdown... we abort 4190 * since the user did not indicate EOR in this case. 4191 * The sp will be cleaned during free of the asoc. 4192 */ 4193 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), 4194 sctp_streamhead); 4195 if ((sp) && (sp->length == 0)) { 4196 /* Let cleanup code purge it */ 4197 if (sp->msg_is_complete) { 4198 asoc->stream_queue_cnt--; 4199 } else { 4200 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4201 asoc->locked_on_sending = NULL; 4202 asoc->stream_queue_cnt--; 4203 } 4204 } 4205 } 4206 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4207 (asoc->stream_queue_cnt == 0)) { 4208 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4209 /* Need to abort here */ 4210 struct mbuf *oper; 4211 4212 abort_out_now: 4213 *abort_now = 1; 4214 /* XXX */ 4215 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4216 0, M_DONTWAIT, 1, MT_DATA); 4217 if (oper) { 4218 struct sctp_paramhdr *ph; 4219 uint32_t *ippp; 4220 4221 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 4222 sizeof(uint32_t); 4223 ph = mtod(oper, struct sctp_paramhdr *); 4224 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 4225 ph->param_length = htons(SCTP_BUF_LEN(oper)); 4226 ippp = (uint32_t *) (ph + 1); 4227 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24); 4228 } 4229 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; 4230 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED); 4231 } else { 4232 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 4233 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4234 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4235 } 4236 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 4237 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4238 sctp_stop_timers_for_shutdown(stcb); 4239 sctp_send_shutdown(stcb, 4240 stcb->asoc.primary_destination); 4241 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4242 stcb->sctp_ep, stcb, asoc->primary_destination); 4243 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4244 stcb->sctp_ep, stcb, asoc->primary_destination); 4245 } 4246 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4247 (asoc->stream_queue_cnt == 0)) { 4248 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4249 goto abort_out_now; 4250 } 4251 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4252 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); 4253 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4254 sctp_send_shutdown_ack(stcb, 4255 stcb->asoc.primary_destination); 4256 4257 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4258 stcb->sctp_ep, stcb, asoc->primary_destination); 4259 } 4260 } 4261 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 4262 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 4263 rwnd, 4264 stcb->asoc.peers_rwnd, 4265 stcb->asoc.total_flight, 4266 stcb->asoc.total_output_queue_size); 4267 } 4268 } 4269 4270 void 4271 
sctp_handle_sack(struct mbuf *m, int offset,
    struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
    struct sctp_nets *net_from, int *abort_now, int sack_len, uint32_t rwnd)
{
	struct sctp_association *asoc;
	struct sctp_sack *sack;
	struct sctp_tmit_chunk *tp1, *tp2;
	uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
	         this_sack_lowest_newack;
	uint32_t sav_cum_ack;
	uint16_t num_seg, num_dup;
	uint16_t wake_him = 0;
	unsigned int sack_length;
	uint32_t send_s = 0;
	long j;
	int accum_moved = 0;
	int will_exit_fast_recovery = 0;
	uint32_t a_rwnd, old_rwnd;
	int win_probe_recovery = 0;
	int win_probe_recovered = 0;
	struct sctp_nets *net = NULL;
	int nonce_sum_flag, ecn_seg_sums = 0;
	int done_once;
	uint8_t reneged_all = 0;
	uint8_t cmt_dac_flag;

	/*
	 * We take any chance we can to service our queues, since we cannot
	 * get awoken when the socket is read from :<
	 */
	/*
	 * Now perform the actual SACK handling:
	 *  1) Verify that it is not an old SACK; if it is, discard it.
	 *  2) If there is nothing left in the send queue (the cum-ack
	 *     equals the last acked TSN) then this is a duplicate as well;
	 *     apply any rwnd change, verify no timers are running, then
	 *     return.
	 *  3) Process any newly consecutive data, i.e. the cum-ack moved;
	 *     process these first and note that it moved.
	 *  4) Process any SACK gap-ack blocks.
	 *  5) Drop anything acked from the queue.
	 *  6) Check for any revoked blocks and mark them.
	 *  7) Update the cwnd.
	 *  8) If nothing is left, sync up the flight sizes, stop all
	 *     timers, and check for the shutdown_pending state; if set, go
	 *     ahead and send off the shutdown. If in shutdown-received,
	 *     send off the shutdown-ack, start that timer, and return.
	 *  9) Strike any non-acked chunks and do the FR procedure if
	 *     needed, being sure to set the FR flag.
	 * 10) Do the PR-SCTP procedures.
	 * 11) Apply any FR penalties.
	 * 12) Assure we will SACK if in shutdown_recv state.
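	 *
	 * (The express path, sctp_express_handle_sack(), is a shortcut
	 * through the same steps for the common case of a SACK that
	 * carries no gap-ack blocks.)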
4317 */ 4318 SCTP_TCB_LOCK_ASSERT(stcb); 4319 sack = &ch->sack; 4320 /* CMT DAC algo */ 4321 this_sack_lowest_newack = 0; 4322 j = 0; 4323 sack_length = (unsigned int)sack_len; 4324 /* ECN Nonce */ 4325 SCTP_STAT_INCR(sctps_slowpath_sack); 4326 nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM; 4327 cum_ack = last_tsn = ntohl(sack->cum_tsn_ack); 4328 #ifdef SCTP_ASOCLOG_OF_TSNS 4329 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack; 4330 stcb->asoc.cumack_log_at++; 4331 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 4332 stcb->asoc.cumack_log_at = 0; 4333 } 4334 #endif 4335 num_seg = ntohs(sack->num_gap_ack_blks); 4336 a_rwnd = rwnd; 4337 4338 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 4339 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack, 4340 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 4341 } 4342 /* CMT DAC algo */ 4343 cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC; 4344 num_dup = ntohs(sack->num_dup_tsns); 4345 4346 old_rwnd = stcb->asoc.peers_rwnd; 4347 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4348 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4349 stcb->asoc.overall_error_count, 4350 0, 4351 SCTP_FROM_SCTP_INDATA, 4352 __LINE__); 4353 } 4354 stcb->asoc.overall_error_count = 0; 4355 asoc = &stcb->asoc; 4356 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4357 sctp_log_sack(asoc->last_acked_seq, 4358 cum_ack, 4359 0, 4360 num_seg, 4361 num_dup, 4362 SCTP_LOG_NEW_SACK); 4363 } 4364 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) { 4365 int off_to_dup, iii; 4366 uint32_t *dupdata, dblock; 4367 4368 off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk); 4369 if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= sack_length) { 4370 dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup, 4371 sizeof(uint32_t), (uint8_t *) & dblock); 4372 off_to_dup += sizeof(uint32_t); 4373 if (dupdata) { 4374 for (iii = 0; iii < num_dup; iii++) { 4375 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); 4376 dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup, 4377 sizeof(uint32_t), (uint8_t *) & dblock); 4378 if (dupdata == NULL) 4379 break; 4380 off_to_dup += sizeof(uint32_t); 4381 } 4382 } 4383 } else { 4384 SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d sack_len:%d num gaps:%d\n", 4385 off_to_dup, num_dup, sack_length, num_seg); 4386 } 4387 } 4388 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) { 4389 /* reality check */ 4390 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 4391 tp1 = TAILQ_LAST(&asoc->sent_queue, 4392 sctpchunk_listhead); 4393 send_s = tp1->rec.data.TSN_seq + 1; 4394 } else { 4395 send_s = asoc->sending_seq; 4396 } 4397 if (cum_ack == send_s || 4398 compare_with_wrap(cum_ack, send_s, MAX_TSN)) { 4399 #ifndef INVARIANTS 4400 struct mbuf *oper; 4401 4402 #endif 4403 #ifdef INVARIANTS 4404 hopeless_peer: 4405 panic("Impossible sack 1"); 4406 #else 4407 4408 4409 /* 4410 * no way, we have not even sent this TSN out yet. 4411 * Peer is hopelessly messed up with us. 
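			 * The ABORT built below carries a Protocol
			 * Violation error cause whose payload is the
			 * SCTP_FROM_SCTP_INDATA + SCTP_LOC_25 marker, so
			 * the failure point can be recovered from a trace.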
4412 */ 4413 hopeless_peer: 4414 *abort_now = 1; 4415 /* XXX */ 4416 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4417 0, M_DONTWAIT, 1, MT_DATA); 4418 if (oper) { 4419 struct sctp_paramhdr *ph; 4420 uint32_t *ippp; 4421 4422 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 4423 sizeof(uint32_t); 4424 ph = mtod(oper, struct sctp_paramhdr *); 4425 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 4426 ph->param_length = htons(SCTP_BUF_LEN(oper)); 4427 ippp = (uint32_t *) (ph + 1); 4428 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25); 4429 } 4430 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 4431 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED); 4432 return; 4433 #endif 4434 } 4435 } 4436 /**********************/ 4437 /* 1) check the range */ 4438 /**********************/ 4439 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) { 4440 /* acking something behind */ 4441 return; 4442 } 4443 sav_cum_ack = asoc->last_acked_seq; 4444 4445 /* update the Rwnd of the peer */ 4446 if (TAILQ_EMPTY(&asoc->sent_queue) && 4447 TAILQ_EMPTY(&asoc->send_queue) && 4448 (asoc->stream_queue_cnt == 0) 4449 ) { 4450 /* nothing left on send/sent and strmq */ 4451 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4452 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4453 asoc->peers_rwnd, 0, 0, a_rwnd); 4454 } 4455 asoc->peers_rwnd = a_rwnd; 4456 if (asoc->sent_queue_retran_cnt) { 4457 asoc->sent_queue_retran_cnt = 0; 4458 } 4459 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4460 /* SWS sender side engages */ 4461 asoc->peers_rwnd = 0; 4462 } 4463 /* stop any timers */ 4464 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4465 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4466 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); 4467 if (SCTP_BASE_SYSCTL(sctp_early_fr)) { 4468 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 4469 SCTP_STAT_INCR(sctps_earlyfrstpidsck1); 4470 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 4471 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); 4472 } 4473 } 4474 net->partial_bytes_acked = 0; 4475 net->flight_size = 0; 4476 } 4477 asoc->total_flight = 0; 4478 asoc->total_flight_count = 0; 4479 return; 4480 } 4481 /* 4482 * We init netAckSz and netAckSz2 to 0. These are used to track 2 4483 * things. The total byte count acked is tracked in netAckSz AND 4484 * netAck2 is used to track the total bytes acked that are un- 4485 * amibguious and were never retransmitted. We track these on a per 4486 * destination address basis. 4487 */ 4488 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4489 net->prev_cwnd = net->cwnd; 4490 net->net_ack = 0; 4491 net->net_ack2 = 0; 4492 4493 /* 4494 * CMT: Reset CUC and Fast recovery algo variables before 4495 * SACK processing 4496 */ 4497 net->new_pseudo_cumack = 0; 4498 net->will_exit_fast_recovery = 0; 4499 } 4500 /* process the new consecutive TSN first */ 4501 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4502 while (tp1) { 4503 if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq, 4504 MAX_TSN) || 4505 last_tsn == tp1->rec.data.TSN_seq) { 4506 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 4507 /* 4508 * ECN Nonce: Add the nonce to the sender's 4509 * nonce sum 4510 */ 4511 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce; 4512 accum_moved = 1; 4513 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4514 /* 4515 * If it is less than ACKED, it is 4516 * now no-longer in flight. 
Higher 4517 * values may occur during marking 4518 */ 4519 if ((tp1->whoTo->dest_state & 4520 SCTP_ADDR_UNCONFIRMED) && 4521 (tp1->snd_count < 2)) { 4522 /* 4523 * If there was no retran 4524 * and the address is 4525 * un-confirmed and we sent 4526 * there and are now 4527 * sacked.. its confirmed, 4528 * mark it so. 4529 */ 4530 tp1->whoTo->dest_state &= 4531 ~SCTP_ADDR_UNCONFIRMED; 4532 } 4533 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4534 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4535 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4536 tp1->whoTo->flight_size, 4537 tp1->book_size, 4538 (uintptr_t) tp1->whoTo, 4539 tp1->rec.data.TSN_seq); 4540 } 4541 sctp_flight_size_decrease(tp1); 4542 sctp_total_flight_decrease(stcb, tp1); 4543 } 4544 tp1->whoTo->net_ack += tp1->send_size; 4545 4546 /* CMT SFR and DAC algos */ 4547 this_sack_lowest_newack = tp1->rec.data.TSN_seq; 4548 tp1->whoTo->saw_newack = 1; 4549 4550 if (tp1->snd_count < 2) { 4551 /* 4552 * True non-retransmited 4553 * chunk 4554 */ 4555 tp1->whoTo->net_ack2 += 4556 tp1->send_size; 4557 4558 /* update RTO too? */ 4559 if (tp1->do_rtt) { 4560 tp1->whoTo->RTO = 4561 sctp_calculate_rto(stcb, 4562 asoc, tp1->whoTo, 4563 &tp1->sent_rcv_time, 4564 sctp_align_safe_nocopy); 4565 tp1->do_rtt = 0; 4566 } 4567 } 4568 /* 4569 * CMT: CUCv2 algorithm. From the 4570 * cumack'd TSNs, for each TSN being 4571 * acked for the first time, set the 4572 * following variables for the 4573 * corresp destination. 4574 * new_pseudo_cumack will trigger a 4575 * cwnd update. 4576 * find_(rtx_)pseudo_cumack will 4577 * trigger search for the next 4578 * expected (rtx-)pseudo-cumack. 4579 */ 4580 tp1->whoTo->new_pseudo_cumack = 1; 4581 tp1->whoTo->find_pseudo_cumack = 1; 4582 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4583 4584 4585 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4586 sctp_log_sack(asoc->last_acked_seq, 4587 cum_ack, 4588 tp1->rec.data.TSN_seq, 4589 0, 4590 0, 4591 SCTP_LOG_TSN_ACKED); 4592 } 4593 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 4594 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 4595 } 4596 } 4597 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4598 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4599 #ifdef SCTP_AUDITING_ENABLED 4600 sctp_audit_log(0xB3, 4601 (asoc->sent_queue_retran_cnt & 0x000000ff)); 4602 #endif 4603 } 4604 if (tp1->rec.data.chunk_was_revoked) { 4605 /* deflate the cwnd */ 4606 tp1->whoTo->cwnd -= tp1->book_size; 4607 tp1->rec.data.chunk_was_revoked = 0; 4608 } 4609 tp1->sent = SCTP_DATAGRAM_ACKED; 4610 } 4611 } else { 4612 break; 4613 } 4614 tp1 = TAILQ_NEXT(tp1, sctp_next); 4615 } 4616 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; 4617 /* always set this up to cum-ack */ 4618 asoc->this_sack_highest_gap = last_tsn; 4619 4620 /* Move offset up to point to gaps/dups */ 4621 offset += sizeof(struct sctp_sack_chunk); 4622 if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) { 4623 4624 /* skip corrupt segments */ 4625 goto skip_segments; 4626 } 4627 if (num_seg > 0) { 4628 4629 /* 4630 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has 4631 * to be greater than the cumack. Also reset saw_newack to 0 4632 * for all dests. 
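		 * For illustration (hypothetical): if this SACK's gap
		 * blocks only cover TSNs sent to one destination,
		 * saw_newack is set on that destination alone and
		 * this_sack_highest_newack advances only there; the
		 * strike logic later consults these per-destination
		 * marks.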
4633 */ 4634 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4635 net->saw_newack = 0; 4636 net->this_sack_highest_newack = last_tsn; 4637 } 4638 4639 /* 4640 * thisSackHighestGap will increase while handling NEW 4641 * segments this_sack_highest_newack will increase while 4642 * handling NEWLY ACKED chunks. this_sack_lowest_newack is 4643 * used for CMT DAC algo. saw_newack will also change. 4644 */ 4645 sctp_handle_segments(m, &offset, stcb, asoc, ch, last_tsn, 4646 &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack, 4647 num_seg, &ecn_seg_sums); 4648 4649 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) { 4650 /* 4651 * validate the biggest_tsn_acked in the gap acks if 4652 * strict adherence is wanted. 4653 */ 4654 if ((biggest_tsn_acked == send_s) || 4655 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) { 4656 /* 4657 * peer is either confused or we are under 4658 * attack. We must abort. 4659 */ 4660 goto hopeless_peer; 4661 } 4662 } 4663 } 4664 skip_segments: 4665 /*******************************************/ 4666 /* cancel ALL T3-send timer if accum moved */ 4667 /*******************************************/ 4668 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) { 4669 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4670 if (net->new_pseudo_cumack) 4671 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4672 stcb, net, 4673 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27); 4674 4675 } 4676 } else { 4677 if (accum_moved) { 4678 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4679 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4680 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28); 4681 } 4682 } 4683 } 4684 /********************************************/ 4685 /* drop the acked chunks from the sendqueue */ 4686 /********************************************/ 4687 asoc->last_acked_seq = cum_ack; 4688 4689 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4690 if (tp1 == NULL) 4691 goto done_with_it; 4692 do { 4693 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack, 4694 MAX_TSN)) { 4695 break; 4696 } 4697 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 4698 /* no more sent on list */ 4699 printf("Warning, tp1->sent == %d and its now acked?\n", 4700 tp1->sent); 4701 } 4702 tp2 = TAILQ_NEXT(tp1, sctp_next); 4703 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4704 if (tp1->pr_sctp_on) { 4705 if (asoc->pr_sctp_cnt != 0) 4706 asoc->pr_sctp_cnt--; 4707 } 4708 if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) && 4709 (asoc->total_flight > 0)) { 4710 #ifdef INVARIANTS 4711 panic("Warning flight size is postive and should be 0"); 4712 #else 4713 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n", 4714 asoc->total_flight); 4715 #endif 4716 asoc->total_flight = 0; 4717 } 4718 if (tp1->data) { 4719 /* sa_ignore NO_NULL_CHK */ 4720 sctp_free_bufspace(stcb, asoc, tp1, 1); 4721 sctp_m_freem(tp1->data); 4722 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 4723 asoc->sent_queue_cnt_removeable--; 4724 } 4725 } 4726 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4727 sctp_log_sack(asoc->last_acked_seq, 4728 cum_ack, 4729 tp1->rec.data.TSN_seq, 4730 0, 4731 0, 4732 SCTP_LOG_FREE_SENT); 4733 } 4734 tp1->data = NULL; 4735 asoc->sent_queue_cnt--; 4736 sctp_free_a_chunk(stcb, tp1); 4737 wake_him++; 4738 tp1 = tp2; 4739 } while (tp1 != NULL); 4740 4741 done_with_it: 4742 /* sa_ignore NO_NULL_CHK */ 4743 if ((wake_him) && (stcb->sctp_socket)) { 4744 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4745 struct socket *so; 4746 4747 #endif 4748 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4749 if 
(SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4750 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK); 4751 } 4752 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4753 so = SCTP_INP_SO(stcb->sctp_ep); 4754 atomic_add_int(&stcb->asoc.refcnt, 1); 4755 SCTP_TCB_UNLOCK(stcb); 4756 SCTP_SOCKET_LOCK(so, 1); 4757 SCTP_TCB_LOCK(stcb); 4758 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4759 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4760 /* assoc was freed while we were unlocked */ 4761 SCTP_SOCKET_UNLOCK(so, 1); 4762 return; 4763 } 4764 #endif 4765 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4766 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4767 SCTP_SOCKET_UNLOCK(so, 1); 4768 #endif 4769 } else { 4770 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4771 sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK); 4772 } 4773 } 4774 4775 if (asoc->fast_retran_loss_recovery && accum_moved) { 4776 if (compare_with_wrap(asoc->last_acked_seq, 4777 asoc->fast_recovery_tsn, MAX_TSN) || 4778 asoc->last_acked_seq == asoc->fast_recovery_tsn) { 4779 /* Setup so we will exit RFC2582 fast recovery */ 4780 will_exit_fast_recovery = 1; 4781 } 4782 } 4783 /* 4784 * Check for revoked fragments: 4785 * 4786 * if Previous sack - Had no frags then we can't have any revoked if 4787 * Previous sack - Had frag's then - If we now have frags aka 4788 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked 4789 * some of them. else - The peer revoked all ACKED fragments, since 4790 * we had some before and now we have NONE. 4791 */ 4792 4793 if (num_seg) 4794 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked); 4795 else if (asoc->saw_sack_with_frags) { 4796 int cnt_revoked = 0; 4797 4798 tp1 = TAILQ_FIRST(&asoc->sent_queue); 4799 if (tp1 != NULL) { 4800 /* Peer revoked all dg's marked or acked */ 4801 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4802 if ((tp1->sent > SCTP_DATAGRAM_RESEND) && 4803 (tp1->sent < SCTP_FORWARD_TSN_SKIP)) { 4804 tp1->sent = SCTP_DATAGRAM_SENT; 4805 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4806 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 4807 tp1->whoTo->flight_size, 4808 tp1->book_size, 4809 (uintptr_t) tp1->whoTo, 4810 tp1->rec.data.TSN_seq); 4811 } 4812 sctp_flight_size_increase(tp1); 4813 sctp_total_flight_increase(stcb, tp1); 4814 tp1->rec.data.chunk_was_revoked = 1; 4815 /* 4816 * To ensure that this increase in 4817 * flightsize, which is artificial, 4818 * does not throttle the sender, we 4819 * also increase the cwnd 4820 * artificially. 
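				 * The compensation is symmetric: when the
				 * revoked chunk is later cum-acked,
				 * chunk_was_revoked is checked and the same
				 * book_size is subtracted from the cwnd
				 * again (the "deflate the cwnd" step in the
				 * cum-ack processing above).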
4821 */ 4822 tp1->whoTo->cwnd += tp1->book_size; 4823 cnt_revoked++; 4824 } 4825 } 4826 if (cnt_revoked) { 4827 reneged_all = 1; 4828 } 4829 } 4830 asoc->saw_sack_with_frags = 0; 4831 } 4832 if (num_seg) 4833 asoc->saw_sack_with_frags = 1; 4834 else 4835 asoc->saw_sack_with_frags = 0; 4836 4837 /* JRS - Use the congestion control given in the CC module */ 4838 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery); 4839 4840 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4841 /* nothing left in-flight */ 4842 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4843 /* stop all timers */ 4844 if (SCTP_BASE_SYSCTL(sctp_early_fr)) { 4845 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 4846 SCTP_STAT_INCR(sctps_earlyfrstpidsck4); 4847 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 4848 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29); 4849 } 4850 } 4851 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4852 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30); 4853 net->flight_size = 0; 4854 net->partial_bytes_acked = 0; 4855 } 4856 asoc->total_flight = 0; 4857 asoc->total_flight_count = 0; 4858 } 4859 /**********************************/ 4860 /* Now what about shutdown issues */ 4861 /**********************************/ 4862 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4863 /* nothing left on sendqueue.. consider done */ 4864 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4865 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4866 asoc->peers_rwnd, 0, 0, a_rwnd); 4867 } 4868 asoc->peers_rwnd = a_rwnd; 4869 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4870 /* SWS sender side engages */ 4871 asoc->peers_rwnd = 0; 4872 } 4873 /* clean up */ 4874 if ((asoc->stream_queue_cnt == 1) && 4875 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4876 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 4877 (asoc->locked_on_sending) 4878 ) { 4879 struct sctp_stream_queue_pending *sp; 4880 4881 /* 4882 * I may be in a state where we got all across.. but 4883 * cannot write more due to a shutdown... we abort 4884 * since the user did not indicate EOR in this case. 
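			 * (A pending sp of length zero whose
			 * msg_is_complete flag is clear means the user
			 * started a message but never finished it, so
			 * SCTP_STATE_PARTIAL_MSG_LEFT is set and the abort
			 * path below fires.)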
4885 */ 4886 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), 4887 sctp_streamhead); 4888 if ((sp) && (sp->length == 0)) { 4889 asoc->locked_on_sending = NULL; 4890 if (sp->msg_is_complete) { 4891 asoc->stream_queue_cnt--; 4892 } else { 4893 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4894 asoc->stream_queue_cnt--; 4895 } 4896 } 4897 } 4898 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4899 (asoc->stream_queue_cnt == 0)) { 4900 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4901 /* Need to abort here */ 4902 struct mbuf *oper; 4903 4904 abort_out_now: 4905 *abort_now = 1; 4906 /* XXX */ 4907 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4908 0, M_DONTWAIT, 1, MT_DATA); 4909 if (oper) { 4910 struct sctp_paramhdr *ph; 4911 uint32_t *ippp; 4912 4913 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 4914 sizeof(uint32_t); 4915 ph = mtod(oper, struct sctp_paramhdr *); 4916 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 4917 ph->param_length = htons(SCTP_BUF_LEN(oper)); 4918 ippp = (uint32_t *) (ph + 1); 4919 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31); 4920 } 4921 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31; 4922 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED); 4923 return; 4924 } else { 4925 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 4926 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4927 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4928 } 4929 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 4930 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4931 sctp_stop_timers_for_shutdown(stcb); 4932 sctp_send_shutdown(stcb, 4933 stcb->asoc.primary_destination); 4934 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4935 stcb->sctp_ep, stcb, asoc->primary_destination); 4936 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4937 stcb->sctp_ep, stcb, asoc->primary_destination); 4938 } 4939 return; 4940 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4941 (asoc->stream_queue_cnt == 0)) { 4942 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4943 goto abort_out_now; 4944 } 4945 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4946 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); 4947 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4948 sctp_send_shutdown_ack(stcb, 4949 stcb->asoc.primary_destination); 4950 4951 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4952 stcb->sctp_ep, stcb, asoc->primary_destination); 4953 return; 4954 } 4955 } 4956 /* 4957 * Now here we are going to recycle net_ack for a different use... 4958 * HEADS UP. 4959 */ 4960 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4961 net->net_ack = 0; 4962 } 4963 4964 /* 4965 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking 4966 * to be done. Setting this_sack_lowest_newack to the cum_ack will 4967 * automatically ensure that. 4968 */ 4969 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) { 4970 this_sack_lowest_newack = cum_ack; 4971 } 4972 if (num_seg > 0) { 4973 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, 4974 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved); 4975 } 4976 /*********************************************/ 4977 /* Here we perform PR-SCTP procedures */ 4978 /* (section 4.2) */ 4979 /*********************************************/ 4980 /* C1. 
update advancedPeerAckPoint */
	if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
		asoc->advanced_peer_ack_point = cum_ack;
	}
	/* C2. try to further move advancedPeerAckPoint ahead */
	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
		struct sctp_tmit_chunk *lchk;

		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
		/* C3. See if we need to send a Fwd-TSN */
		if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
		    MAX_TSN)) {
			/*
			 * ISSUE with ECN: see the FWD-TSN processing for
			 * notes on issues that will occur when the ECN
			 * nonce stuff is put into SCTP for cross-checking.
			 */
			send_forward_tsn(stcb, asoc);

			/*
			 * ECN Nonce: Disable the nonce sum check when a
			 * FWD-TSN is sent and store the resync TSN.
			 */
			asoc->nonce_sum_check = 0;
			asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
			if (lchk) {
				/* Assure a timer is up */
				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, lchk->whoTo);
			}
		}
	}
	/* JRS - Use the congestion control given in the CC module */
	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);

	/******************************************************************
	 * Here we do the ECN nonce checking.
	 * We basically check to see if the nonce sum flag was incorrect
	 * or if resynchronization needs to be done. Also, if we catch a
	 * misbehaving receiver we give him the kick.
	 ******************************************************************/

	if (asoc->ecn_nonce_allowed) {
		if (asoc->nonce_sum_check) {
			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
				if (asoc->nonce_wait_for_ecne == 0) {
					struct sctp_tmit_chunk *lchk;

					lchk = TAILQ_FIRST(&asoc->send_queue);
					asoc->nonce_wait_for_ecne = 1;
					if (lchk) {
						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
					} else {
						asoc->nonce_wait_tsn = asoc->sending_seq;
					}
				} else {
					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
						/*
						 * Misbehaving peer. We need
						 * to react to this guy.
						 */
						asoc->ecn_allowed = 0;
						asoc->ecn_nonce_allowed = 0;
					}
				}
			}
		} else {
			/* See if Resynchronization is Possible */
			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
				asoc->nonce_sum_check = 1;
				/*
				 * Now we must calculate what the base is.
				 * We do this based on two things: we know
				 * the totals for all the segments gap-acked
				 * in the SACK (stored in ecn_seg_sums), and
				 * we know the SACK's nonce sum, which is in
				 * nonce_sum_flag. So we can build a truth
				 * table to back-calculate the new value of
				 * asoc->nonce_sum_expect_base:
				 *
				 * SACK-flag-Value  Seg-Sums  Base
				 *        0            0       0
				 *        1            0       1
				 *        0            1       1
				 *        1            1       0
				 */
				asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
			}
		}
	}
	/* Now, are we exiting loss recovery?
*/ 5071 if (will_exit_fast_recovery) { 5072 /* Ok, we must exit fast recovery */ 5073 asoc->fast_retran_loss_recovery = 0; 5074 } 5075 if ((asoc->sat_t3_loss_recovery) && 5076 ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn, 5077 MAX_TSN) || 5078 (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) { 5079 /* end satellite t3 loss recovery */ 5080 asoc->sat_t3_loss_recovery = 0; 5081 } 5082 /* 5083 * CMT Fast recovery 5084 */ 5085 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5086 if (net->will_exit_fast_recovery) { 5087 /* Ok, we must exit fast recovery */ 5088 net->fast_retran_loss_recovery = 0; 5089 } 5090 } 5091 5092 /* Adjust and set the new rwnd value */ 5093 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 5094 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 5095 asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd); 5096 } 5097 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd, 5098 (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 5099 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 5100 /* SWS sender side engages */ 5101 asoc->peers_rwnd = 0; 5102 } 5103 if (asoc->peers_rwnd > old_rwnd) { 5104 win_probe_recovery = 1; 5105 } 5106 /* 5107 * Now we must setup so we have a timer up for anyone with 5108 * outstanding data. 5109 */ 5110 done_once = 0; 5111 again: 5112 j = 0; 5113 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5114 if (win_probe_recovery && (net->window_probe)) { 5115 net->window_probe = 0; 5116 win_probe_recovered = 1; 5117 /*- 5118 * Find first chunk that was used with 5119 * window probe and clear the event. Put 5120 * it back into the send queue as if has 5121 * not been sent. 5122 */ 5123 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5124 if (tp1->window_probe) { 5125 sctp_window_probe_recovery(stcb, asoc, net, tp1); 5126 break; 5127 } 5128 } 5129 } 5130 if (net->flight_size) { 5131 j++; 5132 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5133 stcb->sctp_ep, stcb, net); 5134 } else { 5135 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5136 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 5137 stcb, net, 5138 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22); 5139 } 5140 if (SCTP_BASE_SYSCTL(sctp_early_fr)) { 5141 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 5142 SCTP_STAT_INCR(sctps_earlyfrstpidsck4); 5143 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 5144 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23); 5145 } 5146 } 5147 } 5148 } 5149 if ((j == 0) && 5150 (!TAILQ_EMPTY(&asoc->sent_queue)) && 5151 (asoc->sent_queue_retran_cnt == 0) && 5152 (win_probe_recovered == 0) && 5153 (done_once == 0)) { 5154 /* huh, this should not happen */ 5155 sctp_fs_audit(asoc); 5156 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5157 net->flight_size = 0; 5158 } 5159 asoc->total_flight = 0; 5160 asoc->total_flight_count = 0; 5161 asoc->sent_queue_retran_cnt = 0; 5162 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5163 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5164 sctp_flight_size_increase(tp1); 5165 sctp_total_flight_increase(stcb, tp1); 5166 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 5167 asoc->sent_queue_retran_cnt++; 5168 } 5169 } 5170 done_once = 1; 5171 goto again; 5172 } 5173 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 5174 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 5175 a_rwnd, 5176 stcb->asoc.peers_rwnd, 5177 stcb->asoc.total_flight, 5178 stcb->asoc.total_output_queue_size); 5179 } 5180 } 
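/*
 * Illustrative arithmetic for the peers_rwnd update at the end of
 * sctp_handle_sack(), using hypothetical values: with a_rwnd = 64000,
 * total_flight = 12000, sent_queue_cnt = 10 and sctp_peer_chunk_oh =
 * 256, the peer is credited 64000 - (12000 + 10 * 256) = 49440 bytes;
 * any result below the sctp_sws_sender threshold is clamped to 0 so we
 * never chase a silly window.
 */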

void
sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
    struct sctp_nets *netp, int *abort_flag)
{
	/* Copy cum-ack */
	uint32_t cum_ack, a_rwnd;

	cum_ack = ntohl(cp->cumulative_tsn_ack);
	/* Arrange so a_rwnd does NOT change */
	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;

	/* Now call the express sack handling */
	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
}

static void
sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
    struct sctp_stream_in *strmin)
{
	struct sctp_queued_to_read *ctl, *nctl;
	struct sctp_association *asoc;
	int tt;

	asoc = &stcb->asoc;
	tt = strmin->last_sequence_delivered;
	/*
	 * First deliver anything prior to and including the stream
	 * sequence number that was last delivered.
	 */
	ctl = TAILQ_FIRST(&strmin->inqueue);
	while (ctl) {
		nctl = TAILQ_NEXT(ctl, next);
		if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
		    (tt == ctl->sinfo_ssn)) {
			/* this is deliverable now */
			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
			/* subtract pending on streams */
			asoc->size_on_all_streams -= ctl->length;
			sctp_ucount_decr(asoc->cnt_on_all_streams);
			/* deliver it to at least the delivery-q */
			if (stcb->sctp_socket) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    ctl,
				    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* no more delivery now. */
			break;
		}
		ctl = nctl;
	}
	/*
	 * Now we must deliver things in the queue the normal way, if any
	 * are now ready.
	 */
	tt = strmin->last_sequence_delivered + 1;
	ctl = TAILQ_FIRST(&strmin->inqueue);
	while (ctl) {
		nctl = TAILQ_NEXT(ctl, next);
		if (tt == ctl->sinfo_ssn) {
			/* this is deliverable now */
			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
			/* subtract pending on streams */
			asoc->size_on_all_streams -= ctl->length;
			sctp_ucount_decr(asoc->cnt_on_all_streams);
			/* deliver it to at least the delivery-q */
			strmin->last_sequence_delivered = ctl->sinfo_ssn;
			if (stcb->sctp_socket) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    ctl,
				    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
			}
			tt = strmin->last_sequence_delivered + 1;
		} else {
			break;
		}
		ctl = nctl;
	}
}

void
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
    struct sctp_forward_tsn_chunk *fwd, int *abort_flag, struct mbuf *m, int offset)
{
	/*
	 * ISSUES that MUST be fixed for ECN! When we are the sender of the
	 * forward TSN, when the SACK comes back that acknowledges the
	 * FWD-TSN we must reset the nonce sum to match correctly. This will
	 * get quite tricky since we may have sent more intervening data
	 * and must carefully account for what the SACK says on the nonce
	 * and any gaps that are reported.
This work will NOT be done here, 5273 * but I note it here since it is really related to PR-SCTP and 5274 * FWD-TSN's 5275 */ 5276 5277 /* The pr-sctp fwd tsn */ 5278 /* 5279 * here we will perform all the data receiver side steps for 5280 * processing FwdTSN, as required in by pr-sctp draft: 5281 * 5282 * Assume we get FwdTSN(x): 5283 * 5284 * 1) update local cumTSN to x 2) try to further advance cumTSN to x + 5285 * others we have 3) examine and update re-ordering queue on 5286 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to 5287 * report where we are. 5288 */ 5289 struct sctp_association *asoc; 5290 uint32_t new_cum_tsn, gap; 5291 unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size; 5292 struct sctp_stream_in *strm; 5293 struct sctp_tmit_chunk *chk, *at; 5294 5295 cumack_set_flag = 0; 5296 asoc = &stcb->asoc; 5297 cnt_gone = 0; 5298 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) { 5299 SCTPDBG(SCTP_DEBUG_INDATA1, 5300 "Bad size too small/big fwd-tsn\n"); 5301 return; 5302 } 5303 m_size = (stcb->asoc.mapping_array_size << 3); 5304 /*************************************************************/ 5305 /* 1. Here we update local cumTSN and shift the bitmap array */ 5306 /*************************************************************/ 5307 new_cum_tsn = ntohl(fwd->new_cumulative_tsn); 5308 5309 if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) || 5310 asoc->cumulative_tsn == new_cum_tsn) { 5311 /* Already got there ... */ 5312 return; 5313 } 5314 if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map, 5315 MAX_TSN)) { 5316 asoc->highest_tsn_inside_map = new_cum_tsn; 5317 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 5318 sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 5319 } 5320 } 5321 /* 5322 * now we know the new TSN is more advanced, let's find the actual 5323 * gap 5324 */ 5325 if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn, 5326 MAX_TSN)) || 5327 (new_cum_tsn == asoc->mapping_array_base_tsn)) { 5328 gap = new_cum_tsn - asoc->mapping_array_base_tsn; 5329 } else { 5330 /* try to prevent underflow here */ 5331 gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1; 5332 } 5333 5334 if (gap >= m_size) { 5335 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 5336 sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 5337 } 5338 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) { 5339 struct mbuf *oper; 5340 5341 /* 5342 * out of range (of single byte chunks in the rwnd I 5343 * give out). This must be an attacker. 
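		 * For illustration (hypothetical numbers): with
		 * mapping_array_base_tsn = 0xfffffffe and new_cum_tsn = 3,
		 * the wrap branch above yields
		 * gap = 3 + (0xffffffff - 0xfffffffe) + 1 = 5; a gap far
		 * beyond the advertised rwnd cannot come from an honest
		 * sender, hence the abort below.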
5344 */ 5345 *abort_flag = 1; 5346 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 5347 0, M_DONTWAIT, 1, MT_DATA); 5348 if (oper) { 5349 struct sctp_paramhdr *ph; 5350 uint32_t *ippp; 5351 5352 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 5353 (sizeof(uint32_t) * 3); 5354 ph = mtod(oper, struct sctp_paramhdr *); 5355 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 5356 ph->param_length = htons(SCTP_BUF_LEN(oper)); 5357 ippp = (uint32_t *) (ph + 1); 5358 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33); 5359 ippp++; 5360 *ippp = asoc->highest_tsn_inside_map; 5361 ippp++; 5362 *ippp = new_cum_tsn; 5363 } 5364 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33; 5365 sctp_abort_an_association(stcb->sctp_ep, stcb, 5366 SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED); 5367 return; 5368 } 5369 SCTP_STAT_INCR(sctps_fwdtsn_map_over); 5370 slide_out: 5371 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); 5372 cumack_set_flag = 1; 5373 asoc->mapping_array_base_tsn = new_cum_tsn + 1; 5374 asoc->cumulative_tsn = asoc->highest_tsn_inside_map = new_cum_tsn; 5375 5376 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 5377 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 5378 } 5379 asoc->last_echo_tsn = asoc->highest_tsn_inside_map; 5380 } else { 5381 SCTP_TCB_LOCK_ASSERT(stcb); 5382 if ((compare_with_wrap(((uint32_t) asoc->cumulative_tsn + gap), asoc->highest_tsn_inside_map, MAX_TSN)) || 5383 (((uint32_t) asoc->cumulative_tsn + gap) == asoc->highest_tsn_inside_map)) { 5384 goto slide_out; 5385 } else { 5386 for (i = 0; i <= gap; i++) { 5387 SCTP_SET_TSN_PRESENT(asoc->mapping_array, i); 5388 } 5389 } 5390 /* 5391 * Now after marking all, slide thing forward but no sack 5392 * please. 5393 */ 5394 sctp_sack_check(stcb, 0, 0, abort_flag); 5395 if (*abort_flag) 5396 return; 5397 } 5398 5399 /*************************************************************/ 5400 /* 2. Clear up re-assembly queue */ 5401 /*************************************************************/ 5402 /* 5403 * First service it if pd-api is up, just in case we can progress it 5404 * forward 5405 */ 5406 if (asoc->fragmented_delivery_inprogress) { 5407 sctp_service_reassembly(stcb, asoc); 5408 } 5409 if (!TAILQ_EMPTY(&asoc->reasmqueue)) { 5410 /* For each one on here see if we need to toss it */ 5411 /* 5412 * For now large messages held on the reasmqueue that are 5413 * complete will be tossed too. We could in theory do more 5414 * work to spin through and stop after dumping one msg aka 5415 * seeing the start of a new msg at the head, and call the 5416 * delivery function... to see if it can be delivered... But 5417 * for now we just dump everything on the queue. 
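		 * Everything at or below the new cumulative TSN is freed
		 * below; the first chunk beyond it ends the sweep, with a
		 * special case to abort an in-progress partial delivery
		 * whose remaining fragments were skipped over.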
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		while (chk) {
			at = TAILQ_NEXT(chk, sctp_next);
			if (compare_with_wrap(asoc->cumulative_tsn,
			    chk->rec.data.TSN_seq, MAX_TSN) ||
			    asoc->cumulative_tsn == chk->rec.data.TSN_seq) {
				/* It needs to be tossed */
				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
				if (compare_with_wrap(chk->rec.data.TSN_seq,
				    asoc->tsn_last_delivered, MAX_TSN)) {
					asoc->tsn_last_delivered =
					    chk->rec.data.TSN_seq;
					asoc->str_of_pdapi =
					    chk->rec.data.stream_number;
					asoc->ssn_of_pdapi =
					    chk->rec.data.stream_seq;
					asoc->fragment_flags =
					    chk->rec.data.rcv_flags;
				}
				asoc->size_on_reasm_queue -= chk->send_size;
				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
				cnt_gone++;

				/* Clear up any stream problem */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
				    SCTP_DATA_UNORDERED &&
				    (compare_with_wrap(chk->rec.data.stream_seq,
				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
				    MAX_SEQ))) {
					/*
					 * We must dump forward this stream's
					 * sequence number if the chunk being
					 * skipped is not unordered. There is
					 * a chance that if the peer does not
					 * include the last fragment in its
					 * FWD-TSN we WILL have a problem
					 * here, since we would have a
					 * partial chunk in the queue that
					 * may not be deliverable. Also, if a
					 * partial delivery API has started,
					 * the user may get a partial chunk
					 * with the next read returning a new
					 * chunk... Really ugly, but I see no
					 * way around it! Maybe a notify??
					 */
					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
					    chk->rec.data.stream_seq;
				}
				if (chk->data) {
					sctp_m_freem(chk->data);
					chk->data = NULL;
				}
				sctp_free_a_chunk(stcb, chk);
			} else {
				/*
				 * Ok, we have gone beyond the end of the
				 * fwd-tsn's mark. Some checks...
				 */
				if ((asoc->fragmented_delivery_inprogress) &&
				    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
					uint32_t str_seq;

					/*
					 * Special case: the PD-API is up and
					 * what we fwd-tsn'd over includes
					 * one that had the LAST_FRAG. We no
					 * longer need to do the PD-API.
					 */
					asoc->fragmented_delivery_inprogress = 0;

					str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi;
					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
					    stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq, SCTP_SO_NOT_LOCKED);

				}
				break;
			}
			chk = at;
		}
	}
	if (asoc->fragmented_delivery_inprogress) {
		/*
		 * Ok, we removed cnt_gone chunks from the PD-API queue that
		 * were being delivered, so now we must turn off the flag.
		 */
		uint32_t str_seq;

		str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi;
		sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
		    stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq, SCTP_SO_NOT_LOCKED);
		asoc->fragmented_delivery_inprogress = 0;
	}
	/*************************************************************/
	/* 3. Update the PR-stream re-ordering queues */
	/*************************************************************/
	fwd_sz -= sizeof(*fwd);
	if (m && fwd_sz) {
		/* New method. */
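		/*
		 * Each entry that follows the fixed FWD-TSN header on the
		 * wire is a 4-byte stream/sequence pair (struct sctp_strseq
		 * from sctp_header.h):
		 *
		 *	uint16_t stream;	(network byte order)
		 *	uint16_t sequence;	(network byte order)
		 *
		 * so dividing the remaining chunk length by
		 * sizeof(struct sctp_strseq) below gives the number of
		 * streams the peer is skipping ahead on.
		 */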
		unsigned int num_str;
		struct sctp_strseq *stseq, strseqbuf;

		offset += sizeof(*fwd);

		num_str = fwd_sz / sizeof(struct sctp_strseq);
		for (i = 0; i < num_str; i++) {
			uint16_t st;

			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
			    sizeof(struct sctp_strseq),
			    (uint8_t *)&strseqbuf);
			offset += sizeof(struct sctp_strseq);
			if (stseq == NULL) {
				break;
			}
			/* Convert to host byte order */
			st = ntohs(stseq->stream);
			stseq->stream = st;
			st = ntohs(stseq->sequence);
			stseq->sequence = st;
			/* now process */
			if (stseq->stream >= asoc->streamincnt) {
				/* screwed up streams, stop! */
				break;
			}
			strm = &asoc->strmin[stseq->stream];
			if (compare_with_wrap(stseq->sequence,
			    strm->last_sequence_delivered, MAX_SEQ)) {
				/* Update the sequence number */
				strm->last_sequence_delivered =
				    stseq->sequence;
			}
			/* now kick the stream the new way */
			/* sa_ignore NO_NULL_CHK */
			sctp_kick_prsctp_reorder_queue(stcb, strm);
		}
	}
	if (TAILQ_FIRST(&asoc->reasmqueue)) {
		/* now let's kick out and check for more fragmented delivery */
		/* sa_ignore NO_NULL_CHK */
		sctp_deliver_reasm_check(stcb, &stcb->asoc);
	}
}
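
/*
 * A minimal, self-contained sketch (kept under #if 0 so it is not part
 * of the kernel build) of the serial-number comparison used throughout
 * the function above. The compare_with_wrap() here is a simplified
 * userland stand-in, not the kernel macro itself; its behaviour at a
 * distance of exactly half the sequence space may differ.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define MAX_TSN 0xffffffff

static int
compare_with_wrap(uint32_t a, uint32_t b, uint32_t max)
{
	/*
	 * a is "newer" than b when the forward distance from b to a is
	 * non-zero and less than half the sequence space.
	 */
	return (a != b && (uint32_t)(a - b) < (max >> 1));
}

int
main(void)
{
	/* Plain case: 10 follows 5. */
	printf("%d\n", compare_with_wrap(10, 5, MAX_TSN));		/* 1 */
	/* Wrap case: 3 follows 0xfffffffa across the 2^32 boundary. */
	printf("%d\n", compare_with_wrap(3, 0xfffffffa, MAX_TSN));	/* 1 */
	/* The reverse direction is correctly rejected. */
	printf("%d\n", compare_with_wrap(0xfffffffa, 3, MAX_TSN));	/* 0 */
	return (0);
}
#endif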