/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_lock_bsd.h>

/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it) for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */
static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int lock_held);

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    uint32_t calc = 0;

    /*
     * This is really set wrong with respect to a one-to-many socket,
     * since sb_cc is the count that everyone has put up. When we
     * rewrite sctp_soreceive() we will fix this so that ONLY this
     * association's data is taken into account.
     */
    if (stcb->sctp_socket == NULL) {
        return (calc);
    }
    KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
        ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
    KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
        ("size_on_all_streams is %u", asoc->size_on_all_streams));
    if (stcb->asoc.sb_cc == 0 &&
        asoc->cnt_on_reasm_queue == 0 &&
        asoc->cnt_on_all_streams == 0) {
        /* Full rwnd granted */
        calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
        return (calc);
    }
    /* get actual space */
    calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
    /*
     * take out what has NOT been put on the socket queue and what we
     * still hold while waiting to put it up.
     */
    calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
        asoc->cnt_on_reasm_queue * MSIZE));
    calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
        asoc->cnt_on_all_streams * MSIZE));
    if (calc == 0) {
        /* out of space */
        return (calc);
    }
    /* what is the overhead of all these rwnd's */
    calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
    /*
     * If the window gets too small due to control overhead, reduce it
     * to 1, even if it would otherwise be 0, so that silly window
     * syndrome (SWS) avoidance stays engaged.
     */
    if (calc < stcb->asoc.my_rwnd_control_len) {
        calc = 1;
    }
    return (calc);
}
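/*
 * A worked example of the calculation above; the numbers are hypothetical
 * and only illustrate the accounting. With SCTP_SB_LIMIT_RCV() = 65536,
 * 2000 bytes held on the reassembly queue in 2 chunks, and nothing on the
 * stream queues, sctp_sbspace() might report 60000, and we would compute
 * roughly:
 *
 *	calc = 60000;
 *	calc = sctp_sbspace_sub(calc, 2000 + 2 * MSIZE);   (payload + mbufs)
 *	calc = sctp_sbspace_sub(calc, my_rwnd_control_len);
 *
 * so the advertised window shrinks by both the queued payload and the
 * per-mbuf bookkeeping, which keeps a slow reader from being flooded with
 * fragments it cannot yet consume.
 */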
/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t sid,
    uint32_t mid, uint8_t flags,
    struct mbuf *dm)
{
    struct sctp_queued_to_read *read_queue_e = NULL;

    sctp_alloc_a_readq(stcb, read_queue_e);
    if (read_queue_e == NULL) {
        goto failed_build;
    }
    memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
    read_queue_e->sinfo_stream = sid;
    read_queue_e->sinfo_flags = (flags << 8);
    read_queue_e->sinfo_ppid = ppid;
    read_queue_e->sinfo_context = context;
    read_queue_e->sinfo_tsn = tsn;
    read_queue_e->sinfo_cumtsn = tsn;
    read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
    read_queue_e->mid = mid;
    read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
    TAILQ_INIT(&read_queue_e->reasm);
    read_queue_e->whoFrom = net;
    atomic_add_int(&net->ref_count, 1);
    read_queue_e->data = dm;
    read_queue_e->stcb = stcb;
    read_queue_e->port_from = stcb->rport;
failed_build:
    return (read_queue_e);
}

struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
    struct sctp_extrcvinfo *seinfo;
    struct sctp_sndrcvinfo *outinfo;
    struct sctp_rcvinfo *rcvinfo;
    struct sctp_nxtinfo *nxtinfo;
    struct cmsghdr *cmh;
    struct mbuf *ret;
    int len;
    int use_extended;
    int provide_nxt;

    if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
        sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
        sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
        /* user does not want any ancillary data */
        return (NULL);
    }
    len = 0;
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
        len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
    }
    seinfo = (struct sctp_extrcvinfo *)sinfo;
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
        (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
        provide_nxt = 1;
        len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
    } else {
        provide_nxt = 0;
    }
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
        if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
            use_extended = 1;
            len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
        } else {
            use_extended = 0;
            len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
        }
    } else {
        use_extended = 0;
    }

    ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
    if (ret == NULL) {
        /* No space */
        return (ret);
    }
    SCTP_BUF_LEN(ret) = 0;

    /* We need a CMSG header followed by the struct */
    cmh = mtod(ret, struct cmsghdr *);
    /*
     * Make sure that there is no un-initialized padding between the
     * cmsg header and cmsg data and after the cmsg data.
     */
    memset(cmh, 0, len);
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
        cmh->cmsg_level = IPPROTO_SCTP;
        cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
        cmh->cmsg_type = SCTP_RCVINFO;
        rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
        rcvinfo->rcv_sid = sinfo->sinfo_stream;
        rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
        rcvinfo->rcv_flags = sinfo->sinfo_flags;
        rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
        rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
        rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
        rcvinfo->rcv_context = sinfo->sinfo_context;
        rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
        cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
        SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
    }
    if (provide_nxt) {
        cmh->cmsg_level = IPPROTO_SCTP;
        cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
        cmh->cmsg_type = SCTP_NXTINFO;
        nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
        nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
        nxtinfo->nxt_flags = 0;
        if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
            nxtinfo->nxt_flags |= SCTP_UNORDERED;
        }
        if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
            nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
        }
        if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
            nxtinfo->nxt_flags |= SCTP_COMPLETE;
        }
        nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
        nxtinfo->nxt_length = seinfo->serinfo_next_length;
        nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
        cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
        SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
    }
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
        cmh->cmsg_level = IPPROTO_SCTP;
        outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
        if (use_extended) {
            cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
            cmh->cmsg_type = SCTP_EXTRCV;
            memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
            SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
        } else {
            cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
            cmh->cmsg_type = SCTP_SNDRCV;
            *outinfo = *sinfo;
            SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
        }
    }
    return (ret);
}
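/*
 * What the mbuf built above looks like to a userland receiver. A minimal
 * sketch, assuming the application enabled the SCTP_RECVRCVINFO socket
 * option (RFC 6458) and receives with recvmsg(2):
 *
 *	struct msghdr msg;
 *	struct cmsghdr *cmsg;
 *	struct sctp_rcvinfo *ri;
 *
 *	(fill in msg_iov and a msg_control buffer, then recvmsg(fd, &msg, 0))
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
 *	    cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if (cmsg->cmsg_level == IPPROTO_SCTP &&
 *		    cmsg->cmsg_type == SCTP_RCVINFO) {
 *			ri = (struct sctp_rcvinfo *)CMSG_DATA(cmsg);
 *			(use ri->rcv_sid, ri->rcv_ppid, ri->rcv_tsn, ...)
 *		}
 *	}
 *
 * The memset() of the whole cmsg area above is what guarantees that the
 * padding CMSG_NXTHDR() walks over never leaks kernel memory.
 */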
static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
    uint32_t gap, i, cumackp1;
    int fnd = 0;
    int in_r = 0, in_nr = 0;

    if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
        return;
    }
    cumackp1 = asoc->cumulative_tsn + 1;
    if (SCTP_TSN_GT(cumackp1, tsn)) {
        /*
         * this tsn is behind the cum ack and thus we don't need to
         * worry about it being moved from one to the other.
         */
        return;
    }
    SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
    in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
    in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
    if ((in_r == 0) && (in_nr == 0)) {
#ifdef INVARIANTS
        panic("Things are really messed up now");
#else
        SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
        sctp_print_mapping_array(asoc);
#endif
    }
    if (in_nr == 0)
        SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
    if (in_r)
        SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
    if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
        asoc->highest_tsn_inside_nr_map = tsn;
    }
    if (tsn == asoc->highest_tsn_inside_map) {
        /* We must back down to see what the new highest is */
        for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
            SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
            if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
                asoc->highest_tsn_inside_map = i;
                fnd = 1;
                break;
            }
        }
        if (!fnd) {
            asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
        }
    }
}
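/*
 * The gap computed by SCTP_CALC_TSN_TO_GAP() above is plain serial-number
 * distance, so TSN wrap-around is harmless. A hypothetical example: with
 * mapping_array_base_tsn = 0xfffffffe and tsn = 0x00000001, the gap is
 * (0x00000001 - 0xfffffffe) mod 2^32 = 3, i.e. bit 3 of the mapping
 * arrays covers that TSN even though the 32-bit values wrapped.
 */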
static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
    struct sctp_queued_to_read *at;
    struct sctp_readhead *q;
    uint8_t flags, unordered;

    flags = (control->sinfo_flags >> 8);
    unordered = flags & SCTP_DATA_UNORDERED;
    if (unordered) {
        q = &strm->uno_inqueue;
        if (asoc->idata_supported == 0) {
            if (!TAILQ_EMPTY(q)) {
                /*
                 * Only one stream can be here in old style
                 * -- abort
                 */
                return (-1);
            }
            TAILQ_INSERT_TAIL(q, control, next_instrm);
            control->on_strm_q = SCTP_ON_UNORDERED;
            return (0);
        }
    } else {
        q = &strm->inqueue;
    }
    if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
        control->end_added = 1;
        control->first_frag_seen = 1;
        control->last_frag_seen = 1;
    }
    if (TAILQ_EMPTY(q)) {
        /* Empty queue */
        TAILQ_INSERT_HEAD(q, control, next_instrm);
        if (unordered) {
            control->on_strm_q = SCTP_ON_UNORDERED;
        } else {
            control->on_strm_q = SCTP_ON_ORDERED;
        }
        return (0);
    } else {
        TAILQ_FOREACH(at, q, next_instrm) {
            if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
                /*
                 * The one in the queue is bigger than the
                 * new one, insert before it.
                 */
                TAILQ_INSERT_BEFORE(at, control, next_instrm);
                if (unordered) {
                    control->on_strm_q = SCTP_ON_UNORDERED;
                } else {
                    control->on_strm_q = SCTP_ON_ORDERED;
                }
                break;
            } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
                /*
                 * The peer sent a duplicate message id
                 * number; return -1 to abort.
                 */
                return (-1);
            } else {
                if (TAILQ_NEXT(at, next_instrm) == NULL) {
                    /*
                     * We are at the end, insert it
                     * after this one
                     */
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
                        sctp_log_strm_del(control, at,
                            SCTP_STR_LOG_FROM_INSERT_TL);
                    }
                    TAILQ_INSERT_AFTER(q, at, control, next_instrm);
                    if (unordered) {
                        control->on_strm_q = SCTP_ON_UNORDERED;
                    } else {
                        control->on_strm_q = SCTP_ON_ORDERED;
                    }
                    break;
                }
            }
        }
    }
    return (0);
}
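/*
 * SCTP_MID_GT()/SCTP_MID_EQ() above compare in serial-number space sized
 * by the transport: 32-bit MIDs when I-DATA is supported, 16-bit SSNs
 * otherwise. A hypothetical old-DATA example: with SSNs 0xfffe, 0xffff,
 * 0x0000 in flight, SCTP_MID_GT(0, 0x0000, 0xffff) is true, so the
 * insertion loop above still orders the wrapped message after its
 * predecessors instead of placing it at the head of the queue.
 */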
static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
    char msg[SCTP_DIAG_INFO_LEN];
    struct mbuf *oper;

    if (stcb->asoc.idata_supported) {
        snprintf(msg, sizeof(msg),
            "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
            opspot,
            control->fsn_included,
            chk->rec.data.tsn,
            chk->rec.data.sid,
            chk->rec.data.fsn, chk->rec.data.mid);
    } else {
        snprintf(msg, sizeof(msg),
            "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
            opspot,
            control->fsn_included,
            chk->rec.data.tsn,
            chk->rec.data.sid,
            chk->rec.data.fsn,
            (uint16_t)chk->rec.data.mid);
    }
    oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
    sctp_m_freem(chk->data);
    chk->data = NULL;
    sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
    sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
    *abort_flag = 1;
}

static void
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
    /*
     * The control could not be placed and must be cleaned.
     */
    struct sctp_tmit_chunk *chk, *nchk;

    TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
        TAILQ_REMOVE(&control->reasm, chk, sctp_next);
        if (chk->data)
            sctp_m_freem(chk->data);
        chk->data = NULL;
        sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
    }
    sctp_free_a_readq(stcb, control);
}
/*
 * Queue the chunk either right into the socket buffer if it is the next
 * one to go, OR put it in the correct place in the delivery queue. If we
 * do append to the so_buf, keep doing so until we are out of order, as
 * long as the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
    /*
     * FIX-ME maybe? What happens when the ssn wraps? If we are getting
     * all the data in one stream this could happen quite rapidly. One
     * could use the TSN to keep track of things, but this scheme breaks
     * down in the other type of stream usage that could occur. Send a
     * single msg to stream 0, send 4 billion messages to stream 1, now
     * send a message to stream 0. You have a situation where the TSN
     * has wrapped but not in the stream. Is this worth worrying about,
     * or should we just change our queue sort at the bottom to be by
     * TSN?
     *
     * Could it also be legal for a peer to send ssn 1 with TSN 2 and
     * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
     * assignment this could happen... and I don't see how this would be
     * a violation. So for now I am undecided and will leave the sort by
     * SSN alone. Maybe a hybrid approach is the answer.
     */
    struct sctp_queued_to_read *at;
    int queue_needed;
    uint32_t nxt_todel;
    struct mbuf *op_err;
    struct sctp_stream_in *strm;
    char msg[SCTP_DIAG_INFO_LEN];

    strm = &asoc->strmin[control->sinfo_stream];
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
        sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
    }
    if (SCTP_MID_GT(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
        /* The incoming sseq is behind where we last delivered? */
        SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
            strm->last_mid_delivered, control->mid);
        /*
         * throw it in the stream so it gets cleaned up in
         * association destruction
         */
        TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
        if (asoc->idata_supported) {
            snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
                strm->last_mid_delivered, control->sinfo_tsn,
                control->sinfo_stream, control->mid);
        } else {
            snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
                (uint16_t)strm->last_mid_delivered,
                control->sinfo_tsn,
                control->sinfo_stream,
                (uint16_t)control->mid);
        }
        op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
        sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
        *abort_flag = 1;
        return;
    }
    queue_needed = 1;
    asoc->size_on_all_streams += control->length;
    sctp_ucount_incr(asoc->cnt_on_all_streams);
    nxt_todel = strm->last_mid_delivered + 1;
    if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
        struct socket *so;

        so = SCTP_INP_SO(stcb->sctp_ep);
        atomic_add_int(&stcb->asoc.refcnt, 1);
        SCTP_TCB_UNLOCK(stcb);
        SCTP_SOCKET_LOCK(so, 1);
        SCTP_TCB_LOCK(stcb);
        atomic_subtract_int(&stcb->asoc.refcnt, 1);
        if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
            SCTP_SOCKET_UNLOCK(so, 1);
            return;
        }
#endif
        /* can it be delivered right away? */
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
            sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
        }
        /* it won't be queued if it can be delivered directly */
        queue_needed = 0;
        if (asoc->size_on_all_streams >= control->length) {
            asoc->size_on_all_streams -= control->length;
        } else {
#ifdef INVARIANTS
            panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
            asoc->size_on_all_streams = 0;
#endif
        }
        sctp_ucount_decr(asoc->cnt_on_all_streams);
        strm->last_mid_delivered++;
        sctp_mark_non_revokable(asoc, control->sinfo_tsn);
        sctp_add_to_readq(stcb->sctp_ep, stcb,
            control,
            &stcb->sctp_socket->so_rcv, 1,
            SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
        TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
            /* all delivered */
            nxt_todel = strm->last_mid_delivered + 1;
            if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
                (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
                if (control->on_strm_q == SCTP_ON_ORDERED) {
                    TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
                    if (asoc->size_on_all_streams >= control->length) {
                        asoc->size_on_all_streams -= control->length;
                    } else {
#ifdef INVARIANTS
                        panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
                        asoc->size_on_all_streams = 0;
#endif
                    }
                    sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
                } else {
                    panic("Huh control: %p is on_strm_q: %d",
                        control, control->on_strm_q);
#endif
                }
                control->on_strm_q = 0;
                strm->last_mid_delivered++;
                /*
                 * We ignore the return of deliver_data here
                 * since we always can hold the chunk on the
                 * d-queue. And we have a finite number that
                 * can be delivered from the strq.
                 */
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
                    sctp_log_strm_del(control, NULL,
                        SCTP_STR_LOG_FROM_IMMED_DEL);
                }
                sctp_mark_non_revokable(asoc, control->sinfo_tsn);
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, 1,
                    SCTP_READ_LOCK_NOT_HELD,
                    SCTP_SO_LOCKED);
                continue;
            } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
                *need_reasm = 1;
            }
            break;
        }
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
        SCTP_SOCKET_UNLOCK(so, 1);
#endif
    }
    if (queue_needed) {
        /*
         * Ok, we did not deliver this guy, find the correct place
         * to put it on the queue.
         */
        if (sctp_place_control_in_stream(strm, asoc, control)) {
            snprintf(msg, sizeof(msg),
                "Queue to str MID: %u duplicate", control->mid);
            sctp_clean_up_control(stcb, control);
            op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
            stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
            sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
            *abort_flag = 1;
        }
    }
}
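/*
 * The delivery decision above reduces to one increment: a message is
 * pushed straight to the socket when its MID equals last_mid_delivered + 1
 * (in serial arithmetic), and every in-order successor already sitting
 * complete on the stream queue is then drained by the TAILQ_FOREACH_SAFE
 * pass. A hypothetical trace: last_mid_delivered = 6 with complete
 * messages 8 and 9 queued; MID 7 arriving complete delivers 7, 8 and 9 in
 * one pass, leaving last_mid_delivered = 9.
 */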
static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
    struct mbuf *m, *prev = NULL;
    struct sctp_tcb *stcb;

    stcb = control->stcb;
    control->held_length = 0;
    control->length = 0;
    m = control->data;
    while (m) {
        if (SCTP_BUF_LEN(m) == 0) {
            /* Skip mbufs with NO length */
            if (prev == NULL) {
                /* First one */
                control->data = sctp_m_free(m);
                m = control->data;
            } else {
                SCTP_BUF_NEXT(prev) = sctp_m_free(m);
                m = SCTP_BUF_NEXT(prev);
            }
            if (m == NULL) {
                control->tail_mbuf = prev;
            }
            continue;
        }
        prev = m;
        atomic_add_int(&control->length, SCTP_BUF_LEN(m));
        if (control->on_read_q) {
            /*
             * On the read queue, so we must increment the SB
             * accounting; we assume the caller has done any
             * locking of the SB.
             */
            sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
        }
        m = SCTP_BUF_NEXT(m);
    }
    if (prev) {
        control->tail_mbuf = prev;
    }
}

static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
{
    struct mbuf *prev = NULL;
    struct sctp_tcb *stcb;

    stcb = control->stcb;
    if (stcb == NULL) {
#ifdef INVARIANTS
        panic("Control broken");
#else
        return;
#endif
    }
    if (control->tail_mbuf == NULL) {
        /* TSNH */
        control->data = m;
        sctp_setup_tail_pointer(control);
        return;
    }
    control->tail_mbuf->m_next = m;
    while (m) {
        if (SCTP_BUF_LEN(m) == 0) {
            /* Skip mbufs with NO length */
            if (prev == NULL) {
                /* First one */
                control->tail_mbuf->m_next = sctp_m_free(m);
                m = control->tail_mbuf->m_next;
            } else {
                SCTP_BUF_NEXT(prev) = sctp_m_free(m);
                m = SCTP_BUF_NEXT(prev);
            }
            if (m == NULL) {
                control->tail_mbuf = prev;
            }
            continue;
        }
        prev = m;
        if (control->on_read_q) {
            /*
             * On the read queue, so we must increment the SB
             * accounting; we assume the caller has done any
             * locking of the SB.
             */
            sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
        }
        *added += SCTP_BUF_LEN(m);
        atomic_add_int(&control->length, SCTP_BUF_LEN(m));
        m = SCTP_BUF_NEXT(m);
    }
    if (prev) {
        control->tail_mbuf = prev;
    }
}
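/*
 * Both helpers above keep three things consistent while walking the mbuf
 * chain: control->length (the sum of SCTP_BUF_LEN() over the chain),
 * control->tail_mbuf (the last non-empty mbuf, so appends stay O(1)), and
 * the socket-buffer accounting via sctp_sballoc() when the control is
 * already visible to the reader. Zero-length mbufs are freed on the spot
 * so the tail pointer never lands on an empty buffer.
 */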
static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
    memset(nc, 0, sizeof(struct sctp_queued_to_read));
    nc->sinfo_stream = control->sinfo_stream;
    nc->mid = control->mid;
    TAILQ_INIT(&nc->reasm);
    nc->top_fsn = control->top_fsn;
    nc->sinfo_flags = control->sinfo_flags;
    nc->sinfo_ppid = control->sinfo_ppid;
    nc->sinfo_context = control->sinfo_context;
    nc->fsn_included = 0xffffffff;
    nc->sinfo_tsn = control->sinfo_tsn;
    nc->sinfo_cumtsn = control->sinfo_cumtsn;
    nc->sinfo_assoc_id = control->sinfo_assoc_id;
    nc->whoFrom = control->whoFrom;
    atomic_add_int(&nc->whoFrom->ref_count, 1);
    nc->stcb = control->stcb;
    nc->port_from = control->port_from;
}

static void
sctp_reset_a_control(struct sctp_queued_to_read *control,
    struct sctp_inpcb *inp, uint32_t tsn)
{
    control->fsn_included = tsn;
    if (control->on_read_q) {
        /*
         * We have to purge it from there, hopefully this will work
         * :-)
         */
        TAILQ_REMOVE(&inp->read_queue, control, next);
        control->on_read_q = 0;
    }
}

static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
    /*
     * Special handling for the old unordered data chunk. All the
     * chunks/TSNs go to mid 0. So we have to do the old-style watching
     * to see if we have it all. If you return one, no other control
     * entries on the unordered queue will be looked at. In theory
     * there should be no other entries in reality, unless the guy is
     * sending both unordered NDATA and unordered DATA...
     */
    struct sctp_tmit_chunk *chk, *lchk, *tchk;
    uint32_t fsn;
    struct sctp_queued_to_read *nc;
    int cnt_added;

    if (control->first_frag_seen == 0) {
        /* Nothing we can do, we have not seen the first piece yet */
        return (1);
    }
    /* Collapse any we can */
    cnt_added = 0;
restart:
    fsn = control->fsn_included + 1;
    /* Now what can we add? */
    TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
        if (chk->rec.data.fsn == fsn) {
            /* Ok lets add it */
            sctp_alloc_a_readq(stcb, nc);
            if (nc == NULL) {
                break;
            }
            memset(nc, 0, sizeof(struct sctp_queued_to_read));
            TAILQ_REMOVE(&control->reasm, chk, sctp_next);
            sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
            fsn++;
            cnt_added++;
            chk = NULL;
            if (control->end_added) {
                /* We are done */
                if (!TAILQ_EMPTY(&control->reasm)) {
                    /*
                     * Ok we have to move anything left
                     * on the control queue to a new
                     * control.
                     */
                    sctp_build_readq_entry_from_ctl(nc, control);
                    tchk = TAILQ_FIRST(&control->reasm);
                    if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
                        TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
                        if (asoc->size_on_reasm_queue >= tchk->send_size) {
                            asoc->size_on_reasm_queue -= tchk->send_size;
                        } else {
#ifdef INVARIANTS
                            panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
                            asoc->size_on_reasm_queue = 0;
#endif
                        }
                        sctp_ucount_decr(asoc->cnt_on_reasm_queue);
                        nc->first_frag_seen = 1;
                        nc->fsn_included = tchk->rec.data.fsn;
                        nc->data = tchk->data;
                        nc->sinfo_ppid = tchk->rec.data.ppid;
                        nc->sinfo_tsn = tchk->rec.data.tsn;
                        sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
                        tchk->data = NULL;
                        sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
                        sctp_setup_tail_pointer(nc);
                        tchk = TAILQ_FIRST(&control->reasm);
                    }
                    /* Spin the rest onto the queue */
                    while (tchk) {
                        TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
                        TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
                        tchk = TAILQ_FIRST(&control->reasm);
                    }
                    /*
                     * Now lets add it to the queue
                     * after removing control
                     */
                    TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
                    nc->on_strm_q = SCTP_ON_UNORDERED;
                    if (control->on_strm_q) {
                        TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
                        control->on_strm_q = 0;
                    }
                }
                if (control->pdapi_started) {
                    strm->pd_api_started = 0;
                    control->pdapi_started = 0;
                }
                if (control->on_strm_q) {
                    TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
                    control->on_strm_q = 0;
                    SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
                }
                if (control->on_read_q == 0) {
                    sctp_add_to_readq(stcb->sctp_ep, stcb, control,
                        &stcb->sctp_socket->so_rcv, control->end_added,
                        inp_read_lock_held, SCTP_SO_NOT_LOCKED);
                }
                sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
                if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
                    /*
                     * Switch to the new guy and
                     * continue
                     */
                    control = nc;
                    goto restart;
                } else {
                    if (nc->on_strm_q == 0) {
                        sctp_free_a_readq(stcb, nc);
                    }
                }
                return (1);
            } else {
                sctp_free_a_readq(stcb, nc);
            }
        } else {
            /* Can't add more */
            break;
        }
    }
    if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
        strm->pd_api_started = 1;
        control->pdapi_started = 1;
        sctp_add_to_readq(stcb->sctp_ep, stcb, control,
            &stcb->sctp_socket->so_rcv, control->end_added,
            inp_read_lock_held, SCTP_SO_NOT_LOCKED);
        sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
        return (0);
    } else {
        return (1);
    }
}
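/*
 * A worked example of the pd_point test above; the values are hypothetical.
 * With a 64 KB receive buffer and SCTP_PARTIAL_DELIVERY_SHIFT taken to be
 * 3 for illustration, 64K >> 3 = 8 KB, and with partial_delivery_point
 * set to 4 KB the min() in sctp_deliver_reasm_check() makes pd_point 4 KB.
 * An unordered message with 5 KB reassembled but no terminal fragment yet
 * therefore crosses pd_point and starts the partial delivery API, handing
 * the reader the 5 KB early instead of holding it until the message
 * completes.
 */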
static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
    struct sctp_tmit_chunk *at;
    int inserted;

    /*
     * Here we need to place the chunk into the control structure sorted
     * in the correct order.
     */
    if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
        /* It's the very first one. */
        SCTPDBG(SCTP_DEBUG_XXX,
            "chunk is a first fsn: %u becomes fsn_included\n",
            chk->rec.data.fsn);
        if (control->first_frag_seen) {
            /*
             * With old unordered data we can reassemble
             * multiple messages on one control, as long as
             * the next FIRST is greater than the old first
             * (TSN, i.e. FSN, wise).
             */
            struct mbuf *tdata;
            uint32_t tmp;

            if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
                /*
                 * Easy way: the start of a new guy beyond
                 * the lowest
                 */
                goto place_chunk;
            }
            if ((chk->rec.data.fsn == control->fsn_included) ||
                (control->pdapi_started)) {
                /*
                 * Ok this should not happen; if it does we
                 * started the pd-api on the higher TSN
                 * (since the equals part is a TSN failure
                 * it must be that).
                 *
                 * We are completely hosed in that case since
                 * I have no way to recover. This really
                 * will only happen if we can get more TSNs
                 * higher before the pd-api-point.
                 */
                sctp_abort_in_reasm(stcb, control, chk,
                    abort_flag,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
                return;
            }
            /*
             * Ok we have two firsts and the one we just got is
             * smaller than the one we previously placed.. yuck!
             * We must swap them out.
             */
            /* swap the mbufs */
            tdata = control->data;
            control->data = chk->data;
            chk->data = tdata;
            /* Save the lengths */
            chk->send_size = control->length;
            /* Recompute length of control and tail pointer */
            sctp_setup_tail_pointer(control);
            /* Fix the FSN included */
            tmp = control->fsn_included;
            control->fsn_included = chk->rec.data.fsn;
            chk->rec.data.fsn = tmp;
            /* Fix the TSN included */
            tmp = control->sinfo_tsn;
            control->sinfo_tsn = chk->rec.data.tsn;
            chk->rec.data.tsn = tmp;
            /* Fix the PPID included */
            tmp = control->sinfo_ppid;
            control->sinfo_ppid = chk->rec.data.ppid;
            chk->rec.data.ppid = tmp;
            /* Fix tail pointer */
            goto place_chunk;
        }
        control->first_frag_seen = 1;
        control->fsn_included = chk->rec.data.fsn;
        control->top_fsn = chk->rec.data.fsn;
        control->sinfo_tsn = chk->rec.data.tsn;
        control->sinfo_ppid = chk->rec.data.ppid;
        control->data = chk->data;
        sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
        chk->data = NULL;
        sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
        sctp_setup_tail_pointer(control);
        return;
    }
place_chunk:
    inserted = 0;
    TAILQ_FOREACH(at, &control->reasm, sctp_next) {
        if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
            /*
             * This one in queue is bigger than the new one,
             * insert the new one before at.
             */
            asoc->size_on_reasm_queue += chk->send_size;
            sctp_ucount_incr(asoc->cnt_on_reasm_queue);
            inserted = 1;
            TAILQ_INSERT_BEFORE(at, chk, sctp_next);
            break;
        } else if (at->rec.data.fsn == chk->rec.data.fsn) {
            /*
             * They sent a duplicate fsn number. This really
             * should not happen since the FSN is a TSN and it
             * should have been dropped earlier.
             */
            sctp_abort_in_reasm(stcb, control, chk,
                abort_flag,
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
            return;
        }
    }
    if (inserted == 0) {
        /* It's at the end */
        asoc->size_on_reasm_queue += chk->send_size;
        sctp_ucount_incr(asoc->cnt_on_reasm_queue);
        control->top_fsn = chk->rec.data.fsn;
        TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
    }
}
static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm, int inp_read_lock_held)
{
    /*
     * Given a stream, strm, see if any of the SSNs on it that are
     * fragmented are ready to deliver. If so, go ahead and place them
     * on the read queue. In so placing, if we have hit the end, then
     * we need to remove them from the stream's queue.
     */
    struct sctp_queued_to_read *control, *nctl = NULL;
    uint32_t next_to_del;
    uint32_t pd_point;
    int ret = 0;

    if (stcb->sctp_socket) {
        pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
            stcb->sctp_ep->partial_delivery_point);
    } else {
        pd_point = stcb->sctp_ep->partial_delivery_point;
    }
    control = TAILQ_FIRST(&strm->uno_inqueue);

    if ((control != NULL) &&
        (asoc->idata_supported == 0)) {
        /* Special handling needed for "old" data format */
        if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
            goto done_un;
        }
    }
    if (strm->pd_api_started) {
        /* Can't add more */
        return (0);
    }
    while (control) {
        SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
            control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
        nctl = TAILQ_NEXT(control, next_instrm);
        if (control->end_added) {
            /* We just put the last bit on */
            if (control->on_strm_q) {
#ifdef INVARIANTS
                if (control->on_strm_q != SCTP_ON_UNORDERED) {
                    panic("Huh control: %p on_q: %d -- not unordered?",
                        control, control->on_strm_q);
                }
#endif
                SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
                TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
                control->on_strm_q = 0;
            }
            if (control->on_read_q == 0) {
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, control->end_added,
                    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
            }
        } else {
            /* Can we do a PD-API for this unordered guy? */
            if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
                strm->pd_api_started = 1;
                control->pdapi_started = 1;
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, control->end_added,
                    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
                break;
            }
        }
        control = nctl;
    }
done_un:
    control = TAILQ_FIRST(&strm->inqueue);
    if (strm->pd_api_started) {
        /* Can't add more */
        return (0);
    }
    if (control == NULL) {
        return (ret);
    }
    if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
        /*
         * Ok, the guy at the top was being partially delivered and
         * completed, so we remove it. Note the pd_api flag was
         * taken off when the chunk was merged in by
         * sctp_queue_data_for_reasm() below.
         */
        nctl = TAILQ_NEXT(control, next_instrm);
        SCTPDBG(SCTP_DEBUG_XXX,
            "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
            control, control->end_added, control->mid,
            control->top_fsn, control->fsn_included,
            strm->last_mid_delivered);
        if (control->end_added) {
            if (control->on_strm_q) {
#ifdef INVARIANTS
                if (control->on_strm_q != SCTP_ON_ORDERED) {
                    panic("Huh control: %p on_q: %d -- not ordered?",
                        control, control->on_strm_q);
                }
#endif
                SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
                TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
                if (asoc->size_on_all_streams >= control->length) {
                    asoc->size_on_all_streams -= control->length;
                } else {
#ifdef INVARIANTS
                    panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
                    asoc->size_on_all_streams = 0;
#endif
                }
                sctp_ucount_decr(asoc->cnt_on_all_streams);
                control->on_strm_q = 0;
            }
            if (strm->pd_api_started && control->pdapi_started) {
                control->pdapi_started = 0;
                strm->pd_api_started = 0;
            }
            if (control->on_read_q == 0) {
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, control->end_added,
                    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
            }
            control = nctl;
        }
    }
    if (strm->pd_api_started) {
        /*
         * Can't add more; we must have gotten an unordered one
         * above that is being partially delivered.
         */
        return (0);
    }
deliver_more:
    next_to_del = strm->last_mid_delivered + 1;
    if (control) {
        SCTPDBG(SCTP_DEBUG_XXX,
            "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
            control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
            next_to_del);
        nctl = TAILQ_NEXT(control, next_instrm);
        if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
            (control->first_frag_seen)) {
            int done;
            /* Ok we can deliver it onto the stream. */
            if (control->end_added) {
                /* We are done with it afterwards */
                if (control->on_strm_q) {
#ifdef INVARIANTS
                    if (control->on_strm_q != SCTP_ON_ORDERED) {
                        panic("Huh control: %p on_q: %d -- not ordered?",
                            control, control->on_strm_q);
                    }
#endif
                    SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
                    TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
                    if (asoc->size_on_all_streams >= control->length) {
                        asoc->size_on_all_streams -= control->length;
                    } else {
#ifdef INVARIANTS
                        panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
                        asoc->size_on_all_streams = 0;
#endif
                    }
                    sctp_ucount_decr(asoc->cnt_on_all_streams);
                    control->on_strm_q = 0;
                }
                ret++;
            }
            if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
                /*
                 * A singleton now slipping through - mark
                 * it non-revokable too
                 */
                sctp_mark_non_revokable(asoc, control->sinfo_tsn);
            } else if (control->end_added == 0) {
                /*
                 * Check if we can defer adding until it is
                 * all there
                 */
                if ((control->length < pd_point) || (strm->pd_api_started)) {
                    /*
                     * Don't need it, or cannot add more
                     * (one being delivered that way)
                     */
                    goto out;
                }
            }
            done = (control->end_added) && (control->last_frag_seen);
            if (control->on_read_q == 0) {
                if (!done) {
                    if (asoc->size_on_all_streams >= control->length) {
                        asoc->size_on_all_streams -= control->length;
                    } else {
#ifdef INVARIANTS
                        panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
                        asoc->size_on_all_streams = 0;
#endif
                    }
                    strm->pd_api_started = 1;
                    control->pdapi_started = 1;
                }
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, control->end_added,
                    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
            }
            strm->last_mid_delivered = next_to_del;
            if (done) {
                control = nctl;
                goto deliver_more;
            }
        }
    }
out:
    return (ret);
}

uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock)
{
    /*
     * Given a control and a chunk, merge the data from the chk onto the
     * control and free up the chunk resources.
     */
    uint32_t added = 0;
    int i_locked = 0;

    if (control->on_read_q && (hold_rlock == 0)) {
        /*
         * It's being pd-api'd, so we must do some locking.
         */
        SCTP_INP_READ_LOCK(stcb->sctp_ep);
        i_locked = 1;
    }
    if (control->data == NULL) {
        control->data = chk->data;
        sctp_setup_tail_pointer(control);
    } else {
        sctp_add_to_tail_pointer(control, chk->data, &added);
    }
    control->fsn_included = chk->rec.data.fsn;
    asoc->size_on_reasm_queue -= chk->send_size;
    sctp_ucount_decr(asoc->cnt_on_reasm_queue);
    sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
    chk->data = NULL;
    if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
        control->first_frag_seen = 1;
        control->sinfo_tsn = chk->rec.data.tsn;
        control->sinfo_ppid = chk->rec.data.ppid;
    }
    if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
        /* It's complete */
        if ((control->on_strm_q) && (control->on_read_q)) {
            if (control->pdapi_started) {
                control->pdapi_started = 0;
                strm->pd_api_started = 0;
            }
            if (control->on_strm_q == SCTP_ON_UNORDERED) {
                /* Unordered */
                TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
                control->on_strm_q = 0;
            } else if (control->on_strm_q == SCTP_ON_ORDERED) {
                /* Ordered */
                TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
                /*
                 * Don't need to decrement
                 * size_on_all_streams, since control is on
                 * the read queue.
                 */
                sctp_ucount_decr(asoc->cnt_on_all_streams);
                control->on_strm_q = 0;
#ifdef INVARIANTS
            } else if (control->on_strm_q) {
                panic("Unknown state on ctrl: %p on_strm_q: %d", control,
                    control->on_strm_q);
#endif
            }
        }
        control->end_added = 1;
        control->last_frag_seen = 1;
    }
    if (i_locked) {
        SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
    }
    sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
    return (added);
}
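/*
 * The merge above moves bytes between two accounting domains: the chunk
 * leaves the reassembly queue (size_on_reasm_queue and cnt_on_reasm_queue
 * drop) and its data is spliced onto the control, so a control already on
 * the read queue charges the socket buffer instead (via sctp_sballoc() in
 * sctp_add_to_tail_pointer()). The returned byte count is what
 * sctp_queue_data_for_reasm() below adds back to size_on_all_streams when
 * the control is still stream-queued.
 */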
1298 */ 1299 SCTP_INP_READ_LOCK(stcb->sctp_ep); 1300 i_locked = 1; 1301 } 1302 if (control->data == NULL) { 1303 control->data = chk->data; 1304 sctp_setup_tail_pointer(control); 1305 } else { 1306 sctp_add_to_tail_pointer(control, chk->data, &added); 1307 } 1308 control->fsn_included = chk->rec.data.fsn; 1309 asoc->size_on_reasm_queue -= chk->send_size; 1310 sctp_ucount_decr(asoc->cnt_on_reasm_queue); 1311 sctp_mark_non_revokable(asoc, chk->rec.data.tsn); 1312 chk->data = NULL; 1313 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 1314 control->first_frag_seen = 1; 1315 control->sinfo_tsn = chk->rec.data.tsn; 1316 control->sinfo_ppid = chk->rec.data.ppid; 1317 } 1318 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 1319 /* Its complete */ 1320 if ((control->on_strm_q) && (control->on_read_q)) { 1321 if (control->pdapi_started) { 1322 control->pdapi_started = 0; 1323 strm->pd_api_started = 0; 1324 } 1325 if (control->on_strm_q == SCTP_ON_UNORDERED) { 1326 /* Unordered */ 1327 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); 1328 control->on_strm_q = 0; 1329 } else if (control->on_strm_q == SCTP_ON_ORDERED) { 1330 /* Ordered */ 1331 TAILQ_REMOVE(&strm->inqueue, control, next_instrm); 1332 /* 1333 * Don't need to decrement 1334 * size_on_all_streams, since control is on 1335 * the read queue. 1336 */ 1337 sctp_ucount_decr(asoc->cnt_on_all_streams); 1338 control->on_strm_q = 0; 1339 #ifdef INVARIANTS 1340 } else if (control->on_strm_q) { 1341 panic("Unknown state on ctrl: %p on_strm_q: %d", control, 1342 control->on_strm_q); 1343 #endif 1344 } 1345 } 1346 control->end_added = 1; 1347 control->last_frag_seen = 1; 1348 } 1349 if (i_locked) { 1350 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 1351 } 1352 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 1353 return (added); 1354 } 1355 1356 /* 1357 * Dump onto the re-assembly queue, in its proper place. After dumping on the 1358 * queue, see if anthing can be delivered. If so pull it off (or as much as 1359 * we can. If we run out of space then we must dump what we can and set the 1360 * appropriate flag to say we queued what we could. 1361 */ 1362 static void 1363 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc, 1364 struct sctp_queued_to_read *control, 1365 struct sctp_tmit_chunk *chk, 1366 int created_control, 1367 int *abort_flag, uint32_t tsn) 1368 { 1369 uint32_t next_fsn; 1370 struct sctp_tmit_chunk *at, *nat; 1371 struct sctp_stream_in *strm; 1372 int do_wakeup, unordered; 1373 uint32_t lenadded; 1374 1375 strm = &asoc->strmin[control->sinfo_stream]; 1376 /* 1377 * For old un-ordered data chunks. 1378 */ 1379 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 1380 unordered = 1; 1381 } else { 1382 unordered = 0; 1383 } 1384 /* Must be added to the stream-in queue */ 1385 if (created_control) { 1386 if (unordered == 0) { 1387 sctp_ucount_incr(asoc->cnt_on_all_streams); 1388 } 1389 if (sctp_place_control_in_stream(strm, asoc, control)) { 1390 /* Duplicate SSN? */ 1391 sctp_abort_in_reasm(stcb, control, chk, 1392 abort_flag, 1393 SCTP_FROM_SCTP_INDATA + SCTP_LOC_6); 1394 sctp_clean_up_control(stcb, control); 1395 return; 1396 } 1397 if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) { 1398 /* 1399 * Ok we created this control and now lets validate 1400 * that its legal i.e. there is a B bit set, if not 1401 * and we have up to the cum-ack then its invalid. 
1402 */ 1403 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) { 1404 sctp_abort_in_reasm(stcb, control, chk, 1405 abort_flag, 1406 SCTP_FROM_SCTP_INDATA + SCTP_LOC_7); 1407 return; 1408 } 1409 } 1410 } 1411 if ((asoc->idata_supported == 0) && (unordered == 1)) { 1412 sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag); 1413 return; 1414 } 1415 /* 1416 * Ok we must queue the chunk into the reasembly portion: o if its 1417 * the first it goes to the control mbuf. o if its not first but the 1418 * next in sequence it goes to the control, and each succeeding one 1419 * in order also goes. o if its not in order we place it on the list 1420 * in its place. 1421 */ 1422 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 1423 /* Its the very first one. */ 1424 SCTPDBG(SCTP_DEBUG_XXX, 1425 "chunk is a first fsn: %u becomes fsn_included\n", 1426 chk->rec.data.fsn); 1427 if (control->first_frag_seen) { 1428 /* 1429 * Error on senders part, they either sent us two 1430 * data chunks with FIRST, or they sent two 1431 * un-ordered chunks that were fragmented at the 1432 * same time in the same stream. 1433 */ 1434 sctp_abort_in_reasm(stcb, control, chk, 1435 abort_flag, 1436 SCTP_FROM_SCTP_INDATA + SCTP_LOC_8); 1437 return; 1438 } 1439 control->first_frag_seen = 1; 1440 control->sinfo_ppid = chk->rec.data.ppid; 1441 control->sinfo_tsn = chk->rec.data.tsn; 1442 control->fsn_included = chk->rec.data.fsn; 1443 control->data = chk->data; 1444 sctp_mark_non_revokable(asoc, chk->rec.data.tsn); 1445 chk->data = NULL; 1446 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 1447 sctp_setup_tail_pointer(control); 1448 asoc->size_on_all_streams += control->length; 1449 } else { 1450 /* Place the chunk in our list */ 1451 int inserted = 0; 1452 1453 if (control->last_frag_seen == 0) { 1454 /* Still willing to raise highest FSN seen */ 1455 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) { 1456 SCTPDBG(SCTP_DEBUG_XXX, 1457 "We have a new top_fsn: %u\n", 1458 chk->rec.data.fsn); 1459 control->top_fsn = chk->rec.data.fsn; 1460 } 1461 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 1462 SCTPDBG(SCTP_DEBUG_XXX, 1463 "The last fsn is now in place fsn: %u\n", 1464 chk->rec.data.fsn); 1465 control->last_frag_seen = 1; 1466 } 1467 if (asoc->idata_supported || control->first_frag_seen) { 1468 /* 1469 * For IDATA we always check since we know 1470 * that the first fragment is 0. For old 1471 * DATA we have to receive the first before 1472 * we know the first FSN (which is the TSN). 1473 */ 1474 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) { 1475 /* 1476 * We have already delivered up to 1477 * this so its a dup 1478 */ 1479 sctp_abort_in_reasm(stcb, control, chk, 1480 abort_flag, 1481 SCTP_FROM_SCTP_INDATA + SCTP_LOC_9); 1482 return; 1483 } 1484 } 1485 } else { 1486 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 1487 /* Second last? huh? */ 1488 SCTPDBG(SCTP_DEBUG_XXX, 1489 "Duplicate last fsn: %u (top: %u) -- abort\n", 1490 chk->rec.data.fsn, control->top_fsn); 1491 sctp_abort_in_reasm(stcb, control, 1492 chk, abort_flag, 1493 SCTP_FROM_SCTP_INDATA + SCTP_LOC_10); 1494 return; 1495 } 1496 if (asoc->idata_supported || control->first_frag_seen) { 1497 /* 1498 * For IDATA we always check since we know 1499 * that the first fragment is 0. For old 1500 * DATA we have to receive the first before 1501 * we know the first FSN (which is the TSN). 
1502 */ 1503 1504 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) { 1505 /* 1506 * We have already delivered up to 1507 * this so its a dup 1508 */ 1509 SCTPDBG(SCTP_DEBUG_XXX, 1510 "New fsn: %u is already seen in included_fsn: %u -- abort\n", 1511 chk->rec.data.fsn, control->fsn_included); 1512 sctp_abort_in_reasm(stcb, control, chk, 1513 abort_flag, 1514 SCTP_FROM_SCTP_INDATA + SCTP_LOC_11); 1515 return; 1516 } 1517 } 1518 /* 1519 * validate not beyond top FSN if we have seen last 1520 * one 1521 */ 1522 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) { 1523 SCTPDBG(SCTP_DEBUG_XXX, 1524 "New fsn: %u is beyond or at top_fsn: %u -- abort\n", 1525 chk->rec.data.fsn, 1526 control->top_fsn); 1527 sctp_abort_in_reasm(stcb, control, chk, 1528 abort_flag, 1529 SCTP_FROM_SCTP_INDATA + SCTP_LOC_12); 1530 return; 1531 } 1532 } 1533 /* 1534 * If we reach here, we need to place the new chunk in the 1535 * reassembly for this control. 1536 */ 1537 SCTPDBG(SCTP_DEBUG_XXX, 1538 "chunk is a not first fsn: %u needs to be inserted\n", 1539 chk->rec.data.fsn); 1540 TAILQ_FOREACH(at, &control->reasm, sctp_next) { 1541 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) { 1542 /* 1543 * This one in queue is bigger than the new 1544 * one, insert the new one before at. 1545 */ 1546 SCTPDBG(SCTP_DEBUG_XXX, 1547 "Insert it before fsn: %u\n", 1548 at->rec.data.fsn); 1549 asoc->size_on_reasm_queue += chk->send_size; 1550 sctp_ucount_incr(asoc->cnt_on_reasm_queue); 1551 TAILQ_INSERT_BEFORE(at, chk, sctp_next); 1552 inserted = 1; 1553 break; 1554 } else if (at->rec.data.fsn == chk->rec.data.fsn) { 1555 /* 1556 * Gak, He sent me a duplicate str seq 1557 * number 1558 */ 1559 /* 1560 * foo bar, I guess I will just free this 1561 * new guy, should we abort too? FIX ME 1562 * MAYBE? Or it COULD be that the SSN's have 1563 * wrapped. Maybe I should compare to TSN 1564 * somehow... sigh for now just blow away 1565 * the chunk! 1566 */ 1567 SCTPDBG(SCTP_DEBUG_XXX, 1568 "Duplicate to fsn: %u -- abort\n", 1569 at->rec.data.fsn); 1570 sctp_abort_in_reasm(stcb, control, 1571 chk, abort_flag, 1572 SCTP_FROM_SCTP_INDATA + SCTP_LOC_13); 1573 return; 1574 } 1575 } 1576 if (inserted == 0) { 1577 /* Goes on the end */ 1578 SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n", 1579 chk->rec.data.fsn); 1580 asoc->size_on_reasm_queue += chk->send_size; 1581 sctp_ucount_incr(asoc->cnt_on_reasm_queue); 1582 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next); 1583 } 1584 } 1585 /* 1586 * Ok lets see if we can suck any up into the control structure that 1587 * are in seq if it makes sense. 1588 */ 1589 do_wakeup = 0; 1590 /* 1591 * If the first fragment has not been seen there is no sense in 1592 * looking. 1593 */ 1594 if (control->first_frag_seen) { 1595 next_fsn = control->fsn_included + 1; 1596 TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) { 1597 if (at->rec.data.fsn == next_fsn) { 1598 /* We can add this one now to the control */ 1599 SCTPDBG(SCTP_DEBUG_XXX, 1600 "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n", 1601 control, at, 1602 at->rec.data.fsn, 1603 next_fsn, control->fsn_included); 1604 TAILQ_REMOVE(&control->reasm, at, sctp_next); 1605 lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD); 1606 if (control->on_read_q) { 1607 do_wakeup = 1; 1608 } else { 1609 /* 1610 * We only add to the 1611 * size-on-all-streams if its not on 1612 * the read q. The read q flag will 1613 * cause a sballoc so its accounted 1614 * for there. 
1615 */ 1616 asoc->size_on_all_streams += lenadded; 1617 } 1618 next_fsn++; 1619 if (control->end_added && control->pdapi_started) { 1620 if (strm->pd_api_started) { 1621 strm->pd_api_started = 0; 1622 control->pdapi_started = 0; 1623 } 1624 if (control->on_read_q == 0) { 1625 sctp_add_to_readq(stcb->sctp_ep, stcb, 1626 control, 1627 &stcb->sctp_socket->so_rcv, control->end_added, 1628 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 1629 } 1630 break; 1631 } 1632 } else { 1633 break; 1634 } 1635 } 1636 } 1637 if (do_wakeup) { 1638 /* Need to wakeup the reader */ 1639 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 1640 } 1641 } 1642 1643 static struct sctp_queued_to_read * 1644 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported) 1645 { 1646 struct sctp_queued_to_read *control; 1647 1648 if (ordered) { 1649 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) { 1650 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) { 1651 break; 1652 } 1653 } 1654 } else { 1655 if (idata_supported) { 1656 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) { 1657 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) { 1658 break; 1659 } 1660 } 1661 } else { 1662 control = TAILQ_FIRST(&strm->uno_inqueue); 1663 } 1664 } 1665 return (control); 1666 } 1667 1668 static int 1669 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc, 1670 struct mbuf **m, int offset, int chk_length, 1671 struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag, 1672 int *break_flag, int last_chunk, uint8_t chk_type) 1673 { 1674 /* Process a data chunk */ 1675 /* struct sctp_tmit_chunk *chk; */ 1676 struct sctp_tmit_chunk *chk; 1677 uint32_t tsn, fsn, gap, mid; 1678 struct mbuf *dmbuf; 1679 int the_len; 1680 int need_reasm_check = 0; 1681 uint16_t sid; 1682 struct mbuf *op_err; 1683 char msg[SCTP_DIAG_INFO_LEN]; 1684 struct sctp_queued_to_read *control, *ncontrol; 1685 uint32_t ppid; 1686 uint8_t chk_flags; 1687 struct sctp_stream_reset_list *liste; 1688 int ordered; 1689 size_t clen; 1690 int created_control = 0; 1691 1692 if (chk_type == SCTP_IDATA) { 1693 struct sctp_idata_chunk *chunk, chunk_buf; 1694 1695 chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset, 1696 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf); 1697 chk_flags = chunk->ch.chunk_flags; 1698 clen = sizeof(struct sctp_idata_chunk); 1699 tsn = ntohl(chunk->dp.tsn); 1700 sid = ntohs(chunk->dp.sid); 1701 mid = ntohl(chunk->dp.mid); 1702 if (chk_flags & SCTP_DATA_FIRST_FRAG) { 1703 fsn = 0; 1704 ppid = chunk->dp.ppid_fsn.ppid; 1705 } else { 1706 fsn = ntohl(chunk->dp.ppid_fsn.fsn); 1707 ppid = 0xffffffff; /* Use as an invalid value. */ 1708 } 1709 } else { 1710 struct sctp_data_chunk *chunk, chunk_buf; 1711 1712 chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset, 1713 sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf); 1714 chk_flags = chunk->ch.chunk_flags; 1715 clen = sizeof(struct sctp_data_chunk); 1716 tsn = ntohl(chunk->dp.tsn); 1717 sid = ntohs(chunk->dp.sid); 1718 mid = (uint32_t)(ntohs(chunk->dp.ssn)); 1719 fsn = tsn; 1720 ppid = chunk->dp.ppid; 1721 } 1722 if ((size_t)chk_length == clen) { 1723 /* 1724 * Need to send an abort since we had a empty data chunk. 
1725 */ 1726 op_err = sctp_generate_no_user_data_cause(tsn); 1727 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14; 1728 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 1729 *abort_flag = 1; 1730 return (0); 1731 } 1732 if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) { 1733 asoc->send_sack = 1; 1734 } 1735 ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0); 1736 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 1737 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS); 1738 } 1739 if (stcb == NULL) { 1740 return (0); 1741 } 1742 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn); 1743 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) { 1744 /* It is a duplicate */ 1745 SCTP_STAT_INCR(sctps_recvdupdata); 1746 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { 1747 /* Record a dup for the next outbound sack */ 1748 asoc->dup_tsns[asoc->numduptsns] = tsn; 1749 asoc->numduptsns++; 1750 } 1751 asoc->send_sack = 1; 1752 return (0); 1753 } 1754 /* Calculate the number of TSN's between the base and this TSN */ 1755 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn); 1756 if (gap >= (SCTP_MAPPING_ARRAY << 3)) { 1757 /* Can't hold the bit in the mapping at max array, toss it */ 1758 return (0); 1759 } 1760 if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) { 1761 SCTP_TCB_LOCK_ASSERT(stcb); 1762 if (sctp_expand_mapping_array(asoc, gap)) { 1763 /* Can't expand, drop it */ 1764 return (0); 1765 } 1766 } 1767 if (SCTP_TSN_GT(tsn, *high_tsn)) { 1768 *high_tsn = tsn; 1769 } 1770 /* See if we have received this one already */ 1771 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) || 1772 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) { 1773 SCTP_STAT_INCR(sctps_recvdupdata); 1774 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { 1775 /* Record a dup for the next outbound sack */ 1776 asoc->dup_tsns[asoc->numduptsns] = tsn; 1777 asoc->numduptsns++; 1778 } 1779 asoc->send_sack = 1; 1780 return (0); 1781 } 1782 /* 1783 * Check to see about the GONE flag, duplicates would cause a sack 1784 * to be sent up above 1785 */ 1786 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 1787 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 1788 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) { 1789 /* 1790 * wait a minute, this guy is gone, there is no longer a 1791 * receiver. Send peer an ABORT! 1792 */ 1793 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 1794 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 1795 *abort_flag = 1; 1796 return (0); 1797 } 1798 /* 1799 * Now before going further we see if there is room. If NOT then we 1800 * MAY let one through only IF this TSN is the one we are waiting 1801 * for on a partial delivery API. 1802 */ 1803 1804 /* Is the stream valid? 
 */
    if (sid >= asoc->streamincnt) {
        struct sctp_error_invalid_stream *cause;

        op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
            0, M_NOWAIT, 1, MT_DATA);
        if (op_err != NULL) {
            /* add some space up front so prepend will work well */
            SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
            cause = mtod(op_err, struct sctp_error_invalid_stream *);
            /*
             * Error causes are just param's and this one has
             * two back-to-back phdrs, one with the error type
             * and size, the other with the stream id and a rsvd
             * field.
             */
            SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
            cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
            cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
            cause->stream_id = htons(sid);
            cause->reserved = htons(0);
            sctp_queue_op_err(stcb, op_err);
        }
        SCTP_STAT_INCR(sctps_badsid);
        SCTP_TCB_LOCK_ASSERT(stcb);
        SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
        if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
            asoc->highest_tsn_inside_nr_map = tsn;
        }
        if (tsn == (asoc->cumulative_tsn + 1)) {
            /* Update cum-ack */
            asoc->cumulative_tsn = tsn;
        }
        return (0);
    }
    /*
     * If it's a fragmented message, let's see if we can find the control
     * on the reassembly queues.
     */
    if ((chk_type == SCTP_IDATA) &&
        ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
        (fsn == 0)) {
        /*
         * The first *must* be fsn 0, and other (middle/end) pieces
         * can *not* be fsn 0. XXX: This can happen in case of a
         * wrap around. Ignore it for now.
         */
        snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
            mid, chk_flags);
        goto err_out;
    }
    control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
    SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
        chk_flags, control);
    if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
        /* See if we can find the re-assembly entity */
        if (control != NULL) {
            /* We found something, does it belong? */
            if (ordered && (mid != control->mid)) {
                snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
        err_out:
                op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
                stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
                sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
                *abort_flag = 1;
                return (0);
            }
            if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
                /*
                 * We can't have a switched order with an
                 * unordered chunk
                 */
                snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
                    tsn);
                goto err_out;
            }
            if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
                /*
                 * We can't have a switched unordered with an
                 * ordered chunk
                 */
                snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
                    tsn);
                goto err_out;
            }
        }
    } else {
        /*
         * It's a complete segment. Let's validate we don't have a
         * re-assembly going on with the same Stream/Seq (for
         * ordered) or in the same Stream for unordered.
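         *
         * "Complete" here is read straight off the B/E bits of
         * chk_flags (values per the usual chunk-flag layout):
         *
         *   B (0x02)  E (0x01)  meaning
         *      1         1      single, self-contained message
         *                       (SCTP_DATA_NOT_FRAG)
         *      1         0      first fragment of a message
         *      0         0      middle fragment
         *      0         1      last fragment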
 */
        if (control != NULL) {
            if (ordered || asoc->idata_supported) {
                SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
                    chk_flags, mid);
                snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
                goto err_out;
            } else {
                if ((tsn == control->fsn_included + 1) &&
                    (control->end_added == 0)) {
                    snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
                    goto err_out;
                } else {
                    control = NULL;
                }
            }
        }
    }
    /* now do the tests */
    if (((asoc->cnt_on_all_streams +
        asoc->cnt_on_reasm_queue +
        asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
        (((int)asoc->my_rwnd) <= 0)) {
        /*
         * When we have NO room in the rwnd we check to make sure
         * the reader is doing its job...
         */
        if (stcb->sctp_socket->so_rcv.sb_cc) {
            /* some to read, wake-up */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
            struct socket *so;

            so = SCTP_INP_SO(stcb->sctp_ep);
            atomic_add_int(&stcb->asoc.refcnt, 1);
            SCTP_TCB_UNLOCK(stcb);
            SCTP_SOCKET_LOCK(so, 1);
            SCTP_TCB_LOCK(stcb);
            atomic_subtract_int(&stcb->asoc.refcnt, 1);
            if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
                /* assoc was freed while we were unlocked */
                SCTP_SOCKET_UNLOCK(so, 1);
                return (0);
            }
#endif
            sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
            SCTP_SOCKET_UNLOCK(so, 1);
#endif
        }
        /* now is it in the mapping array of what we have accepted? */
        if (chk_type == SCTP_DATA) {
            if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
                SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
                /* Nope not in the valid range dump it */
        dump_packet:
                sctp_set_rwnd(stcb, asoc);
                if ((asoc->cnt_on_all_streams +
                    asoc->cnt_on_reasm_queue +
                    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
                    SCTP_STAT_INCR(sctps_datadropchklmt);
                } else {
                    SCTP_STAT_INCR(sctps_datadroprwnd);
                }
                *break_flag = 1;
                return (0);
            }
        } else {
            if (control == NULL) {
                goto dump_packet;
            }
            if (SCTP_TSN_GT(fsn, control->top_fsn)) {
                goto dump_packet;
            }
        }
    }
#ifdef SCTP_ASOCLOG_OF_TSNS
    SCTP_TCB_LOCK_ASSERT(stcb);
    if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
        asoc->tsn_in_at = 0;
        asoc->tsn_in_wrapped = 1;
    }
    asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
    asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
    asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
    asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
    asoc->in_tsnlog[asoc->tsn_in_at].flgs = chk_flags;
    asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
    asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
    asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
    asoc->tsn_in_at++;
#endif
    /*
     * Before we continue let's validate that we are not being fooled by
     * an evil attacker. We can only have N * 8 chunks outstanding, based
     * on the TSN spread allowed by the mapping array (N bytes of 8 bits
     * each), so there is no way our stream sequence numbers could have
     * wrapped. We of course only validate the FIRST fragment, so the bit
     * must be set.
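     *
     * As a rough bound: a mapping array of N bytes tracks at most N * 8
     * TSNs beyond the base, and every message consumes at least one TSN,
     * so the MIDs/SSNs that can legitimately show up here sit far inside
     * a single 16- or 32-bit sequence-number cycle.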
 */
    if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
        (TAILQ_EMPTY(&asoc->resetHead)) &&
        (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
        SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
        /* The incoming sseq is behind where we last delivered? */
        SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
            mid, asoc->strmin[sid].last_mid_delivered);

        if (asoc->idata_supported) {
            snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
                asoc->strmin[sid].last_mid_delivered,
                tsn,
                sid,
                mid);
        } else {
            snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
                (uint16_t)asoc->strmin[sid].last_mid_delivered,
                tsn,
                sid,
                (uint16_t)mid);
        }
        op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
        sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
        *abort_flag = 1;
        return (0);
    }
    if (chk_type == SCTP_IDATA) {
        the_len = (chk_length - sizeof(struct sctp_idata_chunk));
    } else {
        the_len = (chk_length - sizeof(struct sctp_data_chunk));
    }
    if (last_chunk == 0) {
        if (chk_type == SCTP_IDATA) {
            dmbuf = SCTP_M_COPYM(*m,
                (offset + sizeof(struct sctp_idata_chunk)),
                the_len, M_NOWAIT);
        } else {
            dmbuf = SCTP_M_COPYM(*m,
                (offset + sizeof(struct sctp_data_chunk)),
                the_len, M_NOWAIT);
        }
#ifdef SCTP_MBUF_LOGGING
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
            sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
        }
#endif
    } else {
        /* We can steal the last chunk */
        int l_len;

        dmbuf = *m;
        /* lop off the top part */
        if (chk_type == SCTP_IDATA) {
            m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
        } else {
            m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
        }
        if (SCTP_BUF_NEXT(dmbuf) == NULL) {
            l_len = SCTP_BUF_LEN(dmbuf);
        } else {
            /*
             * need to count up the size; hopefully we do not
             * hit this too often :-0
             */
            struct mbuf *lat;

            l_len = 0;
            for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
                l_len += SCTP_BUF_LEN(lat);
            }
        }
        if (l_len > the_len) {
            /* Trim the trailing padding bytes off too */
            m_adj(dmbuf, -(l_len - the_len));
        }
    }
    if (dmbuf == NULL) {
        SCTP_STAT_INCR(sctps_nomem);
        return (0);
    }
    /*
     * Now no matter what, we need a control; get one if we don't have
     * one (we may have gotten it above when we found the message was
     * fragmented).
     */
    if (control == NULL) {
        sctp_alloc_a_readq(stcb, control);
        sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
            ppid,
            sid,
            chk_flags,
            NULL, fsn, mid);
        if (control == NULL) {
            SCTP_STAT_INCR(sctps_nomem);
            return (0);
        }
        if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
            struct mbuf *mm;

            control->data = dmbuf;
            for (mm = control->data; mm; mm = mm->m_next) {
                control->length += SCTP_BUF_LEN(mm);
            }
            control->tail_mbuf = NULL;
            control->end_added = 1;
            control->last_frag_seen = 1;
            control->first_frag_seen = 1;
            control->fsn_included = fsn;
            control->top_fsn = fsn;
        }
        created_control = 1;
    }
    SCTPDBG(SCTP_DEBUG_XXX,
"chunk_flags: 0x%x ordered: %d MID: %u control: %p\n", 2106 chk_flags, ordered, mid, control); 2107 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG && 2108 TAILQ_EMPTY(&asoc->resetHead) && 2109 ((ordered == 0) || 2110 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) && 2111 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) { 2112 /* Candidate for express delivery */ 2113 /* 2114 * Its not fragmented, No PD-API is up, Nothing in the 2115 * delivery queue, Its un-ordered OR ordered and the next to 2116 * deliver AND nothing else is stuck on the stream queue, 2117 * And there is room for it in the socket buffer. Lets just 2118 * stuff it up the buffer.... 2119 */ 2120 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 2121 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 2122 asoc->highest_tsn_inside_nr_map = tsn; 2123 } 2124 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n", 2125 control, mid); 2126 2127 sctp_add_to_readq(stcb->sctp_ep, stcb, 2128 control, &stcb->sctp_socket->so_rcv, 2129 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 2130 2131 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) { 2132 /* for ordered, bump what we delivered */ 2133 asoc->strmin[sid].last_mid_delivered++; 2134 } 2135 SCTP_STAT_INCR(sctps_recvexpress); 2136 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 2137 sctp_log_strm_del_alt(stcb, tsn, mid, sid, 2138 SCTP_STR_LOG_FROM_EXPRS_DEL); 2139 } 2140 control = NULL; 2141 goto finish_express_del; 2142 } 2143 /* Now will we need a chunk too? */ 2144 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 2145 sctp_alloc_a_chunk(stcb, chk); 2146 if (chk == NULL) { 2147 /* No memory so we drop the chunk */ 2148 SCTP_STAT_INCR(sctps_nomem); 2149 if (last_chunk == 0) { 2150 /* we copied it, free the copy */ 2151 sctp_m_freem(dmbuf); 2152 } 2153 return (0); 2154 } 2155 chk->rec.data.tsn = tsn; 2156 chk->no_fr_allowed = 0; 2157 chk->rec.data.fsn = fsn; 2158 chk->rec.data.mid = mid; 2159 chk->rec.data.sid = sid; 2160 chk->rec.data.ppid = ppid; 2161 chk->rec.data.context = stcb->asoc.context; 2162 chk->rec.data.doing_fast_retransmit = 0; 2163 chk->rec.data.rcv_flags = chk_flags; 2164 chk->asoc = asoc; 2165 chk->send_size = the_len; 2166 chk->whoTo = net; 2167 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n", 2168 chk, 2169 control, mid); 2170 atomic_add_int(&net->ref_count, 1); 2171 chk->data = dmbuf; 2172 } 2173 /* Set the appropriate TSN mark */ 2174 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { 2175 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 2176 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 2177 asoc->highest_tsn_inside_nr_map = tsn; 2178 } 2179 } else { 2180 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); 2181 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) { 2182 asoc->highest_tsn_inside_map = tsn; 2183 } 2184 } 2185 /* Now is it complete (i.e. not fragmented)? */ 2186 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 2187 /* 2188 * Special check for when streams are resetting. We could be 2189 * more smart about this and check the actual stream to see 2190 * if it is not being reset.. that way we would not create a 2191 * HOLB when amongst streams being reset and those not being 2192 * reset. 2193 * 2194 */ 2195 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2196 SCTP_TSN_GT(tsn, liste->tsn)) { 2197 /* 2198 * yep its past where we need to reset... go ahead 2199 * and queue it. 
2200 */ 2201 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) { 2202 /* first one on */ 2203 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2204 } else { 2205 struct sctp_queued_to_read *lcontrol, *nlcontrol; 2206 unsigned char inserted = 0; 2207 2208 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) { 2209 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) { 2210 2211 continue; 2212 } else { 2213 /* found it */ 2214 TAILQ_INSERT_BEFORE(lcontrol, control, next); 2215 inserted = 1; 2216 break; 2217 } 2218 } 2219 if (inserted == 0) { 2220 /* 2221 * must be put at end, use prevP 2222 * (all setup from loop) to setup 2223 * nextP. 2224 */ 2225 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2226 } 2227 } 2228 goto finish_express_del; 2229 } 2230 if (chk_flags & SCTP_DATA_UNORDERED) { 2231 /* queue directly into socket buffer */ 2232 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n", 2233 control, mid); 2234 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 2235 sctp_add_to_readq(stcb->sctp_ep, stcb, 2236 control, 2237 &stcb->sctp_socket->so_rcv, 1, 2238 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 2239 2240 } else { 2241 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control, 2242 mid); 2243 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2244 if (*abort_flag) { 2245 if (last_chunk) { 2246 *m = NULL; 2247 } 2248 return (0); 2249 } 2250 } 2251 goto finish_express_del; 2252 } 2253 /* If we reach here its a reassembly */ 2254 need_reasm_check = 1; 2255 SCTPDBG(SCTP_DEBUG_XXX, 2256 "Queue data to stream for reasm control: %p MID: %u\n", 2257 control, mid); 2258 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn); 2259 if (*abort_flag) { 2260 /* 2261 * the assoc is now gone and chk was put onto the reasm 2262 * queue, which has all been freed. 2263 */ 2264 if (last_chunk) { 2265 *m = NULL; 2266 } 2267 return (0); 2268 } 2269 finish_express_del: 2270 /* Here we tidy up things */ 2271 if (tsn == (asoc->cumulative_tsn + 1)) { 2272 /* Update cum-ack */ 2273 asoc->cumulative_tsn = tsn; 2274 } 2275 if (last_chunk) { 2276 *m = NULL; 2277 } 2278 if (ordered) { 2279 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); 2280 } else { 2281 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); 2282 } 2283 SCTP_STAT_INCR(sctps_recvdata); 2284 /* Set it present please */ 2285 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 2286 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN); 2287 } 2288 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2289 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2290 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); 2291 } 2292 if (need_reasm_check) { 2293 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD); 2294 need_reasm_check = 0; 2295 } 2296 /* check the special flag for stream resets */ 2297 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2298 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) { 2299 /* 2300 * we have finished working through the backlogged TSN's now 2301 * time to reset streams. 1: call reset function. 2: free 2302 * pending_reply space 3: distribute any chunks in 2303 * pending_reply_queue. 
2304 */ 2305 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams); 2306 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); 2307 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED); 2308 SCTP_FREE(liste, SCTP_M_STRESET); 2309 /* sa_ignore FREED_MEMORY */ 2310 liste = TAILQ_FIRST(&asoc->resetHead); 2311 if (TAILQ_EMPTY(&asoc->resetHead)) { 2312 /* All can be removed */ 2313 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) { 2314 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next); 2315 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2316 if (*abort_flag) { 2317 return (0); 2318 } 2319 if (need_reasm_check) { 2320 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD); 2321 need_reasm_check = 0; 2322 } 2323 } 2324 } else { 2325 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) { 2326 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) { 2327 break; 2328 } 2329 /* 2330 * if control->sinfo_tsn is <= liste->tsn we 2331 * can process it which is the NOT of 2332 * control->sinfo_tsn > liste->tsn 2333 */ 2334 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next); 2335 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2336 if (*abort_flag) { 2337 return (0); 2338 } 2339 if (need_reasm_check) { 2340 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD); 2341 need_reasm_check = 0; 2342 } 2343 } 2344 } 2345 } 2346 return (1); 2347 } 2348 2349 static const int8_t sctp_map_lookup_tab[256] = { 2350 0, 1, 0, 2, 0, 1, 0, 3, 2351 0, 1, 0, 2, 0, 1, 0, 4, 2352 0, 1, 0, 2, 0, 1, 0, 3, 2353 0, 1, 0, 2, 0, 1, 0, 5, 2354 0, 1, 0, 2, 0, 1, 0, 3, 2355 0, 1, 0, 2, 0, 1, 0, 4, 2356 0, 1, 0, 2, 0, 1, 0, 3, 2357 0, 1, 0, 2, 0, 1, 0, 6, 2358 0, 1, 0, 2, 0, 1, 0, 3, 2359 0, 1, 0, 2, 0, 1, 0, 4, 2360 0, 1, 0, 2, 0, 1, 0, 3, 2361 0, 1, 0, 2, 0, 1, 0, 5, 2362 0, 1, 0, 2, 0, 1, 0, 3, 2363 0, 1, 0, 2, 0, 1, 0, 4, 2364 0, 1, 0, 2, 0, 1, 0, 3, 2365 0, 1, 0, 2, 0, 1, 0, 7, 2366 0, 1, 0, 2, 0, 1, 0, 3, 2367 0, 1, 0, 2, 0, 1, 0, 4, 2368 0, 1, 0, 2, 0, 1, 0, 3, 2369 0, 1, 0, 2, 0, 1, 0, 5, 2370 0, 1, 0, 2, 0, 1, 0, 3, 2371 0, 1, 0, 2, 0, 1, 0, 4, 2372 0, 1, 0, 2, 0, 1, 0, 3, 2373 0, 1, 0, 2, 0, 1, 0, 6, 2374 0, 1, 0, 2, 0, 1, 0, 3, 2375 0, 1, 0, 2, 0, 1, 0, 4, 2376 0, 1, 0, 2, 0, 1, 0, 3, 2377 0, 1, 0, 2, 0, 1, 0, 5, 2378 0, 1, 0, 2, 0, 1, 0, 3, 2379 0, 1, 0, 2, 0, 1, 0, 4, 2380 0, 1, 0, 2, 0, 1, 0, 3, 2381 0, 1, 0, 2, 0, 1, 0, 8 2382 }; 2383 2384 2385 void 2386 sctp_slide_mapping_arrays(struct sctp_tcb *stcb) 2387 { 2388 /* 2389 * Now we also need to check the mapping array in a couple of ways. 2390 * 1) Did we move the cum-ack point? 2391 * 2392 * When you first glance at this you might think that all entries 2393 * that make up the position of the cum-ack would be in the 2394 * nr-mapping array only.. i.e. things up to the cum-ack are always 2395 * deliverable. Thats true with one exception, when its a fragmented 2396 * message we may not deliver the data until some threshold (or all 2397 * of it) is in place. So we must OR the nr_mapping_array and 2398 * mapping_array to get a true picture of the cum-ack. 
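     *
     * The scan below leans on sctp_map_lookup_tab, which maps a byte of
     * the OR'd arrays to its count of consecutive 1-bits starting at
     * bit 0 (the index of the lowest clear bit). A minimal equivalent,
     * shown only to document the table:
     *
     *    static int
     *    consecutive_low_set_bits(uint8_t v)
     *    {
     *        int n = 0;
     *
     *        while (v & 1) {
     *            v >>= 1;
     *            n++;
     *        }
     *        return (n);    // equals sctp_map_lookup_tab[v]
     *    }
     *
     * So a full byte (0xff) advances the scan by 8 TSNs, while e.g.
     * 0x17 (binary 00010111) advances it by 3 and stops there.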
2399 */ 2400 struct sctp_association *asoc; 2401 int at; 2402 uint8_t val; 2403 int slide_from, slide_end, lgap, distance; 2404 uint32_t old_cumack, old_base, old_highest, highest_tsn; 2405 2406 asoc = &stcb->asoc; 2407 2408 old_cumack = asoc->cumulative_tsn; 2409 old_base = asoc->mapping_array_base_tsn; 2410 old_highest = asoc->highest_tsn_inside_map; 2411 /* 2412 * We could probably improve this a small bit by calculating the 2413 * offset of the current cum-ack as the starting point. 2414 */ 2415 at = 0; 2416 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) { 2417 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from]; 2418 if (val == 0xff) { 2419 at += 8; 2420 } else { 2421 /* there is a 0 bit */ 2422 at += sctp_map_lookup_tab[val]; 2423 break; 2424 } 2425 } 2426 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1); 2427 2428 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) && 2429 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) { 2430 #ifdef INVARIANTS 2431 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map", 2432 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2433 #else 2434 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n", 2435 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2436 sctp_print_mapping_array(asoc); 2437 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2438 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 2439 } 2440 asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2441 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn; 2442 #endif 2443 } 2444 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2445 highest_tsn = asoc->highest_tsn_inside_nr_map; 2446 } else { 2447 highest_tsn = asoc->highest_tsn_inside_map; 2448 } 2449 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) { 2450 /* The complete array was completed by a single FR */ 2451 /* highest becomes the cum-ack */ 2452 int clr; 2453 #ifdef INVARIANTS 2454 unsigned int i; 2455 #endif 2456 2457 /* clear the array */ 2458 clr = ((at + 7) >> 3); 2459 if (clr > asoc->mapping_array_size) { 2460 clr = asoc->mapping_array_size; 2461 } 2462 memset(asoc->mapping_array, 0, clr); 2463 memset(asoc->nr_mapping_array, 0, clr); 2464 #ifdef INVARIANTS 2465 for (i = 0; i < asoc->mapping_array_size; i++) { 2466 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) { 2467 SCTP_PRINTF("Error Mapping array's not clean at clear\n"); 2468 sctp_print_mapping_array(asoc); 2469 } 2470 } 2471 #endif 2472 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; 2473 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2474 } else if (at >= 8) { 2475 /* we can slide the mapping array down */ 2476 /* slide_from holds where we hit the first NON 0xff byte */ 2477 2478 /* 2479 * now calculate the ceiling of the move using our highest 2480 * TSN value 2481 */ 2482 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn); 2483 slide_end = (lgap >> 3); 2484 if (slide_end < slide_from) { 2485 sctp_print_mapping_array(asoc); 2486 #ifdef INVARIANTS 2487 panic("impossible slide"); 2488 #else 2489 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? 
at: %d\n", 2490 lgap, slide_end, slide_from, at); 2491 return; 2492 #endif 2493 } 2494 if (slide_end > asoc->mapping_array_size) { 2495 #ifdef INVARIANTS 2496 panic("would overrun buffer"); 2497 #else 2498 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n", 2499 asoc->mapping_array_size, slide_end); 2500 slide_end = asoc->mapping_array_size; 2501 #endif 2502 } 2503 distance = (slide_end - slide_from) + 1; 2504 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2505 sctp_log_map(old_base, old_cumack, old_highest, 2506 SCTP_MAP_PREPARE_SLIDE); 2507 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end, 2508 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM); 2509 } 2510 if (distance + slide_from > asoc->mapping_array_size || 2511 distance < 0) { 2512 /* 2513 * Here we do NOT slide forward the array so that 2514 * hopefully when more data comes in to fill it up 2515 * we will be able to slide it forward. Really I 2516 * don't think this should happen :-0 2517 */ 2518 2519 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2520 sctp_log_map((uint32_t)distance, (uint32_t)slide_from, 2521 (uint32_t)asoc->mapping_array_size, 2522 SCTP_MAP_SLIDE_NONE); 2523 } 2524 } else { 2525 int ii; 2526 2527 for (ii = 0; ii < distance; ii++) { 2528 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii]; 2529 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii]; 2530 2531 } 2532 for (ii = distance; ii < asoc->mapping_array_size; ii++) { 2533 asoc->mapping_array[ii] = 0; 2534 asoc->nr_mapping_array[ii] = 0; 2535 } 2536 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) { 2537 asoc->highest_tsn_inside_map += (slide_from << 3); 2538 } 2539 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) { 2540 asoc->highest_tsn_inside_nr_map += (slide_from << 3); 2541 } 2542 asoc->mapping_array_base_tsn += (slide_from << 3); 2543 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2544 sctp_log_map(asoc->mapping_array_base_tsn, 2545 asoc->cumulative_tsn, asoc->highest_tsn_inside_map, 2546 SCTP_MAP_SLIDE_RESULT); 2547 } 2548 } 2549 } 2550 } 2551 2552 void 2553 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap) 2554 { 2555 struct sctp_association *asoc; 2556 uint32_t highest_tsn; 2557 int is_a_gap; 2558 2559 sctp_slide_mapping_arrays(stcb); 2560 asoc = &stcb->asoc; 2561 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2562 highest_tsn = asoc->highest_tsn_inside_nr_map; 2563 } else { 2564 highest_tsn = asoc->highest_tsn_inside_map; 2565 } 2566 /* Is there a gap now? */ 2567 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 2568 2569 /* 2570 * Now we need to see if we need to queue a sack or just start the 2571 * timer (if allowed). 2572 */ 2573 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2574 /* 2575 * Ok special case, in SHUTDOWN-SENT case. here we maker 2576 * sure SACK timer is off and instead send a SHUTDOWN and a 2577 * SACK 2578 */ 2579 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2580 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 2581 stcb->sctp_ep, stcb, NULL, 2582 SCTP_FROM_SCTP_INDATA + SCTP_LOC_17); 2583 } 2584 sctp_send_shutdown(stcb, 2585 ((stcb->asoc.alternate) ? 
stcb->asoc.alternate : stcb->asoc.primary_destination)); 2586 if (is_a_gap) { 2587 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 2588 } 2589 } else { 2590 /* 2591 * CMT DAC algorithm: increase number of packets received 2592 * since last ack 2593 */ 2594 stcb->asoc.cmt_dac_pkts_rcvd++; 2595 2596 if ((stcb->asoc.send_sack == 1) || /* We need to send a 2597 * SACK */ 2598 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no 2599 * longer is one */ 2600 (stcb->asoc.numduptsns) || /* we have dup's */ 2601 (is_a_gap) || /* is still a gap */ 2602 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */ 2603 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */ 2604 ) { 2605 2606 if ((stcb->asoc.sctp_cmt_on_off > 0) && 2607 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) && 2608 (stcb->asoc.send_sack == 0) && 2609 (stcb->asoc.numduptsns == 0) && 2610 (stcb->asoc.delayed_ack) && 2611 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) { 2612 2613 /* 2614 * CMT DAC algorithm: With CMT, delay acks 2615 * even in the face of 2616 * 2617 * reordering. Therefore, if acks that do 2618 * not have to be sent because of the above 2619 * reasons, will be delayed. That is, acks 2620 * that would have been sent due to gap 2621 * reports will be delayed with DAC. Start 2622 * the delayed ack timer. 2623 */ 2624 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2625 stcb->sctp_ep, stcb, NULL); 2626 } else { 2627 /* 2628 * Ok we must build a SACK since the timer 2629 * is pending, we got our first packet OR 2630 * there are gaps or duplicates. 2631 */ 2632 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 2633 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 2634 } 2635 } else { 2636 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2637 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2638 stcb->sctp_ep, stcb, NULL); 2639 } 2640 } 2641 } 2642 } 2643 2644 int 2645 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, 2646 struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2647 struct sctp_nets *net, uint32_t *high_tsn) 2648 { 2649 struct sctp_chunkhdr *ch, chunk_buf; 2650 struct sctp_association *asoc; 2651 int num_chunks = 0; /* number of control chunks processed */ 2652 int stop_proc = 0; 2653 int break_flag, last_chunk; 2654 int abort_flag = 0, was_a_gap; 2655 struct mbuf *m; 2656 uint32_t highest_tsn; 2657 uint16_t chk_length; 2658 2659 /* set the rwnd */ 2660 sctp_set_rwnd(stcb, &stcb->asoc); 2661 2662 m = *mm; 2663 SCTP_TCB_LOCK_ASSERT(stcb); 2664 asoc = &stcb->asoc; 2665 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2666 highest_tsn = asoc->highest_tsn_inside_nr_map; 2667 } else { 2668 highest_tsn = asoc->highest_tsn_inside_map; 2669 } 2670 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 2671 /* 2672 * setup where we got the last DATA packet from for any SACK that 2673 * may need to go out. Don't bump the net. This is done ONLY when a 2674 * chunk is assigned. 2675 */ 2676 asoc->last_data_chunk_from = net; 2677 2678 /*- 2679 * Now before we proceed we must figure out if this is a wasted 2680 * cluster... i.e. it is a small packet sent in and yet the driver 2681 * underneath allocated a full cluster for it. If so we must copy it 2682 * to a smaller mbuf and free up the cluster mbuf. This will help 2683 * with cluster starvation. Note for __Panda__ we don't do this 2684 * since it has clusters all the way down to 64 bytes. 
 */
    if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
        /* we only handle mbufs that are singletons.. not chains */
        m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
        if (m) {
            /* ok let's see if we can copy the data up */
            caddr_t *from, *to;

            /* get the pointers and copy */
            to = mtod(m, caddr_t *);
            from = mtod((*mm), caddr_t *);
            memcpy(to, from, SCTP_BUF_LEN((*mm)));
            /* copy the length and free up the old */
            SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
            sctp_m_freem(*mm);
            /* success, back copy */
            *mm = m;
        } else {
            /* We are in trouble in the mbuf world .. yikes */
            m = *mm;
        }
    }
    /* get pointer to the first chunk header */
    ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
        sizeof(struct sctp_chunkhdr),
        (uint8_t *)&chunk_buf);
    if (ch == NULL) {
        return (1);
    }
    /*
     * process all DATA chunks...
     */
    *high_tsn = asoc->cumulative_tsn;
    break_flag = 0;
    asoc->data_pkts_seen++;
    while (stop_proc == 0) {
        /* validate chunk length */
        chk_length = ntohs(ch->chunk_length);
        if (length - *offset < chk_length) {
            /* all done, mutilated chunk */
            stop_proc = 1;
            continue;
        }
        if ((asoc->idata_supported == 1) &&
            (ch->chunk_type == SCTP_DATA)) {
            struct mbuf *op_err;
            char msg[SCTP_DIAG_INFO_LEN];

            snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
            op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
            stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
            sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
            return (2);
        }
        if ((asoc->idata_supported == 0) &&
            (ch->chunk_type == SCTP_IDATA)) {
            struct mbuf *op_err;
            char msg[SCTP_DIAG_INFO_LEN];

            snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
            op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
            stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
            sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
            return (2);
        }
        if ((ch->chunk_type == SCTP_DATA) ||
            (ch->chunk_type == SCTP_IDATA)) {
            uint16_t clen;

            if (ch->chunk_type == SCTP_DATA) {
                clen = sizeof(struct sctp_data_chunk);
            } else {
                clen = sizeof(struct sctp_idata_chunk);
            }
            if (chk_length < clen) {
                /*
                 * Need to send an abort since we had an
                 * invalid data chunk.
                 */
                struct mbuf *op_err;
                char msg[SCTP_DIAG_INFO_LEN];

                snprintf(msg, sizeof(msg), "%s chunk of length %u",
                    ch->chunk_type == SCTP_DATA ?
"DATA" : "I-DATA", 2769 chk_length); 2770 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2771 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20; 2772 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2773 return (2); 2774 } 2775 #ifdef SCTP_AUDITING_ENABLED 2776 sctp_audit_log(0xB1, 0); 2777 #endif 2778 if (SCTP_SIZE32(chk_length) == (length - *offset)) { 2779 last_chunk = 1; 2780 } else { 2781 last_chunk = 0; 2782 } 2783 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, 2784 chk_length, net, high_tsn, &abort_flag, &break_flag, 2785 last_chunk, ch->chunk_type)) { 2786 num_chunks++; 2787 } 2788 if (abort_flag) 2789 return (2); 2790 2791 if (break_flag) { 2792 /* 2793 * Set because of out of rwnd space and no 2794 * drop rep space left. 2795 */ 2796 stop_proc = 1; 2797 continue; 2798 } 2799 } else { 2800 /* not a data chunk in the data region */ 2801 switch (ch->chunk_type) { 2802 case SCTP_INITIATION: 2803 case SCTP_INITIATION_ACK: 2804 case SCTP_SELECTIVE_ACK: 2805 case SCTP_NR_SELECTIVE_ACK: 2806 case SCTP_HEARTBEAT_REQUEST: 2807 case SCTP_HEARTBEAT_ACK: 2808 case SCTP_ABORT_ASSOCIATION: 2809 case SCTP_SHUTDOWN: 2810 case SCTP_SHUTDOWN_ACK: 2811 case SCTP_OPERATION_ERROR: 2812 case SCTP_COOKIE_ECHO: 2813 case SCTP_COOKIE_ACK: 2814 case SCTP_ECN_ECHO: 2815 case SCTP_ECN_CWR: 2816 case SCTP_SHUTDOWN_COMPLETE: 2817 case SCTP_AUTHENTICATION: 2818 case SCTP_ASCONF_ACK: 2819 case SCTP_PACKET_DROPPED: 2820 case SCTP_STREAM_RESET: 2821 case SCTP_FORWARD_CUM_TSN: 2822 case SCTP_ASCONF: 2823 { 2824 /* 2825 * Now, what do we do with KNOWN 2826 * chunks that are NOT in the right 2827 * place? 2828 * 2829 * For now, I do nothing but ignore 2830 * them. We may later want to add 2831 * sysctl stuff to switch out and do 2832 * either an ABORT() or possibly 2833 * process them. 2834 */ 2835 struct mbuf *op_err; 2836 char msg[SCTP_DIAG_INFO_LEN]; 2837 2838 snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x", 2839 ch->chunk_type); 2840 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2841 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2842 return (2); 2843 } 2844 default: 2845 /* 2846 * Unknown chunk type: use bit rules after 2847 * checking length 2848 */ 2849 if (chk_length < sizeof(struct sctp_chunkhdr)) { 2850 /* 2851 * Need to send an abort since we 2852 * had a invalid chunk. 
 */
                    struct mbuf *op_err;
                    char msg[SCTP_DIAG_INFO_LEN];

                    snprintf(msg, sizeof(msg), "Chunk of length %u",
                        chk_length);
                    op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
                    sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
                    return (2);
                }
                if (ch->chunk_type & 0x40) {
                    /* Add an error report to the queue */
                    struct mbuf *op_err;
                    struct sctp_gen_error_cause *cause;

                    op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
                        0, M_NOWAIT, 1, MT_DATA);
                    if (op_err != NULL) {
                        cause = mtod(op_err, struct sctp_gen_error_cause *);
                        cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
                        cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
                        SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
                        SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
                        if (SCTP_BUF_NEXT(op_err) != NULL) {
                            sctp_queue_op_err(stcb, op_err);
                        } else {
                            sctp_m_freem(op_err);
                        }
                    }
                }
                if ((ch->chunk_type & 0x80) == 0) {
                    /* discard the rest of this packet */
                    stop_proc = 1;
                }    /* else skip this bad chunk and
                     * continue... */
                break;
            }    /* switch of chunk type */
        }
        *offset += SCTP_SIZE32(chk_length);
        if ((*offset >= length) || stop_proc) {
            /* no more data left in the mbuf chain */
            stop_proc = 1;
            continue;
        }
        ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
            sizeof(struct sctp_chunkhdr),
            (uint8_t *)&chunk_buf);
        if (ch == NULL) {
            *offset = length;
            stop_proc = 1;
            continue;
        }
    }
    if (break_flag) {
        /*
         * we need to report rwnd overrun drops.
         */
        sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
    }
    if (num_chunks) {
        /*
         * Did we get data? If so, update the time for auto-close
         * and give the peer credit for being alive.
         */
        SCTP_STAT_INCR(sctps_recvpktwithdata);
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
            sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
                stcb->asoc.overall_error_count,
                0,
                SCTP_FROM_SCTP_INDATA,
                __LINE__);
        }
        stcb->asoc.overall_error_count = 0;
        (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
    }
    /* now service all of the reassm queue if needed */
    if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
        /* Assure that we ack right away */
        stcb->asoc.send_sack = 1;
    }
    /* Start a sack timer or QUEUE a SACK for sending */
    sctp_sack_check(stcb, was_a_gap);
    return (0);
}

static int
sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
    uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
    int *num_frs,
    uint32_t *biggest_newly_acked_tsn,
    uint32_t *this_sack_lowest_newack,
    int *rto_ok)
{
    struct sctp_tmit_chunk *tp1;
    unsigned int theTSN;
    int j, wake_him = 0, circled = 0;

    /* Recover the tp1 we last saw */
    tp1 = *p_tp1;
    if (tp1 == NULL) {
        tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
    }
    for (j = frag_strt; j <= frag_end; j++) {
        theTSN = j + last_tsn;
        while (tp1) {
            if (tp1->rec.data.doing_fast_retransmit)
                (*num_frs) += 1;

            /*-
             * CMT: CUCv2 algorithm.
For each TSN being 2964 * processed from the sent queue, track the 2965 * next expected pseudo-cumack, or 2966 * rtx_pseudo_cumack, if required. Separate 2967 * cumack trackers for first transmissions, 2968 * and retransmissions. 2969 */ 2970 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 2971 (tp1->whoTo->find_pseudo_cumack == 1) && 2972 (tp1->snd_count == 1)) { 2973 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn; 2974 tp1->whoTo->find_pseudo_cumack = 0; 2975 } 2976 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 2977 (tp1->whoTo->find_rtx_pseudo_cumack == 1) && 2978 (tp1->snd_count > 1)) { 2979 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn; 2980 tp1->whoTo->find_rtx_pseudo_cumack = 0; 2981 } 2982 if (tp1->rec.data.tsn == theTSN) { 2983 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 2984 /*- 2985 * must be held until 2986 * cum-ack passes 2987 */ 2988 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 2989 /*- 2990 * If it is less than RESEND, it is 2991 * now no-longer in flight. 2992 * Higher values may already be set 2993 * via previous Gap Ack Blocks... 2994 * i.e. ACKED or RESEND. 2995 */ 2996 if (SCTP_TSN_GT(tp1->rec.data.tsn, 2997 *biggest_newly_acked_tsn)) { 2998 *biggest_newly_acked_tsn = tp1->rec.data.tsn; 2999 } 3000 /*- 3001 * CMT: SFR algo (and HTNA) - set 3002 * saw_newack to 1 for dest being 3003 * newly acked. update 3004 * this_sack_highest_newack if 3005 * appropriate. 3006 */ 3007 if (tp1->rec.data.chunk_was_revoked == 0) 3008 tp1->whoTo->saw_newack = 1; 3009 3010 if (SCTP_TSN_GT(tp1->rec.data.tsn, 3011 tp1->whoTo->this_sack_highest_newack)) { 3012 tp1->whoTo->this_sack_highest_newack = 3013 tp1->rec.data.tsn; 3014 } 3015 /*- 3016 * CMT DAC algo: also update 3017 * this_sack_lowest_newack 3018 */ 3019 if (*this_sack_lowest_newack == 0) { 3020 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3021 sctp_log_sack(*this_sack_lowest_newack, 3022 last_tsn, 3023 tp1->rec.data.tsn, 3024 0, 3025 0, 3026 SCTP_LOG_TSN_ACKED); 3027 } 3028 *this_sack_lowest_newack = tp1->rec.data.tsn; 3029 } 3030 /*- 3031 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp 3032 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set 3033 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be 3034 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack. 3035 * Separate pseudo_cumack trackers for first transmissions and 3036 * retransmissions. 
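             *
             * Concretely: with two destinations A and B, A's
             * pseudo-cumack is the lowest outstanding TSN that
             * was sent to A. When a gap report covers that TSN,
             * A's cwnd may be grown even though the
             * association-wide cum-ack is still held back by
             * data outstanding on B.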
 */
                    if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
                        if (tp1->rec.data.chunk_was_revoked == 0) {
                            tp1->whoTo->new_pseudo_cumack = 1;
                        }
                        tp1->whoTo->find_pseudo_cumack = 1;
                    }
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                        sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
                    }
                    if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
                        if (tp1->rec.data.chunk_was_revoked == 0) {
                            tp1->whoTo->new_pseudo_cumack = 1;
                        }
                        tp1->whoTo->find_rtx_pseudo_cumack = 1;
                    }
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
                        sctp_log_sack(*biggest_newly_acked_tsn,
                            last_tsn,
                            tp1->rec.data.tsn,
                            frag_strt,
                            frag_end,
                            SCTP_LOG_TSN_ACKED);
                    }
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
                        sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
                            tp1->whoTo->flight_size,
                            tp1->book_size,
                            (uint32_t)(uintptr_t)tp1->whoTo,
                            tp1->rec.data.tsn);
                    }
                    sctp_flight_size_decrease(tp1);
                    if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
                        (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
                            tp1);
                    }
                    sctp_total_flight_decrease(stcb, tp1);

                    tp1->whoTo->net_ack += tp1->send_size;
                    if (tp1->snd_count < 2) {
                        /*-
                         * True non-retransmitted chunk
                         */
                        tp1->whoTo->net_ack2 += tp1->send_size;

                        /*-
                         * update RTO too?
                         */
                        if (tp1->do_rtt) {
                            if (*rto_ok) {
                                tp1->whoTo->RTO =
                                    sctp_calculate_rto(stcb,
                                    &stcb->asoc,
                                    tp1->whoTo,
                                    &tp1->sent_rcv_time,
                                    SCTP_RTT_FROM_DATA);
                                *rto_ok = 0;
                            }
                            if (tp1->whoTo->rto_needed == 0) {
                                tp1->whoTo->rto_needed = 1;
                            }
                            tp1->do_rtt = 0;
                        }
                    }
                }
                if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
                    if (SCTP_TSN_GT(tp1->rec.data.tsn,
                        stcb->asoc.this_sack_highest_gap)) {
                        stcb->asoc.this_sack_highest_gap =
                            tp1->rec.data.tsn;
                    }
                    if (tp1->sent == SCTP_DATAGRAM_RESEND) {
                        sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
#ifdef SCTP_AUDITING_ENABLED
                        sctp_audit_log(0xB2,
                            (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
#endif
                    }
                }
                /*-
                 * All chunks NOT UNSENT fall through here and are marked
                 * (leave PR-SCTP ones that are to skip alone though)
                 */
                if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
                    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
                    tp1->sent = SCTP_DATAGRAM_MARKED;
                }
                if (tp1->rec.data.chunk_was_revoked) {
                    /* deflate the cwnd */
                    tp1->whoTo->cwnd -= tp1->book_size;
                    tp1->rec.data.chunk_was_revoked = 0;
                }
                /* NR Sack code here */
                if (nr_sacking &&
                    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
                    if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
                        stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
                    } else {
                        panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
#endif
                    }
                    if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
                        (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
                        TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
                        stcb->asoc.trigger_reset = 1;
                    }
                    tp1->sent = SCTP_DATAGRAM_NR_ACKED;
                    if (tp1->data) {
                        /* sa_ignore NO_NULL_CHK */
                        sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
sctp_m_freem(tp1->data); 3152 tp1->data = NULL; 3153 } 3154 wake_him++; 3155 } 3156 } 3157 break; 3158 } /* if (tp1->tsn == theTSN) */ 3159 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) { 3160 break; 3161 } 3162 tp1 = TAILQ_NEXT(tp1, sctp_next); 3163 if ((tp1 == NULL) && (circled == 0)) { 3164 circled++; 3165 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 3166 } 3167 } /* end while (tp1) */ 3168 if (tp1 == NULL) { 3169 circled = 0; 3170 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 3171 } 3172 /* In case the fragments were not in order we must reset */ 3173 } /* end for (j = fragStart */ 3174 *p_tp1 = tp1; 3175 return (wake_him); /* Return value only used for nr-sack */ 3176 } 3177 3178 3179 static int 3180 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc, 3181 uint32_t last_tsn, uint32_t *biggest_tsn_acked, 3182 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack, 3183 int num_seg, int num_nr_seg, int *rto_ok) 3184 { 3185 struct sctp_gap_ack_block *frag, block; 3186 struct sctp_tmit_chunk *tp1; 3187 int i; 3188 int num_frs = 0; 3189 int chunk_freed; 3190 int non_revocable; 3191 uint16_t frag_strt, frag_end, prev_frag_end; 3192 3193 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3194 prev_frag_end = 0; 3195 chunk_freed = 0; 3196 3197 for (i = 0; i < (num_seg + num_nr_seg); i++) { 3198 if (i == num_seg) { 3199 prev_frag_end = 0; 3200 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3201 } 3202 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset, 3203 sizeof(struct sctp_gap_ack_block), (uint8_t *)&block); 3204 *offset += sizeof(block); 3205 if (frag == NULL) { 3206 return (chunk_freed); 3207 } 3208 frag_strt = ntohs(frag->start); 3209 frag_end = ntohs(frag->end); 3210 3211 if (frag_strt > frag_end) { 3212 /* This gap report is malformed, skip it. */ 3213 continue; 3214 } 3215 if (frag_strt <= prev_frag_end) { 3216 /* This gap report is not in order, so restart. */ 3217 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3218 } 3219 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) { 3220 *biggest_tsn_acked = last_tsn + frag_end; 3221 } 3222 if (i < num_seg) { 3223 non_revocable = 0; 3224 } else { 3225 non_revocable = 1; 3226 } 3227 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end, 3228 non_revocable, &num_frs, biggest_newly_acked_tsn, 3229 this_sack_lowest_newack, rto_ok)) { 3230 chunk_freed = 1; 3231 } 3232 prev_frag_end = frag_end; 3233 } 3234 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3235 if (num_frs) 3236 sctp_log_fr(*biggest_tsn_acked, 3237 *biggest_newly_acked_tsn, 3238 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS); 3239 } 3240 return (chunk_freed); 3241 } 3242 3243 static void 3244 sctp_check_for_revoked(struct sctp_tcb *stcb, 3245 struct sctp_association *asoc, uint32_t cumack, 3246 uint32_t biggest_tsn_acked) 3247 { 3248 struct sctp_tmit_chunk *tp1; 3249 3250 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3251 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) { 3252 /* 3253 * ok this guy is either ACK or MARKED. If it is 3254 * ACKED it has been previously acked but not this 3255 * time i.e. revoked. If it is MARKED it was ACK'ed 3256 * again. 3257 */ 3258 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) { 3259 break; 3260 } 3261 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 3262 /* it has been revoked */ 3263 tp1->sent = SCTP_DATAGRAM_SENT; 3264 tp1->rec.data.chunk_was_revoked = 1; 3265 /* 3266 * We must add this stuff back in to assure 3267 * timers and such get started. 
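                 *
                 * Example of a revocation: a TSN that was
                 * gap-acked in one SACK (so marked ACKED) but
                 * is absent from the gap blocks of the next
                 * SACK has been "revoked" by the receiver, so
                 * it goes back to SENT and its bytes re-enter
                 * the flight, keeping cwnd and T3 accounting
                 * consistent.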
3268 */ 3269 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3270 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 3271 tp1->whoTo->flight_size, 3272 tp1->book_size, 3273 (uint32_t)(uintptr_t)tp1->whoTo, 3274 tp1->rec.data.tsn); 3275 } 3276 sctp_flight_size_increase(tp1); 3277 sctp_total_flight_increase(stcb, tp1); 3278 /* 3279 * We inflate the cwnd to compensate for our 3280 * artificial inflation of the flight_size. 3281 */ 3282 tp1->whoTo->cwnd += tp1->book_size; 3283 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3284 sctp_log_sack(asoc->last_acked_seq, 3285 cumack, 3286 tp1->rec.data.tsn, 3287 0, 3288 0, 3289 SCTP_LOG_TSN_REVOKED); 3290 } 3291 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) { 3292 /* it has been re-acked in this SACK */ 3293 tp1->sent = SCTP_DATAGRAM_ACKED; 3294 } 3295 } 3296 if (tp1->sent == SCTP_DATAGRAM_UNSENT) 3297 break; 3298 } 3299 } 3300 3301 3302 static void 3303 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, 3304 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved) 3305 { 3306 struct sctp_tmit_chunk *tp1; 3307 int strike_flag = 0; 3308 struct timeval now; 3309 int tot_retrans = 0; 3310 uint32_t sending_seq; 3311 struct sctp_nets *net; 3312 int num_dests_sacked = 0; 3313 3314 /* 3315 * select the sending_seq, this is either the next thing ready to be 3316 * sent but not transmitted, OR, the next seq we assign. 3317 */ 3318 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 3319 if (tp1 == NULL) { 3320 sending_seq = asoc->sending_seq; 3321 } else { 3322 sending_seq = tp1->rec.data.tsn; 3323 } 3324 3325 /* CMT DAC algo: finding out if SACK is a mixed SACK */ 3326 if ((asoc->sctp_cmt_on_off > 0) && 3327 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3328 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3329 if (net->saw_newack) 3330 num_dests_sacked++; 3331 } 3332 } 3333 if (stcb->asoc.prsctp_supported) { 3334 (void)SCTP_GETTIME_TIMEVAL(&now); 3335 } 3336 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3337 strike_flag = 0; 3338 if (tp1->no_fr_allowed) { 3339 /* this one had a timeout or something */ 3340 continue; 3341 } 3342 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3343 if (tp1->sent < SCTP_DATAGRAM_RESEND) 3344 sctp_log_fr(biggest_tsn_newly_acked, 3345 tp1->rec.data.tsn, 3346 tp1->sent, 3347 SCTP_FR_LOG_CHECK_STRIKE); 3348 } 3349 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) || 3350 tp1->sent == SCTP_DATAGRAM_UNSENT) { 3351 /* done */ 3352 break; 3353 } 3354 if (stcb->asoc.prsctp_supported) { 3355 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3356 /* Is it expired? 
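                 * For timed-reliability (PR-SCTP TTL) chunks,
                 * rec.data.timetodrop holds the absolute
                 * deadline computed at send time; once "now"
                 * is past it, the chunk is abandoned below
                 * rather than struck toward fast retransmit.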
 */
                if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
                    /* Yes so drop it */
                    if (tp1->data != NULL) {
                        (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
                            SCTP_SO_NOT_LOCKED);
                    }
                    continue;
                }
            }
        }
        if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
            !(accum_moved && asoc->fast_retran_loss_recovery)) {
            /* we are beyond the tsn in the sack */
            break;
        }
        if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
            /* either a RESEND, ACKED, or MARKED */
            /* skip */
            if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
                /* Continue striking FWD-TSN chunks */
                tp1->rec.data.fwd_tsn_cnt++;
            }
            continue;
        }
        /*
         * CMT : SFR algo (covers part of DAC and HTNA as well)
         */
        if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
            /*
             * No new acks were received for data sent to this
             * dest. Therefore, according to the SFR algo for
             * CMT, no data sent to this dest can be marked for
             * FR using this SACK.
             */
            continue;
        } else if (tp1->whoTo &&
            SCTP_TSN_GT(tp1->rec.data.tsn,
            tp1->whoTo->this_sack_highest_newack) &&
            !(accum_moved && asoc->fast_retran_loss_recovery)) {
            /*
             * CMT: New acks were received for data sent to
             * this dest. But no new acks were seen for data
             * sent after tp1. Therefore, according to the SFR
             * algo for CMT, tp1 cannot be marked for FR using
             * this SACK. This step covers part of the DAC algo
             * and the HTNA algo as well.
             */
            continue;
        }
        /*
         * Here we check to see if we have already done a FR
         * and if so we see if the biggest TSN we saw in the sack is
         * smaller than the recovery point. If so we don't strike
         * the tsn... otherwise we CAN strike the TSN.
         */
        /*
         * @@@ JRI: Check for CMT if (accum_moved &&
         * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
         * 0)) {
         */
        if (accum_moved && asoc->fast_retran_loss_recovery) {
            /*
             * Strike the TSN if in fast-recovery and cum-ack
             * moved.
             */
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
                sctp_log_fr(biggest_tsn_newly_acked,
                    tp1->rec.data.tsn,
                    tp1->sent,
                    SCTP_FR_LOG_STRIKE_CHUNK);
            }
            if (tp1->sent < SCTP_DATAGRAM_RESEND) {
                tp1->sent++;
            }
            if ((asoc->sctp_cmt_on_off > 0) &&
                SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
                /*
                 * CMT DAC algorithm: If SACK flag is set to
                 * 0, then lowest_newack test will not pass
                 * because it would have been set to the
                 * cumack earlier. If not already to be
                 * rtx'd, If not a mixed sack and if tp1 is
                 * not between two sacked TSNs, then mark by
                 * one more. NOTE that we are marking by one
                 * additional time since the SACK DAC flag
                 * indicates that two packets have been
                 * received after this missing TSN.
                 */
                if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
                    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
                        sctp_log_fr(16 + num_dests_sacked,
                            tp1->rec.data.tsn,
                            tp1->sent,
                            SCTP_FR_LOG_STRIKE_CHUNK);
                    }
                    tp1->sent++;
                }
            }
        } else if ((tp1->rec.data.doing_fast_retransmit) &&
            (asoc->sctp_cmt_on_off == 0)) {
            /*
             * For those that have done a FR we must take
             * special consideration if we strike. I.e. the
biggest_newly_acked must be higher than the
             * sending_seq at the time we did the FR.
             */
            if (
#ifdef SCTP_FR_TO_ALTERNATE
            /*
             * If FR's go to new networks, then we must only do
             * this for singly homed asoc's. However if the FR's
             * go to the same network (Armando's work) then it's
             * ok to FR multiple times.
             */
                (asoc->numnets < 2)
#else
                (1)
#endif
                ) {

                if (SCTP_TSN_GE(biggest_tsn_newly_acked,
                    tp1->rec.data.fast_retran_tsn)) {
                    /*
                     * Strike the TSN, since this ack is
                     * beyond where things were when we
                     * did a FR.
                     */
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
                        sctp_log_fr(biggest_tsn_newly_acked,
                            tp1->rec.data.tsn,
                            tp1->sent,
                            SCTP_FR_LOG_STRIKE_CHUNK);
                    }
                    if (tp1->sent < SCTP_DATAGRAM_RESEND) {
                        tp1->sent++;
                    }
                    strike_flag = 1;
                    if ((asoc->sctp_cmt_on_off > 0) &&
                        SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
                        /*
                         * CMT DAC algorithm: If
                         * SACK flag is set to 0,
                         * then lowest_newack test
                         * will not pass because it
                         * would have been set to
                         * the cumack earlier. If
                         * not already to be rtx'd,
                         * If not a mixed sack and
                         * if tp1 is not between two
                         * sacked TSNs, then mark by
                         * one more. NOTE that we
                         * are marking by one
                         * additional time since the
                         * SACK DAC flag indicates
                         * that two packets have
                         * been received after this
                         * missing TSN.
                         */
                        if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
                            (num_dests_sacked == 1) &&
                            SCTP_TSN_GT(this_sack_lowest_newack,
                            tp1->rec.data.tsn)) {
                            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
                                sctp_log_fr(32 + num_dests_sacked,
                                    tp1->rec.data.tsn,
                                    tp1->sent,
                                    SCTP_FR_LOG_STRIKE_CHUNK);
                            }
                            if (tp1->sent < SCTP_DATAGRAM_RESEND) {
                                tp1->sent++;
                            }
                        }
                    }
                }
            }
            /*
             * JRI: TODO: remove code for HTNA algo. CMT's SFR
             * algo covers HTNA.
             */
        } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
            biggest_tsn_newly_acked)) {
            /*
             * We don't strike these: This is the HTNA
             * algorithm i.e. we don't strike if our TSN is
             * larger than the Highest TSN Newly Acked.
             */
            ;
        } else {
            /* Strike the TSN */
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
                sctp_log_fr(biggest_tsn_newly_acked,
                    tp1->rec.data.tsn,
                    tp1->sent,
                    SCTP_FR_LOG_STRIKE_CHUNK);
            }
            if (tp1->sent < SCTP_DATAGRAM_RESEND) {
                tp1->sent++;
            }
            if ((asoc->sctp_cmt_on_off > 0) &&
                SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
                /*
                 * CMT DAC algorithm: If SACK flag is set to
                 * 0, then lowest_newack test will not pass
                 * because it would have been set to the
                 * cumack earlier. If not already to be
                 * rtx'd, If not a mixed sack and if tp1 is
                 * not between two sacked TSNs, then mark by
                 * one more. NOTE that we are marking by one
                 * additional time since the SACK DAC flag
                 * indicates that two packets have been
                 * received after this missing TSN.
3569 */ 3570 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3571 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) { 3572 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3573 sctp_log_fr(48 + num_dests_sacked, 3574 tp1->rec.data.tsn, 3575 tp1->sent, 3576 SCTP_FR_LOG_STRIKE_CHUNK); 3577 } 3578 tp1->sent++; 3579 } 3580 } 3581 } 3582 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3583 struct sctp_nets *alt; 3584 3585 /* fix counts and things */ 3586 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3587 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND, 3588 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0), 3589 tp1->book_size, 3590 (uint32_t)(uintptr_t)tp1->whoTo, 3591 tp1->rec.data.tsn); 3592 } 3593 if (tp1->whoTo) { 3594 tp1->whoTo->net_ack++; 3595 sctp_flight_size_decrease(tp1); 3596 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3597 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3598 tp1); 3599 } 3600 } 3601 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 3602 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, 3603 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3604 } 3605 /* add back to the rwnd */ 3606 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3607 3608 /* remove from the total flight */ 3609 sctp_total_flight_decrease(stcb, tp1); 3610 3611 if ((stcb->asoc.prsctp_supported) && 3612 (PR_SCTP_RTX_ENABLED(tp1->flags))) { 3613 /* 3614 * Has it been retransmitted tv_sec times? - 3615 * we store the retran count there. 3616 */ 3617 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) { 3618 /* Yes, so drop it */ 3619 if (tp1->data != NULL) { 3620 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, 3621 SCTP_SO_NOT_LOCKED); 3622 } 3623 /* Make sure to flag we had a FR */ 3624 if (tp1->whoTo != NULL) { 3625 tp1->whoTo->net_ack++; 3626 } 3627 continue; 3628 } 3629 } 3630 /* 3631 * SCTP_PRINTF("OK, we are now ready to FR this 3632 * guy\n"); 3633 */ 3634 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3635 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count, 3636 0, SCTP_FR_MARKED); 3637 } 3638 if (strike_flag) { 3639 /* This is a subsequent FR */ 3640 SCTP_STAT_INCR(sctps_sendmultfastretrans); 3641 } 3642 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3643 if (asoc->sctp_cmt_on_off > 0) { 3644 /* 3645 * CMT: Using RTX_SSTHRESH policy for CMT. 3646 * If CMT is being used, then pick dest with 3647 * largest ssthresh for any retransmission. 3648 */ 3649 tp1->no_fr_allowed = 1; 3650 alt = tp1->whoTo; 3651 /* sa_ignore NO_NULL_CHK */ 3652 if (asoc->sctp_cmt_pf > 0) { 3653 /* 3654 * JRS 5/18/07 - If CMT PF is on, 3655 * use the PF version of 3656 * find_alt_net() 3657 */ 3658 alt = sctp_find_alternate_net(stcb, alt, 2); 3659 } else { 3660 /* 3661 * JRS 5/18/07 - If only CMT is on, 3662 * use the CMT version of 3663 * find_alt_net() 3664 */ 3665 /* sa_ignore NO_NULL_CHK */ 3666 alt = sctp_find_alternate_net(stcb, alt, 1); 3667 } 3668 if (alt == NULL) { 3669 alt = tp1->whoTo; 3670 } 3671 /* 3672 * CUCv2: If a different dest is picked for 3673 * the retransmission, then new 3674 * (rtx-)pseudo_cumack needs to be tracked 3675 * for orig dest. Let CUCv2 track new (rtx-) 3676 * pseudo-cumack always. 3677 */ 3678 if (tp1->whoTo) { 3679 tp1->whoTo->find_pseudo_cumack = 1; 3680 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3681 } 3682 } else { /* CMT is OFF */ 3683 3684 #ifdef SCTP_FR_TO_ALTERNATE 3685 /* Can we find an alternate? 
*/ 3686 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); 3687 #else 3688 /* 3689 * default behavior is to NOT retransmit 3690 * FR's to an alternate. Armando Caro's 3691 * paper details why. 3692 */ 3693 alt = tp1->whoTo; 3694 #endif 3695 } 3696 3697 tp1->rec.data.doing_fast_retransmit = 1; 3698 tot_retrans++; 3699 /* mark the sending seq for possible subsequent FR's */ 3700 /* 3701 * SCTP_PRINTF("Marking TSN for FR new value %x\n", 3702 * (uint32_t)tpi->rec.data.tsn); 3703 */ 3704 if (TAILQ_EMPTY(&asoc->send_queue)) { 3705 /* 3706 * If the send queue is empty then sending_seq is 3707 * the next sequence number that will be 3708 * assigned; one less than it is 3709 * the one we last sent. 3710 */ 3711 tp1->rec.data.fast_retran_tsn = sending_seq; 3712 } else { 3713 /* 3714 * If there are chunks on the send queue 3715 * (unsent data that has made it from the 3716 * stream queues but not out the door), we 3717 * take the first one (which will have the 3718 * lowest TSN); one less than it is the 3719 * one we last sent. 3720 */ 3721 struct sctp_tmit_chunk *ttt; 3722 3723 ttt = TAILQ_FIRST(&asoc->send_queue); 3724 tp1->rec.data.fast_retran_tsn = 3725 ttt->rec.data.tsn; 3726 } 3727 3728 if (tp1->do_rtt) { 3729 /* 3730 * an RTO calculation was pending on 3731 * this chunk; cancel it 3732 */ 3733 if ((tp1->whoTo != NULL) && 3734 (tp1->whoTo->rto_needed == 0)) { 3735 tp1->whoTo->rto_needed = 1; 3736 } 3737 tp1->do_rtt = 0; 3738 } 3739 if (alt != tp1->whoTo) { 3740 /* yes, there is an alternate. */ 3741 sctp_free_remote_addr(tp1->whoTo); 3742 /* sa_ignore FREED_MEMORY */ 3743 tp1->whoTo = alt; 3744 atomic_add_int(&alt->ref_count, 1); 3745 } 3746 } 3747 } 3748 } 3749 3750 struct sctp_tmit_chunk * 3751 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb, 3752 struct sctp_association *asoc) 3753 { 3754 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL; 3755 struct timeval now; 3756 int now_filled = 0; 3757 3758 if (asoc->prsctp_supported == 0) { 3759 return (NULL); 3760 } 3761 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 3762 if (tp1->sent != SCTP_FORWARD_TSN_SKIP && 3763 tp1->sent != SCTP_DATAGRAM_RESEND && 3764 tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 3765 /* no chance to advance, out of here */ 3766 break; 3767 } 3768 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 3769 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || 3770 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { 3771 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 3772 asoc->advanced_peer_ack_point, 3773 tp1->rec.data.tsn, 0, 0); 3774 } 3775 } 3776 if (!PR_SCTP_ENABLED(tp1->flags)) { 3777 /* 3778 * We can't fwd-tsn past any that are reliable, aka 3779 * retransmitted until the asoc fails. 3780 */ 3781 break; 3782 } 3783 if (!now_filled) { 3784 (void)SCTP_GETTIME_TIMEVAL(&now); 3785 now_filled = 1; 3786 } 3787 /* 3788 * Now we have a chunk which is marked for another 3789 * retransmission to a PR-stream but may have run out its chances 3790 * already, OR has been marked to skip now. Can we skip 3791 * it if it's a resend? 3792 */ 3793 if (tp1->sent == SCTP_DATAGRAM_RESEND && 3794 (PR_SCTP_TTL_ENABLED(tp1->flags))) { 3795 /* 3796 * Now is this one marked for resend and its time is 3797 * now up? 3798 */ 3799 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3800 /* Yes so drop it */ 3801 if (tp1->data) { 3802 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 3803 1, SCTP_SO_NOT_LOCKED); 3804 } 3805 } else { 3806 /* 3807 * No, we are done when we hit one marked for resend 3808 * whose time has not expired.
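 * (The sent_queue is kept in TSN order, so the first RESEND chunk whose lifetime has not yet expired ends the scan; nothing beyond it can be skipped on this pass.)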
3809 */ 3810 break; 3811 } 3812 } 3813 /* 3814 * Ok now if this chunk is marked to drop it we can clean up 3815 * the chunk, advance our peer ack point and we can check 3816 * the next chunk. 3817 */ 3818 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || 3819 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { 3820 /* advance PeerAckPoint goes forward */ 3821 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) { 3822 asoc->advanced_peer_ack_point = tp1->rec.data.tsn; 3823 a_adv = tp1; 3824 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) { 3825 /* No update but we do save the chk */ 3826 a_adv = tp1; 3827 } 3828 } else { 3829 /* 3830 * If it is still in RESEND we can advance no 3831 * further 3832 */ 3833 break; 3834 } 3835 } 3836 return (a_adv); 3837 } 3838 3839 static int 3840 sctp_fs_audit(struct sctp_association *asoc) 3841 { 3842 struct sctp_tmit_chunk *chk; 3843 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0; 3844 int ret; 3845 #ifndef INVARIANTS 3846 int entry_flight, entry_cnt; 3847 #endif 3848 3849 ret = 0; 3850 #ifndef INVARIANTS 3851 entry_flight = asoc->total_flight; 3852 entry_cnt = asoc->total_flight_count; 3853 #endif 3854 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt) 3855 return (0); 3856 3857 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 3858 if (chk->sent < SCTP_DATAGRAM_RESEND) { 3859 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n", 3860 chk->rec.data.tsn, 3861 chk->send_size, 3862 chk->snd_count); 3863 inflight++; 3864 } else if (chk->sent == SCTP_DATAGRAM_RESEND) { 3865 resend++; 3866 } else if (chk->sent < SCTP_DATAGRAM_ACKED) { 3867 inbetween++; 3868 } else if (chk->sent > SCTP_DATAGRAM_ACKED) { 3869 above++; 3870 } else { 3871 acked++; 3872 } 3873 } 3874 3875 if ((inflight > 0) || (inbetween > 0)) { 3876 #ifdef INVARIANTS 3877 panic("Flight size-express incorrect? \n"); 3878 #else 3879 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n", 3880 entry_flight, entry_cnt); 3881 3882 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n", 3883 inflight, inbetween, resend, above, acked); 3884 ret = 1; 3885 #endif 3886 } 3887 return (ret); 3888 } 3889 3890 3891 static void 3892 sctp_window_probe_recovery(struct sctp_tcb *stcb, 3893 struct sctp_association *asoc, 3894 struct sctp_tmit_chunk *tp1) 3895 { 3896 tp1->window_probe = 0; 3897 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) { 3898 /* TSN's skipped we do NOT move back. */ 3899 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD, 3900 tp1->whoTo ? 
tp1->whoTo->flight_size : 0, 3901 tp1->book_size, 3902 (uint32_t)(uintptr_t)tp1->whoTo, 3903 tp1->rec.data.tsn); 3904 return; 3905 } 3906 /* First setup this by shrinking flight */ 3907 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3908 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3909 tp1); 3910 } 3911 sctp_flight_size_decrease(tp1); 3912 sctp_total_flight_decrease(stcb, tp1); 3913 /* Now mark for resend */ 3914 tp1->sent = SCTP_DATAGRAM_RESEND; 3915 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 3916 3917 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3918 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP, 3919 tp1->whoTo->flight_size, 3920 tp1->book_size, 3921 (uint32_t)(uintptr_t)tp1->whoTo, 3922 tp1->rec.data.tsn); 3923 } 3924 } 3925 3926 void 3927 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, 3928 uint32_t rwnd, int *abort_now, int ecne_seen) 3929 { 3930 struct sctp_nets *net; 3931 struct sctp_association *asoc; 3932 struct sctp_tmit_chunk *tp1, *tp2; 3933 uint32_t old_rwnd; 3934 int win_probe_recovery = 0; 3935 int win_probe_recovered = 0; 3936 int j, done_once = 0; 3937 int rto_ok = 1; 3938 uint32_t send_s; 3939 3940 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 3941 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack, 3942 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 3943 } 3944 SCTP_TCB_LOCK_ASSERT(stcb); 3945 #ifdef SCTP_ASOCLOG_OF_TSNS 3946 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack; 3947 stcb->asoc.cumack_log_at++; 3948 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 3949 stcb->asoc.cumack_log_at = 0; 3950 } 3951 #endif 3952 asoc = &stcb->asoc; 3953 old_rwnd = asoc->peers_rwnd; 3954 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) { 3955 /* old ack */ 3956 return; 3957 } else if (asoc->last_acked_seq == cumack) { 3958 /* Window update sack */ 3959 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 3960 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 3961 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 3962 /* SWS sender side engages */ 3963 asoc->peers_rwnd = 0; 3964 } 3965 if (asoc->peers_rwnd > old_rwnd) { 3966 goto again; 3967 } 3968 return; 3969 } 3970 /* First setup for CC stuff */ 3971 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3972 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) { 3973 /* Drag along the window_tsn for cwr's */ 3974 net->cwr_window_tsn = cumack; 3975 } 3976 net->prev_cwnd = net->cwnd; 3977 net->net_ack = 0; 3978 net->net_ack2 = 0; 3979 3980 /* 3981 * CMT: Reset CUC and Fast recovery algo variables before 3982 * SACK processing 3983 */ 3984 net->new_pseudo_cumack = 0; 3985 net->will_exit_fast_recovery = 0; 3986 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 3987 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 3988 } 3989 } 3990 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 3991 tp1 = TAILQ_LAST(&asoc->sent_queue, 3992 sctpchunk_listhead); 3993 send_s = tp1->rec.data.tsn + 1; 3994 } else { 3995 send_s = asoc->sending_seq; 3996 } 3997 if (SCTP_TSN_GE(cumack, send_s)) { 3998 struct mbuf *op_err; 3999 char msg[SCTP_DIAG_INFO_LEN]; 4000 4001 *abort_now = 1; 4002 /* XXX */ 4003 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x", 4004 cumack, send_s); 4005 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 4006 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21; 4007 
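/*
 * A cum-ack at or beyond send_s would acknowledge a TSN we have not yet
 * sent (send_s is one past the highest outstanding TSN), so it can only
 * come from a confused or hostile peer, and the association is aborted.
 * Sketch of the serial-number ordering involved (hypothetical helper,
 * never compiled; the real test is the SCTP_TSN_GE() macro):
 */
#if 0
static int
tsn_ge(uint32_t a, uint32_t b)
{
	/* a >= b in 32-bit serial arithmetic, e.g. 0x00000002 >= 0xfffffff0 */
	return ((a == b) || ((uint32_t)(a - b) < (1U << 31)));
}
#endif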
sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4008 return; 4009 } 4010 asoc->this_sack_highest_gap = cumack; 4011 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4012 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4013 stcb->asoc.overall_error_count, 4014 0, 4015 SCTP_FROM_SCTP_INDATA, 4016 __LINE__); 4017 } 4018 stcb->asoc.overall_error_count = 0; 4019 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) { 4020 /* process the new consecutive TSN first */ 4021 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 4022 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) { 4023 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 4024 SCTP_PRINTF("Warning, an unsent is now acked?\n"); 4025 } 4026 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4027 /* 4028 * If it is less than ACKED, it is 4029 * now no longer in flight. Higher 4030 * values may occur during marking 4031 */ 4032 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4033 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4034 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4035 tp1->whoTo->flight_size, 4036 tp1->book_size, 4037 (uint32_t)(uintptr_t)tp1->whoTo, 4038 tp1->rec.data.tsn); 4039 } 4040 sctp_flight_size_decrease(tp1); 4041 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 4042 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 4043 tp1); 4044 } 4045 /* sa_ignore NO_NULL_CHK */ 4046 sctp_total_flight_decrease(stcb, tp1); 4047 } 4048 tp1->whoTo->net_ack += tp1->send_size; 4049 if (tp1->snd_count < 2) { 4050 /* 4051 * True non-retransmitted 4052 * chunk 4053 */ 4054 tp1->whoTo->net_ack2 += 4055 tp1->send_size; 4056 4057 /* update RTO too? */ 4058 if (tp1->do_rtt) { 4059 if (rto_ok) { 4060 tp1->whoTo->RTO = 4061 /* 4062 * sa_ignore 4063 * NO_NULL_CHK 4064 */ 4065 sctp_calculate_rto(stcb, 4066 asoc, tp1->whoTo, 4067 &tp1->sent_rcv_time, 4068 SCTP_RTT_FROM_DATA); 4069 rto_ok = 0; 4070 } 4071 if (tp1->whoTo->rto_needed == 0) { 4072 tp1->whoTo->rto_needed = 1; 4073 } 4074 tp1->do_rtt = 0; 4075 } 4076 } 4077 /* 4078 * CMT: CUCv2 algorithm. From the 4079 * cumack'd TSNs, for each TSN being 4080 * acked for the first time, set the 4081 * following variables for the 4082 * corresponding destination. 4083 * new_pseudo_cumack will trigger a 4084 * cwnd update. 4085 * find_(rtx_)pseudo_cumack will 4086 * trigger search for the next 4087 * expected (rtx-)pseudo-cumack.
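 * For example (hypothetical TSNs): if 10 and 12 were sent to destination A and 11 to destination B, newly acking 10 advances A's pseudo-cumack and triggers a cwnd update for A, even though the association cum-ack cannot pass 10 while 11 is still outstanding.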
4088 */ 4089 tp1->whoTo->new_pseudo_cumack = 1; 4090 tp1->whoTo->find_pseudo_cumack = 1; 4091 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4092 4093 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 4094 /* sa_ignore NO_NULL_CHK */ 4095 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); 4096 } 4097 } 4098 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4099 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4100 } 4101 if (tp1->rec.data.chunk_was_revoked) { 4102 /* deflate the cwnd */ 4103 tp1->whoTo->cwnd -= tp1->book_size; 4104 tp1->rec.data.chunk_was_revoked = 0; 4105 } 4106 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4107 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) { 4108 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--; 4109 #ifdef INVARIANTS 4110 } else { 4111 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); 4112 #endif 4113 } 4114 } 4115 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) && 4116 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && 4117 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) { 4118 asoc->trigger_reset = 1; 4119 } 4120 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4121 if (tp1->data) { 4122 /* sa_ignore NO_NULL_CHK */ 4123 sctp_free_bufspace(stcb, asoc, tp1, 1); 4124 sctp_m_freem(tp1->data); 4125 tp1->data = NULL; 4126 } 4127 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4128 sctp_log_sack(asoc->last_acked_seq, 4129 cumack, 4130 tp1->rec.data.tsn, 4131 0, 4132 0, 4133 SCTP_LOG_FREE_SENT); 4134 } 4135 asoc->sent_queue_cnt--; 4136 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 4137 } else { 4138 break; 4139 } 4140 } 4141 4142 } 4143 /* sa_ignore NO_NULL_CHK */ 4144 if (stcb->sctp_socket) { 4145 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4146 struct socket *so; 4147 4148 #endif 4149 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4150 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4151 /* sa_ignore NO_NULL_CHK */ 4152 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK); 4153 } 4154 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4155 so = SCTP_INP_SO(stcb->sctp_ep); 4156 atomic_add_int(&stcb->asoc.refcnt, 1); 4157 SCTP_TCB_UNLOCK(stcb); 4158 SCTP_SOCKET_LOCK(so, 1); 4159 SCTP_TCB_LOCK(stcb); 4160 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4161 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4162 /* assoc was freed while we were unlocked */ 4163 SCTP_SOCKET_UNLOCK(so, 1); 4164 return; 4165 } 4166 #endif 4167 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4168 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4169 SCTP_SOCKET_UNLOCK(so, 1); 4170 #endif 4171 } else { 4172 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4173 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK); 4174 } 4175 } 4176 4177 /* JRS - Use the congestion control given in the CC module */ 4178 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) { 4179 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4180 if (net->net_ack2 > 0) { 4181 /* 4182 * Karn's rule applies to clearing error 4183 * count, this is optional. 
*/ 4185 net->error_count = 0; 4186 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { 4187 /* addr came good */ 4188 net->dest_state |= SCTP_ADDR_REACHABLE; 4189 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 4190 0, (void *)net, SCTP_SO_NOT_LOCKED); 4191 } 4192 if (net == stcb->asoc.primary_destination) { 4193 if (stcb->asoc.alternate) { 4194 /* 4195 * release the alternate, 4196 * primary is good 4197 */ 4198 sctp_free_remote_addr(stcb->asoc.alternate); 4199 stcb->asoc.alternate = NULL; 4200 } 4201 } 4202 if (net->dest_state & SCTP_ADDR_PF) { 4203 net->dest_state &= ~SCTP_ADDR_PF; 4204 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, 4205 stcb->sctp_ep, stcb, net, 4206 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22); 4207 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 4208 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 4209 /* Done with this net */ 4210 net->net_ack = 0; 4211 } 4212 /* restore any doubled timers */ 4213 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 4214 if (net->RTO < stcb->asoc.minrto) { 4215 net->RTO = stcb->asoc.minrto; 4216 } 4217 if (net->RTO > stcb->asoc.maxrto) { 4218 net->RTO = stcb->asoc.maxrto; 4219 } 4220 } 4221 } 4222 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0); 4223 } 4224 asoc->last_acked_seq = cumack; 4225 4226 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4227 /* nothing left in-flight */ 4228 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4229 net->flight_size = 0; 4230 net->partial_bytes_acked = 0; 4231 } 4232 asoc->total_flight = 0; 4233 asoc->total_flight_count = 0; 4234 } 4235 /* RWND update */ 4236 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 4237 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 4238 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4239 /* SWS sender side engages */ 4240 asoc->peers_rwnd = 0; 4241 } 4242 if (asoc->peers_rwnd > old_rwnd) { 4243 win_probe_recovery = 1; 4244 } 4245 /* Now assure a timer is running wherever data is queued */ 4246 again: 4247 j = 0; 4248 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4249 if (win_probe_recovery && (net->window_probe)) { 4250 win_probe_recovered = 1; 4251 /* 4252 * Find the first chunk that was used for a window probe 4253 * and clear its sent state 4254 */ 4255 /* sa_ignore FREED_MEMORY */ 4256 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4257 if (tp1->window_probe) { 4258 /* move back to data send queue */ 4259 sctp_window_probe_recovery(stcb, asoc, tp1); 4260 break; 4261 } 4262 } 4263 } 4264 if (net->flight_size) { 4265 j++; 4266 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net); 4267 if (net->window_probe) { 4268 net->window_probe = 0; 4269 } 4270 } else { 4271 if (net->window_probe) { 4272 /* 4273 * In window probes we must assure a timer 4274 * is still running there 4275 */ 4276 net->window_probe = 0; 4277 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4278 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net); 4279 } 4280 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4281 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4282 stcb, net, 4283 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23); 4284 } 4285 } 4286 } 4287 if ((j == 0) && 4288 (!TAILQ_EMPTY(&asoc->sent_queue)) && 4289 (asoc->sent_queue_retran_cnt == 0) && 4290 (win_probe_recovered == 0) && 4291 (done_once == 0)) { 4292 /* 4293 * Huh, this should not happen unless all packets are 4294 * PR-SCTP and marked to be skipped, of course.
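 * sctp_fs_audit() below walks the sent_queue and cross-checks the flight-size bookkeeping; if the queue and the counters disagree, the recovery code zeroes every net's flight_size and rebuilds the totals from the queue before retrying the timer pass exactly once (done_once).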
4295 */ 4296 if (sctp_fs_audit(asoc)) { 4297 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4298 net->flight_size = 0; 4299 } 4300 asoc->total_flight = 0; 4301 asoc->total_flight_count = 0; 4302 asoc->sent_queue_retran_cnt = 0; 4303 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4304 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4305 sctp_flight_size_increase(tp1); 4306 sctp_total_flight_increase(stcb, tp1); 4307 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4308 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 4309 } 4310 } 4311 } 4312 done_once = 1; 4313 goto again; 4314 } 4315 /**********************************/ 4316 /* Now what about shutdown issues */ 4317 /**********************************/ 4318 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4319 /* nothing left on sendqueue.. consider done */ 4320 /* clean up */ 4321 if ((asoc->stream_queue_cnt == 1) && 4322 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4323 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 4324 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) { 4325 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4326 } 4327 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4328 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 4329 (asoc->stream_queue_cnt == 1) && 4330 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 4331 struct mbuf *op_err; 4332 4333 *abort_now = 1; 4334 /* XXX */ 4335 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 4336 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; 4337 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4338 return; 4339 } 4340 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4341 (asoc->stream_queue_cnt == 0)) { 4342 struct sctp_nets *netp; 4343 4344 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 4345 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4346 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4347 } 4348 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 4349 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4350 sctp_stop_timers_for_shutdown(stcb); 4351 if (asoc->alternate) { 4352 netp = asoc->alternate; 4353 } else { 4354 netp = asoc->primary_destination; 4355 } 4356 sctp_send_shutdown(stcb, netp); 4357 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4358 stcb->sctp_ep, stcb, netp); 4359 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4360 stcb->sctp_ep, stcb, netp); 4361 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4362 (asoc->stream_queue_cnt == 0)) { 4363 struct sctp_nets *netp; 4364 4365 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4366 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); 4367 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4368 sctp_stop_timers_for_shutdown(stcb); 4369 if (asoc->alternate) { 4370 netp = asoc->alternate; 4371 } else { 4372 netp = asoc->primary_destination; 4373 } 4374 sctp_send_shutdown_ack(stcb, netp); 4375 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4376 stcb->sctp_ep, stcb, netp); 4377 } 4378 } 4379 /*********************************************/ 4380 /* Here we perform PR-SCTP procedures */ 4381 /* (section 4.2) */ 4382 /*********************************************/ 4383 /* C1. 
update advancedPeerAckPoint */ 4384 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) { 4385 asoc->advanced_peer_ack_point = cumack; 4386 } 4387 /* PR-SCTP issues need to be addressed too */ 4388 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { 4389 struct sctp_tmit_chunk *lchk; 4390 uint32_t old_adv_peer_ack_point; 4391 4392 old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 4393 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 4394 /* C3. See if we need to send a Fwd-TSN */ 4395 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) { 4396 /* 4397 * ISSUE with ECN, see FWD-TSN processing. 4398 */ 4399 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 4400 send_forward_tsn(stcb, asoc); 4401 } else if (lchk) { 4402 /* try to FR fwd-tsn's that get lost too */ 4403 if (lchk->rec.data.fwd_tsn_cnt >= 3) { 4404 send_forward_tsn(stcb, asoc); 4405 } 4406 } 4407 } 4408 if (lchk) { 4409 /* Assure a timer is up */ 4410 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4411 stcb->sctp_ep, stcb, lchk->whoTo); 4412 } 4413 } 4414 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 4415 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 4416 rwnd, 4417 stcb->asoc.peers_rwnd, 4418 stcb->asoc.total_flight, 4419 stcb->asoc.total_output_queue_size); 4420 } 4421 } 4422 4423 void 4424 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, 4425 struct sctp_tcb *stcb, 4426 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup, 4427 int *abort_now, uint8_t flags, 4428 uint32_t cum_ack, uint32_t rwnd, int ecne_seen) 4429 { 4430 struct sctp_association *asoc; 4431 struct sctp_tmit_chunk *tp1, *tp2; 4432 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack; 4433 uint16_t wake_him = 0; 4434 uint32_t send_s = 0; 4435 long j; 4436 int accum_moved = 0; 4437 int will_exit_fast_recovery = 0; 4438 uint32_t a_rwnd, old_rwnd; 4439 int win_probe_recovery = 0; 4440 int win_probe_recovered = 0; 4441 struct sctp_nets *net = NULL; 4442 int done_once; 4443 int rto_ok = 1; 4444 uint8_t reneged_all = 0; 4445 uint8_t cmt_dac_flag; 4446 4447 /* 4448 * we take any chance we can to service our queues since we cannot 4449 * get awoken when the socket is read from :< 4450 */ 4451 /* 4452 * Now perform the actual SACK handling: 1) Verify that it is not an 4453 * old sack; if so, discard. 2) If there is nothing left in the send 4454 * queue (cum-ack is equal to last acked) then you have a duplicate 4455 * too; update any rwnd change and verify no timers are running, 4456 * then return. 3) Process any new consecutive data, i.e. cum-ack 4457 * moved; process these first and note that it moved. 4) Process any 4458 * sack blocks. 5) Drop any acked from the queue. 6) Check for any 4459 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left, 4460 * sync up flightsizes and things, stop all timers and also check 4461 * for shutdown_pending state. If so then go ahead and send off the 4462 * shutdown. If in shutdown recv, send off the shutdown-ack and 4463 * start that timer, Ret. 9) Strike any non-acked things and do FR 4464 * procedure if needed, being sure to set the FR flag. 10) Do pr-sctp 4465 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK 4466 * if in shutdown_recv state.
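 * (This is the slow path, counted via sctps_slowpath_sack below; SACKs that only move the cum-ack, with no gap or duplicate reports, go through sctp_express_handle_sack() above instead.)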
4467 */ 4468 SCTP_TCB_LOCK_ASSERT(stcb); 4469 /* CMT DAC algo */ 4470 this_sack_lowest_newack = 0; 4471 SCTP_STAT_INCR(sctps_slowpath_sack); 4472 last_tsn = cum_ack; 4473 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC; 4474 #ifdef SCTP_ASOCLOG_OF_TSNS 4475 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack; 4476 stcb->asoc.cumack_log_at++; 4477 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 4478 stcb->asoc.cumack_log_at = 0; 4479 } 4480 #endif 4481 a_rwnd = rwnd; 4482 4483 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 4484 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack, 4485 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 4486 } 4487 old_rwnd = stcb->asoc.peers_rwnd; 4488 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4489 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4490 stcb->asoc.overall_error_count, 4491 0, 4492 SCTP_FROM_SCTP_INDATA, 4493 __LINE__); 4494 } 4495 stcb->asoc.overall_error_count = 0; 4496 asoc = &stcb->asoc; 4497 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4498 sctp_log_sack(asoc->last_acked_seq, 4499 cum_ack, 4500 0, 4501 num_seg, 4502 num_dup, 4503 SCTP_LOG_NEW_SACK); 4504 } 4505 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) { 4506 uint16_t i; 4507 uint32_t *dupdata, dblock; 4508 4509 for (i = 0; i < num_dup; i++) { 4510 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t), 4511 sizeof(uint32_t), (uint8_t *)&dblock); 4512 if (dupdata == NULL) { 4513 break; 4514 } 4515 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); 4516 } 4517 } 4518 /* reality check */ 4519 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 4520 tp1 = TAILQ_LAST(&asoc->sent_queue, 4521 sctpchunk_listhead); 4522 send_s = tp1->rec.data.tsn + 1; 4523 } else { 4524 tp1 = NULL; 4525 send_s = asoc->sending_seq; 4526 } 4527 if (SCTP_TSN_GE(cum_ack, send_s)) { 4528 struct mbuf *op_err; 4529 char msg[SCTP_DIAG_INFO_LEN]; 4530 4531 /* 4532 * no way, we have not even sent this TSN out yet. Peer is 4533 * hopelessly messed up with us. 
*/ 4535 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n", 4536 cum_ack, send_s); 4537 if (tp1) { 4538 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n", 4539 tp1->rec.data.tsn, (void *)tp1); 4540 } 4541 hopeless_peer: 4542 *abort_now = 1; 4543 /* XXX */ 4544 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x", 4545 cum_ack, send_s); 4546 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 4547 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 4548 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4549 return; 4550 } 4551 /**********************/ 4552 /* 1) check the range */ 4553 /**********************/ 4554 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) { 4555 /* acking something behind */ 4556 return; 4557 } 4558 /* update the Rwnd of the peer */ 4559 if (TAILQ_EMPTY(&asoc->sent_queue) && 4560 TAILQ_EMPTY(&asoc->send_queue) && 4561 (asoc->stream_queue_cnt == 0)) { 4562 /* nothing left on send/sent and strmq */ 4563 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4564 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4565 asoc->peers_rwnd, 0, 0, a_rwnd); 4566 } 4567 asoc->peers_rwnd = a_rwnd; 4568 if (asoc->sent_queue_retran_cnt) { 4569 asoc->sent_queue_retran_cnt = 0; 4570 } 4571 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4572 /* SWS sender side engages */ 4573 asoc->peers_rwnd = 0; 4574 } 4575 /* stop any timers */ 4576 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4577 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4578 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); 4579 net->partial_bytes_acked = 0; 4580 net->flight_size = 0; 4581 } 4582 asoc->total_flight = 0; 4583 asoc->total_flight_count = 0; 4584 return; 4585 } 4586 /* 4587 * We initialize net_ack and net_ack2 to 0. These are used to track two 4588 * things: the total byte count acked is tracked in net_ack, AND 4589 * net_ack2 tracks the total bytes acked that are 4590 * unambiguous and were never retransmitted. We track these on a per 4591 * destination address basis. 4592 */ 4593 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4594 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) { 4595 /* Drag along the window_tsn for cwr's */ 4596 net->cwr_window_tsn = cum_ack; 4597 } 4598 net->prev_cwnd = net->cwnd; 4599 net->net_ack = 0; 4600 net->net_ack2 = 0; 4601 4602 /* 4603 * CMT: Reset CUC and Fast recovery algo variables before 4604 * SACK processing 4605 */ 4606 net->new_pseudo_cumack = 0; 4607 net->will_exit_fast_recovery = 0; 4608 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 4609 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 4610 } 4611 /* 4612 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has 4613 * to be greater than the cumack. Also reset saw_newack to 0 4614 * for all dests. 4615 */ 4616 net->saw_newack = 0; 4617 net->this_sack_highest_newack = last_tsn; 4618 } 4619 /* process the new consecutive TSN first */ 4620 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4621 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) { 4622 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 4623 accum_moved = 1; 4624 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4625 /* 4626 * If it is less than ACKED, it is 4627 * now no longer in flight.
Higher 4628 values may occur during marking 4629 */ 4630 if ((tp1->whoTo->dest_state & 4631 SCTP_ADDR_UNCONFIRMED) && 4632 (tp1->snd_count < 2)) { 4633 /* 4634 * If there was no retransmission 4635 * and the address is 4636 * unconfirmed and we sent 4637 * there and are now 4638 * sacked, it's confirmed; 4639 * mark it so. 4640 */ 4641 tp1->whoTo->dest_state &= 4642 ~SCTP_ADDR_UNCONFIRMED; 4643 } 4644 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4645 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4646 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4647 tp1->whoTo->flight_size, 4648 tp1->book_size, 4649 (uint32_t)(uintptr_t)tp1->whoTo, 4650 tp1->rec.data.tsn); 4651 } 4652 sctp_flight_size_decrease(tp1); 4653 sctp_total_flight_decrease(stcb, tp1); 4654 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 4655 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 4656 tp1); 4657 } 4658 } 4659 tp1->whoTo->net_ack += tp1->send_size; 4660 4661 /* CMT SFR and DAC algos */ 4662 this_sack_lowest_newack = tp1->rec.data.tsn; 4663 tp1->whoTo->saw_newack = 1; 4664 4665 if (tp1->snd_count < 2) { 4666 /* 4667 * True non-retransmitted 4668 * chunk 4669 */ 4670 tp1->whoTo->net_ack2 += 4671 tp1->send_size; 4672 4673 /* update RTO too? */ 4674 if (tp1->do_rtt) { 4675 if (rto_ok) { 4676 tp1->whoTo->RTO = 4677 sctp_calculate_rto(stcb, 4678 asoc, tp1->whoTo, 4679 &tp1->sent_rcv_time, 4680 SCTP_RTT_FROM_DATA); 4681 rto_ok = 0; 4682 } 4683 if (tp1->whoTo->rto_needed == 0) { 4684 tp1->whoTo->rto_needed = 1; 4685 } 4686 tp1->do_rtt = 0; 4687 } 4688 } 4689 /* 4690 * CMT: CUCv2 algorithm. From the 4691 * cumack'd TSNs, for each TSN being 4692 * acked for the first time, set the 4693 * following variables for the 4694 * corresponding destination. 4695 * new_pseudo_cumack will trigger a 4696 * cwnd update. 4697 * find_(rtx_)pseudo_cumack will 4698 * trigger search for the next 4699 * expected (rtx-)pseudo-cumack. 4700 */ 4701 tp1->whoTo->new_pseudo_cumack = 1; 4702 tp1->whoTo->find_pseudo_cumack = 1; 4703 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4704 4705 4706 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4707 sctp_log_sack(asoc->last_acked_seq, 4708 cum_ack, 4709 tp1->rec.data.tsn, 4710 0, 4711 0, 4712 SCTP_LOG_TSN_ACKED); 4713 } 4714 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 4715 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); 4716 } 4717 } 4718 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4719 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4720 #ifdef SCTP_AUDITING_ENABLED 4721 sctp_audit_log(0xB3, 4722 (asoc->sent_queue_retran_cnt & 0x000000ff)); 4723 #endif 4724 } 4725 if (tp1->rec.data.chunk_was_revoked) { 4726 /* deflate the cwnd */ 4727 tp1->whoTo->cwnd -= tp1->book_size; 4728 tp1->rec.data.chunk_was_revoked = 0; 4729 } 4730 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4731 tp1->sent = SCTP_DATAGRAM_ACKED; 4732 } 4733 } 4734 } else { 4735 break; 4736 } 4737 } 4738 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; 4739 /* always set this up to cum-ack */ 4740 asoc->this_sack_highest_gap = last_tsn; 4741 4742 if ((num_seg > 0) || (num_nr_seg > 0)) { 4743 4744 /* 4745 * thisSackHighestGap will increase while handling NEW 4746 * segments; this_sack_highest_newack will increase while 4747 * handling NEWLY ACKED chunks. this_sack_lowest_newack is 4748 * used for the CMT DAC algo. saw_newack will also change.
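 * For example, if this SACK newly acks TSNs that were sent to destination A only, then A->saw_newack is set and A->this_sack_highest_newack rises, while destination B keeps saw_newack == 0 and the strike loop will skip B's chunks entirely (the SFR rule).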
4749 */ 4750 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked, 4751 &biggest_tsn_newly_acked, &this_sack_lowest_newack, 4752 num_seg, num_nr_seg, &rto_ok)) { 4753 wake_him++; 4754 } 4755 /* 4756 * validate the biggest_tsn_acked in the gap acks if strict 4757 * adherence is wanted. 4758 */ 4759 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) { 4760 /* 4761 * peer is either confused or we are under attack. 4762 * We must abort. 4763 */ 4764 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n", 4765 biggest_tsn_acked, send_s); 4766 goto hopeless_peer; 4767 } 4768 } 4769 /*******************************************/ 4770 /* cancel ALL T3-send timer if accum moved */ 4771 /*******************************************/ 4772 if (asoc->sctp_cmt_on_off > 0) { 4773 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4774 if (net->new_pseudo_cumack) 4775 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4776 stcb, net, 4777 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27); 4778 4779 } 4780 } else { 4781 if (accum_moved) { 4782 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4783 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4784 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28); 4785 } 4786 } 4787 } 4788 /********************************************/ 4789 /* drop the acked chunks from the sentqueue */ 4790 /********************************************/ 4791 asoc->last_acked_seq = cum_ack; 4792 4793 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 4794 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) { 4795 break; 4796 } 4797 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4798 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) { 4799 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--; 4800 #ifdef INVARIANTS 4801 } else { 4802 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); 4803 #endif 4804 } 4805 } 4806 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) && 4807 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && 4808 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) { 4809 asoc->trigger_reset = 1; 4810 } 4811 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4812 if (PR_SCTP_ENABLED(tp1->flags)) { 4813 if (asoc->pr_sctp_cnt != 0) 4814 asoc->pr_sctp_cnt--; 4815 } 4816 asoc->sent_queue_cnt--; 4817 if (tp1->data) { 4818 /* sa_ignore NO_NULL_CHK */ 4819 sctp_free_bufspace(stcb, asoc, tp1, 1); 4820 sctp_m_freem(tp1->data); 4821 tp1->data = NULL; 4822 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) { 4823 asoc->sent_queue_cnt_removeable--; 4824 } 4825 } 4826 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4827 sctp_log_sack(asoc->last_acked_seq, 4828 cum_ack, 4829 tp1->rec.data.tsn, 4830 0, 4831 0, 4832 SCTP_LOG_FREE_SENT); 4833 } 4834 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 4835 wake_him++; 4836 } 4837 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) { 4838 #ifdef INVARIANTS 4839 panic("Warning flight size is positive and should be 0"); 4840 #else 4841 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n", 4842 asoc->total_flight); 4843 #endif 4844 asoc->total_flight = 0; 4845 } 4846 /* sa_ignore NO_NULL_CHK */ 4847 if ((wake_him) && (stcb->sctp_socket)) { 4848 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4849 struct socket *so; 4850 4851 #endif 4852 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4853 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4854 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK); 
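/*
 * On platforms that take the socket lock here (the __APPLE__ /
 * SCTP_SO_LOCK_TESTING branches below), the TCB lock is dropped before
 * the socket lock is acquired and then re-taken, with a refcount held so
 * the association cannot be freed while unlocked; hence the
 * SCTP_STATE_CLOSED_SOCKET re-check after relocking.
 */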
4855 } 4856 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4857 so = SCTP_INP_SO(stcb->sctp_ep); 4858 atomic_add_int(&stcb->asoc.refcnt, 1); 4859 SCTP_TCB_UNLOCK(stcb); 4860 SCTP_SOCKET_LOCK(so, 1); 4861 SCTP_TCB_LOCK(stcb); 4862 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4863 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4864 /* assoc was freed while we were unlocked */ 4865 SCTP_SOCKET_UNLOCK(so, 1); 4866 return; 4867 } 4868 #endif 4869 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4870 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4871 SCTP_SOCKET_UNLOCK(so, 1); 4872 #endif 4873 } else { 4874 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4875 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK); 4876 } 4877 } 4878 4879 if (asoc->fast_retran_loss_recovery && accum_moved) { 4880 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) { 4881 /* Setup so we will exit RFC2582 fast recovery */ 4882 will_exit_fast_recovery = 1; 4883 } 4884 } 4885 /* 4886 * Check for revoked fragments: 4887 * 4888 * if Previous sack - Had no frags then we can't have any revoked if 4889 * Previous sack - Had frag's then - If we now have frags aka 4890 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked 4891 * some of them. else - The peer revoked all ACKED fragments, since 4892 * we had some before and now we have NONE. 4893 */ 4894 4895 if (num_seg) { 4896 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked); 4897 asoc->saw_sack_with_frags = 1; 4898 } else if (asoc->saw_sack_with_frags) { 4899 int cnt_revoked = 0; 4900 4901 /* Peer revoked all dg's marked or acked */ 4902 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4903 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 4904 tp1->sent = SCTP_DATAGRAM_SENT; 4905 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4906 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 4907 tp1->whoTo->flight_size, 4908 tp1->book_size, 4909 (uint32_t)(uintptr_t)tp1->whoTo, 4910 tp1->rec.data.tsn); 4911 } 4912 sctp_flight_size_increase(tp1); 4913 sctp_total_flight_increase(stcb, tp1); 4914 tp1->rec.data.chunk_was_revoked = 1; 4915 /* 4916 * To ensure that this increase in 4917 * flightsize, which is artificial, does not 4918 * throttle the sender, we also increase the 4919 * cwnd artificially. 4920 */ 4921 tp1->whoTo->cwnd += tp1->book_size; 4922 cnt_revoked++; 4923 } 4924 } 4925 if (cnt_revoked) { 4926 reneged_all = 1; 4927 } 4928 asoc->saw_sack_with_frags = 0; 4929 } 4930 if (num_nr_seg > 0) 4931 asoc->saw_sack_with_nr_frags = 1; 4932 else 4933 asoc->saw_sack_with_nr_frags = 0; 4934 4935 /* JRS - Use the congestion control given in the CC module */ 4936 if (ecne_seen == 0) { 4937 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4938 if (net->net_ack2 > 0) { 4939 /* 4940 * Karn's rule applies to clearing error 4941 * count, this is optional. 
4942 */ 4943 net->error_count = 0; 4944 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { 4945 /* addr came good */ 4946 net->dest_state |= SCTP_ADDR_REACHABLE; 4947 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 4948 0, (void *)net, SCTP_SO_NOT_LOCKED); 4949 } 4950 if (net == stcb->asoc.primary_destination) { 4951 if (stcb->asoc.alternate) { 4952 /* 4953 * release the alternate, 4954 * primary is good 4955 */ 4956 sctp_free_remote_addr(stcb->asoc.alternate); 4957 stcb->asoc.alternate = NULL; 4958 } 4959 } 4960 if (net->dest_state & SCTP_ADDR_PF) { 4961 net->dest_state &= ~SCTP_ADDR_PF; 4962 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, 4963 stcb->sctp_ep, stcb, net, 4964 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29); 4965 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 4966 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 4967 /* Done with this net */ 4968 net->net_ack = 0; 4969 } 4970 /* restore any doubled timers */ 4971 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 4972 if (net->RTO < stcb->asoc.minrto) { 4973 net->RTO = stcb->asoc.minrto; 4974 } 4975 if (net->RTO > stcb->asoc.maxrto) { 4976 net->RTO = stcb->asoc.maxrto; 4977 } 4978 } 4979 } 4980 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery); 4981 } 4982 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4983 /* nothing left in-flight */ 4984 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4985 /* stop all timers */ 4986 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4987 stcb, net, 4988 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30); 4989 net->flight_size = 0; 4990 net->partial_bytes_acked = 0; 4991 } 4992 asoc->total_flight = 0; 4993 asoc->total_flight_count = 0; 4994 } 4995 /**********************************/ 4996 /* Now what about shutdown issues */ 4997 /**********************************/ 4998 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4999 /* nothing left on sendqueue.. 
consider done */ 5000 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 5001 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 5002 asoc->peers_rwnd, 0, 0, a_rwnd); 5003 } 5004 asoc->peers_rwnd = a_rwnd; 5005 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 5006 /* SWS sender side engages */ 5007 asoc->peers_rwnd = 0; 5008 } 5009 /* clean up */ 5010 if ((asoc->stream_queue_cnt == 1) && 5011 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 5012 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 5013 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) { 5014 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 5015 } 5016 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 5017 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 5018 (asoc->stream_queue_cnt == 1) && 5019 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 5020 struct mbuf *op_err; 5021 5022 *abort_now = 1; 5023 /* XXX */ 5024 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 5025 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; 5026 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 5027 return; 5028 } 5029 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 5030 (asoc->stream_queue_cnt == 0)) { 5031 struct sctp_nets *netp; 5032 5033 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 5034 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 5035 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 5036 } 5037 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 5038 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 5039 sctp_stop_timers_for_shutdown(stcb); 5040 if (asoc->alternate) { 5041 netp = asoc->alternate; 5042 } else { 5043 netp = asoc->primary_destination; 5044 } 5045 sctp_send_shutdown(stcb, netp); 5046 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 5047 stcb->sctp_ep, stcb, netp); 5048 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 5049 stcb->sctp_ep, stcb, netp); 5050 return; 5051 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 5052 (asoc->stream_queue_cnt == 0)) { 5053 struct sctp_nets *netp; 5054 5055 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 5056 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); 5057 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 5058 sctp_stop_timers_for_shutdown(stcb); 5059 if (asoc->alternate) { 5060 netp = asoc->alternate; 5061 } else { 5062 netp = asoc->primary_destination; 5063 } 5064 sctp_send_shutdown_ack(stcb, netp); 5065 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 5066 stcb->sctp_ep, stcb, netp); 5067 return; 5068 } 5069 } 5070 /* 5071 * Now here we are going to recycle net_ack for a different use... 5072 * HEADS UP. 5073 */ 5074 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5075 net->net_ack = 0; 5076 } 5077 5078 /* 5079 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking 5080 * to be done. Setting this_sack_lowest_newack to the cum_ack will 5081 * automatically ensure that. 5082 */ 5083 if ((asoc->sctp_cmt_on_off > 0) && 5084 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && 5085 (cmt_dac_flag == 0)) { 5086 this_sack_lowest_newack = cum_ack; 5087 } 5088 if ((num_seg > 0) || (num_nr_seg > 0)) { 5089 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, 5090 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved); 5091 } 5092 /* JRS - Use the congestion control given in the CC module */ 5093 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc); 5094 5095 /* Now are we exiting loss recovery ? 
*/ 5096 if (will_exit_fast_recovery) { 5097 /* Ok, we must exit fast recovery */ 5098 asoc->fast_retran_loss_recovery = 0; 5099 } 5100 if ((asoc->sat_t3_loss_recovery) && 5101 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) { 5102 /* end satellite t3 loss recovery */ 5103 asoc->sat_t3_loss_recovery = 0; 5104 } 5105 /* 5106 * CMT Fast recovery 5107 */ 5108 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5109 if (net->will_exit_fast_recovery) { 5110 /* Ok, we must exit fast recovery */ 5111 net->fast_retran_loss_recovery = 0; 5112 } 5113 } 5114 5115 /* Adjust and set the new rwnd value */ 5116 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 5117 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 5118 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd); 5119 } 5120 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd, 5121 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 5122 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 5123 /* SWS sender side engages */ 5124 asoc->peers_rwnd = 0; 5125 } 5126 if (asoc->peers_rwnd > old_rwnd) { 5127 win_probe_recovery = 1; 5128 } 5129 /* 5130 * Now we must set things up so we have a timer up for anyone with 5131 * outstanding data. 5132 */ 5133 done_once = 0; 5134 again: 5135 j = 0; 5136 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5137 if (win_probe_recovery && (net->window_probe)) { 5138 win_probe_recovered = 1; 5139 /*- 5140 * Find the first chunk that was used with 5141 * window probe and clear the event. Put 5142 * it back into the send queue as if it had 5143 * not been sent. 5144 */ 5145 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5146 if (tp1->window_probe) { 5147 sctp_window_probe_recovery(stcb, asoc, tp1); 5148 break; 5149 } 5150 } 5151 } 5152 if (net->flight_size) { 5153 j++; 5154 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5155 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5156 stcb->sctp_ep, stcb, net); 5157 } 5158 if (net->window_probe) { 5159 net->window_probe = 0; 5160 } 5161 } else { 5162 if (net->window_probe) { 5163 /* 5164 * In window probes we must assure a timer 5165 * is still running there 5166 */ 5167 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5168 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5169 stcb->sctp_ep, stcb, net); 5170 5171 } 5172 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5173 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 5174 stcb, net, 5175 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32); 5176 } 5177 } 5178 } 5179 if ((j == 0) && 5180 (!TAILQ_EMPTY(&asoc->sent_queue)) && 5181 (asoc->sent_queue_retran_cnt == 0) && 5182 (win_probe_recovered == 0) && 5183 (done_once == 0)) { 5184 /* 5185 * Huh, this should not happen unless all packets are 5186 * PR-SCTP and marked to be skipped, of course.
5187 */ 5188 if (sctp_fs_audit(asoc)) { 5189 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5190 net->flight_size = 0; 5191 } 5192 asoc->total_flight = 0; 5193 asoc->total_flight_count = 0; 5194 asoc->sent_queue_retran_cnt = 0; 5195 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5196 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5197 sctp_flight_size_increase(tp1); 5198 sctp_total_flight_increase(stcb, tp1); 5199 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 5200 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 5201 } 5202 } 5203 } 5204 done_once = 1; 5205 goto again; 5206 } 5207 /*********************************************/ 5208 /* Here we perform PR-SCTP procedures */ 5209 /* (section 4.2) */ 5210 /*********************************************/ 5211 /* C1. update advancedPeerAckPoint */ 5212 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) { 5213 asoc->advanced_peer_ack_point = cum_ack; 5214 } 5215 /* C2. try to further move advancedPeerAckPoint ahead */ 5216 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { 5217 struct sctp_tmit_chunk *lchk; 5218 uint32_t old_adv_peer_ack_point; 5219 5220 old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 5221 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 5222 /* C3. See if we need to send a Fwd-TSN */ 5223 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) { 5224 /* 5225 * ISSUE with ECN, see FWD-TSN processing. 5226 */ 5227 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 5228 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 5229 0xee, cum_ack, asoc->advanced_peer_ack_point, 5230 old_adv_peer_ack_point); 5231 } 5232 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 5233 send_forward_tsn(stcb, asoc); 5234 } else if (lchk) { 5235 /* try to FR fwd-tsn's that get lost too */ 5236 if (lchk->rec.data.fwd_tsn_cnt >= 3) { 5237 send_forward_tsn(stcb, asoc); 5238 } 5239 } 5240 } 5241 if (lchk) { 5242 /* Assure a timer is up */ 5243 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5244 stcb->sctp_ep, stcb, lchk->whoTo); 5245 } 5246 } 5247 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 5248 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 5249 a_rwnd, 5250 stcb->asoc.peers_rwnd, 5251 stcb->asoc.total_flight, 5252 stcb->asoc.total_output_queue_size); 5253 } 5254 } 5255 5256 void 5257 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag) 5258 { 5259 /* Copy cum-ack */ 5260 uint32_t cum_ack, a_rwnd; 5261 5262 cum_ack = ntohl(cp->cumulative_tsn_ack); 5263 /* Arrange so a_rwnd does NOT change */ 5264 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight; 5265 5266 /* Now call the express sack handling */ 5267 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0); 5268 } 5269 5270 static void 5271 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, 5272 struct sctp_stream_in *strmin) 5273 { 5274 struct sctp_queued_to_read *control, *ncontrol; 5275 struct sctp_association *asoc; 5276 uint32_t mid; 5277 int need_reasm_check = 0; 5278 5279 asoc = &stcb->asoc; 5280 mid = strmin->last_mid_delivered; 5281 /* 5282 * First deliver anything prior to and including the stream no that 5283 * came in. 
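 * (The "stream no" here is really the message ID: e.g., if a FORWARD-TSN moved last_mid_delivered up to 7, every complete queued message with MID <= 7 becomes deliverable below, while fragmented ones go through the reassembly check instead. The MID value is illustrative.)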
*/ 5285 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) { 5286 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) { 5287 /* this is deliverable now */ 5288 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 5289 if (control->on_strm_q) { 5290 if (control->on_strm_q == SCTP_ON_ORDERED) { 5291 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm); 5292 } else if (control->on_strm_q == SCTP_ON_UNORDERED) { 5293 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm); 5294 #ifdef INVARIANTS 5295 } else { 5296 panic("strmin: %p ctl: %p unknown %d", 5297 strmin, control, control->on_strm_q); 5298 #endif 5299 } 5300 control->on_strm_q = 0; 5301 } 5302 /* subtract pending on streams */ 5303 if (asoc->size_on_all_streams >= control->length) { 5304 asoc->size_on_all_streams -= control->length; 5305 } else { 5306 #ifdef INVARIANTS 5307 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 5308 #else 5309 asoc->size_on_all_streams = 0; 5310 #endif 5311 } 5312 sctp_ucount_decr(asoc->cnt_on_all_streams); 5313 /* deliver it to at least the delivery-q */ 5314 if (stcb->sctp_socket) { 5315 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 5316 sctp_add_to_readq(stcb->sctp_ep, stcb, 5317 control, 5318 &stcb->sctp_socket->so_rcv, 5319 1, SCTP_READ_LOCK_HELD, 5320 SCTP_SO_NOT_LOCKED); 5321 } 5322 } else { 5323 /* It's a fragmented message */ 5324 if (control->first_frag_seen) { 5325 /* 5326 * Make it so this is next to 5327 * deliver; we restore later 5328 */ 5329 strmin->last_mid_delivered = control->mid - 1; 5330 need_reasm_check = 1; 5331 break; 5332 } 5333 } 5334 } else { 5335 /* no more delivery now. */ 5336 break; 5337 } 5338 } 5339 if (need_reasm_check) { 5340 int ret; 5341 5342 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD); 5343 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) { 5344 /* Restore the next to deliver unless we are ahead */ 5345 strmin->last_mid_delivered = mid; 5346 } 5347 if (ret == 0) { 5348 /* Left the front partial one on */ 5349 return; 5350 } 5351 need_reasm_check = 0; 5352 } 5353 /* 5354 * Now we must deliver things in the queue the normal way, if any are 5355 * now ready.
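 * For example (hypothetical MIDs): with last_mid_delivered == 5 and complete messages queued with MIDs 6, 7 and 9, the loop below delivers 6 and 7, then stops at the hole in front of 9.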
	 */
	mid = strmin->last_mid_delivered + 1;
	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/* This one is deliverable now. */
				if (control->on_strm_q) {
					if (control->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
					} else {
						panic("strmin: %p ctl: %p unknown %d",
						    strmin, control, control->on_strm_q);
#endif
					}
					control->on_strm_q = 0;
				}
				/* Subtract what is pending on the streams. */
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				/* Deliver it to at least the delivery-q. */
				strmin->last_mid_delivered = control->mid;
				if (stcb->sctp_socket) {
					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    control,
					    &stcb->sctp_socket->so_rcv, 1,
					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
				}
				mid = strmin->last_mid_delivered + 1;
			} else {
				/* It's a fragmented message. */
				if (control->first_frag_seen) {
					/*
					 * Make this the next one to
					 * deliver.
					 */
					strmin->last_mid_delivered = control->mid - 1;
					need_reasm_check = 1;
					break;
				}
			}
		} else {
			break;
		}
	}
	if (need_reasm_check) {
		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
	}
}

static void
sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
{
	struct sctp_queued_to_read *control;
	struct sctp_stream_in *strm;
	struct sctp_tmit_chunk *chk, *nchk;
	int cnt_removed = 0;

	/*
	 * For now, large messages held on the stream reassembly queue that
	 * are complete get tossed as well. In theory we could do more
	 * work: spin through, stop after dumping one message (i.e., on
	 * seeing the start of a new message at the head), and call the
	 * delivery function to check whether it can be delivered. For now
	 * we simply dump everything on the queue.
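	 *
	 * Locking note: both call sites in sctp_handle_forward_tsn() take
	 * SCTP_INP_READ_LOCK() before calling us, which is why the
	 * delivery helpers below are invoked with SCTP_READ_LOCK_HELD.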
	 */
	strm = &asoc->strmin[stream];
	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
	if (control == NULL) {
		/* Not found. */
		return;
	}
	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
		return;
	}
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		/* Purge hanging chunks. */
		if (!asoc->idata_supported && (ordered == 0)) {
			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
				break;
			}
		}
		cnt_removed++;
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (asoc->size_on_reasm_queue >= chk->send_size) {
			asoc->size_on_reasm_queue -= chk->send_size;
		} else {
#ifdef INVARIANTS
			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
#else
			asoc->size_on_reasm_queue = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	if (!TAILQ_EMPTY(&control->reasm)) {
		/* This has to be old data, unordered. */
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
		chk = TAILQ_FIRST(&control->reasm);
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc,
			    chk, SCTP_READ_LOCK_HELD);
		}
		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
		return;
	}
	if (control->on_strm_q == SCTP_ON_ORDERED) {
		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		control->on_strm_q = 0;
	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
		control->on_strm_q = 0;
#ifdef INVARIANTS
	} else if (control->on_strm_q) {
		panic("strm: %p ctl: %p unknown %d",
		    strm, control, control->on_strm_q);
#endif
	}
	control->on_strm_q = 0;
	if (control->on_read_q == 0) {
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_free_a_readq(stcb, control);
	}
}

void
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
    struct sctp_forward_tsn_chunk *fwd,
    int *abort_flag, struct mbuf *m, int offset)
{
	/*
	 * The PR-SCTP FORWARD-TSN. Here we perform all the data receiver
	 * side steps for processing a FwdTSN, as required by the PR-SCTP
	 * specification.
	 *
	 * Assume we get FwdTSN(x):
	 *
	 * 1) update the local cumTSN to x,
	 * 2) try to further advance the cumTSN to x plus others we have,
	 * 3) examine and update the re-ordering queue on the
	 *    pr-in-streams,
	 * 4) clean up the re-assembly queue, and
	 * 5) send a SACK to report where we are.
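	 *
	 * A sketch of the chunk layout parsed below, as implied by the
	 * structures this routine uses (the authoritative definitions live
	 * in sctp_header.h):
	 *
	 *	struct sctp_forward_tsn_chunk {
	 *		struct sctp_chunkhdr ch;
	 *		uint32_t new_cumulative_tsn;
	 *	};
	 *
	 * followed by one entry per skipped stream, either
	 *
	 *	struct sctp_strseq     { uint16_t sid; uint16_t ssn; }
	 *
	 * or, when I-DATA is in use (I-FORWARD-TSN),
	 *
	 *	struct sctp_strseq_mid { uint16_t sid; uint16_t flags;
	 *	                         uint32_t mid; };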
	 */
	struct sctp_association *asoc;
	uint32_t new_cum_tsn, gap;
	unsigned int i, fwd_sz, m_size;
	uint32_t str_seq;
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *control, *sv;

	asoc = &stcb->asoc;
	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
		SCTPDBG(SCTP_DEBUG_INDATA1,
		    "Bad size too small/big fwd-tsn\n");
		return;
	}
	m_size = (stcb->asoc.mapping_array_size << 3);
	/*************************************************************/
	/* 1. Here we update the local cumTSN and shift the bitmap   */
	/*    array.                                                 */
	/*************************************************************/
	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);

	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
		/* Already got there ... */
		return;
	}
	/*
	 * Now we know the new TSN is more advanced; let's find the actual
	 * gap.
	 */
	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
	asoc->cumulative_tsn = new_cum_tsn;
	if (gap >= m_size) {
		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
			struct mbuf *op_err;
			char msg[SCTP_DIAG_INFO_LEN];

			/*
			 * Out of range (even for single byte chunks within
			 * the rwnd we give out); this must be an attacker.
			 */
			*abort_flag = 1;
			snprintf(msg, sizeof(msg),
			    "New cum ack %8.8x too high, highest TSN %8.8x",
			    new_cum_tsn, asoc->highest_tsn_inside_map);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			return;
		}
		SCTP_STAT_INCR(sctps_fwdtsn_map_over);

		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
		asoc->highest_tsn_inside_map = new_cum_tsn;

		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->highest_tsn_inside_nr_map = new_cum_tsn;

		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
	} else {
		SCTP_TCB_LOCK_ASSERT(stcb);
		for (i = 0; i <= gap; i++) {
			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
				}
			}
		}
	}
	/*************************************************************/
	/* 2. Clean up the re-assembly queue.                        */
	/*************************************************************/

	/* This is now done as part of clearing up the stream/seq. */
	if (asoc->idata_supported == 0) {
		uint16_t sid;

		/* Flush all the un-ordered data based on the cum-tsn. */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		for (sid = 0; sid < asoc->streamincnt; sid++) {
			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
		}
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	/*******************************************************/
	/* 3. Update the PR-stream re-ordering queues and fix  */
	/*    delivery issues as needed.                       */
	/*******************************************************/
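	/*
	 * Each reported stream entry is fixed size, so the entry count
	 * computed below is simply the remaining chunk length divided by
	 * the entry size. As a hypothetical example, a 24-byte
	 * I-FORWARD-TSN chunk leaves 16 bytes after the 8-byte
	 * header-plus-cumTSN, i.e. two sid/flags/mid entries.
	 */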
	fwd_sz -= sizeof(*fwd);
	if (m && fwd_sz) {
		/* New method. */
		unsigned int num_str;
		uint32_t mid, cur_mid;
		uint16_t sid;
		uint16_t ordered, flags;
		struct sctp_strseq *stseq, strseqbuf;
		struct sctp_strseq_mid *stseq_m, strseqbuf_m;

		offset += sizeof(*fwd);

		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		if (asoc->idata_supported) {
			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
		} else {
			num_str = fwd_sz / sizeof(struct sctp_strseq);
		}
		for (i = 0; i < num_str; i++) {
			if (asoc->idata_supported) {
				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
				    sizeof(struct sctp_strseq_mid),
				    (uint8_t *)&strseqbuf_m);
				offset += sizeof(struct sctp_strseq_mid);
				if (stseq_m == NULL) {
					break;
				}
				sid = ntohs(stseq_m->sid);
				mid = ntohl(stseq_m->mid);
				flags = ntohs(stseq_m->flags);
				if (flags & PR_SCTP_UNORDERED_FLAG) {
					ordered = 0;
				} else {
					ordered = 1;
				}
			} else {
				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
				    sizeof(struct sctp_strseq),
				    (uint8_t *)&strseqbuf);
				offset += sizeof(struct sctp_strseq);
				if (stseq == NULL) {
					break;
				}
				sid = ntohs(stseq->sid);
				mid = (uint32_t)ntohs(stseq->ssn);
				ordered = 1;
			}
			/* Now process this entry. */

			/*
			 * We now look for the stream/mid on the read queue,
			 * where it is not fully delivered. If we find it,
			 * we transmute the read entry into a PDI_ABORTED.
			 */
			if (sid >= asoc->streamincnt) {
				/* Screwed up streams, stop! */
				break;
			}
			if ((asoc->str_of_pdapi == sid) &&
			    (asoc->ssn_of_pdapi == mid)) {
				/*
				 * If this is the one we were partially
				 * delivering now, then we no longer are.
				 * Note that this will change with the
				 * reassembly re-write.
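				 * (Dropping out of PD-API mode matters
				 * because the message being handed to the
				 * user piecemeal has been abandoned by the
				 * peer; the matching read-queue entry is
				 * marked PDI_ABORTED just below.)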
				 */
				asoc->fragmented_delivery_inprogress = 0;
			}
			strm = &asoc->strmin[sid];
			for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
				sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
			}
			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
				if ((control->sinfo_stream == sid) &&
				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
					str_seq = (sid << 16) | (0x0000ffff & mid);
					control->pdapi_aborted = 1;
					sv = stcb->asoc.control_pdapi;
					control->end_added = 1;
					if (control->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
						if (asoc->size_on_all_streams >= control->length) {
							asoc->size_on_all_streams -= control->length;
						} else {
#ifdef INVARIANTS
							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
							asoc->size_on_all_streams = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_all_streams);
					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
					} else if (control->on_strm_q) {
						panic("strm: %p ctl: %p unknown %d",
						    strm, control, control->on_strm_q);
#endif
					}
					control->on_strm_q = 0;
					stcb->asoc.control_pdapi = control;
					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
					    stcb,
					    SCTP_PARTIAL_DELIVERY_ABORTED,
					    (void *)&str_seq,
					    SCTP_SO_NOT_LOCKED);
					stcb->asoc.control_pdapi = sv;
					break;
				} else if ((control->sinfo_stream == sid) &&
				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
					/* We are past our victim SSN. */
					break;
				}
			}
			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
				/* Update the sequence number. */
				strm->last_mid_delivered = mid;
			}
			/* Now kick the stream the new way. */
			/* sa_ignore NO_NULL_CHK */
			sctp_kick_prsctp_reorder_queue(stcb, strm);
		}
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	/*
	 * Now slide things forward.
	 */
	sctp_slide_mapping_arrays(stcb);
}