/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_lock_bsd.h>

/*
 * NOTES: On the outbound side of things I need to check the SACK timer to
 * see if I should generate a SACK into the chunk queue (if I have data to
 * send, that is, since it will be sent along with the data for bundling).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from.  This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */
static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock);

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}
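/*
 * Illustrative example of the calculation below (hypothetical numbers, not
 * from the original source): with SCTP_SB_LIMIT_RCV() == 64000 and nothing
 * buffered anywhere, the full window of 64000 bytes (or SCTP_MINIMAL_RWND,
 * if that is larger) is granted.  With 10000 bytes pending in 5 chunks on
 * the reassembly queue, the available socket-buffer space is first reduced
 * by 10000 + 5 * MSIZE for the data and per-mbuf overhead, and then by
 * my_rwnd_control_len for control overhead, before being advertised.
 */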
/* Calculate what the rwnd would be. */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    uint32_t calc = 0;

    /*
     * This is really set wrong with respect to a 1-to-many socket, since
     * the sb_cc is the count that everyone has put up.  When we rewrite
     * sctp_soreceive we will fix this so that ONLY this association's
     * data is taken into account.
     */
    if (stcb->sctp_socket == NULL) {
        return (calc);
    }

    KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
        ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
    KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
        ("size_on_all_streams is %u", asoc->size_on_all_streams));
    if (stcb->asoc.sb_cc == 0 &&
        asoc->cnt_on_reasm_queue == 0 &&
        asoc->cnt_on_all_streams == 0) {
        /* Full rwnd granted */
        calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
        return (calc);
    }
    /* get actual space */
    calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
    /*
     * Take out what has NOT been put on the socket queue and that we
     * still hold for putting up.
     */
    calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
        asoc->cnt_on_reasm_queue * MSIZE));
    calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
        asoc->cnt_on_all_streams * MSIZE));
    if (calc == 0) {
        /* out of space */
        return (calc);
    }

    /* What is the overhead of all these rwnds? */
    calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
    /*
     * If the window gets too small due to ctrl-stuff, reduce it to 1,
     * even if it is 0.  SWS avoidance is engaged.
     */
    if (calc < stcb->asoc.my_rwnd_control_len) {
        calc = 1;
    }
    return (calc);
}

/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t sid,
    uint32_t mid, uint8_t flags,
    struct mbuf *dm)
{
    struct sctp_queued_to_read *read_queue_e = NULL;

    sctp_alloc_a_readq(stcb, read_queue_e);
    if (read_queue_e == NULL) {
        goto failed_build;
    }
    memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
    read_queue_e->sinfo_stream = sid;
    read_queue_e->sinfo_flags = (flags << 8);
    read_queue_e->sinfo_ppid = ppid;
    read_queue_e->sinfo_context = context;
    read_queue_e->sinfo_tsn = tsn;
    read_queue_e->sinfo_cumtsn = tsn;
    read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
    read_queue_e->mid = mid;
    read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
    TAILQ_INIT(&read_queue_e->reasm);
    read_queue_e->whoFrom = net;
    atomic_add_int(&net->ref_count, 1);
    read_queue_e->data = dm;
    read_queue_e->stcb = stcb;
    read_queue_e->port_from = stcb->rport;
    if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
        read_queue_e->do_not_ref_stcb = 1;
    }
failed_build:
    return (read_queue_e);
}
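/*
 * Note on the (flags << 8) convention above: the DATA chunk flags are kept
 * in the upper byte of sinfo_flags, so later code recovers them with
 * (control->sinfo_flags >> 8).  For example (illustrative only), a chunk
 * received with SCTP_DATA_UNORDERED set is recognized further down via
 * ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED).
 */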
struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
    struct sctp_extrcvinfo *seinfo;
    struct sctp_sndrcvinfo *outinfo;
    struct sctp_rcvinfo *rcvinfo;
    struct sctp_nxtinfo *nxtinfo;
    struct cmsghdr *cmh;
    struct mbuf *ret;
    int len;
    int use_extended;
    int provide_nxt;

    if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
        sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
        sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
        /* user does not want any ancillary data */
        return (NULL);
    }

    len = 0;
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
        len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
    }
    seinfo = (struct sctp_extrcvinfo *)sinfo;
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
        (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
        provide_nxt = 1;
        len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
    } else {
        provide_nxt = 0;
    }
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
        if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
            use_extended = 1;
            len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
        } else {
            use_extended = 0;
            len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
        }
    } else {
        use_extended = 0;
    }

    ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
    if (ret == NULL) {
        /* No space */
        return (ret);
    }
    SCTP_BUF_LEN(ret) = 0;

    /* We need a CMSG header followed by the struct */
    cmh = mtod(ret, struct cmsghdr *);
    /*
     * Make sure that there is no un-initialized padding between the
     * cmsg header and cmsg data and after the cmsg data.
     */
    memset(cmh, 0, len);
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
        cmh->cmsg_level = IPPROTO_SCTP;
        cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
        cmh->cmsg_type = SCTP_RCVINFO;
        rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
        rcvinfo->rcv_sid = sinfo->sinfo_stream;
        rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
        rcvinfo->rcv_flags = sinfo->sinfo_flags;
        rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
        rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
        rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
        rcvinfo->rcv_context = sinfo->sinfo_context;
        rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
        cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
        SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
    }
    if (provide_nxt) {
        cmh->cmsg_level = IPPROTO_SCTP;
        cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
        cmh->cmsg_type = SCTP_NXTINFO;
        nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
        nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
        nxtinfo->nxt_flags = 0;
        if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
            nxtinfo->nxt_flags |= SCTP_UNORDERED;
        }
        if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
            nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
        }
        if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
            nxtinfo->nxt_flags |= SCTP_COMPLETE;
        }
        nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
        nxtinfo->nxt_length = seinfo->serinfo_next_length;
        nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
        cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
        SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
    }
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
        cmh->cmsg_level = IPPROTO_SCTP;
        outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
        if (use_extended) {
            cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
            cmh->cmsg_type = SCTP_EXTRCV;
            memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
            SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
        } else {
            cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
            cmh->cmsg_type = SCTP_SNDRCV;
            *outinfo = *sinfo;
            SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
        }
    }
    return (ret);
}
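/*
 * Layout sketch for the mbuf built above (sizes are illustrative, not from
 * the original source).  If both SCTP_RCVINFO and SCTP_NXTINFO are enabled
 * and a next message is available, the single mbuf carries two back-to-back
 * cmsgs, each padded out to CMSG_SPACE():
 *
 *   [cmsghdr|sctp_rcvinfo|pad][cmsghdr|sctp_nxtinfo|pad]
 *
 * so len == CMSG_SPACE(sizeof(struct sctp_rcvinfo)) +
 *           CMSG_SPACE(sizeof(struct sctp_nxtinfo)).
 */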
static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
    uint32_t gap, i;
    int in_r, in_nr;

    if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
        return;
    }
    if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
        /*
         * This TSN is behind the cum-ack and thus we don't need to
         * worry about it being moved from one map to the other.
         */
        return;
    }
    SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
    in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
    in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
    KASSERT(in_r || in_nr, ("%s: Things are really messed up now", __func__));
    if (!in_nr) {
        SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
        if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
            asoc->highest_tsn_inside_nr_map = tsn;
        }
    }
    if (in_r) {
        SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
        if (tsn == asoc->highest_tsn_inside_map) {
            /* We must back down to see what the new highest is. */
            for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
                SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
                if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
                    asoc->highest_tsn_inside_map = i;
                    break;
                }
            }
            if (!SCTP_TSN_GE(i, asoc->mapping_array_base_tsn)) {
                asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
            }
        }
    }
}
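/*
 * Worked example for the gap computation used above and below (numbers are
 * illustrative): with mapping_array_base_tsn == 1000, a chunk with TSN 1007
 * yields gap == 7, i.e. bit 7 in the mapping arrays.  Because TSNs are
 * serial numbers, SCTP_CALC_TSN_TO_GAP() is effectively
 * (tsn - base) modulo 2^32, so the math also holds across a TSN wrap.
 */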
static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
    struct sctp_queued_to_read *at;
    struct sctp_readhead *q;
    uint8_t flags, unordered;

    flags = (control->sinfo_flags >> 8);
    unordered = flags & SCTP_DATA_UNORDERED;
    if (unordered) {
        q = &strm->uno_inqueue;
        if (asoc->idata_supported == 0) {
            if (!TAILQ_EMPTY(q)) {
                /*
                 * Only one message can be here in old style
                 * -- abort.
                 */
                return (-1);
            }
            TAILQ_INSERT_TAIL(q, control, next_instrm);
            control->on_strm_q = SCTP_ON_UNORDERED;
            return (0);
        }
    } else {
        q = &strm->inqueue;
    }
    if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
        control->end_added = 1;
        control->first_frag_seen = 1;
        control->last_frag_seen = 1;
    }
    if (TAILQ_EMPTY(q)) {
        /* Empty queue */
        TAILQ_INSERT_HEAD(q, control, next_instrm);
        if (unordered) {
            control->on_strm_q = SCTP_ON_UNORDERED;
        } else {
            control->on_strm_q = SCTP_ON_ORDERED;
        }
        return (0);
    } else {
        TAILQ_FOREACH(at, q, next_instrm) {
            if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
                /*
                 * The one in the queue is bigger than the
                 * new one; insert before this one.
                 */
                TAILQ_INSERT_BEFORE(at, control, next_instrm);
                if (unordered) {
                    control->on_strm_q = SCTP_ON_UNORDERED;
                } else {
                    control->on_strm_q = SCTP_ON_ORDERED;
                }
                break;
            } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
                /*
                 * Gak, he sent me a duplicate message id.
                 * Return -1 to abort.
                 */
                return (-1);
            } else {
                if (TAILQ_NEXT(at, next_instrm) == NULL) {
                    /*
                     * We are at the end; insert it
                     * after this one.
                     */
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
                        sctp_log_strm_del(control, at,
                            SCTP_STR_LOG_FROM_INSERT_TL);
                    }
                    TAILQ_INSERT_AFTER(q, at, control, next_instrm);
                    if (unordered) {
                        control->on_strm_q = SCTP_ON_UNORDERED;
                    } else {
                        control->on_strm_q = SCTP_ON_ORDERED;
                    }
                    break;
                }
            }
        }
    }
    return (0);
}

static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
    char msg[SCTP_DIAG_INFO_LEN];
    struct mbuf *oper;

    if (stcb->asoc.idata_supported) {
        SCTP_SNPRINTF(msg, sizeof(msg),
            "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
            opspot,
            control->fsn_included,
            chk->rec.data.tsn,
            chk->rec.data.sid,
            chk->rec.data.fsn, chk->rec.data.mid);
    } else {
        SCTP_SNPRINTF(msg, sizeof(msg),
            "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
            opspot,
            control->fsn_included,
            chk->rec.data.tsn,
            chk->rec.data.sid,
            chk->rec.data.fsn,
            (uint16_t)chk->rec.data.mid);
    }
    oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
    sctp_m_freem(chk->data);
    chk->data = NULL;
    sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
    sctp_abort_an_association(stcb->sctp_ep, stcb, oper, false, SCTP_SO_NOT_LOCKED);
    *abort_flag = 1;
}

static void
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
    /*
     * The control could not be placed and must be cleaned.
     */
    struct sctp_tmit_chunk *chk, *nchk;

    TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
        TAILQ_REMOVE(&control->reasm, chk, sctp_next);
        if (chk->data)
            sctp_m_freem(chk->data);
        chk->data = NULL;
        sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
    }
    sctp_free_remote_addr(control->whoFrom);
    if (control->data) {
        sctp_m_freem(control->data);
        control->data = NULL;
    }
    sctp_free_a_readq(stcb, control);
}
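/*
 * A note on the SCTP_MID_GT()/SCTP_MID_EQ() comparisons used when placing
 * controls: when I-DATA is supported the message id is a full 32-bit serial
 * number, otherwise only the lower 16 bits (the old SSN) are significant.
 * Illustrative example: with idata_supported == 0, MIDs 0x0001fffe and
 * 0x0000fffe compare equal, since only the 16-bit SSN part is compared.
 */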
/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go, OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order, as long as
 * the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
    /*
     * FIX-ME maybe?  What happens when the ssn wraps?  If we are getting
     * all the data in one stream this could happen quite rapidly.  One
     * could use the TSN to keep track of things, but this scheme breaks
     * down in the other type of stream usage that could occur.  Send a
     * single msg to stream 0, send 4 billion messages to stream 1, now
     * send a message to stream 0.  You have a situation where the TSN
     * has wrapped but not in the stream.  Is this worth worrying about,
     * or should we just change our queue sort at the bottom to be by
     * TSN?
     *
     * Could it also be legal for a peer to send ssn 1 with TSN 2 and
     * ssn 2 with TSN 1?  If the peer is doing some sort of funky TSN/SSN
     * assignment this could happen... and I don't see how this would be
     * a violation.  So for now I am undecided and will leave the sort by
     * SSN alone.  Maybe a hybrid approach is the answer.
     */
    struct sctp_queued_to_read *at;
    int queue_needed;
    uint32_t nxt_todel;
    struct mbuf *op_err;
    struct sctp_stream_in *strm;
    char msg[SCTP_DIAG_INFO_LEN];

    strm = &asoc->strmin[control->sinfo_stream];
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
        sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
    }
    if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
        /* The incoming sseq is behind where we last delivered? */
        SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
            strm->last_mid_delivered, control->mid);
        /*
         * Throw it in the stream so it gets cleaned up in
         * association destruction.
         */
        TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
        if (asoc->idata_supported) {
            SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
                strm->last_mid_delivered, control->sinfo_tsn,
                control->sinfo_stream, control->mid);
        } else {
            SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
                (uint16_t)strm->last_mid_delivered,
                control->sinfo_tsn,
                control->sinfo_stream,
                (uint16_t)control->mid);
        }
        op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
        sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
        *abort_flag = 1;
        return;
    }
    queue_needed = 1;
    asoc->size_on_all_streams += control->length;
    sctp_ucount_incr(asoc->cnt_on_all_streams);
    nxt_todel = strm->last_mid_delivered + 1;
    if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
        /* Can it be delivered right away? */
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
            sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
        }
        /* EY: it won't be queued if it can be delivered directly. */
        queue_needed = 0;
        if (asoc->size_on_all_streams >= control->length) {
            asoc->size_on_all_streams -= control->length;
        } else {
#ifdef INVARIANTS
            panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
            asoc->size_on_all_streams = 0;
#endif
        }
        sctp_ucount_decr(asoc->cnt_on_all_streams);
        strm->last_mid_delivered++;
        sctp_mark_non_revokable(asoc, control->sinfo_tsn);
        sctp_add_to_readq(stcb->sctp_ep, stcb,
            control,
            &stcb->sctp_socket->so_rcv, 1,
            SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
        TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
            /* all delivered */
            nxt_todel = strm->last_mid_delivered + 1;
            if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
                (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
                if (control->on_strm_q == SCTP_ON_ORDERED) {
                    TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
                    if (asoc->size_on_all_streams >= control->length) {
                        asoc->size_on_all_streams -= control->length;
                    } else {
#ifdef INVARIANTS
                        panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
                        asoc->size_on_all_streams = 0;
#endif
                    }
                    sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
                } else {
                    panic("Huh control: %p is on_strm_q: %d",
                        control, control->on_strm_q);
#endif
                }
                control->on_strm_q = 0;
                strm->last_mid_delivered++;
                /*
                 * We ignore the return of deliver_data here
                 * since we always can hold the chunk on the
                 * d-queue.  And we have a finite number that
                 * can be delivered from the strq.
                 */
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
                    sctp_log_strm_del(control, NULL,
                        SCTP_STR_LOG_FROM_IMMED_DEL);
                }
                sctp_mark_non_revokable(asoc, control->sinfo_tsn);
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, 1,
                    SCTP_READ_LOCK_NOT_HELD,
                    SCTP_SO_LOCKED);
                continue;
            } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
                *need_reasm = 1;
            }
            break;
        }
    }
    if (queue_needed) {
        /*
         * Ok, we did not deliver this guy; find the correct place
         * to put it on the queue.
         */
        if (sctp_place_control_in_stream(strm, asoc, control)) {
            SCTP_SNPRINTF(msg, sizeof(msg),
                "Queue to str MID: %u duplicate", control->mid);
            sctp_clean_up_control(stcb, control);
            op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
            stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
            sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
            *abort_flag = 1;
        }
    }
}
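/*
 * Delivery example for the function above (hypothetical sequence): if
 * last_mid_delivered == 4 and an unfragmented message with MID 5 arrives,
 * it goes straight to the read queue and the loop then drains MIDs 6, 7,
 * ... from the in-queue until the next gap.  If MID 7 arrives first while
 * 5 is still missing, it is merely placed in order on the stream queue.
 */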
static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
    struct mbuf *m, *prev = NULL;
    struct sctp_tcb *stcb;

    stcb = control->stcb;
    control->held_length = 0;
    control->length = 0;
    m = control->data;
    while (m) {
        if (SCTP_BUF_LEN(m) == 0) {
            /* Skip mbufs with NO length. */
            if (prev == NULL) {
                /* First one */
                control->data = sctp_m_free(m);
                m = control->data;
            } else {
                SCTP_BUF_NEXT(prev) = sctp_m_free(m);
                m = SCTP_BUF_NEXT(prev);
            }
            if (m == NULL) {
                control->tail_mbuf = prev;
            }
            continue;
        }
        prev = m;
        atomic_add_int(&control->length, SCTP_BUF_LEN(m));
        if (control->on_read_q) {
            /*
             * On the read queue, so we must increment the SB
             * stuff; we assume the caller has taken any needed
             * SB locks.
             */
            sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
        }
        m = SCTP_BUF_NEXT(m);
    }
    if (prev) {
        control->tail_mbuf = prev;
    }
}
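/*
 * Example of the trimming above (illustrative): for a chain of mbufs with
 * lengths 100, 0, and 50, the zero-length middle mbuf is freed, length
 * becomes 150, and tail_mbuf ends up pointing at the 50-byte mbuf.
 */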
static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
{
    struct mbuf *prev = NULL;
    struct sctp_tcb *stcb;

    stcb = control->stcb;
    if (stcb == NULL) {
#ifdef INVARIANTS
        panic("Control broken");
#else
        return;
#endif
    }
    if (control->tail_mbuf == NULL) {
        /* TSNH */
        sctp_m_freem(control->data);
        control->data = m;
        sctp_setup_tail_pointer(control);
        return;
    }
    control->tail_mbuf->m_next = m;
    while (m) {
        if (SCTP_BUF_LEN(m) == 0) {
            /* Skip mbufs with NO length. */
            if (prev == NULL) {
                /* First one */
                control->tail_mbuf->m_next = sctp_m_free(m);
                m = control->tail_mbuf->m_next;
            } else {
                SCTP_BUF_NEXT(prev) = sctp_m_free(m);
                m = SCTP_BUF_NEXT(prev);
            }
            if (m == NULL) {
                control->tail_mbuf = prev;
            }
            continue;
        }
        prev = m;
        if (control->on_read_q) {
            /*
             * On the read queue, so we must increment the SB
             * stuff; we assume the caller has taken any needed
             * SB locks.
             */
            sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
        }
        *added += SCTP_BUF_LEN(m);
        atomic_add_int(&control->length, SCTP_BUF_LEN(m));
        m = SCTP_BUF_NEXT(m);
    }
    if (prev) {
        control->tail_mbuf = prev;
    }
}
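/*
 * The helper below copies the per-message bookkeeping from an existing
 * control into a freshly allocated one; it is used further down when the
 * remainder of a reassembly queue has to be moved onto a new read-queue
 * entry.
 */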
static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
    memset(nc, 0, sizeof(struct sctp_queued_to_read));
    nc->sinfo_stream = control->sinfo_stream;
    nc->mid = control->mid;
    TAILQ_INIT(&nc->reasm);
    nc->top_fsn = control->top_fsn;
    nc->sinfo_flags = control->sinfo_flags;
    nc->sinfo_ppid = control->sinfo_ppid;
    nc->sinfo_context = control->sinfo_context;
    nc->fsn_included = 0xffffffff;
    nc->sinfo_tsn = control->sinfo_tsn;
    nc->sinfo_cumtsn = control->sinfo_cumtsn;
    nc->sinfo_assoc_id = control->sinfo_assoc_id;
    nc->whoFrom = control->whoFrom;
    atomic_add_int(&nc->whoFrom->ref_count, 1);
    nc->stcb = control->stcb;
    nc->port_from = control->port_from;
    nc->do_not_ref_stcb = control->do_not_ref_stcb;
}

static void
sctp_reset_a_control(struct sctp_queued_to_read *control,
    struct sctp_inpcb *inp, uint32_t tsn)
{
    control->fsn_included = tsn;
    if (control->on_read_q) {
        /*
         * We have to purge it from there; hopefully this will work
         * :-)
         */
        TAILQ_REMOVE(&inp->read_queue, control, next);
        control->on_read_q = 0;
    }
}
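/*
 * Background example for the old (non I-DATA) unordered handling below
 * (illustrative): without I-DATA there is no FSN field in unordered DATA
 * chunks, so every fragment lands on the single mid-0 control and the TSN
 * doubles as the FSN.  Fragments with TSNs 10, 11, 12 are collapsed in
 * that order once TSN 10 (the FIRST fragment) has been seen.
 */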
static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
    /*
     * Special handling for the old un-ordered data chunk.  All the
     * chunks/TSNs go to mid 0, so we have to do the old style watching
     * to see if we have it all.  If you return one, no other control
     * entries on the un-ordered queue will be looked at.  In theory
     * there should be no other entries in reality, unless the guy is
     * sending both unordered NDATA and unordered DATA...
     */
    struct sctp_tmit_chunk *chk, *lchk, *tchk;
    uint32_t fsn;
    struct sctp_queued_to_read *nc;
    int cnt_added;

    if (control->first_frag_seen == 0) {
        /* Nothing we can do, we have not seen the first piece yet. */
        return (1);
    }
    /* Collapse any we can. */
    cnt_added = 0;
restart:
    fsn = control->fsn_included + 1;
    /* Now what can we add? */
    TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
        if (chk->rec.data.fsn == fsn) {
            /* Ok, let's add it. */
            sctp_alloc_a_readq(stcb, nc);
            if (nc == NULL) {
                break;
            }
            memset(nc, 0, sizeof(struct sctp_queued_to_read));
            TAILQ_REMOVE(&control->reasm, chk, sctp_next);
            sctp_add_chk_to_control(control, strm, stcb, asoc, chk, inp_read_lock_held);
            fsn++;
            cnt_added++;
            chk = NULL;
            if (control->end_added) {
                /* We are done. */
                if (!TAILQ_EMPTY(&control->reasm)) {
                    /*
                     * Ok, we have to move anything left
                     * on the control queue to a new
                     * control.
                     */
                    sctp_build_readq_entry_from_ctl(nc, control);
                    tchk = TAILQ_FIRST(&control->reasm);
                    if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
                        TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
                        if (asoc->size_on_reasm_queue >= tchk->send_size) {
                            asoc->size_on_reasm_queue -= tchk->send_size;
                        } else {
#ifdef INVARIANTS
                            panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
                            asoc->size_on_reasm_queue = 0;
#endif
                        }
                        sctp_ucount_decr(asoc->cnt_on_reasm_queue);
                        nc->first_frag_seen = 1;
                        nc->fsn_included = tchk->rec.data.fsn;
                        nc->data = tchk->data;
                        nc->sinfo_ppid = tchk->rec.data.ppid;
                        nc->sinfo_tsn = tchk->rec.data.tsn;
                        sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
                        tchk->data = NULL;
                        sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
                        sctp_setup_tail_pointer(nc);
                        tchk = TAILQ_FIRST(&control->reasm);
                    }
                    /* Spin the rest onto the queue. */
                    while (tchk) {
                        TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
                        TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
                        tchk = TAILQ_FIRST(&control->reasm);
                    }
                    /*
                     * Now let's add it to the queue
                     * after removing control.
                     */
                    TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
                    nc->on_strm_q = SCTP_ON_UNORDERED;
                    if (control->on_strm_q) {
                        TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
                        control->on_strm_q = 0;
                    }
                }
                if (control->pdapi_started) {
                    strm->pd_api_started = 0;
                    control->pdapi_started = 0;
                }
                if (control->on_strm_q) {
                    TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
                    control->on_strm_q = 0;
                    SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
                }
                if (control->on_read_q == 0) {
                    sctp_add_to_readq(stcb->sctp_ep, stcb, control,
                        &stcb->sctp_socket->so_rcv, control->end_added,
                        inp_read_lock_held, SCTP_SO_NOT_LOCKED);
                }
                sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
                if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
                    /*
                     * Switch to the new guy and
                     * continue.
                     */
                    control = nc;
                    goto restart;
                } else {
                    if (nc->on_strm_q == 0) {
                        sctp_free_a_readq(stcb, nc);
                    }
                }
                return (1);
            } else {
                sctp_free_a_readq(stcb, nc);
            }
        } else {
            /* Can't add more */
            break;
        }
    }
    if (cnt_added && strm->pd_api_started) {
        sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
    }
    if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
        strm->pd_api_started = 1;
        control->pdapi_started = 1;
        sctp_add_to_readq(stcb->sctp_ep, stcb, control,
            &stcb->sctp_socket->so_rcv, control->end_added,
            inp_read_lock_held, SCTP_SO_NOT_LOCKED);
        sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
        return (0);
    } else {
        return (1);
    }
}
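/*
 * The function below places an old-style unordered fragment into a control.
 * One subtle case handled there: two FIRST fragments can legitimately
 * coexist on one control (back-to-back unordered messages), and if the new
 * FIRST has the lower TSN, the mbuf chains and per-message fields are
 * swapped so the control always describes the lowest message.
 */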
static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
    struct sctp_tmit_chunk *at;
    int inserted;

    /*
     * Here we need to place the chunk into the control structure,
     * sorted in the correct order.
     */
    if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
        /* It's the very first one. */
        SCTPDBG(SCTP_DEBUG_XXX,
            "chunk is a first fsn: %u becomes fsn_included\n",
            chk->rec.data.fsn);
        at = TAILQ_FIRST(&control->reasm);
        if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
            /*
             * The first chunk in the reassembly is a smaller
             * TSN than this one; even though this has a first,
             * it must be from a subsequent msg.
             */
            goto place_chunk;
        }
        if (control->first_frag_seen) {
            /*
             * In old un-ordered we can reassemble multiple
             * messages on one control, as long as the next
             * FIRST is greater than the old first (TSN, i.e.
             * FSN, wise).
             */
            struct mbuf *tdata;
            uint32_t tmp;

            if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
                /*
                 * Easy case: the start of a new guy beyond
                 * the lowest.
                 */
                goto place_chunk;
            }
            if ((chk->rec.data.fsn == control->fsn_included) ||
                (control->pdapi_started)) {
                /*
                 * Ok, this should not happen; if it does,
                 * we started the pd-api on the higher TSN
                 * (since the equals part is a TSN failure
                 * it must be that).
                 *
                 * We are completely hosed in that case,
                 * since I have no way to recover.  This
                 * really will only happen if we can get
                 * more TSNs higher before the
                 * pd-api-point.
                 */
                sctp_abort_in_reasm(stcb, control, chk,
                    abort_flag,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);

                return;
            }
            /*
             * Ok, we have two firsts and the one we just got
             * is smaller than the one we previously placed...
             * yuck!  We must swap them out.
             */
            /* swap the mbufs */
            tdata = control->data;
            control->data = chk->data;
            chk->data = tdata;
            /* Save the lengths */
            chk->send_size = control->length;
            /* Recompute length of control and tail pointer */
            sctp_setup_tail_pointer(control);
            /* Fix the FSN included */
            tmp = control->fsn_included;
            control->fsn_included = chk->rec.data.fsn;
            chk->rec.data.fsn = tmp;
            /* Fix the TSN included */
            tmp = control->sinfo_tsn;
            control->sinfo_tsn = chk->rec.data.tsn;
            chk->rec.data.tsn = tmp;
            /* Fix the PPID included */
            tmp = control->sinfo_ppid;
            control->sinfo_ppid = chk->rec.data.ppid;
            chk->rec.data.ppid = tmp;
            /* Fix tail pointer */
            goto place_chunk;
        }
        control->first_frag_seen = 1;
        control->fsn_included = chk->rec.data.fsn;
        control->top_fsn = chk->rec.data.fsn;
        control->sinfo_tsn = chk->rec.data.tsn;
        control->sinfo_ppid = chk->rec.data.ppid;
        control->data = chk->data;
        sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
        chk->data = NULL;
        sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
        sctp_setup_tail_pointer(control);
        return;
    }
place_chunk:
    inserted = 0;
    TAILQ_FOREACH(at, &control->reasm, sctp_next) {
        if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
            /*
             * This one in the queue is bigger than the new
             * one; insert the new one before at.
             */
            asoc->size_on_reasm_queue += chk->send_size;
            sctp_ucount_incr(asoc->cnt_on_reasm_queue);
            inserted = 1;
            TAILQ_INSERT_BEFORE(at, chk, sctp_next);
            break;
        } else if (at->rec.data.fsn == chk->rec.data.fsn) {
            /*
             * They sent a duplicate fsn number.  This really
             * should not happen, since the FSN is a TSN and it
             * should have been dropped earlier.
             */
            sctp_abort_in_reasm(stcb, control, chk,
                abort_flag,
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
            return;
        }
    }
    if (inserted == 0) {
        /* It's at the end. */
        asoc->size_on_reasm_queue += chk->send_size;
        sctp_ucount_incr(asoc->cnt_on_reasm_queue);
        control->top_fsn = chk->rec.data.fsn;
        TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
    }
}
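/*
 * Partial-delivery point example for the function below (hypothetical
 * numbers): with a 64000-byte receive buffer, and supposing
 * SCTP_PARTIAL_DELIVERY_SHIFT were 2 (illustrative value), the
 * buffer-derived bound would be 64000 >> 2 == 16000; with
 * partial_delivery_point configured to 4096, the smaller value, 4096, is
 * used as pd_point.  A still-incomplete message may be pushed to the
 * reader once at least pd_point bytes of it are in sequence.
 */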
static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm, int inp_read_lock_held)
{
    /*
     * Given a stream, strm, see if any of the SSNs on it that are
     * fragmented are ready to deliver.  If so, go ahead and place them
     * on the read queue.  In so placing, if we have hit the end, then
     * we need to remove them from the stream's queue.
     */
    struct sctp_queued_to_read *control, *nctl = NULL;
    uint32_t next_to_del;
    uint32_t pd_point;
    int ret = 0;

    if (stcb->sctp_socket) {
        pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
            stcb->sctp_ep->partial_delivery_point);
    } else {
        pd_point = stcb->sctp_ep->partial_delivery_point;
    }
    control = TAILQ_FIRST(&strm->uno_inqueue);

    if ((control != NULL) &&
        (asoc->idata_supported == 0)) {
        /* Special handling needed for "old" data format. */
        if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
            goto done_un;
        }
    }
    if (strm->pd_api_started) {
        /* Can't add more */
        return (0);
    }
    while (control) {
        SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
            control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
        nctl = TAILQ_NEXT(control, next_instrm);
        if (control->end_added) {
            /* We just put the last bit on. */
            if (control->on_strm_q) {
#ifdef INVARIANTS
                if (control->on_strm_q != SCTP_ON_UNORDERED) {
                    panic("Huh control: %p on_q: %d -- not unordered?",
                        control, control->on_strm_q);
                }
#endif
                SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
                TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
                if (asoc->size_on_all_streams >= control->length) {
                    asoc->size_on_all_streams -= control->length;
                } else {
#ifdef INVARIANTS
                    panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
                    asoc->size_on_all_streams = 0;
#endif
                }
                sctp_ucount_decr(asoc->cnt_on_all_streams);
                control->on_strm_q = 0;
            }
            if (control->on_read_q == 0) {
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, control->end_added,
                    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
            }
        } else {
            /* Can we do a PD-API for this un-ordered guy? */
            if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
                strm->pd_api_started = 1;
                control->pdapi_started = 1;
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, control->end_added,
                    inp_read_lock_held, SCTP_SO_NOT_LOCKED);

                break;
            }
        }
        control = nctl;
    }
done_un:
    control = TAILQ_FIRST(&strm->inqueue);
    if (strm->pd_api_started) {
        /* Can't add more */
        return (0);
    }
    if (control == NULL) {
        return (ret);
    }
    if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
        /*
         * Ok, the guy at the top was being partially delivered and
         * has completed, so we remove it.  Note the pd_api flag was
         * taken off when the chunk was merged on in
         * sctp_queue_data_for_reasm below.
         */
        nctl = TAILQ_NEXT(control, next_instrm);
        SCTPDBG(SCTP_DEBUG_XXX,
            "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
            control, control->end_added, control->mid,
            control->top_fsn, control->fsn_included,
            strm->last_mid_delivered);
        if (control->end_added) {
            if (control->on_strm_q) {
#ifdef INVARIANTS
                if (control->on_strm_q != SCTP_ON_ORDERED) {
                    panic("Huh control: %p on_q: %d -- not ordered?",
                        control, control->on_strm_q);
                }
#endif
                SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
                TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
                if (asoc->size_on_all_streams >= control->length) {
                    asoc->size_on_all_streams -= control->length;
                } else {
#ifdef INVARIANTS
                    panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
                    asoc->size_on_all_streams = 0;
#endif
                }
                sctp_ucount_decr(asoc->cnt_on_all_streams);
                control->on_strm_q = 0;
            }
            if (strm->pd_api_started && control->pdapi_started) {
                control->pdapi_started = 0;
                strm->pd_api_started = 0;
            }
            if (control->on_read_q == 0) {
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, control->end_added,
                    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
            }
            control = nctl;
        }
    }
    if (strm->pd_api_started) {
        /*
         * Can't add more; we must have gotten an un-ordered one
         * above that is being partially delivered.
         */
        return (0);
    }
deliver_more:
    next_to_del = strm->last_mid_delivered + 1;
    if (control) {
        SCTPDBG(SCTP_DEBUG_XXX,
            "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
            control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
            next_to_del);
        nctl = TAILQ_NEXT(control, next_instrm);
        if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
            (control->first_frag_seen)) {
            int done;

            /* Ok, we can deliver it onto the stream. */
            if (control->end_added) {
                /* We are done with it afterwards. */
                if (control->on_strm_q) {
#ifdef INVARIANTS
                    if (control->on_strm_q != SCTP_ON_ORDERED) {
                        panic("Huh control: %p on_q: %d -- not ordered?",
                            control, control->on_strm_q);
                    }
#endif
                    SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
                    TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
                    if (asoc->size_on_all_streams >= control->length) {
                        asoc->size_on_all_streams -= control->length;
                    } else {
#ifdef INVARIANTS
                        panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
                        asoc->size_on_all_streams = 0;
#endif
                    }
                    sctp_ucount_decr(asoc->cnt_on_all_streams);
                    control->on_strm_q = 0;
                }
                ret++;
            }
            if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
                /*
                 * A singleton now slipping through - mark
                 * it non-revokable too.
                 */
                sctp_mark_non_revokable(asoc, control->sinfo_tsn);
            } else if (control->end_added == 0) {
                /*
                 * Check if we can defer adding until it's
                 * all there.
                 */
                if ((control->length < pd_point) || (strm->pd_api_started)) {
                    /*
                     * Don't need it, or cannot add more
                     * (one is being delivered that way).
                     */
                    goto out;
                }
            }
            done = (control->end_added) && (control->last_frag_seen);
            if (control->on_read_q == 0) {
                if (!done) {
                    if (asoc->size_on_all_streams >= control->length) {
                        asoc->size_on_all_streams -= control->length;
                    } else {
#ifdef INVARIANTS
                        panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
                        asoc->size_on_all_streams = 0;
#endif
                    }
                    strm->pd_api_started = 1;
                    control->pdapi_started = 1;
                }
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, control->end_added,
                    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
            }
            strm->last_mid_delivered = next_to_del;
            if (done) {
                control = nctl;
                goto deliver_more;
            }
        }
    }
out:
    return (ret);
}
uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock)
{
    /*
     * Given a control and a chunk, merge the data from the chk onto
     * the control and free up the chunk resources.
     */
    uint32_t added = 0;
    int i_locked = 0;

    if (control->on_read_q && (hold_rlock == 0)) {
        /*
         * It's being pd-api'd, so we must take some locks.
         */
        SCTP_INP_READ_LOCK(stcb->sctp_ep);
        i_locked = 1;
    }
    if (control->data == NULL) {
        control->data = chk->data;
        sctp_setup_tail_pointer(control);
    } else {
        sctp_add_to_tail_pointer(control, chk->data, &added);
    }
    control->fsn_included = chk->rec.data.fsn;
    asoc->size_on_reasm_queue -= chk->send_size;
    sctp_ucount_decr(asoc->cnt_on_reasm_queue);
    sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
    chk->data = NULL;
    if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
        control->first_frag_seen = 1;
        control->sinfo_tsn = chk->rec.data.tsn;
        control->sinfo_ppid = chk->rec.data.ppid;
    }
    if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
        /* It's complete. */
        if ((control->on_strm_q) && (control->on_read_q)) {
            if (control->pdapi_started) {
                control->pdapi_started = 0;
                strm->pd_api_started = 0;
            }
            if (control->on_strm_q == SCTP_ON_UNORDERED) {
                /* Unordered */
                TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
                control->on_strm_q = 0;
            } else if (control->on_strm_q == SCTP_ON_ORDERED) {
                /* Ordered */
                TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
                /*
                 * Don't need to decrement
                 * size_on_all_streams, since control is on
                 * the read queue.
                 */
                sctp_ucount_decr(asoc->cnt_on_all_streams);
                control->on_strm_q = 0;
#ifdef INVARIANTS
            } else if (control->on_strm_q) {
                panic("Unknown state on ctrl: %p on_strm_q: %d", control,
                    control->on_strm_q);
#endif
            }
        }
        control->end_added = 1;
        control->last_frag_seen = 1;
    }
    if (i_locked) {
        SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
    }
    sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
    return (added);
}
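/*
 * Reassembly example for the function below (hypothetical FSNs): suppose
 * fragments with FSN 0 (B bit set), 1, and 3 of one message have arrived.
 * FSN 0 seeds the control, FSN 1 is merged right behind it
 * (fsn_included == 1), and FSN 3 waits on the reasm list until FSN 2
 * shows up, at which point 2 and then 3 are sucked up in sequence.
 */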
/*
 * Dump onto the re-assembly queue, in its proper place.  After dumping on
 * the queue, see if anything can be delivered.  If so, pull it off (or as
 * much as we can).  If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int created_control,
    int *abort_flag, uint32_t tsn)
{
    uint32_t next_fsn;
    struct sctp_tmit_chunk *at, *nat;
    struct sctp_stream_in *strm;
    int do_wakeup, unordered;
    uint32_t lenadded;

    strm = &asoc->strmin[control->sinfo_stream];
    /*
     * For old un-ordered data chunks.
     */
    if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
        unordered = 1;
    } else {
        unordered = 0;
    }
    /* Must be added to the stream-in queue. */
    if (created_control) {
        if ((unordered == 0) || (asoc->idata_supported)) {
            sctp_ucount_incr(asoc->cnt_on_all_streams);
        }
        if (sctp_place_control_in_stream(strm, asoc, control)) {
            /* Duplicate SSN? */
            sctp_abort_in_reasm(stcb, control, chk,
                abort_flag,
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
            sctp_clean_up_control(stcb, control);
            return;
        }
        if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
            /*
             * Ok, we created this control, so now let's
             * validate that it's legal, i.e., there is a B bit
             * set; if not, and we are up to the cum-ack, then
             * it's invalid.
             */
            if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
                sctp_abort_in_reasm(stcb, control, chk,
                    abort_flag,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
                return;
            }
        }
    }
    if ((asoc->idata_supported == 0) && (unordered == 1)) {
        sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
        return;
    }
    /*
     * Ok, we must queue the chunk into the reassembly portion:
     *  o if it's the first, it goes to the control mbuf;
     *  o if it's not first but the next in sequence, it goes to the
     *    control, and each succeeding one in order also goes;
     *  o if it's not in order, we place it on the list in its place.
     */
    if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
        /* It's the very first one. */
        SCTPDBG(SCTP_DEBUG_XXX,
            "chunk is a first fsn: %u becomes fsn_included\n",
            chk->rec.data.fsn);
        if (control->first_frag_seen) {
            /*
             * Error on the sender's part: they either sent us
             * two data chunks with FIRST, or they sent two
             * un-ordered chunks that were fragmented at the
             * same time in the same stream.
             */
            sctp_abort_in_reasm(stcb, control, chk,
                abort_flag,
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
            return;
        }
        control->first_frag_seen = 1;
        control->sinfo_ppid = chk->rec.data.ppid;
        control->sinfo_tsn = chk->rec.data.tsn;
        control->fsn_included = chk->rec.data.fsn;
        control->data = chk->data;
        sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
        chk->data = NULL;
        sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
        sctp_setup_tail_pointer(control);
        asoc->size_on_all_streams += control->length;
    } else {
        /* Place the chunk in our list. */
        int inserted = 0;

        if (control->last_frag_seen == 0) {
            /* Still willing to raise the highest FSN seen. */
            if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
                SCTPDBG(SCTP_DEBUG_XXX,
                    "We have a new top_fsn: %u\n",
                    chk->rec.data.fsn);
                control->top_fsn = chk->rec.data.fsn;
            }
            if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
                SCTPDBG(SCTP_DEBUG_XXX,
                    "The last fsn is now in place fsn: %u\n",
                    chk->rec.data.fsn);
                control->last_frag_seen = 1;
                if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
                    SCTPDBG(SCTP_DEBUG_XXX,
                        "New fsn: %u is not at top_fsn: %u -- abort\n",
                        chk->rec.data.fsn,
                        control->top_fsn);
                    sctp_abort_in_reasm(stcb, control, chk,
                        abort_flag,
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
                    return;
                }
            }
            if (asoc->idata_supported || control->first_frag_seen) {
                /*
                 * For IDATA we always check, since we know
                 * that the first fragment is 0.  For old
                 * DATA we have to receive the first before
                 * we know the first FSN (which is the TSN).
                 */
                if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
                    /*
                     * We have already delivered up to
                     * this, so it's a dup.
                     */
                    sctp_abort_in_reasm(stcb, control, chk,
                        abort_flag,
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
                    return;
                }
            }
        } else {
            if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
                /* A second last?  Huh? */
                SCTPDBG(SCTP_DEBUG_XXX,
                    "Duplicate last fsn: %u (top: %u) -- abort\n",
                    chk->rec.data.fsn, control->top_fsn);
                sctp_abort_in_reasm(stcb, control,
                    chk, abort_flag,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
                return;
            }
            if (asoc->idata_supported || control->first_frag_seen) {
                /*
                 * For IDATA we always check, since we know
                 * that the first fragment is 0.  For old
                 * DATA we have to receive the first before
                 * we know the first FSN (which is the TSN).
                 */

                if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
                    /*
                     * We have already delivered up to
                     * this, so it's a dup.
                     */
                    SCTPDBG(SCTP_DEBUG_XXX,
                        "New fsn: %u is already seen in included_fsn: %u -- abort\n",
                        chk->rec.data.fsn, control->fsn_included);
                    sctp_abort_in_reasm(stcb, control, chk,
                        abort_flag,
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
                    return;
                }
            }
            /*
             * Validate not beyond the top FSN if we have seen
             * the last one.
             */
            if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
                SCTPDBG(SCTP_DEBUG_XXX,
                    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
                    chk->rec.data.fsn,
                    control->top_fsn);
                sctp_abort_in_reasm(stcb, control, chk,
                    abort_flag,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
                return;
            }
        }
        /*
         * If we reach here, we need to place the new chunk in the
         * reassembly for this control.
         */
        SCTPDBG(SCTP_DEBUG_XXX,
            "chunk is a not first fsn: %u needs to be inserted\n",
            chk->rec.data.fsn);
        TAILQ_FOREACH(at, &control->reasm, sctp_next) {
            if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
                if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
                    /* Last not at the end?  Huh? */
                    SCTPDBG(SCTP_DEBUG_XXX,
                        "Last fragment not last in list: -- abort\n");
                    sctp_abort_in_reasm(stcb, control,
                        chk, abort_flag,
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
                    return;
                }
                /*
                 * This one in the queue is bigger than the
                 * new one; insert the new one before at.
                 */
                SCTPDBG(SCTP_DEBUG_XXX,
                    "Insert it before fsn: %u\n",
                    at->rec.data.fsn);
                asoc->size_on_reasm_queue += chk->send_size;
                sctp_ucount_incr(asoc->cnt_on_reasm_queue);
                TAILQ_INSERT_BEFORE(at, chk, sctp_next);
                inserted = 1;
                break;
            } else if (at->rec.data.fsn == chk->rec.data.fsn) {
                /*
                 * Gak, he sent me a duplicate stream
                 * sequence number.
                 */
                /*
                 * foo bar, I guess I will just free this
                 * new guy; should we abort too?  FIX ME
                 * MAYBE?  Or it COULD be that the SSNs have
                 * wrapped.  Maybe I should compare to TSN
                 * somehow... sigh, for now just blow away
                 * the chunk!
                 */
                SCTPDBG(SCTP_DEBUG_XXX,
                    "Duplicate to fsn: %u -- abort\n",
                    at->rec.data.fsn);
                sctp_abort_in_reasm(stcb, control,
                    chk, abort_flag,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
                return;
            }
        }
        if (inserted == 0) {
            /* Goes on the end. */
            SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
                chk->rec.data.fsn);
            asoc->size_on_reasm_queue += chk->send_size;
            sctp_ucount_incr(asoc->cnt_on_reasm_queue);
            TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
        }
    }
    /*
     * Ok, let's see if we can suck any up into the control structure
     * that are in sequence, if it makes sense.
     */
    do_wakeup = 0;
    /*
     * If the first fragment has not been seen, there is no sense in
     * looking.
     */
    if (control->first_frag_seen) {
        next_fsn = control->fsn_included + 1;
        TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
            if (at->rec.data.fsn == next_fsn) {
                /* We can add this one now to the control. */
                SCTPDBG(SCTP_DEBUG_XXX,
                    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
                    control, at,
                    at->rec.data.fsn,
                    next_fsn, control->fsn_included);
                TAILQ_REMOVE(&control->reasm, at, sctp_next);
                lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
                if (control->on_read_q) {
                    do_wakeup = 1;
                } else {
                    /*
                     * We only add to the
                     * size-on-all-streams if it's not on
                     * the read q.  The read q flag will
                     * cause a sballoc, so it's accounted
                     * for there.
                     */
                    asoc->size_on_all_streams += lenadded;
                }
                next_fsn++;
                if (control->end_added && control->pdapi_started) {
                    if (strm->pd_api_started) {
                        strm->pd_api_started = 0;
                        control->pdapi_started = 0;
                    }
                    if (control->on_read_q == 0) {
                        sctp_add_to_readq(stcb->sctp_ep, stcb,
                            control,
                            &stcb->sctp_socket->so_rcv, control->end_added,
                            SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
                    }
                    break;
                }
            } else {
                break;
            }
        }
    }
    if (do_wakeup) {
        /* Need to wake up the reader. */
        sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
    }
}
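/*
 * Look up the partially reassembled message for a given message id: ordered
 * messages are searched by MID on the stream's in-queue, unordered I-DATA
 * by MID on the unordered queue, and old-style unordered data simply uses
 * the single entry at the head of the unordered queue.
 */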
static struct sctp_queued_to_read *
sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
{
    struct sctp_queued_to_read *control;

    if (ordered) {
        TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
            if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
                break;
            }
        }
    } else {
        if (idata_supported) {
            TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
                if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
                    break;
                }
            }
        } else {
            control = TAILQ_FIRST(&strm->uno_inqueue);
        }
    }
    return (control);
}
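/*
 * Sketch of the processing below for one DATA/I-DATA chunk (a summary, not
 * normative): parse the chunk header, reject empty chunks and duplicates,
 * grow the mapping array if needed, validate the stream id, match the
 * chunk to an existing reassembly entry, and only then commit it to the
 * mapping array and the per-stream queues.
 */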
static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, int chk_length,
    struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
    int *break_flag, int last_chunk, uint8_t chk_type)
{
    struct sctp_tmit_chunk *chk = NULL;    /* make gcc happy */
    struct sctp_stream_in *strm;
    uint32_t tsn, fsn, gap, mid;
    struct mbuf *dmbuf;
    int the_len;
    int need_reasm_check = 0;
    uint16_t sid;
    struct mbuf *op_err;
    char msg[SCTP_DIAG_INFO_LEN];
    struct sctp_queued_to_read *control, *ncontrol;
    uint32_t ppid;
    uint8_t chk_flags;
    struct sctp_stream_reset_list *liste;
    int ordered;
    size_t clen;
    int created_control = 0;

    if (chk_type == SCTP_IDATA) {
        struct sctp_idata_chunk *chunk, chunk_buf;

        chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
            sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
        chk_flags = chunk->ch.chunk_flags;
        clen = sizeof(struct sctp_idata_chunk);
        tsn = ntohl(chunk->dp.tsn);
        sid = ntohs(chunk->dp.sid);
        mid = ntohl(chunk->dp.mid);
        if (chk_flags & SCTP_DATA_FIRST_FRAG) {
            fsn = 0;
            ppid = chunk->dp.ppid_fsn.ppid;
        } else {
            fsn = ntohl(chunk->dp.ppid_fsn.fsn);
            ppid = 0xffffffff;    /* Use as an invalid value. */
        }
    } else {
        struct sctp_data_chunk *chunk, chunk_buf;

        chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
            sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
        chk_flags = chunk->ch.chunk_flags;
        clen = sizeof(struct sctp_data_chunk);
        tsn = ntohl(chunk->dp.tsn);
        sid = ntohs(chunk->dp.sid);
        mid = (uint32_t)(ntohs(chunk->dp.ssn));
        fsn = tsn;
        ppid = chunk->dp.ppid;
    }
    if ((size_t)chk_length == clen) {
        /*
         * Need to send an abort, since we had an empty data chunk.
         */
        op_err = sctp_generate_no_user_data_cause(tsn);
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
        sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
        *abort_flag = 1;
        return (0);
    }
    if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
        asoc->send_sack = 1;
    }
    ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
        sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
    }
    if (stcb == NULL) {
        return (0);
    }
    SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
    if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
        /* It is a duplicate. */
        SCTP_STAT_INCR(sctps_recvdupdata);
        if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
            /* Record a dup for the next outbound SACK. */
            asoc->dup_tsns[asoc->numduptsns] = tsn;
            asoc->numduptsns++;
        }
        asoc->send_sack = 1;
        return (0);
    }
    /* Calculate the number of TSNs between the base and this TSN. */
    SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
    if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
        /* Can't hold the bit in the mapping at the max array size; toss it. */
        return (0);
    }
    if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
        SCTP_TCB_LOCK_ASSERT(stcb);
        if (sctp_expand_mapping_array(asoc, gap)) {
            /* Can't expand, drop it. */
            return (0);
        }
    }
    if (SCTP_TSN_GT(tsn, *high_tsn)) {
        *high_tsn = tsn;
    }
    /* See if we have received this one already. */
    if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
        SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
        SCTP_STAT_INCR(sctps_recvdupdata);
        if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
            /* Record a dup for the next outbound SACK. */
            asoc->dup_tsns[asoc->numduptsns] = tsn;
            asoc->numduptsns++;
        }
        asoc->send_sack = 1;
        return (0);
    }
    /*
     * Check to see about the GONE flag; duplicates would cause a SACK
     * to be sent up above.
     */
    if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
        (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
        (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
        /*
         * Wait a minute, this guy is gone; there is no longer a
         * receiver.  Send peer an ABORT!
         */
        op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
        sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
        *abort_flag = 1;
        return (0);
    }
    /*
     * Now before going further we see if there is room.  If NOT then
     * we MAY let one through only IF this TSN is the one we are
     * waiting for on a partial delivery API.
     */
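    /*
     * Wire-format note for the invalid-stream case below (illustrative):
     * the ERROR cause is two back-to-back parameter headers; e.g. for
     * sid 7 the cause reads {code=SCTP_CAUSE_INVALID_STREAM, length=8,
     * stream_id=7, reserved=0}, queued for a later outbound packet via
     * sctp_queue_op_err().
     */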
*/ 1824 if (sid >= asoc->streamincnt) { 1825 struct sctp_error_invalid_stream *cause; 1826 1827 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream), 1828 0, M_NOWAIT, 1, MT_DATA); 1829 if (op_err != NULL) { 1830 /* add some space up front so prepend will work well */ 1831 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 1832 cause = mtod(op_err, struct sctp_error_invalid_stream *); 1833 /* 1834 * Error causes are just param's and this one has 1835 * two back to back phdr, one with the error type 1836 * and size, the other with the streamid and a rsvd 1837 */ 1838 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream); 1839 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM); 1840 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream)); 1841 cause->stream_id = htons(sid); 1842 cause->reserved = htons(0); 1843 sctp_queue_op_err(stcb, op_err); 1844 } 1845 SCTP_STAT_INCR(sctps_badsid); 1846 SCTP_TCB_LOCK_ASSERT(stcb); 1847 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 1848 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1849 asoc->highest_tsn_inside_nr_map = tsn; 1850 } 1851 if (tsn == (asoc->cumulative_tsn + 1)) { 1852 /* Update cum-ack */ 1853 asoc->cumulative_tsn = tsn; 1854 } 1855 return (0); 1856 } 1857 /* 1858 * If its a fragmented message, lets see if we can find the control 1859 * on the reassembly queues. 1860 */ 1861 if ((chk_type == SCTP_IDATA) && 1862 ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) && 1863 (fsn == 0)) { 1864 /* 1865 * The first *must* be fsn 0, and other (middle/end) pieces 1866 * can *not* be fsn 0. XXX: This can happen in case of a 1867 * wrap around. Ignore is for now. 1868 */ 1869 SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags); 1870 goto err_out; 1871 } 1872 control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported); 1873 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n", 1874 chk_flags, control); 1875 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 1876 /* See if we can find the re-assembly entity */ 1877 if (control != NULL) { 1878 /* We found something, does it belong? */ 1879 if (ordered && (mid != control->mid)) { 1880 SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid); 1881 err_out: 1882 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 1883 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17; 1884 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 1885 *abort_flag = 1; 1886 return (0); 1887 } 1888 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) { 1889 /* 1890 * We can't have a switched order with an 1891 * unordered chunk 1892 */ 1893 SCTP_SNPRINTF(msg, sizeof(msg), 1894 "All fragments of a user message must be ordered or unordered (TSN=%8.8x)", 1895 tsn); 1896 goto err_out; 1897 } 1898 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) { 1899 /* 1900 * We can't have a switched unordered with a 1901 * ordered chunk 1902 */ 1903 SCTP_SNPRINTF(msg, sizeof(msg), 1904 "All fragments of a user message must be ordered or unordered (TSN=%8.8x)", 1905 tsn); 1906 goto err_out; 1907 } 1908 } 1909 } else { 1910 /* 1911 * Its a complete segment. Lets validate we don't have a 1912 * re-assembly going on with the same Stream/Seq (for 1913 * ordered) or in the same Stream for unordered. 
1914 */ 1915 if (control != NULL) { 1916 if (ordered || asoc->idata_supported) { 1917 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n", 1918 chk_flags, mid); 1919 SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid); 1920 goto err_out; 1921 } else { 1922 if ((tsn == control->fsn_included + 1) && 1923 (control->end_added == 0)) { 1924 SCTP_SNPRINTF(msg, sizeof(msg), 1925 "Illegal message sequence, missing end for MID: %8.8x", 1926 control->fsn_included); 1927 goto err_out; 1928 } else { 1929 control = NULL; 1930 } 1931 } 1932 } 1933 } 1934 /* now do the tests */ 1935 if (((asoc->cnt_on_all_streams + 1936 asoc->cnt_on_reasm_queue + 1937 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) || 1938 (((int)asoc->my_rwnd) <= 0)) { 1939 /* 1940 * When we have NO room in the rwnd we check to make sure 1941 * the reader is doing its job... 1942 */ 1943 if (SCTP_SBAVAIL(&stcb->sctp_socket->so_rcv) > 0) { 1944 /* some to read, wake-up */ 1945 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 1946 } 1947 /* now is it in the mapping array of what we have accepted? */ 1948 if (chk_type == SCTP_DATA) { 1949 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) && 1950 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1951 /* Nope not in the valid range dump it */ 1952 dump_packet: 1953 sctp_set_rwnd(stcb, asoc); 1954 if ((asoc->cnt_on_all_streams + 1955 asoc->cnt_on_reasm_queue + 1956 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) { 1957 SCTP_STAT_INCR(sctps_datadropchklmt); 1958 } else { 1959 SCTP_STAT_INCR(sctps_datadroprwnd); 1960 } 1961 *break_flag = 1; 1962 return (0); 1963 } 1964 } else { 1965 if (control == NULL) { 1966 goto dump_packet; 1967 } 1968 if (SCTP_TSN_GT(fsn, control->top_fsn)) { 1969 goto dump_packet; 1970 } 1971 } 1972 } 1973 #ifdef SCTP_ASOCLOG_OF_TSNS 1974 SCTP_TCB_LOCK_ASSERT(stcb); 1975 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) { 1976 asoc->tsn_in_at = 0; 1977 asoc->tsn_in_wrapped = 1; 1978 } 1979 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn; 1980 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid; 1981 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid; 1982 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length; 1983 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags; 1984 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb; 1985 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at; 1986 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1; 1987 asoc->tsn_in_at++; 1988 #endif 1989 /* 1990 * Before we continue lets validate that we are not being fooled by 1991 * an evil attacker. We can only have Nk chunks based on our TSN 1992 * spread allowed by the mapping array N * 8 bits, so there is no 1993 * way our stream sequence numbers could have wrapped. We of course 1994 * only validate the FIRST fragment so the bit must be set. 1995 */ 1996 if ((chk_flags & SCTP_DATA_FIRST_FRAG) && 1997 (TAILQ_EMPTY(&asoc->resetHead)) && 1998 (chk_flags & SCTP_DATA_UNORDERED) == 0 && 1999 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) { 2000 /* The incoming sseq is behind where we last delivered? 
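 * Illustrative example (editor's sketch): if last_mid_delivered
 * is 7 and an ordered first fragment now claims MID 5, the peer
 * has re-used a sequence number we already delivered through.
 * Within the TSN window the mapping array allows, MIDs cannot
 * legitimately wrap, so this is treated as evil/broken and the
 * association is aborted below.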
*/ 2001 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n", 2002 mid, asoc->strmin[sid].last_mid_delivered); 2003 2004 if (asoc->idata_supported) { 2005 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x", 2006 asoc->strmin[sid].last_mid_delivered, 2007 tsn, 2008 sid, 2009 mid); 2010 } else { 2011 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", 2012 (uint16_t)asoc->strmin[sid].last_mid_delivered, 2013 tsn, 2014 sid, 2015 (uint16_t)mid); 2016 } 2017 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2018 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18; 2019 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 2020 *abort_flag = 1; 2021 return (0); 2022 } 2023 if (chk_type == SCTP_IDATA) { 2024 the_len = (chk_length - sizeof(struct sctp_idata_chunk)); 2025 } else { 2026 the_len = (chk_length - sizeof(struct sctp_data_chunk)); 2027 } 2028 if (last_chunk == 0) { 2029 if (chk_type == SCTP_IDATA) { 2030 dmbuf = SCTP_M_COPYM(*m, 2031 (offset + sizeof(struct sctp_idata_chunk)), 2032 the_len, M_NOWAIT); 2033 } else { 2034 dmbuf = SCTP_M_COPYM(*m, 2035 (offset + sizeof(struct sctp_data_chunk)), 2036 the_len, M_NOWAIT); 2037 } 2038 #ifdef SCTP_MBUF_LOGGING 2039 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 2040 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY); 2041 } 2042 #endif 2043 } else { 2044 /* We can steal the last chunk */ 2045 int l_len; 2046 2047 dmbuf = *m; 2048 /* lop off the top part */ 2049 if (chk_type == SCTP_IDATA) { 2050 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk))); 2051 } else { 2052 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk))); 2053 } 2054 if (SCTP_BUF_NEXT(dmbuf) == NULL) { 2055 l_len = SCTP_BUF_LEN(dmbuf); 2056 } else { 2057 /* 2058 * need to count up the size hopefully does not hit 2059 * this to often :-0 2060 */ 2061 struct mbuf *lat; 2062 2063 l_len = 0; 2064 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) { 2065 l_len += SCTP_BUF_LEN(lat); 2066 } 2067 } 2068 if (l_len > the_len) { 2069 /* Trim the end round bytes off too */ 2070 m_adj(dmbuf, -(l_len - the_len)); 2071 } 2072 } 2073 if (dmbuf == NULL) { 2074 SCTP_STAT_INCR(sctps_nomem); 2075 return (0); 2076 } 2077 /* 2078 * Now no matter what, we need a control, get one if we don't have 2079 * one (we may have gotten it above when we found the message was 2080 * fragmented 2081 */ 2082 if (control == NULL) { 2083 sctp_alloc_a_readq(stcb, control); 2084 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 2085 ppid, 2086 sid, 2087 chk_flags, 2088 NULL, fsn, mid); 2089 if (control == NULL) { 2090 SCTP_STAT_INCR(sctps_nomem); 2091 return (0); 2092 } 2093 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 2094 struct mbuf *mm; 2095 2096 control->data = dmbuf; 2097 control->tail_mbuf = NULL; 2098 for (mm = control->data; mm; mm = mm->m_next) { 2099 control->length += SCTP_BUF_LEN(mm); 2100 if (SCTP_BUF_NEXT(mm) == NULL) { 2101 control->tail_mbuf = mm; 2102 } 2103 } 2104 control->end_added = 1; 2105 control->last_frag_seen = 1; 2106 control->first_frag_seen = 1; 2107 control->fsn_included = fsn; 2108 control->top_fsn = fsn; 2109 } 2110 created_control = 1; 2111 } 2112 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n", 2113 chk_flags, ordered, mid, control); 2114 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG && 2115 TAILQ_EMPTY(&asoc->resetHead) 
&& 2116 ((ordered == 0) || 2117 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) && 2118 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) { 2119 /* Candidate for express delivery */ 2120 /* 2121 * Its not fragmented, No PD-API is up, Nothing in the 2122 * delivery queue, Its un-ordered OR ordered and the next to 2123 * deliver AND nothing else is stuck on the stream queue, 2124 * And there is room for it in the socket buffer. Lets just 2125 * stuff it up the buffer.... 2126 */ 2127 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 2128 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 2129 asoc->highest_tsn_inside_nr_map = tsn; 2130 } 2131 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n", 2132 control, mid); 2133 2134 sctp_add_to_readq(stcb->sctp_ep, stcb, 2135 control, &stcb->sctp_socket->so_rcv, 2136 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 2137 2138 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) { 2139 /* for ordered, bump what we delivered */ 2140 asoc->strmin[sid].last_mid_delivered++; 2141 } 2142 SCTP_STAT_INCR(sctps_recvexpress); 2143 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 2144 sctp_log_strm_del_alt(stcb, tsn, mid, sid, 2145 SCTP_STR_LOG_FROM_EXPRS_DEL); 2146 } 2147 control = NULL; 2148 goto finish_express_del; 2149 } 2150 2151 /* Now will we need a chunk too? */ 2152 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 2153 sctp_alloc_a_chunk(stcb, chk); 2154 if (chk == NULL) { 2155 /* No memory so we drop the chunk */ 2156 SCTP_STAT_INCR(sctps_nomem); 2157 if (last_chunk == 0) { 2158 /* we copied it, free the copy */ 2159 sctp_m_freem(dmbuf); 2160 } 2161 return (0); 2162 } 2163 chk->rec.data.tsn = tsn; 2164 chk->no_fr_allowed = 0; 2165 chk->rec.data.fsn = fsn; 2166 chk->rec.data.mid = mid; 2167 chk->rec.data.sid = sid; 2168 chk->rec.data.ppid = ppid; 2169 chk->rec.data.context = stcb->asoc.context; 2170 chk->rec.data.doing_fast_retransmit = 0; 2171 chk->rec.data.rcv_flags = chk_flags; 2172 chk->asoc = asoc; 2173 chk->send_size = the_len; 2174 chk->whoTo = net; 2175 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n", 2176 chk, 2177 control, mid); 2178 atomic_add_int(&net->ref_count, 1); 2179 chk->data = dmbuf; 2180 } 2181 /* Set the appropriate TSN mark */ 2182 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { 2183 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 2184 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 2185 asoc->highest_tsn_inside_nr_map = tsn; 2186 } 2187 } else { 2188 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); 2189 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) { 2190 asoc->highest_tsn_inside_map = tsn; 2191 } 2192 } 2193 /* Now is it complete (i.e. not fragmented)? */ 2194 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 2195 /* 2196 * Special check for when streams are resetting. We could be 2197 * more smart about this and check the actual stream to see 2198 * if it is not being reset.. that way we would not create a 2199 * HOLB when amongst streams being reset and those not being 2200 * reset. 2201 * 2202 */ 2203 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2204 SCTP_TSN_GT(tsn, liste->tsn)) { 2205 /* 2206 * yep its past where we need to reset... go ahead 2207 * and queue it. 
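 * (Sketch of what follows, informal: pending_reply_queue is kept
 * sorted by TSN -- an empty queue takes the control directly,
 * otherwise we insert in front of the first entry with a larger
 * TSN, or at the tail if none is found.)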
2208 */ 2209 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) { 2210 /* first one on */ 2211 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2212 } else { 2213 struct sctp_queued_to_read *lcontrol, *nlcontrol; 2214 unsigned char inserted = 0; 2215 2216 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) { 2217 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) { 2218 continue; 2219 } else { 2220 /* found it */ 2221 TAILQ_INSERT_BEFORE(lcontrol, control, next); 2222 inserted = 1; 2223 break; 2224 } 2225 } 2226 if (inserted == 0) { 2227 /* 2228 * must be put at end, use prevP 2229 * (all setup from loop) to setup 2230 * nextP. 2231 */ 2232 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2233 } 2234 } 2235 goto finish_express_del; 2236 } 2237 if (chk_flags & SCTP_DATA_UNORDERED) { 2238 /* queue directly into socket buffer */ 2239 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n", 2240 control, mid); 2241 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 2242 sctp_add_to_readq(stcb->sctp_ep, stcb, 2243 control, 2244 &stcb->sctp_socket->so_rcv, 1, 2245 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 2246 2247 } else { 2248 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control, 2249 mid); 2250 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2251 if (*abort_flag) { 2252 if (last_chunk) { 2253 *m = NULL; 2254 } 2255 return (0); 2256 } 2257 } 2258 goto finish_express_del; 2259 } 2260 /* If we reach here its a reassembly */ 2261 need_reasm_check = 1; 2262 SCTPDBG(SCTP_DEBUG_XXX, 2263 "Queue data to stream for reasm control: %p MID: %u\n", 2264 control, mid); 2265 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn); 2266 if (*abort_flag) { 2267 /* 2268 * the assoc is now gone and chk was put onto the reasm 2269 * queue, which has all been freed. 2270 */ 2271 if (last_chunk) { 2272 *m = NULL; 2273 } 2274 return (0); 2275 } 2276 finish_express_del: 2277 /* Here we tidy up things */ 2278 if (tsn == (asoc->cumulative_tsn + 1)) { 2279 /* Update cum-ack */ 2280 asoc->cumulative_tsn = tsn; 2281 } 2282 if (last_chunk) { 2283 *m = NULL; 2284 } 2285 if (ordered) { 2286 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); 2287 } else { 2288 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); 2289 } 2290 SCTP_STAT_INCR(sctps_recvdata); 2291 /* Set it present please */ 2292 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 2293 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN); 2294 } 2295 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2296 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2297 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); 2298 } 2299 if (need_reasm_check) { 2300 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD); 2301 need_reasm_check = 0; 2302 } 2303 /* check the special flag for stream resets */ 2304 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2305 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) { 2306 /* 2307 * we have finished working through the backlogged TSN's now 2308 * time to reset streams. 1: call reset function. 2: free 2309 * pending_reply space 3: distribute any chunks in 2310 * pending_reply_queue. 
2311 */ 2312 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams); 2313 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); 2314 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED); 2315 SCTP_FREE(liste, SCTP_M_STRESET); 2316 /* sa_ignore FREED_MEMORY */ 2317 liste = TAILQ_FIRST(&asoc->resetHead); 2318 if (TAILQ_EMPTY(&asoc->resetHead)) { 2319 /* All can be removed */ 2320 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) { 2321 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next); 2322 strm = &asoc->strmin[control->sinfo_stream]; 2323 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2324 if (*abort_flag) { 2325 return (0); 2326 } 2327 if (need_reasm_check) { 2328 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD); 2329 need_reasm_check = 0; 2330 } 2331 } 2332 } else { 2333 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) { 2334 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) { 2335 break; 2336 } 2337 /* 2338 * if control->sinfo_tsn is <= liste->tsn we 2339 * can process it which is the NOT of 2340 * control->sinfo_tsn > liste->tsn 2341 */ 2342 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next); 2343 strm = &asoc->strmin[control->sinfo_stream]; 2344 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2345 if (*abort_flag) { 2346 return (0); 2347 } 2348 if (need_reasm_check) { 2349 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD); 2350 need_reasm_check = 0; 2351 } 2352 } 2353 } 2354 } 2355 return (1); 2356 } 2357 2358 static const int8_t sctp_map_lookup_tab[256] = { 2359 0, 1, 0, 2, 0, 1, 0, 3, 2360 0, 1, 0, 2, 0, 1, 0, 4, 2361 0, 1, 0, 2, 0, 1, 0, 3, 2362 0, 1, 0, 2, 0, 1, 0, 5, 2363 0, 1, 0, 2, 0, 1, 0, 3, 2364 0, 1, 0, 2, 0, 1, 0, 4, 2365 0, 1, 0, 2, 0, 1, 0, 3, 2366 0, 1, 0, 2, 0, 1, 0, 6, 2367 0, 1, 0, 2, 0, 1, 0, 3, 2368 0, 1, 0, 2, 0, 1, 0, 4, 2369 0, 1, 0, 2, 0, 1, 0, 3, 2370 0, 1, 0, 2, 0, 1, 0, 5, 2371 0, 1, 0, 2, 0, 1, 0, 3, 2372 0, 1, 0, 2, 0, 1, 0, 4, 2373 0, 1, 0, 2, 0, 1, 0, 3, 2374 0, 1, 0, 2, 0, 1, 0, 7, 2375 0, 1, 0, 2, 0, 1, 0, 3, 2376 0, 1, 0, 2, 0, 1, 0, 4, 2377 0, 1, 0, 2, 0, 1, 0, 3, 2378 0, 1, 0, 2, 0, 1, 0, 5, 2379 0, 1, 0, 2, 0, 1, 0, 3, 2380 0, 1, 0, 2, 0, 1, 0, 4, 2381 0, 1, 0, 2, 0, 1, 0, 3, 2382 0, 1, 0, 2, 0, 1, 0, 6, 2383 0, 1, 0, 2, 0, 1, 0, 3, 2384 0, 1, 0, 2, 0, 1, 0, 4, 2385 0, 1, 0, 2, 0, 1, 0, 3, 2386 0, 1, 0, 2, 0, 1, 0, 5, 2387 0, 1, 0, 2, 0, 1, 0, 3, 2388 0, 1, 0, 2, 0, 1, 0, 4, 2389 0, 1, 0, 2, 0, 1, 0, 3, 2390 0, 1, 0, 2, 0, 1, 0, 8 2391 }; 2392 2393 void 2394 sctp_slide_mapping_arrays(struct sctp_tcb *stcb) 2395 { 2396 /* 2397 * Now we also need to check the mapping array in a couple of ways. 2398 * 1) Did we move the cum-ack point? 2399 * 2400 * When you first glance at this you might think that all entries 2401 * that make up the position of the cum-ack would be in the 2402 * nr-mapping array only.. i.e. things up to the cum-ack are always 2403 * deliverable. Thats true with one exception, when its a fragmented 2404 * message we may not deliver the data until some threshold (or all 2405 * of it) is in place. So we must OR the nr_mapping_array and 2406 * mapping_array to get a true picture of the cum-ack. 
2407 */ 2408 struct sctp_association *asoc; 2409 int at; 2410 uint8_t val; 2411 int slide_from, slide_end, lgap, distance; 2412 uint32_t old_cumack, old_base, old_highest, highest_tsn; 2413 2414 asoc = &stcb->asoc; 2415 2416 old_cumack = asoc->cumulative_tsn; 2417 old_base = asoc->mapping_array_base_tsn; 2418 old_highest = asoc->highest_tsn_inside_map; 2419 /* 2420 * We could probably improve this a small bit by calculating the 2421 * offset of the current cum-ack as the starting point. 2422 */ 2423 at = 0; 2424 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) { 2425 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from]; 2426 if (val == 0xff) { 2427 at += 8; 2428 } else { 2429 /* there is a 0 bit */ 2430 at += sctp_map_lookup_tab[val]; 2431 break; 2432 } 2433 } 2434 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1); 2435 2436 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) && 2437 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) { 2438 #ifdef INVARIANTS 2439 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map", 2440 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2441 #else 2442 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n", 2443 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2444 sctp_print_mapping_array(asoc); 2445 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2446 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 2447 } 2448 asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2449 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn; 2450 #endif 2451 } 2452 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2453 highest_tsn = asoc->highest_tsn_inside_nr_map; 2454 } else { 2455 highest_tsn = asoc->highest_tsn_inside_map; 2456 } 2457 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) { 2458 /* The complete array was completed by a single FR */ 2459 /* highest becomes the cum-ack */ 2460 int clr; 2461 #ifdef INVARIANTS 2462 unsigned int i; 2463 #endif 2464 2465 /* clear the array */ 2466 clr = ((at + 7) >> 3); 2467 if (clr > asoc->mapping_array_size) { 2468 clr = asoc->mapping_array_size; 2469 } 2470 memset(asoc->mapping_array, 0, clr); 2471 memset(asoc->nr_mapping_array, 0, clr); 2472 #ifdef INVARIANTS 2473 for (i = 0; i < asoc->mapping_array_size; i++) { 2474 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) { 2475 SCTP_PRINTF("Error Mapping array's not clean at clear\n"); 2476 sctp_print_mapping_array(asoc); 2477 } 2478 } 2479 #endif 2480 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; 2481 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2482 } else if (at >= 8) { 2483 /* we can slide the mapping array down */ 2484 /* slide_from holds where we hit the first NON 0xff byte */ 2485 2486 /* 2487 * now calculate the ceiling of the move using our highest 2488 * TSN value 2489 */ 2490 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn); 2491 slide_end = (lgap >> 3); 2492 if (slide_end < slide_from) { 2493 sctp_print_mapping_array(asoc); 2494 #ifdef INVARIANTS 2495 panic("impossible slide"); 2496 #else 2497 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? 
at: %d\n", 2498 lgap, slide_end, slide_from, at); 2499 return; 2500 #endif 2501 } 2502 if (slide_end > asoc->mapping_array_size) { 2503 #ifdef INVARIANTS 2504 panic("would overrun buffer"); 2505 #else 2506 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n", 2507 asoc->mapping_array_size, slide_end); 2508 slide_end = asoc->mapping_array_size; 2509 #endif 2510 } 2511 distance = (slide_end - slide_from) + 1; 2512 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2513 sctp_log_map(old_base, old_cumack, old_highest, 2514 SCTP_MAP_PREPARE_SLIDE); 2515 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end, 2516 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM); 2517 } 2518 if (distance + slide_from > asoc->mapping_array_size || 2519 distance < 0) { 2520 /* 2521 * Here we do NOT slide forward the array so that 2522 * hopefully when more data comes in to fill it up 2523 * we will be able to slide it forward. Really I 2524 * don't think this should happen :-0 2525 */ 2526 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2527 sctp_log_map((uint32_t)distance, (uint32_t)slide_from, 2528 (uint32_t)asoc->mapping_array_size, 2529 SCTP_MAP_SLIDE_NONE); 2530 } 2531 } else { 2532 int ii; 2533 2534 for (ii = 0; ii < distance; ii++) { 2535 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii]; 2536 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii]; 2537 } 2538 for (ii = distance; ii < asoc->mapping_array_size; ii++) { 2539 asoc->mapping_array[ii] = 0; 2540 asoc->nr_mapping_array[ii] = 0; 2541 } 2542 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) { 2543 asoc->highest_tsn_inside_map += (slide_from << 3); 2544 } 2545 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) { 2546 asoc->highest_tsn_inside_nr_map += (slide_from << 3); 2547 } 2548 asoc->mapping_array_base_tsn += (slide_from << 3); 2549 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2550 sctp_log_map(asoc->mapping_array_base_tsn, 2551 asoc->cumulative_tsn, asoc->highest_tsn_inside_map, 2552 SCTP_MAP_SLIDE_RESULT); 2553 } 2554 } 2555 } 2556 } 2557 2558 void 2559 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap) 2560 { 2561 struct sctp_association *asoc; 2562 uint32_t highest_tsn; 2563 int is_a_gap; 2564 2565 sctp_slide_mapping_arrays(stcb); 2566 asoc = &stcb->asoc; 2567 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2568 highest_tsn = asoc->highest_tsn_inside_nr_map; 2569 } else { 2570 highest_tsn = asoc->highest_tsn_inside_map; 2571 } 2572 /* Is there a gap now? */ 2573 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 2574 2575 /* 2576 * Now we need to see if we need to queue a sack or just start the 2577 * timer (if allowed). 2578 */ 2579 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) { 2580 /* 2581 * Ok special case, in SHUTDOWN-SENT case. here we maker 2582 * sure SACK timer is off and instead send a SHUTDOWN and a 2583 * SACK 2584 */ 2585 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2586 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 2587 stcb->sctp_ep, stcb, NULL, 2588 SCTP_FROM_SCTP_INDATA + SCTP_LOC_19); 2589 } 2590 sctp_send_shutdown(stcb, 2591 ((stcb->asoc.alternate) ? 
stcb->asoc.alternate : stcb->asoc.primary_destination)); 2592 if (is_a_gap) { 2593 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 2594 } 2595 } else { 2596 /* 2597 * CMT DAC algorithm: increase number of packets received 2598 * since last ack 2599 */ 2600 stcb->asoc.cmt_dac_pkts_rcvd++; 2601 2602 if ((stcb->asoc.send_sack == 1) || /* We need to send a 2603 * SACK */ 2604 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no 2605 * longer is one */ 2606 (stcb->asoc.numduptsns) || /* we have dup's */ 2607 (is_a_gap) || /* is still a gap */ 2608 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */ 2609 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)) { /* hit limit of pkts */ 2610 if ((stcb->asoc.sctp_cmt_on_off > 0) && 2611 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) && 2612 (stcb->asoc.send_sack == 0) && 2613 (stcb->asoc.numduptsns == 0) && 2614 (stcb->asoc.delayed_ack) && 2615 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) { 2616 /* 2617 * CMT DAC algorithm: With CMT, delay acks 2618 * even in the face of reordering. 2619 * Therefore, if acks that do not have to be 2620 * sent because of the above reasons, will 2621 * be delayed. That is, acks that would have 2622 * been sent due to gap reports will be 2623 * delayed with DAC. Start the delayed ack 2624 * timer. 2625 */ 2626 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2627 stcb->sctp_ep, stcb, NULL); 2628 } else { 2629 /* 2630 * Ok we must build a SACK since the timer 2631 * is pending, we got our first packet OR 2632 * there are gaps or duplicates. 2633 */ 2634 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, 2635 SCTP_FROM_SCTP_INDATA + SCTP_LOC_20); 2636 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 2637 } 2638 } else { 2639 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2640 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2641 stcb->sctp_ep, stcb, NULL); 2642 } 2643 } 2644 } 2645 } 2646 2647 int 2648 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, 2649 struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2650 struct sctp_nets *net, uint32_t *high_tsn) 2651 { 2652 struct sctp_chunkhdr *ch, chunk_buf; 2653 struct sctp_association *asoc; 2654 int num_chunks = 0; /* number of control chunks processed */ 2655 int stop_proc = 0; 2656 int break_flag, last_chunk; 2657 int abort_flag = 0, was_a_gap; 2658 struct mbuf *m; 2659 uint32_t highest_tsn; 2660 uint16_t chk_length; 2661 2662 /* set the rwnd */ 2663 sctp_set_rwnd(stcb, &stcb->asoc); 2664 2665 m = *mm; 2666 SCTP_TCB_LOCK_ASSERT(stcb); 2667 asoc = &stcb->asoc; 2668 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2669 highest_tsn = asoc->highest_tsn_inside_nr_map; 2670 } else { 2671 highest_tsn = asoc->highest_tsn_inside_map; 2672 } 2673 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 2674 /* 2675 * setup where we got the last DATA packet from for any SACK that 2676 * may need to go out. Don't bump the net. This is done ONLY when a 2677 * chunk is assigned. 2678 */ 2679 asoc->last_data_chunk_from = net; 2680 2681 /*- 2682 * Now before we proceed we must figure out if this is a wasted 2683 * cluster... i.e. it is a small packet sent in and yet the driver 2684 * underneath allocated a full cluster for it. If so we must copy it 2685 * to a smaller mbuf and free up the cluster mbuf. This will help 2686 * with cluster starvation. 2687 */ 2688 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) { 2689 /* we only handle mbufs that are singletons.. 
not chains */
2690 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2691 if (m) {
2692 /* ok, let's see if we can copy the data up */
2693 caddr_t *from, *to;
2694
2695 /* get the pointers and copy */
2696 to = mtod(m, caddr_t *);
2697 from = mtod((*mm), caddr_t *);
2698 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2699 /* copy the length and free up the old */
2700 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2701 sctp_m_freem(*mm);
2702 /* success, back copy */
2703 *mm = m;
2704 } else {
2705 /* We are in trouble in the mbuf world .. yikes */
2706 m = *mm;
2707 }
2708 }
2709 /* get pointer to the first chunk header */
2710 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2711 sizeof(struct sctp_chunkhdr),
2712 (uint8_t *)&chunk_buf);
2713 if (ch == NULL) {
2714 return (1);
2715 }
2716 /*
2717 * process all DATA chunks...
2718 */
2719 *high_tsn = asoc->cumulative_tsn;
2720 break_flag = 0;
2721 asoc->data_pkts_seen++;
2722 while (stop_proc == 0) {
2723 /* validate chunk length */
2724 chk_length = ntohs(ch->chunk_length);
2725 if (length - *offset < chk_length) {
2726 /* all done, mutilated chunk */
2727 stop_proc = 1;
2728 continue;
2729 }
2730 if ((asoc->idata_supported == 1) &&
2731 (ch->chunk_type == SCTP_DATA)) {
2732 struct mbuf *op_err;
2733 char msg[SCTP_DIAG_INFO_LEN];
2734
2735 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2736 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2737 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2738 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2739 return (2);
2740 }
2741 if ((asoc->idata_supported == 0) &&
2742 (ch->chunk_type == SCTP_IDATA)) {
2743 struct mbuf *op_err;
2744 char msg[SCTP_DIAG_INFO_LEN];
2745
2746 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2747 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2748 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2749 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2750 return (2);
2751 }
2752 if ((ch->chunk_type == SCTP_DATA) ||
2753 (ch->chunk_type == SCTP_IDATA)) {
2754 uint16_t clen;
2755
2756 if (ch->chunk_type == SCTP_DATA) {
2757 clen = sizeof(struct sctp_data_chunk);
2758 } else {
2759 clen = sizeof(struct sctp_idata_chunk);
2760 }
2761 if (chk_length < clen) {
2762 /*
2763 * Need to send an abort since we had an
2764 * invalid data chunk.
2765 */
2766 struct mbuf *op_err;
2767 char msg[SCTP_DIAG_INFO_LEN];
2768
2769 SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
2770 ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2771 chk_length);
2772 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2773 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
2774 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2775 return (2);
2776 }
2777 #ifdef SCTP_AUDITING_ENABLED
2778 sctp_audit_log(0xB1, 0);
2779 #endif
2780 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2781 last_chunk = 1;
2782 } else {
2783 last_chunk = 0;
2784 }
2785 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2786 chk_length, net, high_tsn, &abort_flag, &break_flag,
2787 last_chunk, ch->chunk_type)) {
2788 num_chunks++;
2789 }
2790 if (abort_flag)
2791 return (2);
2792
2793 if (break_flag) {
2794 /*
2795 * Set because of out of rwnd space and no
2796 * drop rep space left.
2797 */ 2798 stop_proc = 1; 2799 continue; 2800 } 2801 } else { 2802 /* not a data chunk in the data region */ 2803 switch (ch->chunk_type) { 2804 case SCTP_INITIATION: 2805 case SCTP_INITIATION_ACK: 2806 case SCTP_SELECTIVE_ACK: 2807 case SCTP_NR_SELECTIVE_ACK: 2808 case SCTP_HEARTBEAT_REQUEST: 2809 case SCTP_HEARTBEAT_ACK: 2810 case SCTP_ABORT_ASSOCIATION: 2811 case SCTP_SHUTDOWN: 2812 case SCTP_SHUTDOWN_ACK: 2813 case SCTP_OPERATION_ERROR: 2814 case SCTP_COOKIE_ECHO: 2815 case SCTP_COOKIE_ACK: 2816 case SCTP_ECN_ECHO: 2817 case SCTP_ECN_CWR: 2818 case SCTP_SHUTDOWN_COMPLETE: 2819 case SCTP_AUTHENTICATION: 2820 case SCTP_ASCONF_ACK: 2821 case SCTP_PACKET_DROPPED: 2822 case SCTP_STREAM_RESET: 2823 case SCTP_FORWARD_CUM_TSN: 2824 case SCTP_ASCONF: 2825 { 2826 /* 2827 * Now, what do we do with KNOWN 2828 * chunks that are NOT in the right 2829 * place? 2830 * 2831 * For now, I do nothing but ignore 2832 * them. We may later want to add 2833 * sysctl stuff to switch out and do 2834 * either an ABORT() or possibly 2835 * process them. 2836 */ 2837 struct mbuf *op_err; 2838 char msg[SCTP_DIAG_INFO_LEN]; 2839 2840 SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x", 2841 ch->chunk_type); 2842 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2843 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 2844 return (2); 2845 } 2846 default: 2847 /* 2848 * Unknown chunk type: use bit rules after 2849 * checking length 2850 */ 2851 if (chk_length < sizeof(struct sctp_chunkhdr)) { 2852 /* 2853 * Need to send an abort since we 2854 * had a invalid chunk. 2855 */ 2856 struct mbuf *op_err; 2857 char msg[SCTP_DIAG_INFO_LEN]; 2858 2859 SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length); 2860 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2861 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; 2862 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 2863 return (2); 2864 } 2865 if (ch->chunk_type & 0x40) { 2866 /* Add a error report to the queue */ 2867 struct mbuf *op_err; 2868 struct sctp_gen_error_cause *cause; 2869 2870 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause), 2871 0, M_NOWAIT, 1, MT_DATA); 2872 if (op_err != NULL) { 2873 cause = mtod(op_err, struct sctp_gen_error_cause *); 2874 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK); 2875 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause))); 2876 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause); 2877 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT); 2878 if (SCTP_BUF_NEXT(op_err) != NULL) { 2879 sctp_queue_op_err(stcb, op_err); 2880 } else { 2881 sctp_m_freem(op_err); 2882 } 2883 } 2884 } 2885 if ((ch->chunk_type & 0x80) == 0) { 2886 /* discard the rest of this packet */ 2887 stop_proc = 1; 2888 } /* else skip this bad chunk and 2889 * continue... */ 2890 break; 2891 } /* switch of chunk type */ 2892 } 2893 *offset += SCTP_SIZE32(chk_length); 2894 if ((*offset >= length) || stop_proc) { 2895 /* no more data left in the mbuf chain */ 2896 stop_proc = 1; 2897 continue; 2898 } 2899 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 2900 sizeof(struct sctp_chunkhdr), 2901 (uint8_t *)&chunk_buf); 2902 if (ch == NULL) { 2903 *offset = length; 2904 stop_proc = 1; 2905 continue; 2906 } 2907 } 2908 if (break_flag) { 2909 /* 2910 * we need to report rwnd overrun drops. 
2911 */ 2912 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0); 2913 } 2914 if (num_chunks) { 2915 /* 2916 * Did we get data, if so update the time for auto-close and 2917 * give peer credit for being alive. 2918 */ 2919 SCTP_STAT_INCR(sctps_recvpktwithdata); 2920 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 2921 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 2922 stcb->asoc.overall_error_count, 2923 0, 2924 SCTP_FROM_SCTP_INDATA, 2925 __LINE__); 2926 } 2927 stcb->asoc.overall_error_count = 0; 2928 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd); 2929 } 2930 /* now service all of the reassm queue if needed */ 2931 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) { 2932 /* Assure that we ack right away */ 2933 stcb->asoc.send_sack = 1; 2934 } 2935 /* Start a sack timer or QUEUE a SACK for sending */ 2936 sctp_sack_check(stcb, was_a_gap); 2937 return (0); 2938 } 2939 2940 static int 2941 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn, 2942 uint16_t frag_strt, uint16_t frag_end, int nr_sacking, 2943 int *num_frs, 2944 uint32_t *biggest_newly_acked_tsn, 2945 uint32_t *this_sack_lowest_newack, 2946 int *rto_ok) 2947 { 2948 struct sctp_tmit_chunk *tp1; 2949 unsigned int theTSN; 2950 int j, wake_him = 0, circled = 0; 2951 2952 /* Recover the tp1 we last saw */ 2953 tp1 = *p_tp1; 2954 if (tp1 == NULL) { 2955 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 2956 } 2957 for (j = frag_strt; j <= frag_end; j++) { 2958 theTSN = j + last_tsn; 2959 while (tp1) { 2960 if (tp1->rec.data.doing_fast_retransmit) 2961 (*num_frs) += 1; 2962 2963 /*- 2964 * CMT: CUCv2 algorithm. For each TSN being 2965 * processed from the sent queue, track the 2966 * next expected pseudo-cumack, or 2967 * rtx_pseudo_cumack, if required. Separate 2968 * cumack trackers for first transmissions, 2969 * and retransmissions. 2970 */ 2971 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 2972 (tp1->whoTo->find_pseudo_cumack == 1) && 2973 (tp1->snd_count == 1)) { 2974 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn; 2975 tp1->whoTo->find_pseudo_cumack = 0; 2976 } 2977 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 2978 (tp1->whoTo->find_rtx_pseudo_cumack == 1) && 2979 (tp1->snd_count > 1)) { 2980 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn; 2981 tp1->whoTo->find_rtx_pseudo_cumack = 0; 2982 } 2983 if (tp1->rec.data.tsn == theTSN) { 2984 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 2985 /*- 2986 * must be held until 2987 * cum-ack passes 2988 */ 2989 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 2990 /*- 2991 * If it is less than RESEND, it is 2992 * now no-longer in flight. 2993 * Higher values may already be set 2994 * via previous Gap Ack Blocks... 2995 * i.e. ACKED or RESEND. 2996 */ 2997 if (SCTP_TSN_GT(tp1->rec.data.tsn, 2998 *biggest_newly_acked_tsn)) { 2999 *biggest_newly_acked_tsn = tp1->rec.data.tsn; 3000 } 3001 /*- 3002 * CMT: SFR algo (and HTNA) - set 3003 * saw_newack to 1 for dest being 3004 * newly acked. update 3005 * this_sack_highest_newack if 3006 * appropriate. 
3007 */ 3008 if (tp1->rec.data.chunk_was_revoked == 0) 3009 tp1->whoTo->saw_newack = 1; 3010 3011 if (SCTP_TSN_GT(tp1->rec.data.tsn, 3012 tp1->whoTo->this_sack_highest_newack)) { 3013 tp1->whoTo->this_sack_highest_newack = 3014 tp1->rec.data.tsn; 3015 } 3016 /*- 3017 * CMT DAC algo: also update 3018 * this_sack_lowest_newack 3019 */ 3020 if (*this_sack_lowest_newack == 0) { 3021 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3022 sctp_log_sack(*this_sack_lowest_newack, 3023 last_tsn, 3024 tp1->rec.data.tsn, 3025 0, 3026 0, 3027 SCTP_LOG_TSN_ACKED); 3028 } 3029 *this_sack_lowest_newack = tp1->rec.data.tsn; 3030 } 3031 /*- 3032 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp 3033 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set 3034 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be 3035 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack. 3036 * Separate pseudo_cumack trackers for first transmissions and 3037 * retransmissions. 3038 */ 3039 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) { 3040 if (tp1->rec.data.chunk_was_revoked == 0) { 3041 tp1->whoTo->new_pseudo_cumack = 1; 3042 } 3043 tp1->whoTo->find_pseudo_cumack = 1; 3044 } 3045 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 3046 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); 3047 } 3048 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) { 3049 if (tp1->rec.data.chunk_was_revoked == 0) { 3050 tp1->whoTo->new_pseudo_cumack = 1; 3051 } 3052 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3053 } 3054 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3055 sctp_log_sack(*biggest_newly_acked_tsn, 3056 last_tsn, 3057 tp1->rec.data.tsn, 3058 frag_strt, 3059 frag_end, 3060 SCTP_LOG_TSN_ACKED); 3061 } 3062 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3063 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP, 3064 tp1->whoTo->flight_size, 3065 tp1->book_size, 3066 (uint32_t)(uintptr_t)tp1->whoTo, 3067 tp1->rec.data.tsn); 3068 } 3069 sctp_flight_size_decrease(tp1); 3070 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3071 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3072 tp1); 3073 } 3074 sctp_total_flight_decrease(stcb, tp1); 3075 3076 tp1->whoTo->net_ack += tp1->send_size; 3077 if (tp1->snd_count < 2) { 3078 /*- 3079 * True non-retransmitted chunk 3080 */ 3081 tp1->whoTo->net_ack2 += tp1->send_size; 3082 3083 /*- 3084 * update RTO too ? 
3085 */ 3086 if (tp1->do_rtt) { 3087 if (*rto_ok && 3088 sctp_calculate_rto(stcb, 3089 &stcb->asoc, 3090 tp1->whoTo, 3091 &tp1->sent_rcv_time, 3092 SCTP_RTT_FROM_DATA)) { 3093 *rto_ok = 0; 3094 } 3095 if (tp1->whoTo->rto_needed == 0) { 3096 tp1->whoTo->rto_needed = 1; 3097 } 3098 tp1->do_rtt = 0; 3099 } 3100 } 3101 } 3102 if (tp1->sent <= SCTP_DATAGRAM_RESEND) { 3103 if (SCTP_TSN_GT(tp1->rec.data.tsn, 3104 stcb->asoc.this_sack_highest_gap)) { 3105 stcb->asoc.this_sack_highest_gap = 3106 tp1->rec.data.tsn; 3107 } 3108 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3109 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt); 3110 #ifdef SCTP_AUDITING_ENABLED 3111 sctp_audit_log(0xB2, 3112 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff)); 3113 #endif 3114 } 3115 } 3116 /*- 3117 * All chunks NOT UNSENT fall through here and are marked 3118 * (leave PR-SCTP ones that are to skip alone though) 3119 */ 3120 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) && 3121 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { 3122 tp1->sent = SCTP_DATAGRAM_MARKED; 3123 } 3124 if (tp1->rec.data.chunk_was_revoked) { 3125 /* deflate the cwnd */ 3126 tp1->whoTo->cwnd -= tp1->book_size; 3127 tp1->rec.data.chunk_was_revoked = 0; 3128 } 3129 /* NR Sack code here */ 3130 if (nr_sacking && 3131 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { 3132 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) { 3133 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--; 3134 #ifdef INVARIANTS 3135 } else { 3136 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); 3137 #endif 3138 } 3139 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) && 3140 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && 3141 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) { 3142 stcb->asoc.trigger_reset = 1; 3143 } 3144 tp1->sent = SCTP_DATAGRAM_NR_ACKED; 3145 if (tp1->data) { 3146 /* 3147 * sa_ignore 3148 * NO_NULL_CHK 3149 */ 3150 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 3151 sctp_m_freem(tp1->data); 3152 tp1->data = NULL; 3153 } 3154 wake_him++; 3155 } 3156 } 3157 break; 3158 } /* if (tp1->tsn == theTSN) */ 3159 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) { 3160 break; 3161 } 3162 tp1 = TAILQ_NEXT(tp1, sctp_next); 3163 if ((tp1 == NULL) && (circled == 0)) { 3164 circled++; 3165 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 3166 } 3167 } /* end while (tp1) */ 3168 if (tp1 == NULL) { 3169 circled = 0; 3170 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 3171 } 3172 /* In case the fragments were not in order we must reset */ 3173 } /* end for (j = fragStart */ 3174 *p_tp1 = tp1; 3175 return (wake_him); /* Return value only used for nr-sack */ 3176 } 3177 3178 static int 3179 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc, 3180 uint32_t last_tsn, uint32_t *biggest_tsn_acked, 3181 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack, 3182 int num_seg, int num_nr_seg, int *rto_ok) 3183 { 3184 struct sctp_gap_ack_block *frag, block; 3185 struct sctp_tmit_chunk *tp1; 3186 int i; 3187 int num_frs = 0; 3188 int chunk_freed; 3189 int non_revocable; 3190 uint16_t frag_strt, frag_end, prev_frag_end; 3191 3192 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3193 prev_frag_end = 0; 3194 chunk_freed = 0; 3195 3196 for (i = 0; i < (num_seg + num_nr_seg); i++) { 3197 if (i == num_seg) { 3198 prev_frag_end = 0; 3199 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3200 } 3201 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset, 3202 sizeof(struct sctp_gap_ack_block), 
(uint8_t *)&block); 3203 *offset += sizeof(block); 3204 if (frag == NULL) { 3205 return (chunk_freed); 3206 } 3207 frag_strt = ntohs(frag->start); 3208 frag_end = ntohs(frag->end); 3209 3210 if (frag_strt > frag_end) { 3211 /* This gap report is malformed, skip it. */ 3212 continue; 3213 } 3214 if (frag_strt <= prev_frag_end) { 3215 /* This gap report is not in order, so restart. */ 3216 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3217 } 3218 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) { 3219 *biggest_tsn_acked = last_tsn + frag_end; 3220 } 3221 if (i < num_seg) { 3222 non_revocable = 0; 3223 } else { 3224 non_revocable = 1; 3225 } 3226 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end, 3227 non_revocable, &num_frs, biggest_newly_acked_tsn, 3228 this_sack_lowest_newack, rto_ok)) { 3229 chunk_freed = 1; 3230 } 3231 prev_frag_end = frag_end; 3232 } 3233 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3234 if (num_frs) 3235 sctp_log_fr(*biggest_tsn_acked, 3236 *biggest_newly_acked_tsn, 3237 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS); 3238 } 3239 return (chunk_freed); 3240 } 3241 3242 static void 3243 sctp_check_for_revoked(struct sctp_tcb *stcb, 3244 struct sctp_association *asoc, uint32_t cumack, 3245 uint32_t biggest_tsn_acked) 3246 { 3247 struct sctp_tmit_chunk *tp1; 3248 3249 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3250 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) { 3251 /* 3252 * ok this guy is either ACK or MARKED. If it is 3253 * ACKED it has been previously acked but not this 3254 * time i.e. revoked. If it is MARKED it was ACK'ed 3255 * again. 3256 */ 3257 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) { 3258 break; 3259 } 3260 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 3261 /* it has been revoked */ 3262 tp1->sent = SCTP_DATAGRAM_SENT; 3263 tp1->rec.data.chunk_was_revoked = 1; 3264 /* 3265 * We must add this stuff back in to assure 3266 * timers and such get started. 3267 */ 3268 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3269 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 3270 tp1->whoTo->flight_size, 3271 tp1->book_size, 3272 (uint32_t)(uintptr_t)tp1->whoTo, 3273 tp1->rec.data.tsn); 3274 } 3275 sctp_flight_size_increase(tp1); 3276 sctp_total_flight_increase(stcb, tp1); 3277 /* 3278 * We inflate the cwnd to compensate for our 3279 * artificial inflation of the flight_size. 3280 */ 3281 tp1->whoTo->cwnd += tp1->book_size; 3282 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3283 sctp_log_sack(asoc->last_acked_seq, 3284 cumack, 3285 tp1->rec.data.tsn, 3286 0, 3287 0, 3288 SCTP_LOG_TSN_REVOKED); 3289 } 3290 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) { 3291 /* it has been re-acked in this SACK */ 3292 tp1->sent = SCTP_DATAGRAM_ACKED; 3293 } 3294 } 3295 if (tp1->sent == SCTP_DATAGRAM_UNSENT) 3296 break; 3297 } 3298 } 3299 3300 static void 3301 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, 3302 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved) 3303 { 3304 struct sctp_tmit_chunk *tp1; 3305 int strike_flag = 0; 3306 struct timeval now; 3307 uint32_t sending_seq; 3308 struct sctp_nets *net; 3309 int num_dests_sacked = 0; 3310 3311 /* 3312 * select the sending_seq, this is either the next thing ready to be 3313 * sent but not transmitted, OR, the next seq we assign. 
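 * Informal sketch: if the send queue has a chunk, sending_seq is
 * that chunk's TSN; otherwise it is asoc->sending_seq, the next
 * TSN that would be assigned. It is later compared against
 * fast_retran_tsn to tell whether a SACK acks data sent after a
 * fast retransmit.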
3314 */ 3315 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 3316 if (tp1 == NULL) { 3317 sending_seq = asoc->sending_seq; 3318 } else { 3319 sending_seq = tp1->rec.data.tsn; 3320 } 3321 3322 /* CMT DAC algo: finding out if SACK is a mixed SACK */ 3323 if ((asoc->sctp_cmt_on_off > 0) && 3324 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3325 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3326 if (net->saw_newack) 3327 num_dests_sacked++; 3328 } 3329 } 3330 if (stcb->asoc.prsctp_supported) { 3331 (void)SCTP_GETTIME_TIMEVAL(&now); 3332 } 3333 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3334 strike_flag = 0; 3335 if (tp1->no_fr_allowed) { 3336 /* this one had a timeout or something */ 3337 continue; 3338 } 3339 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3340 if (tp1->sent < SCTP_DATAGRAM_RESEND) 3341 sctp_log_fr(biggest_tsn_newly_acked, 3342 tp1->rec.data.tsn, 3343 tp1->sent, 3344 SCTP_FR_LOG_CHECK_STRIKE); 3345 } 3346 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) || 3347 tp1->sent == SCTP_DATAGRAM_UNSENT) { 3348 /* done */ 3349 break; 3350 } 3351 if (stcb->asoc.prsctp_supported) { 3352 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3353 /* Is it expired? */ 3354 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3355 /* Yes so drop it */ 3356 if (tp1->data != NULL) { 3357 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, 3358 SCTP_SO_NOT_LOCKED); 3359 } 3360 continue; 3361 } 3362 } 3363 } 3364 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) && 3365 !(accum_moved && asoc->fast_retran_loss_recovery)) { 3366 /* we are beyond the tsn in the sack */ 3367 break; 3368 } 3369 if (tp1->sent >= SCTP_DATAGRAM_RESEND) { 3370 /* either a RESEND, ACKED, or MARKED */ 3371 /* skip */ 3372 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) { 3373 /* Continue strikin FWD-TSN chunks */ 3374 tp1->rec.data.fwd_tsn_cnt++; 3375 } 3376 continue; 3377 } 3378 /* 3379 * CMT : SFR algo (covers part of DAC and HTNA as well) 3380 */ 3381 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) { 3382 /* 3383 * No new acks were received for data sent to this 3384 * dest. Therefore, according to the SFR algo for 3385 * CMT, no data sent to this dest can be marked for 3386 * FR using this SACK. 3387 */ 3388 continue; 3389 } else if (tp1->whoTo && 3390 SCTP_TSN_GT(tp1->rec.data.tsn, 3391 tp1->whoTo->this_sack_highest_newack) && 3392 !(accum_moved && asoc->fast_retran_loss_recovery)) { 3393 /* 3394 * CMT: New acks were received for data sent to this 3395 * dest. But no new acks were seen for data sent 3396 * after tp1. Therefore, according to the SFR algo 3397 * for CMT, tp1 cannot be marked for FR using this 3398 * SACK. This step covers part of the DAC algo and 3399 * the HTNA algo as well. 3400 */ 3401 continue; 3402 } 3403 /* 3404 * Here we check to see if we were have already done a FR 3405 * and if so we see if the biggest TSN we saw in the sack is 3406 * smaller than the recovery point. If so we don't strike 3407 * the tsn... otherwise we CAN strike the TSN. 3408 */ 3409 /* 3410 * @@@ JRI: Check for CMT if (accum_moved && 3411 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 3412 * 0)) { 3413 */ 3414 if (accum_moved && asoc->fast_retran_loss_recovery) { 3415 /* 3416 * Strike the TSN if in fast-recovery and cum-ack 3417 * moved. 
3418 */ 3419 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3420 sctp_log_fr(biggest_tsn_newly_acked, 3421 tp1->rec.data.tsn, 3422 tp1->sent, 3423 SCTP_FR_LOG_STRIKE_CHUNK); 3424 } 3425 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3426 tp1->sent++; 3427 } 3428 if ((asoc->sctp_cmt_on_off > 0) && 3429 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3430 /* 3431 * CMT DAC algorithm: If SACK flag is set to 3432 * 0, then lowest_newack test will not pass 3433 * because it would have been set to the 3434 * cumack earlier. If not already to be 3435 * rtx'd, If not a mixed sack and if tp1 is 3436 * not between two sacked TSNs, then mark by 3437 * one more. NOTE that we are marking by one 3438 * additional time since the SACK DAC flag 3439 * indicates that two packets have been 3440 * received after this missing TSN. 3441 */ 3442 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3443 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) { 3444 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3445 sctp_log_fr(16 + num_dests_sacked, 3446 tp1->rec.data.tsn, 3447 tp1->sent, 3448 SCTP_FR_LOG_STRIKE_CHUNK); 3449 } 3450 tp1->sent++; 3451 } 3452 } 3453 } else if ((tp1->rec.data.doing_fast_retransmit) && 3454 (asoc->sctp_cmt_on_off == 0)) { 3455 /* 3456 * For those that have done a FR we must take 3457 * special consideration if we strike. I.e the 3458 * biggest_newly_acked must be higher than the 3459 * sending_seq at the time we did the FR. 3460 */ 3461 if ( 3462 #ifdef SCTP_FR_TO_ALTERNATE 3463 /* 3464 * If FR's go to new networks, then we must only do 3465 * this for singly homed asoc's. However if the FR's 3466 * go to the same network (Armando's work) then its 3467 * ok to FR multiple times. 3468 */ 3469 (asoc->numnets < 2) 3470 #else 3471 (1) 3472 #endif 3473 ) { 3474 if (SCTP_TSN_GE(biggest_tsn_newly_acked, 3475 tp1->rec.data.fast_retran_tsn)) { 3476 /* 3477 * Strike the TSN, since this ack is 3478 * beyond where things were when we 3479 * did a FR. 3480 */ 3481 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3482 sctp_log_fr(biggest_tsn_newly_acked, 3483 tp1->rec.data.tsn, 3484 tp1->sent, 3485 SCTP_FR_LOG_STRIKE_CHUNK); 3486 } 3487 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3488 tp1->sent++; 3489 } 3490 strike_flag = 1; 3491 if ((asoc->sctp_cmt_on_off > 0) && 3492 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3493 /* 3494 * CMT DAC algorithm: If 3495 * SACK flag is set to 0, 3496 * then lowest_newack test 3497 * will not pass because it 3498 * would have been set to 3499 * the cumack earlier. If 3500 * not already to be rtx'd, 3501 * If not a mixed sack and 3502 * if tp1 is not between two 3503 * sacked TSNs, then mark by 3504 * one more. NOTE that we 3505 * are marking by one 3506 * additional time since the 3507 * SACK DAC flag indicates 3508 * that two packets have 3509 * been received after this 3510 * missing TSN. 3511 */ 3512 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 3513 (num_dests_sacked == 1) && 3514 SCTP_TSN_GT(this_sack_lowest_newack, 3515 tp1->rec.data.tsn)) { 3516 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3517 sctp_log_fr(32 + num_dests_sacked, 3518 tp1->rec.data.tsn, 3519 tp1->sent, 3520 SCTP_FR_LOG_STRIKE_CHUNK); 3521 } 3522 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3523 tp1->sent++; 3524 } 3525 } 3526 } 3527 } 3528 } 3529 /* 3530 * JRI: TODO: remove code for HTNA algo. CMT's SFR 3531 * algo covers HTNA. 
3532 */ 3533 } else if (SCTP_TSN_GT(tp1->rec.data.tsn, 3534 biggest_tsn_newly_acked)) { 3535 /* 3536 * We don't strike these: This is the HTNA 3537 * algorithm i.e. we don't strike If our TSN is 3538 * larger than the Highest TSN Newly Acked. 3539 */ 3540 ; 3541 } else { 3542 /* Strike the TSN */ 3543 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3544 sctp_log_fr(biggest_tsn_newly_acked, 3545 tp1->rec.data.tsn, 3546 tp1->sent, 3547 SCTP_FR_LOG_STRIKE_CHUNK); 3548 } 3549 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3550 tp1->sent++; 3551 } 3552 if ((asoc->sctp_cmt_on_off > 0) && 3553 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3554 /* 3555 * CMT DAC algorithm: If SACK flag is set to 3556 * 0, then lowest_newack test will not pass 3557 * because it would have been set to the 3558 * cumack earlier. If not already to be 3559 * rtx'd, If not a mixed sack and if tp1 is 3560 * not between two sacked TSNs, then mark by 3561 * one more. NOTE that we are marking by one 3562 * additional time since the SACK DAC flag 3563 * indicates that two packets have been 3564 * received after this missing TSN. 3565 */ 3566 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3567 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) { 3568 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3569 sctp_log_fr(48 + num_dests_sacked, 3570 tp1->rec.data.tsn, 3571 tp1->sent, 3572 SCTP_FR_LOG_STRIKE_CHUNK); 3573 } 3574 tp1->sent++; 3575 } 3576 } 3577 } 3578 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3579 struct sctp_nets *alt; 3580 3581 /* fix counts and things */ 3582 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3583 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND, 3584 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0), 3585 tp1->book_size, 3586 (uint32_t)(uintptr_t)tp1->whoTo, 3587 tp1->rec.data.tsn); 3588 } 3589 if (tp1->whoTo) { 3590 tp1->whoTo->net_ack++; 3591 sctp_flight_size_decrease(tp1); 3592 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3593 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3594 tp1); 3595 } 3596 } 3597 3598 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 3599 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, 3600 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3601 } 3602 /* add back to the rwnd */ 3603 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3604 3605 /* remove from the total flight */ 3606 sctp_total_flight_decrease(stcb, tp1); 3607 3608 if ((stcb->asoc.prsctp_supported) && 3609 (PR_SCTP_RTX_ENABLED(tp1->flags))) { 3610 /* 3611 * Has it been retransmitted tv_sec times? - 3612 * we store the retran count there. 
3613 */ 3614 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) { 3615 /* Yes, so drop it */ 3616 if (tp1->data != NULL) { 3617 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, 3618 SCTP_SO_NOT_LOCKED); 3619 } 3620 /* Make sure to flag we had a FR */ 3621 if (tp1->whoTo != NULL) { 3622 tp1->whoTo->net_ack++; 3623 } 3624 continue; 3625 } 3626 } 3627 /* 3628 * SCTP_PRINTF("OK, we are now ready to FR this 3629 * guy\n"); 3630 */ 3631 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3632 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count, 3633 0, SCTP_FR_MARKED); 3634 } 3635 if (strike_flag) { 3636 /* This is a subsequent FR */ 3637 SCTP_STAT_INCR(sctps_sendmultfastretrans); 3638 } 3639 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3640 if (asoc->sctp_cmt_on_off > 0) { 3641 /* 3642 * CMT: Using RTX_SSTHRESH policy for CMT. 3643 * If CMT is being used, then pick dest with 3644 * largest ssthresh for any retransmission. 3645 */ 3646 tp1->no_fr_allowed = 1; 3647 alt = tp1->whoTo; 3648 /* sa_ignore NO_NULL_CHK */ 3649 if (asoc->sctp_cmt_pf > 0) { 3650 /* 3651 * JRS 5/18/07 - If CMT PF is on, 3652 * use the PF version of 3653 * find_alt_net() 3654 */ 3655 alt = sctp_find_alternate_net(stcb, alt, 2); 3656 } else { 3657 /* 3658 * JRS 5/18/07 - If only CMT is on, 3659 * use the CMT version of 3660 * find_alt_net() 3661 */ 3662 /* sa_ignore NO_NULL_CHK */ 3663 alt = sctp_find_alternate_net(stcb, alt, 1); 3664 } 3665 if (alt == NULL) { 3666 alt = tp1->whoTo; 3667 } 3668 /* 3669 * CUCv2: If a different dest is picked for 3670 * the retransmission, then new 3671 * (rtx-)pseudo_cumack needs to be tracked 3672 * for orig dest. Let CUCv2 track new (rtx-) 3673 * pseudo-cumack always. 3674 */ 3675 if (tp1->whoTo) { 3676 tp1->whoTo->find_pseudo_cumack = 1; 3677 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3678 } 3679 } else { /* CMT is OFF */ 3680 #ifdef SCTP_FR_TO_ALTERNATE 3681 /* Can we find an alternate? */ 3682 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); 3683 #else 3684 /* 3685 * default behavior is to NOT retransmit 3686 * FR's to an alternate. Armando Caro's 3687 * paper details why. 3688 */ 3689 alt = tp1->whoTo; 3690 #endif 3691 } 3692 3693 tp1->rec.data.doing_fast_retransmit = 1; 3694 /* mark the sending seq for possible subsequent FR's */ 3695 /* 3696 * SCTP_PRINTF("Marking TSN for FR new value %x\n", 3697 * (uint32_t)tpi->rec.data.tsn); 3698 */ 3699 if (TAILQ_EMPTY(&asoc->send_queue)) { 3700 /* 3701 * If the queue of send is empty then its 3702 * the next sequence number that will be 3703 * assigned so we subtract one from this to 3704 * get the one we last sent. 3705 */ 3706 tp1->rec.data.fast_retran_tsn = sending_seq; 3707 } else { 3708 /* 3709 * If there are chunks on the send queue 3710 * (unsent data that has made it from the 3711 * stream queues but not out the door, we 3712 * take the first one (which will have the 3713 * lowest TSN) and subtract one to get the 3714 * one we last sent. 3715 */ 3716 struct sctp_tmit_chunk *ttt; 3717 3718 ttt = TAILQ_FIRST(&asoc->send_queue); 3719 tp1->rec.data.fast_retran_tsn = 3720 ttt->rec.data.tsn; 3721 } 3722 3723 if (tp1->do_rtt) { 3724 /* 3725 * this guy had a RTO calculation pending on 3726 * it, cancel it 3727 */ 3728 if ((tp1->whoTo != NULL) && 3729 (tp1->whoTo->rto_needed == 0)) { 3730 tp1->whoTo->rto_needed = 1; 3731 } 3732 tp1->do_rtt = 0; 3733 } 3734 if (alt != tp1->whoTo) { 3735 /* yes, there is an alternate. 
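 * Switch the chunk over to it, releasing our reference on the old destination and taking one on the new.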
*/ 3736 sctp_free_remote_addr(tp1->whoTo); 3737 /* sa_ignore FREED_MEMORY */ 3738 tp1->whoTo = alt; 3739 atomic_add_int(&alt->ref_count, 1); 3740 } 3741 } 3742 } 3743 } 3744 3745 struct sctp_tmit_chunk * 3746 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb, 3747 struct sctp_association *asoc) 3748 { 3749 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL; 3750 struct timeval now; 3751 int now_filled = 0; 3752 3753 if (asoc->prsctp_supported == 0) { 3754 return (NULL); 3755 } 3756 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 3757 if (tp1->sent != SCTP_FORWARD_TSN_SKIP && 3758 tp1->sent != SCTP_DATAGRAM_RESEND && 3759 tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 3760 /* no chance to advance, out of here */ 3761 break; 3762 } 3763 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 3764 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || 3765 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { 3766 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 3767 asoc->advanced_peer_ack_point, 3768 tp1->rec.data.tsn, 0, 0); 3769 } 3770 } 3771 if (!PR_SCTP_ENABLED(tp1->flags)) { 3772 /* 3773 * We can't fwd-tsn past any that are reliable, i.e. 3774 * ones retransmitted until the asoc fails. 3775 */ 3776 break; 3777 } 3778 if (!now_filled) { 3779 (void)SCTP_GETTIME_TIMEVAL(&now); 3780 now_filled = 1; 3781 } 3782 /* 3783 * Now we have a chunk which is marked for another 3784 * retransmission to a PR-stream, but which has either run 3785 * out of its chances already OR has been marked to be 3786 * skipped now. Can we skip it if it's a resend? 3787 */ 3788 if (tp1->sent == SCTP_DATAGRAM_RESEND && 3789 (PR_SCTP_TTL_ENABLED(tp1->flags))) { 3790 /* 3791 * Now is this one marked for resend and its time is 3792 * now up? 3793 */ 3794 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3795 /* Yes, so drop it */ 3796 if (tp1->data) { 3797 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 3798 1, SCTP_SO_NOT_LOCKED); 3799 } 3800 } else { 3801 /* 3802 * No, we are done when we hit one marked 3803 * for resend whose time has not expired. 3804 */ 3805 break; 3806 } 3807 } 3808 /* 3809 * OK, now if this chunk is marked to be dropped, we can 3810 * clean it up, advance our peer ack point, and check 3811 * the next chunk.
3812 */ 3813 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || 3814 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { 3815 /* advance PeerAckPoint goes forward */ 3816 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) { 3817 asoc->advanced_peer_ack_point = tp1->rec.data.tsn; 3818 a_adv = tp1; 3819 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) { 3820 /* No update but we do save the chk */ 3821 a_adv = tp1; 3822 } 3823 } else { 3824 /* 3825 * If it is still in RESEND we can advance no 3826 * further 3827 */ 3828 break; 3829 } 3830 } 3831 return (a_adv); 3832 } 3833 3834 static int 3835 sctp_fs_audit(struct sctp_association *asoc) 3836 { 3837 struct sctp_tmit_chunk *chk; 3838 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0; 3839 int ret; 3840 #ifndef INVARIANTS 3841 int entry_flight, entry_cnt; 3842 #endif 3843 3844 ret = 0; 3845 #ifndef INVARIANTS 3846 entry_flight = asoc->total_flight; 3847 entry_cnt = asoc->total_flight_count; 3848 #endif 3849 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt) 3850 return (0); 3851 3852 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 3853 if (chk->sent < SCTP_DATAGRAM_RESEND) { 3854 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n", 3855 chk->rec.data.tsn, 3856 chk->send_size, 3857 chk->snd_count); 3858 inflight++; 3859 } else if (chk->sent == SCTP_DATAGRAM_RESEND) { 3860 resend++; 3861 } else if (chk->sent < SCTP_DATAGRAM_ACKED) { 3862 inbetween++; 3863 } else if (chk->sent > SCTP_DATAGRAM_ACKED) { 3864 above++; 3865 } else { 3866 acked++; 3867 } 3868 } 3869 3870 if ((inflight > 0) || (inbetween > 0)) { 3871 #ifdef INVARIANTS 3872 panic("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d", 3873 inflight, inbetween, resend, above, acked); 3874 #else 3875 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n", 3876 entry_flight, entry_cnt); 3877 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n", 3878 inflight, inbetween, resend, above, acked); 3879 ret = 1; 3880 #endif 3881 } 3882 return (ret); 3883 } 3884 3885 static void 3886 sctp_window_probe_recovery(struct sctp_tcb *stcb, 3887 struct sctp_association *asoc, 3888 struct sctp_tmit_chunk *tp1) 3889 { 3890 tp1->window_probe = 0; 3891 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) { 3892 /* TSN's skipped we do NOT move back. */ 3893 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD, 3894 tp1->whoTo ? 
tp1->whoTo->flight_size : 0, 3895 tp1->book_size, 3896 (uint32_t)(uintptr_t)tp1->whoTo, 3897 tp1->rec.data.tsn); 3898 return; 3899 } 3900 /* First setup this by shrinking flight */ 3901 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3902 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3903 tp1); 3904 } 3905 sctp_flight_size_decrease(tp1); 3906 sctp_total_flight_decrease(stcb, tp1); 3907 /* Now mark for resend */ 3908 tp1->sent = SCTP_DATAGRAM_RESEND; 3909 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 3910 3911 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3912 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP, 3913 tp1->whoTo->flight_size, 3914 tp1->book_size, 3915 (uint32_t)(uintptr_t)tp1->whoTo, 3916 tp1->rec.data.tsn); 3917 } 3918 } 3919 3920 void 3921 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, 3922 uint32_t rwnd, int *abort_now, int ecne_seen) 3923 { 3924 struct sctp_nets *net; 3925 struct sctp_association *asoc; 3926 struct sctp_tmit_chunk *tp1, *tp2; 3927 uint32_t old_rwnd; 3928 int win_probe_recovery = 0; 3929 int win_probe_recovered = 0; 3930 int j, done_once = 0; 3931 int rto_ok = 1; 3932 uint32_t send_s; 3933 3934 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 3935 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack, 3936 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 3937 } 3938 SCTP_TCB_LOCK_ASSERT(stcb); 3939 #ifdef SCTP_ASOCLOG_OF_TSNS 3940 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack; 3941 stcb->asoc.cumack_log_at++; 3942 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 3943 stcb->asoc.cumack_log_at = 0; 3944 } 3945 #endif 3946 asoc = &stcb->asoc; 3947 old_rwnd = asoc->peers_rwnd; 3948 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) { 3949 /* old ack */ 3950 return; 3951 } else if (asoc->last_acked_seq == cumack) { 3952 /* Window update sack */ 3953 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 3954 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 3955 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 3956 /* SWS sender side engages */ 3957 asoc->peers_rwnd = 0; 3958 } 3959 if (asoc->peers_rwnd > old_rwnd) { 3960 goto again; 3961 } 3962 return; 3963 } 3964 3965 /* First setup for CC stuff */ 3966 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3967 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) { 3968 /* Drag along the window_tsn for cwr's */ 3969 net->cwr_window_tsn = cumack; 3970 } 3971 net->prev_cwnd = net->cwnd; 3972 net->net_ack = 0; 3973 net->net_ack2 = 0; 3974 3975 /* 3976 * CMT: Reset CUC and Fast recovery algo variables before 3977 * SACK processing 3978 */ 3979 net->new_pseudo_cumack = 0; 3980 net->will_exit_fast_recovery = 0; 3981 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 3982 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 3983 } 3984 } 3985 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 3986 tp1 = TAILQ_LAST(&asoc->sent_queue, 3987 sctpchunk_listhead); 3988 send_s = tp1->rec.data.tsn + 1; 3989 } else { 3990 send_s = asoc->sending_seq; 3991 } 3992 if (SCTP_TSN_GE(cumack, send_s)) { 3993 struct mbuf *op_err; 3994 char msg[SCTP_DIAG_INFO_LEN]; 3995 3996 *abort_now = 1; 3997 /* XXX */ 3998 SCTP_SNPRINTF(msg, sizeof(msg), 3999 "Cum ack %8.8x greater or equal than TSN %8.8x", 4000 cumack, send_s); 4001 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 4002 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 4003 
sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 4004 return; 4005 } 4006 asoc->this_sack_highest_gap = cumack; 4007 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4008 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4009 stcb->asoc.overall_error_count, 4010 0, 4011 SCTP_FROM_SCTP_INDATA, 4012 __LINE__); 4013 } 4014 stcb->asoc.overall_error_count = 0; 4015 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) { 4016 /* process the new consecutive TSN first */ 4017 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 4018 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) { 4019 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 4020 SCTP_PRINTF("Warning, an unsent is now acked?\n"); 4021 } 4022 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4023 /* 4024 * If it is less than ACKED, it is 4025 * now no-longer in flight. Higher 4026 * values may occur during marking 4027 */ 4028 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4029 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4030 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4031 tp1->whoTo->flight_size, 4032 tp1->book_size, 4033 (uint32_t)(uintptr_t)tp1->whoTo, 4034 tp1->rec.data.tsn); 4035 } 4036 sctp_flight_size_decrease(tp1); 4037 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 4038 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 4039 tp1); 4040 } 4041 /* sa_ignore NO_NULL_CHK */ 4042 sctp_total_flight_decrease(stcb, tp1); 4043 } 4044 tp1->whoTo->net_ack += tp1->send_size; 4045 if (tp1->snd_count < 2) { 4046 /* 4047 * True non-retransmitted 4048 * chunk 4049 */ 4050 tp1->whoTo->net_ack2 += 4051 tp1->send_size; 4052 4053 /* update RTO too? */ 4054 if (tp1->do_rtt) { 4055 if (rto_ok && 4056 sctp_calculate_rto(stcb, 4057 &stcb->asoc, 4058 tp1->whoTo, 4059 &tp1->sent_rcv_time, 4060 SCTP_RTT_FROM_DATA)) { 4061 rto_ok = 0; 4062 } 4063 if (tp1->whoTo->rto_needed == 0) { 4064 tp1->whoTo->rto_needed = 1; 4065 } 4066 tp1->do_rtt = 0; 4067 } 4068 } 4069 /* 4070 * CMT: CUCv2 algorithm. From the 4071 * cumack'd TSNs, for each TSN being 4072 * acked for the first time, set the 4073 * following variables for the 4074 * corresp destination. 4075 * new_pseudo_cumack will trigger a 4076 * cwnd update. 4077 * find_(rtx_)pseudo_cumack will 4078 * trigger search for the next 4079 * expected (rtx-)pseudo-cumack. 
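 * (A pseudo-cumack is a per-destination cumulative ack; tracking it lets CUCv2 keep cwnd growth going even when TSNs arrive out of order across paths.)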
4080 */ 4081 tp1->whoTo->new_pseudo_cumack = 1; 4082 tp1->whoTo->find_pseudo_cumack = 1; 4083 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4084 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 4085 /* sa_ignore NO_NULL_CHK */ 4086 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); 4087 } 4088 } 4089 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4090 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4091 } 4092 if (tp1->rec.data.chunk_was_revoked) { 4093 /* deflate the cwnd */ 4094 tp1->whoTo->cwnd -= tp1->book_size; 4095 tp1->rec.data.chunk_was_revoked = 0; 4096 } 4097 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4098 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) { 4099 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--; 4100 #ifdef INVARIANTS 4101 } else { 4102 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); 4103 #endif 4104 } 4105 } 4106 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) && 4107 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && 4108 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) { 4109 asoc->trigger_reset = 1; 4110 } 4111 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4112 if (tp1->data) { 4113 /* sa_ignore NO_NULL_CHK */ 4114 sctp_free_bufspace(stcb, asoc, tp1, 1); 4115 sctp_m_freem(tp1->data); 4116 tp1->data = NULL; 4117 } 4118 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4119 sctp_log_sack(asoc->last_acked_seq, 4120 cumack, 4121 tp1->rec.data.tsn, 4122 0, 4123 0, 4124 SCTP_LOG_FREE_SENT); 4125 } 4126 asoc->sent_queue_cnt--; 4127 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 4128 } else { 4129 break; 4130 } 4131 } 4132 } 4133 /* sa_ignore NO_NULL_CHK */ 4134 if (stcb->sctp_socket) { 4135 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4136 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4137 /* sa_ignore NO_NULL_CHK */ 4138 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK); 4139 } 4140 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4141 } else { 4142 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4143 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK); 4144 } 4145 } 4146 4147 /* JRS - Use the congestion control given in the CC module */ 4148 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) { 4149 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4150 if (net->net_ack2 > 0) { 4151 /* 4152 * Karn's rule applies to clearing error 4153 * count, this is optional. 
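 * Only net_ack2 (bytes acked that were never retransmitted) is an unambiguous reachability signal, hence the net_ack2 check above.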
4154 */ 4155 net->error_count = 0; 4156 if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) { 4157 /* addr came good */ 4158 net->dest_state |= SCTP_ADDR_REACHABLE; 4159 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 4160 0, (void *)net, SCTP_SO_NOT_LOCKED); 4161 } 4162 if (net == stcb->asoc.primary_destination) { 4163 if (stcb->asoc.alternate) { 4164 /* 4165 * release the alternate, 4166 * primary is good 4167 */ 4168 sctp_free_remote_addr(stcb->asoc.alternate); 4169 stcb->asoc.alternate = NULL; 4170 } 4171 } 4172 if (net->dest_state & SCTP_ADDR_PF) { 4173 net->dest_state &= ~SCTP_ADDR_PF; 4174 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, 4175 stcb->sctp_ep, stcb, net, 4176 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); 4177 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 4178 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 4179 /* Done with this net */ 4180 net->net_ack = 0; 4181 } 4182 /* restore any doubled timers */ 4183 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 4184 if (net->RTO < stcb->asoc.minrto) { 4185 net->RTO = stcb->asoc.minrto; 4186 } 4187 if (net->RTO > stcb->asoc.maxrto) { 4188 net->RTO = stcb->asoc.maxrto; 4189 } 4190 } 4191 } 4192 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0); 4193 } 4194 asoc->last_acked_seq = cumack; 4195 4196 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4197 /* nothing left in-flight */ 4198 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4199 net->flight_size = 0; 4200 net->partial_bytes_acked = 0; 4201 } 4202 asoc->total_flight = 0; 4203 asoc->total_flight_count = 0; 4204 } 4205 4206 /* RWND update */ 4207 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 4208 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 4209 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4210 /* SWS sender side engages */ 4211 asoc->peers_rwnd = 0; 4212 } 4213 if (asoc->peers_rwnd > old_rwnd) { 4214 win_probe_recovery = 1; 4215 } 4216 /* Now assure a timer where data is queued at */ 4217 again: 4218 j = 0; 4219 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4220 if (win_probe_recovery && (net->window_probe)) { 4221 win_probe_recovered = 1; 4222 /* 4223 * Find first chunk that was used with window probe 4224 * and clear the sent 4225 */ 4226 /* sa_ignore FREED_MEMORY */ 4227 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4228 if (tp1->window_probe) { 4229 /* move back to data send queue */ 4230 sctp_window_probe_recovery(stcb, asoc, tp1); 4231 break; 4232 } 4233 } 4234 } 4235 if (net->flight_size) { 4236 j++; 4237 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net); 4238 if (net->window_probe) { 4239 net->window_probe = 0; 4240 } 4241 } else { 4242 if (net->window_probe) { 4243 /* 4244 * In window probes we must assure a timer 4245 * is still running there 4246 */ 4247 net->window_probe = 0; 4248 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4249 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net); 4250 } 4251 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4252 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4253 stcb, net, 4254 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27); 4255 } 4256 } 4257 } 4258 if ((j == 0) && 4259 (!TAILQ_EMPTY(&asoc->sent_queue)) && 4260 (asoc->sent_queue_retran_cnt == 0) && 4261 (win_probe_recovered == 0) && 4262 (done_once == 0)) { 4263 /* 4264 * huh, this should not happen unless all packets are 4265 * PR-SCTP and marked to skip of course. 
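 * Audit the flight size; if the bookkeeping is inconsistent, rebuild it from the sent queue and rescan the timers once more.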
4266 */ 4267 if (sctp_fs_audit(asoc)) { 4268 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4269 net->flight_size = 0; 4270 } 4271 asoc->total_flight = 0; 4272 asoc->total_flight_count = 0; 4273 asoc->sent_queue_retran_cnt = 0; 4274 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4275 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4276 sctp_flight_size_increase(tp1); 4277 sctp_total_flight_increase(stcb, tp1); 4278 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4279 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 4280 } 4281 } 4282 } 4283 done_once = 1; 4284 goto again; 4285 } 4286 /**********************************/ 4287 /* Now what about shutdown issues */ 4288 /**********************************/ 4289 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4290 /* nothing left on sendqueue.. consider done */ 4291 /* clean up */ 4292 if ((asoc->stream_queue_cnt == 1) && 4293 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4294 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 4295 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) { 4296 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT); 4297 } 4298 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4299 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 4300 (asoc->stream_queue_cnt == 1) && 4301 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 4302 struct mbuf *op_err; 4303 4304 *abort_now = 1; 4305 /* XXX */ 4306 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 4307 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28; 4308 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 4309 return; 4310 } 4311 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4312 (asoc->stream_queue_cnt == 0)) { 4313 struct sctp_nets *netp; 4314 4315 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4316 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4317 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4318 } 4319 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT); 4320 sctp_stop_timers_for_shutdown(stcb); 4321 if (asoc->alternate) { 4322 netp = asoc->alternate; 4323 } else { 4324 netp = asoc->primary_destination; 4325 } 4326 sctp_send_shutdown(stcb, netp); 4327 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4328 stcb->sctp_ep, stcb, netp); 4329 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4330 stcb->sctp_ep, stcb, NULL); 4331 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4332 (asoc->stream_queue_cnt == 0)) { 4333 struct sctp_nets *netp; 4334 4335 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4336 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT); 4337 sctp_stop_timers_for_shutdown(stcb); 4338 if (asoc->alternate) { 4339 netp = asoc->alternate; 4340 } else { 4341 netp = asoc->primary_destination; 4342 } 4343 sctp_send_shutdown_ack(stcb, netp); 4344 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4345 stcb->sctp_ep, stcb, netp); 4346 } 4347 } 4348 /*********************************************/ 4349 /* Here we perform PR-SCTP procedures */ 4350 /* (section 4.2) */ 4351 /*********************************************/ 4352 /* C1. 
update advancedPeerAckPoint */ 4353 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) { 4354 asoc->advanced_peer_ack_point = cumack; 4355 } 4356 /* PR-SCTP issues need to be addressed too */ 4357 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { 4358 struct sctp_tmit_chunk *lchk; 4359 uint32_t old_adv_peer_ack_point; 4360 4361 old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 4362 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 4363 /* C3. See if we need to send a Fwd-TSN */ 4364 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) { 4365 /* 4366 * ISSUE with ECN, see FWD-TSN processing. 4367 */ 4368 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 4369 send_forward_tsn(stcb, asoc); 4370 } else if (lchk) { 4371 /* try to FR fwd-tsn's that get lost too */ 4372 if (lchk->rec.data.fwd_tsn_cnt >= 3) { 4373 send_forward_tsn(stcb, asoc); 4374 } 4375 } 4376 } 4377 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) { 4378 if (lchk->whoTo != NULL) { 4379 break; 4380 } 4381 } 4382 if (lchk != NULL) { 4383 /* Assure a timer is up */ 4384 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4385 stcb->sctp_ep, stcb, lchk->whoTo); 4386 } 4387 } 4388 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 4389 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 4390 rwnd, 4391 stcb->asoc.peers_rwnd, 4392 stcb->asoc.total_flight, 4393 stcb->asoc.total_output_queue_size); 4394 } 4395 } 4396 4397 void 4398 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, 4399 struct sctp_tcb *stcb, 4400 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup, 4401 int *abort_now, uint8_t flags, 4402 uint32_t cum_ack, uint32_t rwnd, int ecne_seen) 4403 { 4404 struct sctp_association *asoc; 4405 struct sctp_tmit_chunk *tp1, *tp2; 4406 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack; 4407 uint16_t wake_him = 0; 4408 uint32_t send_s = 0; 4409 long j; 4410 int accum_moved = 0; 4411 int will_exit_fast_recovery = 0; 4412 uint32_t a_rwnd, old_rwnd; 4413 int win_probe_recovery = 0; 4414 int win_probe_recovered = 0; 4415 struct sctp_nets *net = NULL; 4416 int done_once; 4417 int rto_ok = 1; 4418 uint8_t reneged_all = 0; 4419 uint8_t cmt_dac_flag; 4420 4421 /* 4422 * We take any chance we can to service our queues, since we cannot 4423 * get woken up when the socket is read from :< 4424 */ 4425 /* 4426 * Now perform the actual SACK handling: 1) Verify that it is not an 4427 * old sack; if so, discard. 2) If there is nothing left in the send 4428 * queue (cum-ack is equal to last acked), then you have a duplicate 4429 * too; update any rwnd change, verify no timers are running, and 4430 * then return. 3) Process any new consecutive data, i.e. the 4431 * cum-ack moved; process these first and note that it moved. 4432 * 4) Process any sack blocks. 5) Drop any acked chunks from the 4433 * queue. 6) Check for any revoked blocks and mark them. 7) Update 4434 * the cwnd. 8) Nothing left: sync up flightsizes and things, stop 4435 * all timers, and also check for the shutdown_pending state. If so, 4436 * then go ahead and send off the shutdown. If in shutdown-recv, 4437 * send off the shutdown-ack, start that timer, and return. 4438 * 9) Strike any non-acked chunks and do the FR procedure if needed, 4439 * being sure to set the FR flag. 10) Do PR-SCTP procedures. 11) 4440 * Apply any FR penalties. 12) Assure we will SACK if in shutdown_recv state.
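 * (This is the slow path; pure cum-ack updates with no gap blocks are handled by sctp_express_handle_sack() above.)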
4441 */ 4442 SCTP_TCB_LOCK_ASSERT(stcb); 4443 /* CMT DAC algo */ 4444 this_sack_lowest_newack = 0; 4445 SCTP_STAT_INCR(sctps_slowpath_sack); 4446 last_tsn = cum_ack; 4447 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC; 4448 #ifdef SCTP_ASOCLOG_OF_TSNS 4449 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack; 4450 stcb->asoc.cumack_log_at++; 4451 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 4452 stcb->asoc.cumack_log_at = 0; 4453 } 4454 #endif 4455 a_rwnd = rwnd; 4456 4457 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 4458 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack, 4459 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 4460 } 4461 4462 old_rwnd = stcb->asoc.peers_rwnd; 4463 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4464 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4465 stcb->asoc.overall_error_count, 4466 0, 4467 SCTP_FROM_SCTP_INDATA, 4468 __LINE__); 4469 } 4470 stcb->asoc.overall_error_count = 0; 4471 asoc = &stcb->asoc; 4472 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4473 sctp_log_sack(asoc->last_acked_seq, 4474 cum_ack, 4475 0, 4476 num_seg, 4477 num_dup, 4478 SCTP_LOG_NEW_SACK); 4479 } 4480 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) { 4481 uint16_t i; 4482 uint32_t *dupdata, dblock; 4483 4484 for (i = 0; i < num_dup; i++) { 4485 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t), 4486 sizeof(uint32_t), (uint8_t *)&dblock); 4487 if (dupdata == NULL) { 4488 break; 4489 } 4490 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); 4491 } 4492 } 4493 /* reality check */ 4494 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 4495 tp1 = TAILQ_LAST(&asoc->sent_queue, 4496 sctpchunk_listhead); 4497 send_s = tp1->rec.data.tsn + 1; 4498 } else { 4499 tp1 = NULL; 4500 send_s = asoc->sending_seq; 4501 } 4502 if (SCTP_TSN_GE(cum_ack, send_s)) { 4503 struct mbuf *op_err; 4504 char msg[SCTP_DIAG_INFO_LEN]; 4505 4506 /* 4507 * no way, we have not even sent this TSN out yet. Peer is 4508 * hopelessly messed up with us. 
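 * Log the bogus cum-ack and abort the association with a protocol violation cause.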
4509 */ 4510 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n", 4511 cum_ack, send_s); 4512 if (tp1) { 4513 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n", 4514 tp1->rec.data.tsn, (void *)tp1); 4515 } 4516 hopeless_peer: 4517 *abort_now = 1; 4518 /* XXX */ 4519 SCTP_SNPRINTF(msg, sizeof(msg), 4520 "Cum ack %8.8x greater or equal than TSN %8.8x", 4521 cum_ack, send_s); 4522 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 4523 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_29; 4524 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 4525 return; 4526 } 4527 /**********************/ 4528 /* 1) check the range */ 4529 /**********************/ 4530 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) { 4531 /* acking something behind */ 4532 return; 4533 } 4534 4535 /* update the Rwnd of the peer */ 4536 if (TAILQ_EMPTY(&asoc->sent_queue) && 4537 TAILQ_EMPTY(&asoc->send_queue) && 4538 (asoc->stream_queue_cnt == 0)) { 4539 /* nothing left on send/sent and strmq */ 4540 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4541 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4542 asoc->peers_rwnd, 0, 0, a_rwnd); 4543 } 4544 asoc->peers_rwnd = a_rwnd; 4545 if (asoc->sent_queue_retran_cnt) { 4546 asoc->sent_queue_retran_cnt = 0; 4547 } 4548 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4549 /* SWS sender side engages */ 4550 asoc->peers_rwnd = 0; 4551 } 4552 /* stop any timers */ 4553 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4554 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4555 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30); 4556 net->partial_bytes_acked = 0; 4557 net->flight_size = 0; 4558 } 4559 asoc->total_flight = 0; 4560 asoc->total_flight_count = 0; 4561 return; 4562 } 4563 /* 4564 * We init netAckSz and netAckSz2 to 0. These are used to track 2 4565 * things. The total byte count acked is tracked in netAckSz AND 4566 * netAck2 is used to track the total bytes acked that are un- 4567 * ambiguous and were never retransmitted. We track these on a per 4568 * destination address basis. 4569 */ 4570 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4571 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) { 4572 /* Drag along the window_tsn for cwr's */ 4573 net->cwr_window_tsn = cum_ack; 4574 } 4575 net->prev_cwnd = net->cwnd; 4576 net->net_ack = 0; 4577 net->net_ack2 = 0; 4578 4579 /* 4580 * CMT: Reset CUC and Fast recovery algo variables before 4581 * SACK processing 4582 */ 4583 net->new_pseudo_cumack = 0; 4584 net->will_exit_fast_recovery = 0; 4585 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 4586 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 4587 } 4588 4589 /* 4590 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has 4591 * to be greater than the cumack. Also reset saw_newack to 0 4592 * for all dests. 4593 */ 4594 net->saw_newack = 0; 4595 net->this_sack_highest_newack = last_tsn; 4596 } 4597 /* process the new consecutive TSN first */ 4598 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4599 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) { 4600 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 4601 accum_moved = 1; 4602 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4603 /* 4604 * If it is less than ACKED, it is 4605 * now no-longer in flight. 
Higher 4606 * values may occur during marking 4607 */ 4608 if ((tp1->whoTo->dest_state & 4609 SCTP_ADDR_UNCONFIRMED) && 4610 (tp1->snd_count < 2)) { 4611 /* 4612 * If there was no retran 4613 * and the address is 4614 * un-confirmed and we sent 4615 * there and are now 4616 * sacked.. its confirmed, 4617 * mark it so. 4618 */ 4619 tp1->whoTo->dest_state &= 4620 ~SCTP_ADDR_UNCONFIRMED; 4621 } 4622 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4623 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4624 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4625 tp1->whoTo->flight_size, 4626 tp1->book_size, 4627 (uint32_t)(uintptr_t)tp1->whoTo, 4628 tp1->rec.data.tsn); 4629 } 4630 sctp_flight_size_decrease(tp1); 4631 sctp_total_flight_decrease(stcb, tp1); 4632 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 4633 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 4634 tp1); 4635 } 4636 } 4637 tp1->whoTo->net_ack += tp1->send_size; 4638 4639 /* CMT SFR and DAC algos */ 4640 this_sack_lowest_newack = tp1->rec.data.tsn; 4641 tp1->whoTo->saw_newack = 1; 4642 4643 if (tp1->snd_count < 2) { 4644 /* 4645 * True non-retransmitted 4646 * chunk 4647 */ 4648 tp1->whoTo->net_ack2 += 4649 tp1->send_size; 4650 4651 /* update RTO too? */ 4652 if (tp1->do_rtt) { 4653 if (rto_ok && 4654 sctp_calculate_rto(stcb, 4655 &stcb->asoc, 4656 tp1->whoTo, 4657 &tp1->sent_rcv_time, 4658 SCTP_RTT_FROM_DATA)) { 4659 rto_ok = 0; 4660 } 4661 if (tp1->whoTo->rto_needed == 0) { 4662 tp1->whoTo->rto_needed = 1; 4663 } 4664 tp1->do_rtt = 0; 4665 } 4666 } 4667 /* 4668 * CMT: CUCv2 algorithm. From the 4669 * cumack'd TSNs, for each TSN being 4670 * acked for the first time, set the 4671 * following variables for the 4672 * corresp destination. 4673 * new_pseudo_cumack will trigger a 4674 * cwnd update. 4675 * find_(rtx_)pseudo_cumack will 4676 * trigger search for the next 4677 * expected (rtx-)pseudo-cumack. 4678 */ 4679 tp1->whoTo->new_pseudo_cumack = 1; 4680 tp1->whoTo->find_pseudo_cumack = 1; 4681 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4682 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4683 sctp_log_sack(asoc->last_acked_seq, 4684 cum_ack, 4685 tp1->rec.data.tsn, 4686 0, 4687 0, 4688 SCTP_LOG_TSN_ACKED); 4689 } 4690 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 4691 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); 4692 } 4693 } 4694 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4695 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4696 #ifdef SCTP_AUDITING_ENABLED 4697 sctp_audit_log(0xB3, 4698 (asoc->sent_queue_retran_cnt & 0x000000ff)); 4699 #endif 4700 } 4701 if (tp1->rec.data.chunk_was_revoked) { 4702 /* deflate the cwnd */ 4703 tp1->whoTo->cwnd -= tp1->book_size; 4704 tp1->rec.data.chunk_was_revoked = 0; 4705 } 4706 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4707 tp1->sent = SCTP_DATAGRAM_ACKED; 4708 } 4709 } 4710 } else { 4711 break; 4712 } 4713 } 4714 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; 4715 /* always set this up to cum-ack */ 4716 asoc->this_sack_highest_gap = last_tsn; 4717 4718 if ((num_seg > 0) || (num_nr_seg > 0)) { 4719 /* 4720 * thisSackHighestGap will increase while handling NEW 4721 * segments this_sack_highest_newack will increase while 4722 * handling NEWLY ACKED chunks. this_sack_lowest_newack is 4723 * used for CMT DAC algo. saw_newack will also change. 
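 * (DAC = Delayed Ack for CMT; this_sack_lowest_newack feeds the extra strike marking in sctp_strike_gap_ack_chunks().)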
4724 */ 4725 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked, 4726 &biggest_tsn_newly_acked, &this_sack_lowest_newack, 4727 num_seg, num_nr_seg, &rto_ok)) { 4728 wake_him++; 4729 } 4730 /* 4731 * validate the biggest_tsn_acked in the gap acks if strict 4732 * adherence is wanted. 4733 */ 4734 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) { 4735 /* 4736 * peer is either confused or we are under attack. 4737 * We must abort. 4738 */ 4739 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n", 4740 biggest_tsn_acked, send_s); 4741 goto hopeless_peer; 4742 } 4743 } 4744 /*******************************************/ 4745 /* cancel ALL T3-send timer if accum moved */ 4746 /*******************************************/ 4747 if (asoc->sctp_cmt_on_off > 0) { 4748 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4749 if (net->new_pseudo_cumack) 4750 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4751 stcb, net, 4752 SCTP_FROM_SCTP_INDATA + SCTP_LOC_31); 4753 } 4754 } else { 4755 if (accum_moved) { 4756 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4757 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4758 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32); 4759 } 4760 } 4761 } 4762 /********************************************/ 4763 /* drop the acked chunks from the sentqueue */ 4764 /********************************************/ 4765 asoc->last_acked_seq = cum_ack; 4766 4767 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 4768 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) { 4769 break; 4770 } 4771 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4772 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) { 4773 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--; 4774 #ifdef INVARIANTS 4775 } else { 4776 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); 4777 #endif 4778 } 4779 } 4780 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) && 4781 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && 4782 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) { 4783 asoc->trigger_reset = 1; 4784 } 4785 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4786 if (PR_SCTP_ENABLED(tp1->flags)) { 4787 if (asoc->pr_sctp_cnt != 0) 4788 asoc->pr_sctp_cnt--; 4789 } 4790 asoc->sent_queue_cnt--; 4791 if (tp1->data) { 4792 /* sa_ignore NO_NULL_CHK */ 4793 sctp_free_bufspace(stcb, asoc, tp1, 1); 4794 sctp_m_freem(tp1->data); 4795 tp1->data = NULL; 4796 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) { 4797 asoc->sent_queue_cnt_removeable--; 4798 } 4799 } 4800 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4801 sctp_log_sack(asoc->last_acked_seq, 4802 cum_ack, 4803 tp1->rec.data.tsn, 4804 0, 4805 0, 4806 SCTP_LOG_FREE_SENT); 4807 } 4808 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 4809 wake_him++; 4810 } 4811 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) { 4812 #ifdef INVARIANTS 4813 panic("Warning flight size is positive and should be 0"); 4814 #else 4815 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n", 4816 asoc->total_flight); 4817 #endif 4818 asoc->total_flight = 0; 4819 } 4820 4821 /* sa_ignore NO_NULL_CHK */ 4822 if ((wake_him) && (stcb->sctp_socket)) { 4823 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4824 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4825 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK); 4826 } 4827 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4828 } else { 4829 if 
(SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4830 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK); 4831 } 4832 } 4833 4834 if (asoc->fast_retran_loss_recovery && accum_moved) { 4835 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) { 4836 /* Setup so we will exit RFC2582 fast recovery */ 4837 will_exit_fast_recovery = 1; 4838 } 4839 } 4840 /* 4841 * Check for revoked fragments: 4842 * 4843 * if Previous sack - Had no frags then we can't have any revoked if 4844 * Previous sack - Had frag's then - If we now have frags aka 4845 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked 4846 * some of them. else - The peer revoked all ACKED fragments, since 4847 * we had some before and now we have NONE. 4848 */ 4849 4850 if (num_seg) { 4851 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked); 4852 asoc->saw_sack_with_frags = 1; 4853 } else if (asoc->saw_sack_with_frags) { 4854 int cnt_revoked = 0; 4855 4856 /* Peer revoked all dg's marked or acked */ 4857 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4858 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 4859 tp1->sent = SCTP_DATAGRAM_SENT; 4860 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4861 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 4862 tp1->whoTo->flight_size, 4863 tp1->book_size, 4864 (uint32_t)(uintptr_t)tp1->whoTo, 4865 tp1->rec.data.tsn); 4866 } 4867 sctp_flight_size_increase(tp1); 4868 sctp_total_flight_increase(stcb, tp1); 4869 tp1->rec.data.chunk_was_revoked = 1; 4870 /* 4871 * To ensure that this increase in 4872 * flightsize, which is artificial, does not 4873 * throttle the sender, we also increase the 4874 * cwnd artificially. 4875 */ 4876 tp1->whoTo->cwnd += tp1->book_size; 4877 cnt_revoked++; 4878 } 4879 } 4880 if (cnt_revoked) { 4881 reneged_all = 1; 4882 } 4883 asoc->saw_sack_with_frags = 0; 4884 } 4885 if (num_nr_seg > 0) 4886 asoc->saw_sack_with_nr_frags = 1; 4887 else 4888 asoc->saw_sack_with_nr_frags = 0; 4889 4890 /* JRS - Use the congestion control given in the CC module */ 4891 if (ecne_seen == 0) { 4892 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4893 if (net->net_ack2 > 0) { 4894 /* 4895 * Karn's rule applies to clearing error 4896 * count, this is optional. 
4897 */ 4898 net->error_count = 0; 4899 if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) { 4900 /* addr came good */ 4901 net->dest_state |= SCTP_ADDR_REACHABLE; 4902 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 4903 0, (void *)net, SCTP_SO_NOT_LOCKED); 4904 } 4905 4906 if (net == stcb->asoc.primary_destination) { 4907 if (stcb->asoc.alternate) { 4908 /* 4909 * release the alternate, 4910 * primary is good 4911 */ 4912 sctp_free_remote_addr(stcb->asoc.alternate); 4913 stcb->asoc.alternate = NULL; 4914 } 4915 } 4916 4917 if (net->dest_state & SCTP_ADDR_PF) { 4918 net->dest_state &= ~SCTP_ADDR_PF; 4919 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, 4920 stcb->sctp_ep, stcb, net, 4921 SCTP_FROM_SCTP_INDATA + SCTP_LOC_33); 4922 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 4923 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 4924 /* Done with this net */ 4925 net->net_ack = 0; 4926 } 4927 /* restore any doubled timers */ 4928 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 4929 if (net->RTO < stcb->asoc.minrto) { 4930 net->RTO = stcb->asoc.minrto; 4931 } 4932 if (net->RTO > stcb->asoc.maxrto) { 4933 net->RTO = stcb->asoc.maxrto; 4934 } 4935 } 4936 } 4937 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery); 4938 } 4939 4940 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4941 /* nothing left in-flight */ 4942 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4943 /* stop all timers */ 4944 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4945 stcb, net, 4946 SCTP_FROM_SCTP_INDATA + SCTP_LOC_34); 4947 net->flight_size = 0; 4948 net->partial_bytes_acked = 0; 4949 } 4950 asoc->total_flight = 0; 4951 asoc->total_flight_count = 0; 4952 } 4953 4954 /**********************************/ 4955 /* Now what about shutdown issues */ 4956 /**********************************/ 4957 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4958 /* nothing left on sendqueue.. 
consider done */ 4959 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4960 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4961 asoc->peers_rwnd, 0, 0, a_rwnd); 4962 } 4963 asoc->peers_rwnd = a_rwnd; 4964 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4965 /* SWS sender side engages */ 4966 asoc->peers_rwnd = 0; 4967 } 4968 /* clean up */ 4969 if ((asoc->stream_queue_cnt == 1) && 4970 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4971 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 4972 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) { 4973 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT); 4974 } 4975 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4976 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 4977 (asoc->stream_queue_cnt == 1) && 4978 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 4979 struct mbuf *op_err; 4980 4981 *abort_now = 1; 4982 /* XXX */ 4983 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 4984 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35; 4985 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 4986 return; 4987 } 4988 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4989 (asoc->stream_queue_cnt == 0)) { 4990 struct sctp_nets *netp; 4991 4992 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4993 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4994 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4995 } 4996 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT); 4997 sctp_stop_timers_for_shutdown(stcb); 4998 if (asoc->alternate) { 4999 netp = asoc->alternate; 5000 } else { 5001 netp = asoc->primary_destination; 5002 } 5003 sctp_send_shutdown(stcb, netp); 5004 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 5005 stcb->sctp_ep, stcb, netp); 5006 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 5007 stcb->sctp_ep, stcb, NULL); 5008 return; 5009 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) && 5010 (asoc->stream_queue_cnt == 0)) { 5011 struct sctp_nets *netp; 5012 5013 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 5014 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT); 5015 sctp_stop_timers_for_shutdown(stcb); 5016 if (asoc->alternate) { 5017 netp = asoc->alternate; 5018 } else { 5019 netp = asoc->primary_destination; 5020 } 5021 sctp_send_shutdown_ack(stcb, netp); 5022 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 5023 stcb->sctp_ep, stcb, netp); 5024 return; 5025 } 5026 } 5027 /* 5028 * Now here we are going to recycle net_ack for a different use... 5029 * HEADS UP. 5030 */ 5031 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5032 net->net_ack = 0; 5033 } 5034 5035 /* 5036 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking 5037 * to be done. Setting this_sack_lowest_newack to the cum_ack will 5038 * automatically ensure that. 5039 */ 5040 if ((asoc->sctp_cmt_on_off > 0) && 5041 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && 5042 (cmt_dac_flag == 0)) { 5043 this_sack_lowest_newack = cum_ack; 5044 } 5045 if ((num_seg > 0) || (num_nr_seg > 0)) { 5046 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, 5047 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved); 5048 } 5049 /* JRS - Use the congestion control given in the CC module */ 5050 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc); 5051 5052 /* Now are we exiting loss recovery ? 
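 * Both the association-level RFC 2582 style fast recovery and the per-destination CMT fast recovery end once their recovery TSNs have been passed.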
*/ 5053 if (will_exit_fast_recovery) { 5054 /* OK, we must exit fast recovery */ 5055 asoc->fast_retran_loss_recovery = 0; 5056 } 5057 if ((asoc->sat_t3_loss_recovery) && 5058 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) { 5059 /* end satellite t3 loss recovery */ 5060 asoc->sat_t3_loss_recovery = 0; 5061 } 5062 /* 5063 * CMT Fast recovery 5064 */ 5065 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5066 if (net->will_exit_fast_recovery) { 5067 /* OK, we must exit fast recovery */ 5068 net->fast_retran_loss_recovery = 0; 5069 } 5070 } 5071 5072 /* Adjust and set the new rwnd value */ 5073 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 5074 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 5075 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd); 5076 } 5077 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd, 5078 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 5079 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 5080 /* SWS sender side engages */ 5081 asoc->peers_rwnd = 0; 5082 } 5083 if (asoc->peers_rwnd > old_rwnd) { 5084 win_probe_recovery = 1; 5085 } 5086 5087 /* 5088 * Now we must set up a timer for anyone with 5089 * outstanding data. 5090 */ 5091 done_once = 0; 5092 again: 5093 j = 0; 5094 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5095 if (win_probe_recovery && (net->window_probe)) { 5096 win_probe_recovered = 1; 5097 /*- 5098 * Find the first chunk that was used with a 5099 * window probe and clear the event. Put 5100 * it back into the send queue as if it had 5101 * not been sent. 5102 */ 5103 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5104 if (tp1->window_probe) { 5105 sctp_window_probe_recovery(stcb, asoc, tp1); 5106 break; 5107 } 5108 } 5109 } 5110 if (net->flight_size) { 5111 j++; 5112 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5113 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5114 stcb->sctp_ep, stcb, net); 5115 } 5116 if (net->window_probe) { 5117 net->window_probe = 0; 5118 } 5119 } else { 5120 if (net->window_probe) { 5121 /* 5122 * For window probes we must assure a timer 5123 * is still running there 5124 */ 5125 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5126 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5127 stcb->sctp_ep, stcb, net); 5128 } 5129 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5130 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 5131 stcb, net, 5132 SCTP_FROM_SCTP_INDATA + SCTP_LOC_36); 5133 } 5134 } 5135 } 5136 if ((j == 0) && 5137 (!TAILQ_EMPTY(&asoc->sent_queue)) && 5138 (asoc->sent_queue_retran_cnt == 0) && 5139 (win_probe_recovered == 0) && 5140 (done_once == 0)) { 5141 /* 5142 * Huh, this should not happen unless all packets are 5143 * PR-SCTP and marked to be skipped, of course.
5144 */ 5145 if (sctp_fs_audit(asoc)) { 5146 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5147 net->flight_size = 0; 5148 } 5149 asoc->total_flight = 0; 5150 asoc->total_flight_count = 0; 5151 asoc->sent_queue_retran_cnt = 0; 5152 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5153 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5154 sctp_flight_size_increase(tp1); 5155 sctp_total_flight_increase(stcb, tp1); 5156 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 5157 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 5158 } 5159 } 5160 } 5161 done_once = 1; 5162 goto again; 5163 } 5164 /*********************************************/ 5165 /* Here we perform PR-SCTP procedures */ 5166 /* (section 4.2) */ 5167 /*********************************************/ 5168 /* C1. update advancedPeerAckPoint */ 5169 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) { 5170 asoc->advanced_peer_ack_point = cum_ack; 5171 } 5172 /* C2. try to further move advancedPeerAckPoint ahead */ 5173 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { 5174 struct sctp_tmit_chunk *lchk; 5175 uint32_t old_adv_peer_ack_point; 5176 5177 old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 5178 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 5179 /* C3. See if we need to send a Fwd-TSN */ 5180 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) { 5181 /* 5182 * ISSUE with ECN, see FWD-TSN processing. 5183 */ 5184 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 5185 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 5186 0xee, cum_ack, asoc->advanced_peer_ack_point, 5187 old_adv_peer_ack_point); 5188 } 5189 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 5190 send_forward_tsn(stcb, asoc); 5191 } else if (lchk) { 5192 /* try to FR fwd-tsn's that get lost too */ 5193 if (lchk->rec.data.fwd_tsn_cnt >= 3) { 5194 send_forward_tsn(stcb, asoc); 5195 } 5196 } 5197 } 5198 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) { 5199 if (lchk->whoTo != NULL) { 5200 break; 5201 } 5202 } 5203 if (lchk != NULL) { 5204 /* Assure a timer is up */ 5205 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5206 stcb->sctp_ep, stcb, lchk->whoTo); 5207 } 5208 } 5209 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 5210 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 5211 a_rwnd, 5212 stcb->asoc.peers_rwnd, 5213 stcb->asoc.total_flight, 5214 stcb->asoc.total_output_queue_size); 5215 } 5216 } 5217 5218 void 5219 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag) 5220 { 5221 /* Copy cum-ack */ 5222 uint32_t cum_ack, a_rwnd; 5223 5224 cum_ack = ntohl(cp->cumulative_tsn_ack); 5225 /* Arrange so a_rwnd does NOT change */ 5226 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight; 5227 5228 /* Now call the express sack handling */ 5229 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0); 5230 } 5231 5232 static void 5233 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, 5234 struct sctp_stream_in *strmin) 5235 { 5236 struct sctp_queued_to_read *control, *ncontrol; 5237 struct sctp_association *asoc; 5238 uint32_t mid; 5239 int need_reasm_check = 0; 5240 5241 asoc = &stcb->asoc; 5242 mid = strmin->last_mid_delivered; 5243 /* 5244 * First deliver anything prior to and including the stream no that 5245 * came in. 
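 * (The SCTP_MID_GE()/SCTP_MID_EQ() comparisons take idata_supported because I-DATA carries a 32-bit MID while plain DATA uses a 16-bit SSN.)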
5246 */ 5247 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) { 5248 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) { 5249 /* this is deliverable now */ 5250 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 5251 if (control->on_strm_q) { 5252 if (control->on_strm_q == SCTP_ON_ORDERED) { 5253 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm); 5254 } else if (control->on_strm_q == SCTP_ON_UNORDERED) { 5255 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm); 5256 #ifdef INVARIANTS 5257 } else { 5258 panic("strmin: %p ctl: %p unknown %d", 5259 strmin, control, control->on_strm_q); 5260 #endif 5261 } 5262 control->on_strm_q = 0; 5263 } 5264 /* subtract pending on streams */ 5265 if (asoc->size_on_all_streams >= control->length) { 5266 asoc->size_on_all_streams -= control->length; 5267 } else { 5268 #ifdef INVARIANTS 5269 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 5270 #else 5271 asoc->size_on_all_streams = 0; 5272 #endif 5273 } 5274 sctp_ucount_decr(asoc->cnt_on_all_streams); 5275 /* deliver it to at least the delivery-q */ 5276 if (stcb->sctp_socket) { 5277 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 5278 sctp_add_to_readq(stcb->sctp_ep, stcb, 5279 control, 5280 &stcb->sctp_socket->so_rcv, 5281 1, SCTP_READ_LOCK_HELD, 5282 SCTP_SO_NOT_LOCKED); 5283 } 5284 } else { 5285 /* Its a fragmented message */ 5286 if (control->first_frag_seen) { 5287 /* 5288 * Make it so this is next to 5289 * deliver, we restore later 5290 */ 5291 strmin->last_mid_delivered = control->mid - 1; 5292 need_reasm_check = 1; 5293 break; 5294 } 5295 } 5296 } else { 5297 /* no more delivery now. */ 5298 break; 5299 } 5300 } 5301 if (need_reasm_check) { 5302 int ret; 5303 5304 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD); 5305 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) { 5306 /* Restore the next to deliver unless we are ahead */ 5307 strmin->last_mid_delivered = mid; 5308 } 5309 if (ret == 0) { 5310 /* Left the front Partial one on */ 5311 return; 5312 } 5313 need_reasm_check = 0; 5314 } 5315 /* 5316 * now we must deliver things in queue the normal way if any are 5317 * now ready. 
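 * In-order delivery resumes at last_mid_delivered + 1 and stops at the first gap or at a fragmented message that still needs reassembly.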
5318 */ 5319 mid = strmin->last_mid_delivered + 1; 5320 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) { 5321 if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) { 5322 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 5323 /* this is deliverable now */ 5324 if (control->on_strm_q) { 5325 if (control->on_strm_q == SCTP_ON_ORDERED) { 5326 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm); 5327 } else if (control->on_strm_q == SCTP_ON_UNORDERED) { 5328 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm); 5329 #ifdef INVARIANTS 5330 } else { 5331 panic("strmin: %p ctl: %p unknown %d", 5332 strmin, control, control->on_strm_q); 5333 #endif 5334 } 5335 control->on_strm_q = 0; 5336 } 5337 /* subtract pending on streams */ 5338 if (asoc->size_on_all_streams >= control->length) { 5339 asoc->size_on_all_streams -= control->length; 5340 } else { 5341 #ifdef INVARIANTS 5342 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 5343 #else 5344 asoc->size_on_all_streams = 0; 5345 #endif 5346 } 5347 sctp_ucount_decr(asoc->cnt_on_all_streams); 5348 /* deliver it to at least the delivery-q */ 5349 strmin->last_mid_delivered = control->mid; 5350 if (stcb->sctp_socket) { 5351 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 5352 sctp_add_to_readq(stcb->sctp_ep, stcb, 5353 control, 5354 &stcb->sctp_socket->so_rcv, 1, 5355 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED); 5356 } 5357 mid = strmin->last_mid_delivered + 1; 5358 } else { 5359 /* Its a fragmented message */ 5360 if (control->first_frag_seen) { 5361 /* 5362 * Make it so this is next to 5363 * deliver 5364 */ 5365 strmin->last_mid_delivered = control->mid - 1; 5366 need_reasm_check = 1; 5367 break; 5368 } 5369 } 5370 } else { 5371 break; 5372 } 5373 } 5374 if (need_reasm_check) { 5375 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD); 5376 } 5377 } 5378 5379 static void 5380 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb, 5381 struct sctp_association *asoc, struct sctp_stream_in *strm, 5382 struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn) 5383 { 5384 struct sctp_tmit_chunk *chk, *nchk; 5385 5386 /* 5387 * For now large messages held on the stream reasm that are complete 5388 * will be tossed too. We could in theory do more work to spin 5389 * through and stop after dumping one msg aka seeing the start of a 5390 * new msg at the head, and call the delivery function... to see if 5391 * it can be delivered... But for now we just dump everything on the 5392 * queue. 
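 * (Without I-DATA, unordered fragments have no separate FSN, so the chunk TSN relative to the new cum-tsn decides what is safe to purge below.)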
static void
sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
    struct sctp_association *asoc, struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn)
{
	struct sctp_tmit_chunk *chk, *nchk;

	/*
	 * For now, large messages held on the stream reassembly queue that
	 * are complete will be tossed as well. We could in theory do more
	 * work: spin through and stop after dumping one message (i.e., on
	 * seeing the start of a new message at the head) and call the
	 * delivery function to see if it can be delivered. But for now we
	 * just dump everything on the queue.
	 */
	if (!asoc->idata_supported && !ordered &&
	    control->first_frag_seen &&
	    SCTP_TSN_GT(control->fsn_included, cumtsn)) {
		return;
	}
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		/* Purge hanging chunks */
		if (!asoc->idata_supported && !ordered) {
			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
				break;
			}
		}
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (asoc->size_on_reasm_queue >= chk->send_size) {
			asoc->size_on_reasm_queue -= chk->send_size;
		} else {
#ifdef INVARIANTS
			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
#else
			asoc->size_on_reasm_queue = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	if (!TAILQ_EMPTY(&control->reasm)) {
		/* This has to be old data, unordered */
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
		chk = TAILQ_FIRST(&control->reasm);
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc,
			    chk, SCTP_READ_LOCK_HELD);
		}
		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
		return;
	}
	if (control->on_strm_q == SCTP_ON_ORDERED) {
		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		control->on_strm_q = 0;
	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
		control->on_strm_q = 0;
#ifdef INVARIANTS
	} else if (control->on_strm_q) {
		panic("strm: %p ctl: %p unknown %d",
		    strm, control, control->on_strm_q);
#endif
	}
	control->on_strm_q = 0;
	if (control->on_read_q == 0) {
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_free_a_readq(stcb, control);
	}
}

void
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
    struct sctp_forward_tsn_chunk *fwd,
    int *abort_flag, struct mbuf *m, int offset)
{
	/* The pr-sctp fwd tsn */
	/*
	 * Here we perform all the data receiver side steps for processing
	 * FwdTSN, as required by the PR-SCTP draft:
	 *
	 * Assume we get FwdTSN(x):
	 *
	 * 1) update local cumTSN to x
	 * 2) try to further advance cumTSN to x + others we have
	 * 3) examine and update re-ordering queue on pr-in-streams
	 * 4) clean up re-assembly queue
	 * 5) Send a sack to report where we are.
	 */
	struct sctp_association *asoc;
	uint32_t new_cum_tsn, gap;
	unsigned int i, fwd_sz, m_size;
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *control, *ncontrol;

	asoc = &stcb->asoc;
	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
		SCTPDBG(SCTP_DEBUG_INDATA1,
		    "Bad size too small/big fwd-tsn\n");
		return;
	}
	m_size = (stcb->asoc.mapping_array_size << 3);
	/*************************************************************/
	/* 1. Here we update local cumTSN and shift the bitmap array */
	/*************************************************************/
	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);

	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
		/* Already got there ... */
		return;
	}
	/*
	 * Now we know the new TSN is more advanced; find the actual gap.
	 */
	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
	asoc->cumulative_tsn = new_cum_tsn;
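	/*
	 * The mapping array covers m_size TSNs (mapping_array_size bytes of
	 * 8 bits each). If the gap reaches past the end of the array the
	 * whole map is stale: clear both maps and rebase them just past the
	 * new cumulative TSN. Otherwise, just mark TSNs base..base+gap as
	 * received and non-renegable. For example, with a 16-byte map
	 * m_size is 128, so a FORWARD-TSN 5 ahead of the base sets bits 0
	 * through 5, while one 200 ahead clears and rebases the maps.
	 */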
	if (gap >= m_size) {
		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
			struct mbuf *op_err;
			char msg[SCTP_DIAG_INFO_LEN];

			/*
			 * Out of range (of single-byte chunks in the rwnd I
			 * give out). This must be an attacker.
			 */
			*abort_flag = 1;
			SCTP_SNPRINTF(msg, sizeof(msg),
			    "New cum ack %8.8x too high, highest TSN %8.8x",
			    new_cum_tsn, asoc->highest_tsn_inside_map);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_37;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
			return;
		}
		SCTP_STAT_INCR(sctps_fwdtsn_map_over);

		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
		asoc->highest_tsn_inside_map = new_cum_tsn;

		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->highest_tsn_inside_nr_map = new_cum_tsn;

		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
	} else {
		SCTP_TCB_LOCK_ASSERT(stcb);
		for (i = 0; i <= gap; i++) {
			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
				}
			}
		}
	}
	/*************************************************************/
	/* 2. Clean up re-assembly queue                             */
	/*************************************************************/

	/* This is now done as part of clearing up the stream/seq */
	if (asoc->idata_supported == 0) {
		uint16_t sid;

		/* Flush all the un-ordered data based on cum-tsn */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		for (sid = 0; sid < asoc->streamincnt; sid++) {
			strm = &asoc->strmin[sid];
			if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
				sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn);
			}
		}
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	/*******************************************************/
	/* 3. Update the PR-stream re-ordering queues and fix  */
	/*    delivery issues as needed.                       */
	/*******************************************************/
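	/*
	 * Each skipped-stream entry in the chunk is sid + ssn (4 bytes,
	 * struct sctp_strseq) for plain DATA, or sid + flags + a 32-bit mid
	 * (8 bytes, struct sctp_strseq_mid) for I-DATA, where the unordered
	 * flag marks a message that was sent unordered.
	 */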
	fwd_sz -= sizeof(*fwd);
	if (m && fwd_sz) {
		/* New method. */
		unsigned int num_str;
		uint32_t mid;
		uint16_t sid;
		uint16_t ordered, flags;
		struct sctp_strseq *stseq, strseqbuf;
		struct sctp_strseq_mid *stseq_m, strseqbuf_m;

		offset += sizeof(*fwd);

		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		if (asoc->idata_supported) {
			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
		} else {
			num_str = fwd_sz / sizeof(struct sctp_strseq);
		}
		for (i = 0; i < num_str; i++) {
			if (asoc->idata_supported) {
				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
				    sizeof(struct sctp_strseq_mid),
				    (uint8_t *)&strseqbuf_m);
				offset += sizeof(struct sctp_strseq_mid);
				if (stseq_m == NULL) {
					break;
				}
				sid = ntohs(stseq_m->sid);
				mid = ntohl(stseq_m->mid);
				flags = ntohs(stseq_m->flags);
				if (flags & PR_SCTP_UNORDERED_FLAG) {
					ordered = 0;
				} else {
					ordered = 1;
				}
			} else {
				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
				    sizeof(struct sctp_strseq),
				    (uint8_t *)&strseqbuf);
				offset += sizeof(struct sctp_strseq);
				if (stseq == NULL) {
					break;
				}
				sid = ntohs(stseq->sid);
				mid = (uint32_t)ntohs(stseq->ssn);
				ordered = 1;
			}
			/*
			 * OK, we now look for the stream/seq on the read
			 * queue where it's not all delivered. If we find it
			 * we transmute the read entry into a PDI_ABORTED.
			 */
			if (sid >= asoc->streamincnt) {
				/* screwed up streams, stop! */
				break;
			}
			if ((asoc->str_of_pdapi == sid) &&
			    (asoc->ssn_of_pdapi == mid)) {
				/*
				 * If this is the one we were partially
				 * delivering now, then we no longer are.
				 * Note this will change with the
				 * reassembly re-write.
				 */
				asoc->fragmented_delivery_inprogress = 0;
			}
			strm = &asoc->strmin[sid];
			if (ordered) {
				TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, ncontrol) {
					if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
						sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
					}
				}
			} else {
				if (asoc->idata_supported) {
					TAILQ_FOREACH_SAFE(control, &strm->uno_inqueue, next_instrm, ncontrol) {
						if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
							sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
						}
					}
				} else {
					if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
						sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn);
					}
				}
			}
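			/*
			 * A partially delivered message that this FwdTSN
			 * abandons may already sit on the socket's read
			 * queue. Find it, mark it ended and aborted, pull
			 * it off its stream queue, and notify the ULP that
			 * the partial delivery was aborted.
			 */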
			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
				if ((control->sinfo_stream == sid) &&
				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
					control->pdapi_aborted = 1;
					control->end_added = 1;
					if (control->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
						if (asoc->size_on_all_streams >= control->length) {
							asoc->size_on_all_streams -= control->length;
						} else {
#ifdef INVARIANTS
							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
							asoc->size_on_all_streams = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_all_streams);
					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
					} else if (control->on_strm_q) {
						panic("strm: %p ctl: %p unknown %d",
						    strm, control, control->on_strm_q);
#endif
					}
					control->on_strm_q = 0;
					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
					    stcb,
					    SCTP_PARTIAL_DELIVERY_ABORTED,
					    (void *)control,
					    SCTP_SO_NOT_LOCKED);
					break;
				} else if ((control->sinfo_stream == sid) &&
				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
					/* We are past our victim SSN */
					break;
				}
			}
			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
				/* Update the sequence number */
				strm->last_mid_delivered = mid;
			}
			/* now kick the stream the new way */
			/* sa_ignore NO_NULL_CHK */
			sctp_kick_prsctp_reorder_queue(stcb, strm);
		}
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	/*
	 * Now slide things forward.
	 */
	sctp_slide_mapping_arrays(stcb);
}