/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_lock_bsd.h>

/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is) and will be sending it, for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */
static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock);

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

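/*
 * Sketch of the calculation below: when nothing is held anywhere, grant
 * the full receive buffer limit; otherwise start from sbspace() of the
 * receive buffer, subtract the data (plus per-mbuf MSIZE overhead) still
 * held on the reassembly and in-stream queues, and finally subtract the
 * control chunk overhead, flooring the result at 1 to avoid a
 * silly-window style advertisement.
 */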
/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    uint32_t calc = 0;

    /*
     * This is really set wrong with respect to a 1-2-m socket. Since
     * the sb_cc is the count that everyone has put up. When we re-write
     * sctp_soreceive then we will fix this so that ONLY this
     * association's data is taken into account.
     */
    if (stcb->sctp_socket == NULL) {
        return (calc);
    }

    KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
        ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
    KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
        ("size_on_all_streams is %u", asoc->size_on_all_streams));
    if (stcb->asoc.sb_cc == 0 &&
        asoc->cnt_on_reasm_queue == 0 &&
        asoc->cnt_on_all_streams == 0) {
        /* Full rwnd granted */
        calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
        return (calc);
    }
    /* get actual space */
    calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
    /*
     * take out what has NOT been put on the socket queue and we still
     * hold for putting up.
     */
    calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
        asoc->cnt_on_reasm_queue * MSIZE));
    calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
        asoc->cnt_on_all_streams * MSIZE));
    if (calc == 0) {
        /* out of space */
        return (calc);
    }

    /* what is the overhead of all these rwnd's */
    calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
    /*
     * If the window gets too small due to ctrl-stuff, reduce it to 1,
     * even if it is 0. SWS engaged.
     */
    if (calc < stcb->asoc.my_rwnd_control_len) {
        calc = 1;
    }
    return (calc);
}

/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t sid,
    uint32_t mid, uint8_t flags,
    struct mbuf *dm)
{
    struct sctp_queued_to_read *read_queue_e = NULL;

    sctp_alloc_a_readq(stcb, read_queue_e);
    if (read_queue_e == NULL) {
        goto failed_build;
    }
    memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
    read_queue_e->sinfo_stream = sid;
    read_queue_e->sinfo_flags = (flags << 8);
    read_queue_e->sinfo_ppid = ppid;
    read_queue_e->sinfo_context = context;
    read_queue_e->sinfo_tsn = tsn;
    read_queue_e->sinfo_cumtsn = tsn;
    read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
    read_queue_e->mid = mid;
    read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
    TAILQ_INIT(&read_queue_e->reasm);
    read_queue_e->whoFrom = net;
    atomic_add_int(&net->ref_count, 1);
    read_queue_e->data = dm;
    read_queue_e->stcb = stcb;
    read_queue_e->port_from = stcb->rport;
    if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
        read_queue_e->do_not_ref_stcb = 1;
    }
failed_build:
    return (read_queue_e);
}

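/*
 * Build the ancillary data (cmsg) mbuf for a received message: an
 * SCTP_RCVINFO and/or SCTP_NXTINFO cmsg, and/or the older SCTP_SNDRCV
 * (or SCTP_EXTRCV) cmsg, depending on which socket features are
 * enabled. Returns NULL if the user wants no ancillary data or if no
 * mbuf could be allocated.
 */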
struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
    struct sctp_extrcvinfo *seinfo;
    struct sctp_sndrcvinfo *outinfo;
    struct sctp_rcvinfo *rcvinfo;
    struct sctp_nxtinfo *nxtinfo;
    struct cmsghdr *cmh;
    struct mbuf *ret;
    int len;
    int use_extended;
    int provide_nxt;

    if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
        sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
        sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
        /* user does not want any ancillary data */
        return (NULL);
    }

    len = 0;
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
        len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
    }
    seinfo = (struct sctp_extrcvinfo *)sinfo;
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
        (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
        provide_nxt = 1;
        len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
    } else {
        provide_nxt = 0;
    }
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
        if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
            use_extended = 1;
            len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
        } else {
            use_extended = 0;
            len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
        }
    } else {
        use_extended = 0;
    }

    ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
    if (ret == NULL) {
        /* No space */
        return (ret);
    }
    SCTP_BUF_LEN(ret) = 0;

    /* We need a CMSG header followed by the struct */
    cmh = mtod(ret, struct cmsghdr *);
    /*
     * Make sure that there is no un-initialized padding between the
     * cmsg header and cmsg data and after the cmsg data.
     */
    memset(cmh, 0, len);
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
        cmh->cmsg_level = IPPROTO_SCTP;
        cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
        cmh->cmsg_type = SCTP_RCVINFO;
        rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
        rcvinfo->rcv_sid = sinfo->sinfo_stream;
        rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
        rcvinfo->rcv_flags = sinfo->sinfo_flags;
        rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
        rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
        rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
        rcvinfo->rcv_context = sinfo->sinfo_context;
        rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
        cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
        SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
    }
    if (provide_nxt) {
        cmh->cmsg_level = IPPROTO_SCTP;
        cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
        cmh->cmsg_type = SCTP_NXTINFO;
        nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
        nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
        nxtinfo->nxt_flags = 0;
        if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
            nxtinfo->nxt_flags |= SCTP_UNORDERED;
        }
        if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
            nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
        }
        if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
            nxtinfo->nxt_flags |= SCTP_COMPLETE;
        }
        nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
        nxtinfo->nxt_length = seinfo->serinfo_next_length;
        nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
        cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
        SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
    }
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
        cmh->cmsg_level = IPPROTO_SCTP;
        outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
        if (use_extended) {
            cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
            cmh->cmsg_type = SCTP_EXTRCV;
            memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
            SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
        } else {
            cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
            cmh->cmsg_type = SCTP_SNDRCV;
            *outinfo = *sinfo;
            SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
        }
    }
    return (ret);
}

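/*
 * Move a TSN from the renegable mapping array to the non-renegable one:
 * once a chunk is no longer revokable its bit is set in nr_mapping_array
 * and cleared in mapping_array, and the highest-TSN trackers are
 * adjusted. A no-op unless the sctp_do_drain sysctl is enabled, since
 * without draining nothing is ever renegable.
 */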
static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
    uint32_t gap, i;
    int in_r, in_nr;

    if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
        return;
    }
    if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
        /*
         * This tsn is behind the cum ack and thus we don't need to
         * worry about it being moved from one to the other.
         */
        return;
    }
    SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
    in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
    in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
    KASSERT(in_r || in_nr, ("%s: Things are really messed up now", __func__));
    if (!in_nr) {
        SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
        if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
            asoc->highest_tsn_inside_nr_map = tsn;
        }
    }
    if (in_r) {
        SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
        if (tsn == asoc->highest_tsn_inside_map) {
            /* We must back down to see what the new highest is. */
            for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
                SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
                if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
                    asoc->highest_tsn_inside_map = i;
                    break;
                }
            }
            if (!SCTP_TSN_GE(i, asoc->mapping_array_base_tsn)) {
                asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
            }
        }
    }
}

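/*
 * Insert a control into the proper (ordered or unordered) stream queue,
 * kept sorted by MID. Returns 0 on success and -1 when the entry cannot
 * be placed (a duplicate MID, or a second entry on the single-element
 * old-style unordered queue), in which case the caller aborts.
 */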
static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
    struct sctp_queued_to_read *at;
    struct sctp_readhead *q;
    uint8_t flags, unordered;

    flags = (control->sinfo_flags >> 8);
    unordered = flags & SCTP_DATA_UNORDERED;
    if (unordered) {
        q = &strm->uno_inqueue;
        if (asoc->idata_supported == 0) {
            if (!TAILQ_EMPTY(q)) {
                /*
                 * Only one stream can be here in old style
                 * -- abort
                 */
                return (-1);
            }
            TAILQ_INSERT_TAIL(q, control, next_instrm);
            control->on_strm_q = SCTP_ON_UNORDERED;
            return (0);
        }
    } else {
        q = &strm->inqueue;
    }
    if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
        control->end_added = 1;
        control->first_frag_seen = 1;
        control->last_frag_seen = 1;
    }
    if (TAILQ_EMPTY(q)) {
        /* Empty queue */
        TAILQ_INSERT_HEAD(q, control, next_instrm);
        if (unordered) {
            control->on_strm_q = SCTP_ON_UNORDERED;
        } else {
            control->on_strm_q = SCTP_ON_ORDERED;
        }
        return (0);
    } else {
        TAILQ_FOREACH(at, q, next_instrm) {
            if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
                /*
                 * one in queue is bigger than the new one,
                 * insert before this one
                 */
                TAILQ_INSERT_BEFORE(at, control, next_instrm);
                if (unordered) {
                    control->on_strm_q = SCTP_ON_UNORDERED;
                } else {
                    control->on_strm_q = SCTP_ON_ORDERED;
                }
                break;
            } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
                /*
                 * Gak, he sent me a duplicate msg id
                 * number?? return -1 to abort.
                 */
                return (-1);
            } else {
                if (TAILQ_NEXT(at, next_instrm) == NULL) {
                    /*
                     * We are at the end, insert it
                     * after this one
                     */
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
                        sctp_log_strm_del(control, at,
                            SCTP_STR_LOG_FROM_INSERT_TL);
                    }
                    TAILQ_INSERT_AFTER(q, at, control, next_instrm);
                    if (unordered) {
                        control->on_strm_q = SCTP_ON_UNORDERED;
                    } else {
                        control->on_strm_q = SCTP_ON_ORDERED;
                    }
                    break;
                }
            }
        }
    }
    return (0);
}

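/*
 * Tear down a chunk that cannot be placed in the reassembly: free its
 * data, send an ABORT carrying a protocol-violation cause that encodes
 * where in the reassembly code we gave up (opspot), and set *abort_flag
 * so the caller unwinds.
 */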
static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
    char msg[SCTP_DIAG_INFO_LEN];
    struct mbuf *oper;

    if (stcb->asoc.idata_supported) {
        SCTP_SNPRINTF(msg, sizeof(msg),
            "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
            opspot,
            control->fsn_included,
            chk->rec.data.tsn,
            chk->rec.data.sid,
            chk->rec.data.fsn, chk->rec.data.mid);
    } else {
        SCTP_SNPRINTF(msg, sizeof(msg),
            "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
            opspot,
            control->fsn_included,
            chk->rec.data.tsn,
            chk->rec.data.sid,
            chk->rec.data.fsn,
            (uint16_t)chk->rec.data.mid);
    }
    oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
    sctp_m_freem(chk->data);
    chk->data = NULL;
    sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
    sctp_abort_an_association(stcb->sctp_ep, stcb, oper, false, SCTP_SO_NOT_LOCKED);
    *abort_flag = 1;
}

static void
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
    /*
     * The control could not be placed and must be cleaned.
     */
    struct sctp_tmit_chunk *chk, *nchk;

    TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
        TAILQ_REMOVE(&control->reasm, chk, sctp_next);
        if (chk->data)
            sctp_m_freem(chk->data);
        chk->data = NULL;
        sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
    }
    sctp_free_remote_addr(control->whoFrom);
    if (control->data) {
        sctp_m_freem(control->data);
        control->data = NULL;
    }
    sctp_free_a_readq(stcb, control);
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order as
 * long as the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
    /*
     * FIX-ME maybe? What happens when the ssn wraps? If we are getting
     * all the data in one stream this could happen quite rapidly. One
     * could use the TSN to keep track of things, but this scheme breaks
     * down in the other type of stream usage that could occur. Send a
     * single msg to stream 0, send 4Billion messages to stream 1, now
     * send a message to stream 0. You have a situation where the TSN
     * has wrapped but not in the stream. Is this worth worrying about
     * or should we just change our queue sort at the bottom to be by
     * TSN?
     *
     * Could it also be legal for a peer to send ssn 1 with TSN 2 and
     * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
     * assignment this could happen... and I don't see how this would be
     * a violation. So for now I am undecided and will leave the sort by
     * SSN alone. Maybe a hybrid approach is the answer.
     */
    struct sctp_queued_to_read *at;
    int queue_needed;
    uint32_t nxt_todel;
    struct mbuf *op_err;
    struct sctp_stream_in *strm;
    char msg[SCTP_DIAG_INFO_LEN];

    strm = &asoc->strmin[control->sinfo_stream];
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
        sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
    }
    if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
        /* The incoming sseq is behind where we last delivered? */
        SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
            strm->last_mid_delivered, control->mid);
        /*
         * throw it in the stream so it gets cleaned up in
         * association destruction
         */
        TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
        if (asoc->idata_supported) {
            SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
                strm->last_mid_delivered, control->sinfo_tsn,
                control->sinfo_stream, control->mid);
        } else {
            SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
                (uint16_t)strm->last_mid_delivered,
                control->sinfo_tsn,
                control->sinfo_stream,
                (uint16_t)control->mid);
        }
        op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
        sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
        *abort_flag = 1;
        return;
    }
    queue_needed = 1;
    asoc->size_on_all_streams += control->length;
    sctp_ucount_incr(asoc->cnt_on_all_streams);
    nxt_todel = strm->last_mid_delivered + 1;
    if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
        /* can be delivered right away? */
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
            sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
        }
        /* EY it won't be queued if it could be delivered directly */
        queue_needed = 0;
        if (asoc->size_on_all_streams >= control->length) {
            asoc->size_on_all_streams -= control->length;
        } else {
#ifdef INVARIANTS
            panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
            asoc->size_on_all_streams = 0;
#endif
        }
        sctp_ucount_decr(asoc->cnt_on_all_streams);
        strm->last_mid_delivered++;
        sctp_mark_non_revokable(asoc, control->sinfo_tsn);
        sctp_add_to_readq(stcb->sctp_ep, stcb,
            control,
            &stcb->sctp_socket->so_rcv, 1,
            SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
        TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
            /* all delivered */
            nxt_todel = strm->last_mid_delivered + 1;
            if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
                (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
                if (control->on_strm_q == SCTP_ON_ORDERED) {
                    TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
                    if (asoc->size_on_all_streams >= control->length) {
                        asoc->size_on_all_streams -= control->length;
                    } else {
#ifdef INVARIANTS
                        panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
                        asoc->size_on_all_streams = 0;
#endif
                    }
                    sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
                } else {
                    panic("Huh control: %p is on_strm_q: %d",
                        control, control->on_strm_q);
#endif
                }
                control->on_strm_q = 0;
                strm->last_mid_delivered++;
                /*
                 * We ignore the return of deliver_data here
                 * since we always can hold the chunk on the
                 * d-queue. And we have a finite number that
                 * can be delivered from the strq.
                 */
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
                    sctp_log_strm_del(control, NULL,
                        SCTP_STR_LOG_FROM_IMMED_DEL);
                }
                sctp_mark_non_revokable(asoc, control->sinfo_tsn);
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, 1,
                    SCTP_READ_LOCK_NOT_HELD,
                    SCTP_SO_LOCKED);
                continue;
            } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
                *need_reasm = 1;
            }
            break;
        }
    }
    if (queue_needed) {
        /*
         * Ok, we did not deliver this guy, find the correct place
         * to put it on the queue.
         */
        if (sctp_place_control_in_stream(strm, asoc, control)) {
            SCTP_SNPRINTF(msg, sizeof(msg),
                "Queue to str MID: %u duplicate", control->mid);
            sctp_clean_up_control(stcb, control);
            op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
            stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
            sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
            *abort_flag = 1;
        }
    }
}

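/*
 * Recompute control->length and control->tail_mbuf by walking the whole
 * control->data chain, freeing any zero-length mbufs on the way. When
 * the control is already on the read queue, each mbuf is also accounted
 * to the socket receive buffer (caller holds whatever SB locks apply).
 */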
static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
    struct mbuf *m, *prev = NULL;
    struct sctp_tcb *stcb;

    stcb = control->stcb;
    control->held_length = 0;
    control->length = 0;
    m = control->data;
    while (m) {
        if (SCTP_BUF_LEN(m) == 0) {
            /* Skip mbufs with NO length */
            if (prev == NULL) {
                /* First one */
                control->data = sctp_m_free(m);
                m = control->data;
            } else {
                SCTP_BUF_NEXT(prev) = sctp_m_free(m);
                m = SCTP_BUF_NEXT(prev);
            }
            if (m == NULL) {
                control->tail_mbuf = prev;
            }
            continue;
        }
        prev = m;
        atomic_add_int(&control->length, SCTP_BUF_LEN(m));
        if (control->on_read_q) {
            /*
             * On read queue so we must increment the SB stuff,
             * we assume caller has done any locks of SB.
             */
            sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
        }
        m = SCTP_BUF_NEXT(m);
    }
    if (prev) {
        control->tail_mbuf = prev;
    }
}

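/*
 * Append the mbuf chain m to control->data via the cached tail pointer,
 * again skipping zero-length mbufs and updating the length, tail and
 * (when on the read queue) socket-buffer accounting. *added accumulates
 * the number of bytes actually appended for the caller's bookkeeping.
 */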
static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
{
    struct mbuf *prev = NULL;
    struct sctp_tcb *stcb;

    stcb = control->stcb;
    if (stcb == NULL) {
#ifdef INVARIANTS
        panic("Control broken");
#else
        return;
#endif
    }
    if (control->tail_mbuf == NULL) {
        /* TSNH */
        sctp_m_freem(control->data);
        control->data = m;
        sctp_setup_tail_pointer(control);
        return;
    }
    control->tail_mbuf->m_next = m;
    while (m) {
        if (SCTP_BUF_LEN(m) == 0) {
            /* Skip mbufs with NO length */
            if (prev == NULL) {
                /* First one */
                control->tail_mbuf->m_next = sctp_m_free(m);
                m = control->tail_mbuf->m_next;
            } else {
                SCTP_BUF_NEXT(prev) = sctp_m_free(m);
                m = SCTP_BUF_NEXT(prev);
            }
            if (m == NULL) {
                control->tail_mbuf = prev;
            }
            continue;
        }
        prev = m;
        if (control->on_read_q) {
            /*
             * On read queue so we must increment the SB stuff,
             * we assume caller has done any locks of SB.
             */
            sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
        }
        *added += SCTP_BUF_LEN(m);
        atomic_add_int(&control->length, SCTP_BUF_LEN(m));
        m = SCTP_BUF_NEXT(m);
    }
    if (prev) {
        control->tail_mbuf = prev;
    }
}

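/*
 * Initialize the fresh readq entry nc as a continuation of control,
 * cloning its identifying sinfo fields and taking a new reference on
 * the source net. Used when a completed message leaves fragments
 * behind that belong to a following message.
 */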
static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
    memset(nc, 0, sizeof(struct sctp_queued_to_read));
    nc->sinfo_stream = control->sinfo_stream;
    nc->mid = control->mid;
    TAILQ_INIT(&nc->reasm);
    nc->top_fsn = control->top_fsn;
    nc->mid = control->mid;
    nc->sinfo_flags = control->sinfo_flags;
    nc->sinfo_ppid = control->sinfo_ppid;
    nc->sinfo_context = control->sinfo_context;
    nc->fsn_included = 0xffffffff;
    nc->sinfo_tsn = control->sinfo_tsn;
    nc->sinfo_cumtsn = control->sinfo_cumtsn;
    nc->sinfo_assoc_id = control->sinfo_assoc_id;
    nc->whoFrom = control->whoFrom;
    atomic_add_int(&nc->whoFrom->ref_count, 1);
    nc->stcb = control->stcb;
    nc->port_from = control->port_from;
    nc->do_not_ref_stcb = control->do_not_ref_stcb;
}

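/*
 * Reset a control to start over at the given TSN, pulling it back off
 * the endpoint's read queue if it had already been queued there.
 */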
static void
sctp_reset_a_control(struct sctp_queued_to_read *control,
    struct sctp_inpcb *inp, uint32_t tsn)
{
    control->fsn_included = tsn;
    if (control->on_read_q) {
        /*
         * We have to purge it from there, hopefully this will work
         * :-)
         */
        TAILQ_REMOVE(&inp->read_queue, control, next);
        control->on_read_q = 0;
    }
}

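/*
 * Old (pre I-DATA) unordered processing: every fragment carries MID 0,
 * so collapse whatever is in sequence from the reassembly onto the
 * control, splitting any leftover fragments onto a new control when a
 * message completes. Returns nonzero when the caller should stop
 * looking at further unordered entries, 0 when a partial delivery was
 * started instead.
 */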
static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
    /*
     * Special handling for the old un-ordered data chunk. All the
     * chunks/TSN's go to mid 0. So we have to do the old style watching
     * to see if we have it all. If you return one, no other control
     * entries on the un-ordered queue will be looked at. In theory
     * there should be no other entries in reality, unless the guy is
     * sending both unordered NDATA and unordered DATA...
     */
    struct sctp_tmit_chunk *chk, *lchk, *tchk;
    uint32_t fsn;
    struct sctp_queued_to_read *nc;
    int cnt_added;

    if (control->first_frag_seen == 0) {
        /* Nothing we can do, we have not seen the first piece yet */
        return (1);
    }
    /* Collapse any we can */
    cnt_added = 0;
restart:
    fsn = control->fsn_included + 1;
    /* Now what can we add? */
    TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
        if (chk->rec.data.fsn == fsn) {
            /* Ok lets add it */
            sctp_alloc_a_readq(stcb, nc);
            if (nc == NULL) {
                break;
            }
            memset(nc, 0, sizeof(struct sctp_queued_to_read));
            TAILQ_REMOVE(&control->reasm, chk, sctp_next);
            sctp_add_chk_to_control(control, strm, stcb, asoc, chk, inp_read_lock_held);
            fsn++;
            cnt_added++;
            chk = NULL;
            if (control->end_added) {
                /* We are done */
                if (!TAILQ_EMPTY(&control->reasm)) {
                    /*
                     * Ok we have to move anything left
                     * on the control queue to a new
                     * control.
                     */
                    sctp_build_readq_entry_from_ctl(nc, control);
                    tchk = TAILQ_FIRST(&control->reasm);
                    if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
                        TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
                        if (asoc->size_on_reasm_queue >= tchk->send_size) {
                            asoc->size_on_reasm_queue -= tchk->send_size;
                        } else {
#ifdef INVARIANTS
                            panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
                            asoc->size_on_reasm_queue = 0;
#endif
                        }
                        sctp_ucount_decr(asoc->cnt_on_reasm_queue);
                        nc->first_frag_seen = 1;
                        nc->fsn_included = tchk->rec.data.fsn;
                        nc->data = tchk->data;
                        nc->sinfo_ppid = tchk->rec.data.ppid;
                        nc->sinfo_tsn = tchk->rec.data.tsn;
                        sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
                        tchk->data = NULL;
                        sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
                        sctp_setup_tail_pointer(nc);
                        tchk = TAILQ_FIRST(&control->reasm);
                    }
                    /* Spin the rest onto the queue */
                    while (tchk) {
                        TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
                        TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
                        tchk = TAILQ_FIRST(&control->reasm);
                    }
                    /*
                     * Now lets add it to the queue
                     * after removing control
                     */
                    TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
                    nc->on_strm_q = SCTP_ON_UNORDERED;
                    if (control->on_strm_q) {
                        TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
                        control->on_strm_q = 0;
                    }
                }
                if (control->pdapi_started) {
                    strm->pd_api_started = 0;
                    control->pdapi_started = 0;
                }
                if (control->on_strm_q) {
                    TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
                    control->on_strm_q = 0;
                    SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
                }
                if (control->on_read_q == 0) {
                    sctp_add_to_readq(stcb->sctp_ep, stcb, control,
                        &stcb->sctp_socket->so_rcv, control->end_added,
                        inp_read_lock_held, SCTP_SO_NOT_LOCKED);
                }
                sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
                if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
                    /*
                     * Switch to the new guy and
                     * continue
                     */
                    control = nc;
                    goto restart;
                } else {
                    if (nc->on_strm_q == 0) {
                        sctp_free_a_readq(stcb, nc);
                    }
                }
                return (1);
            } else {
                sctp_free_a_readq(stcb, nc);
            }
        } else {
            /* Can't add more */
            break;
        }
    }
    if (cnt_added && strm->pd_api_started) {
        sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
    }
    if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
        strm->pd_api_started = 1;
        control->pdapi_started = 1;
        sctp_add_to_readq(stcb->sctp_ep, stcb, control,
            &stcb->sctp_socket->so_rcv, control->end_added,
            inp_read_lock_held, SCTP_SO_NOT_LOCKED);
        sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
        return (0);
    } else {
        return (1);
    }
}

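/*
 * Old unordered counterpart of the insert path: place a fragment into
 * control->reasm sorted by FSN (which is the TSN here), or make it the
 * included first fragment, swapping data with a previously seen FIRST
 * when a smaller FIRST arrives. Duplicate FSNs abort the association.
 */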
static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
    struct sctp_tmit_chunk *at;
    int inserted;

    /*
     * Here we need to place the chunk into the control structure sorted
     * in the correct order.
     */
    if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
        /* Its the very first one. */
        SCTPDBG(SCTP_DEBUG_XXX,
            "chunk is a first fsn: %u becomes fsn_included\n",
            chk->rec.data.fsn);
        at = TAILQ_FIRST(&control->reasm);
        if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
            /*
             * The first chunk in the reassembly is a smaller
             * TSN than this one, even though this has a first,
             * it must be from a subsequent msg.
             */
            goto place_chunk;
        }
        if (control->first_frag_seen) {
            /*
             * In old un-ordered we can reassemble multiple
             * messages on one control, as long as the next
             * FIRST is greater than the old first (TSN, i.e.
             * FSN, wise).
             */
            struct mbuf *tdata;
            uint32_t tmp;

            if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
                /*
                 * Easy way, the start of a new guy beyond
                 * the lowest
                 */
                goto place_chunk;
            }
            if ((chk->rec.data.fsn == control->fsn_included) ||
                (control->pdapi_started)) {
                /*
                 * Ok this should not happen, if it does we
                 * started the pd-api on the higher TSN
                 * (since the equals part is a TSN failure
                 * it must be that).
                 *
                 * We are completely hosed in that case
                 * since I have no way to recover. This
                 * really will only happen if we can get
                 * more TSN's higher before the
                 * pd-api-point.
                 */
                sctp_abort_in_reasm(stcb, control, chk,
                    abort_flag,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);

                return;
            }
            /*
             * Ok we have two firsts and the one we just got is
             * smaller than the one we previously placed... yuck!
             * We must swap them out.
             */
            /* swap the mbufs */
            tdata = control->data;
            control->data = chk->data;
            chk->data = tdata;
            /* Save the lengths */
            chk->send_size = control->length;
            /* Recompute length of control and tail pointer */
            sctp_setup_tail_pointer(control);
            /* Fix the FSN included */
            tmp = control->fsn_included;
            control->fsn_included = chk->rec.data.fsn;
            chk->rec.data.fsn = tmp;
            /* Fix the TSN included */
            tmp = control->sinfo_tsn;
            control->sinfo_tsn = chk->rec.data.tsn;
            chk->rec.data.tsn = tmp;
            /* Fix the PPID included */
            tmp = control->sinfo_ppid;
            control->sinfo_ppid = chk->rec.data.ppid;
            chk->rec.data.ppid = tmp;
            /* Fix tail pointer */
            goto place_chunk;
        }
        control->first_frag_seen = 1;
        control->fsn_included = chk->rec.data.fsn;
        control->top_fsn = chk->rec.data.fsn;
        control->sinfo_tsn = chk->rec.data.tsn;
        control->sinfo_ppid = chk->rec.data.ppid;
        control->data = chk->data;
        sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
        chk->data = NULL;
        sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
        sctp_setup_tail_pointer(control);
        return;
    }
place_chunk:
    inserted = 0;
    TAILQ_FOREACH(at, &control->reasm, sctp_next) {
        if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
            /*
             * This one in queue is bigger than the new one,
             * insert the new one before at.
             */
            asoc->size_on_reasm_queue += chk->send_size;
            sctp_ucount_incr(asoc->cnt_on_reasm_queue);
            inserted = 1;
            TAILQ_INSERT_BEFORE(at, chk, sctp_next);
            break;
        } else if (at->rec.data.fsn == chk->rec.data.fsn) {
            /*
             * They sent a duplicate fsn number. This really
             * should not happen since the FSN is a TSN and it
             * should have been dropped earlier.
             */
            sctp_abort_in_reasm(stcb, control, chk,
                abort_flag,
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
            return;
        }
    }
    if (inserted == 0) {
        /* Its at the end */
        asoc->size_on_reasm_queue += chk->send_size;
        sctp_ucount_incr(asoc->cnt_on_reasm_queue);
        control->top_fsn = chk->rec.data.fsn;
        TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
    }
}

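/*
 * Scan a stream's unordered and ordered queues and move anything that
 * has become deliverable onto the read queue, starting a partial
 * delivery when a big enough incomplete message is at the head.
 * Returns the number of completed ordered messages handed up.
 */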
static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm, int inp_read_lock_held)
{
    /*
     * Given a stream, strm, see if any of the SSN's on it that are
     * fragmented are ready to deliver. If so go ahead and place them on
     * the read queue. In so placing if we have hit the end, then we
     * need to remove them from the stream's queue.
     */
    struct sctp_queued_to_read *control, *nctl = NULL;
    uint32_t next_to_del;
    uint32_t pd_point;
    int ret = 0;

    if (stcb->sctp_socket) {
        pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
            stcb->sctp_ep->partial_delivery_point);
    } else {
        pd_point = stcb->sctp_ep->partial_delivery_point;
    }
    control = TAILQ_FIRST(&strm->uno_inqueue);

    if ((control != NULL) &&
        (asoc->idata_supported == 0)) {
        /* Special handling needed for "old" data format */
        if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
            goto done_un;
        }
    }
    if (strm->pd_api_started) {
        /* Can't add more */
        return (0);
    }
    while (control) {
        SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
            control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
        nctl = TAILQ_NEXT(control, next_instrm);
        if (control->end_added) {
            /* We just put the last bit on */
            if (control->on_strm_q) {
#ifdef INVARIANTS
                if (control->on_strm_q != SCTP_ON_UNORDERED) {
                    panic("Huh control: %p on_q: %d -- not unordered?",
                        control, control->on_strm_q);
                }
#endif
                SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
                TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
                if (asoc->size_on_all_streams >= control->length) {
                    asoc->size_on_all_streams -= control->length;
                } else {
#ifdef INVARIANTS
                    panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
                    asoc->size_on_all_streams = 0;
#endif
                }
                sctp_ucount_decr(asoc->cnt_on_all_streams);
                control->on_strm_q = 0;
            }
            if (control->on_read_q == 0) {
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, control->end_added,
                    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
            }
        } else {
            /* Can we do a PD-API for this un-ordered guy? */
            if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
                strm->pd_api_started = 1;
                control->pdapi_started = 1;
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, control->end_added,
                    inp_read_lock_held, SCTP_SO_NOT_LOCKED);

                break;
            }
        }
        control = nctl;
    }
done_un:
    control = TAILQ_FIRST(&strm->inqueue);
    if (strm->pd_api_started) {
        /* Can't add more */
        return (0);
    }
    if (control == NULL) {
        return (ret);
    }
    if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
        /*
         * Ok the guy at the top was being partially delivered and
         * has completed, so we remove it. Note the pd_api flag was
         * taken off when the chunk was merged on in
         * sctp_queue_data_for_reasm below.
         */
        nctl = TAILQ_NEXT(control, next_instrm);
        SCTPDBG(SCTP_DEBUG_XXX,
            "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
            control, control->end_added, control->mid,
            control->top_fsn, control->fsn_included,
            strm->last_mid_delivered);
        if (control->end_added) {
            if (control->on_strm_q) {
#ifdef INVARIANTS
                if (control->on_strm_q != SCTP_ON_ORDERED) {
                    panic("Huh control: %p on_q: %d -- not ordered?",
                        control, control->on_strm_q);
                }
#endif
                SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
                TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
                if (asoc->size_on_all_streams >= control->length) {
                    asoc->size_on_all_streams -= control->length;
                } else {
#ifdef INVARIANTS
                    panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
                    asoc->size_on_all_streams = 0;
#endif
                }
                sctp_ucount_decr(asoc->cnt_on_all_streams);
                control->on_strm_q = 0;
            }
            if (strm->pd_api_started && control->pdapi_started) {
                control->pdapi_started = 0;
                strm->pd_api_started = 0;
            }
            if (control->on_read_q == 0) {
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, control->end_added,
                    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
            }
            control = nctl;
        }
    }
    if (strm->pd_api_started) {
        /*
         * Can't add more; we must have gotten an un-ordered above
         * that is being partially delivered.
         */
        return (0);
    }
deliver_more:
    next_to_del = strm->last_mid_delivered + 1;
    if (control) {
        SCTPDBG(SCTP_DEBUG_XXX,
            "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
            control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
            next_to_del);
        nctl = TAILQ_NEXT(control, next_instrm);
        if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
            (control->first_frag_seen)) {
            int done;

            /* Ok we can deliver it onto the stream. */
            if (control->end_added) {
                /* We are done with it afterwards */
                if (control->on_strm_q) {
#ifdef INVARIANTS
                    if (control->on_strm_q != SCTP_ON_ORDERED) {
                        panic("Huh control: %p on_q: %d -- not ordered?",
                            control, control->on_strm_q);
                    }
#endif
                    SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
                    TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
                    if (asoc->size_on_all_streams >= control->length) {
                        asoc->size_on_all_streams -= control->length;
                    } else {
#ifdef INVARIANTS
                        panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
                        asoc->size_on_all_streams = 0;
#endif
                    }
                    sctp_ucount_decr(asoc->cnt_on_all_streams);
                    control->on_strm_q = 0;
                }
                ret++;
            }
            if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
                /*
                 * A singleton now slipping through - mark
                 * it non-revokable too
                 */
                sctp_mark_non_revokable(asoc, control->sinfo_tsn);
            } else if (control->end_added == 0) {
                /*
                 * Check if we can defer adding until its
                 * all there
                 */
                if ((control->length < pd_point) || (strm->pd_api_started)) {
                    /*
                     * Don't need it or cannot add more
                     * (one being delivered that way)
                     */
                    goto out;
                }
            }
            done = (control->end_added) && (control->last_frag_seen);
            if (control->on_read_q == 0) {
                if (!done) {
                    if (asoc->size_on_all_streams >= control->length) {
                        asoc->size_on_all_streams -= control->length;
                    } else {
#ifdef INVARIANTS
                        panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
                        asoc->size_on_all_streams = 0;
#endif
                    }
                    strm->pd_api_started = 1;
                    control->pdapi_started = 1;
                }
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, control->end_added,
                    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
            }
            strm->last_mid_delivered = next_to_del;
            if (done) {
                control = nctl;
                goto deliver_more;
            }
        }
    }
out:
    return (ret);
}

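/*
 * Returns the number of bytes merged onto the control, so the caller
 * can credit size_on_all_streams when the control is not yet on the
 * read queue (where sballoc does the accounting instead).
 */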
uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock)
{
    /*
     * Given a control and a chunk, merge the data from the chk onto the
     * control and free up the chunk resources.
     */
    uint32_t added = 0;
    bool i_locked = false;

    if (control->on_read_q) {
        if (hold_rlock == 0) {
            /* Its being pd-api'd so we must do some locks. */
            SCTP_INP_READ_LOCK(stcb->sctp_ep);
            i_locked = true;
        }
        if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
            goto out;
        }
    }
    if (control->data == NULL) {
        control->data = chk->data;
        sctp_setup_tail_pointer(control);
    } else {
        sctp_add_to_tail_pointer(control, chk->data, &added);
    }
    control->fsn_included = chk->rec.data.fsn;
    asoc->size_on_reasm_queue -= chk->send_size;
    sctp_ucount_decr(asoc->cnt_on_reasm_queue);
    sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
    chk->data = NULL;
    if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
        control->first_frag_seen = 1;
        control->sinfo_tsn = chk->rec.data.tsn;
        control->sinfo_ppid = chk->rec.data.ppid;
    }
    if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
        /* Its complete */
        if ((control->on_strm_q) && (control->on_read_q)) {
            if (control->pdapi_started) {
                control->pdapi_started = 0;
                strm->pd_api_started = 0;
            }
            if (control->on_strm_q == SCTP_ON_UNORDERED) {
                /* Unordered */
                TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
                control->on_strm_q = 0;
            } else if (control->on_strm_q == SCTP_ON_ORDERED) {
                /* Ordered */
                TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
                /*
                 * Don't need to decrement
                 * size_on_all_streams, since control is on
                 * the read queue.
                 */
                sctp_ucount_decr(asoc->cnt_on_all_streams);
                control->on_strm_q = 0;
#ifdef INVARIANTS
            } else if (control->on_strm_q) {
                panic("Unknown state on ctrl: %p on_strm_q: %d", control,
                    control->on_strm_q);
#endif
            }
        }
        control->end_added = 1;
        control->last_frag_seen = 1;
    }
out:
    if (i_locked) {
        SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
    }
    sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
    return (added);
}

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so pull it off (or as much as
 * we can). If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int created_control,
    int *abort_flag, uint32_t tsn)
{
    uint32_t next_fsn;
    struct sctp_tmit_chunk *at, *nat;
    struct sctp_stream_in *strm;
    int do_wakeup, unordered;
    uint32_t lenadded;

    strm = &asoc->strmin[control->sinfo_stream];
    /*
     * For old un-ordered data chunks.
     */
    if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
        unordered = 1;
    } else {
        unordered = 0;
    }
    /* Must be added to the stream-in queue */
    if (created_control) {
        if ((unordered == 0) || (asoc->idata_supported)) {
            sctp_ucount_incr(asoc->cnt_on_all_streams);
        }
        if (sctp_place_control_in_stream(strm, asoc, control)) {
            /* Duplicate SSN? */
            sctp_abort_in_reasm(stcb, control, chk,
                abort_flag,
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
            sctp_clean_up_control(stcb, control);
            return;
        }
        if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
            /*
             * Ok we created this control and now lets validate
             * that its legal, i.e. there is a B bit set; if not
             * and we have up to the cum-ack then its invalid.
             */
            if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
                sctp_abort_in_reasm(stcb, control, chk,
                    abort_flag,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
                return;
            }
        }
    }
    if ((asoc->idata_supported == 0) && (unordered == 1)) {
        sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
        return;
    }
    /*
     * Ok we must queue the chunk into the reassembly portion: o if its
     * the first it goes to the control mbuf. o if its not first but the
     * next in sequence it goes to the control, and each succeeding one
     * in order also goes. o if its not in order we place it on the list
     * in its place.
     */
    if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
        /* Its the very first one. */
        SCTPDBG(SCTP_DEBUG_XXX,
            "chunk is a first fsn: %u becomes fsn_included\n",
            chk->rec.data.fsn);
        if (control->first_frag_seen) {
            /*
             * Error on senders part, they either sent us two
             * data chunks with FIRST, or they sent two
             * un-ordered chunks that were fragmented at the
             * same time in the same stream.
             */
            sctp_abort_in_reasm(stcb, control, chk,
                abort_flag,
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
            return;
        }
        control->first_frag_seen = 1;
        control->sinfo_ppid = chk->rec.data.ppid;
        control->sinfo_tsn = chk->rec.data.tsn;
        control->fsn_included = chk->rec.data.fsn;
        control->data = chk->data;
        sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
        chk->data = NULL;
        sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
        sctp_setup_tail_pointer(control);
        asoc->size_on_all_streams += control->length;
    } else {
        /* Place the chunk in our list */
        int inserted = 0;

        if (control->last_frag_seen == 0) {
            /* Still willing to raise highest FSN seen */
            if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
                SCTPDBG(SCTP_DEBUG_XXX,
                    "We have a new top_fsn: %u\n",
                    chk->rec.data.fsn);
                control->top_fsn = chk->rec.data.fsn;
            }
            if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
                SCTPDBG(SCTP_DEBUG_XXX,
                    "The last fsn is now in place fsn: %u\n",
                    chk->rec.data.fsn);
                control->last_frag_seen = 1;
                if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
                    SCTPDBG(SCTP_DEBUG_XXX,
                        "New fsn: %u is not at top_fsn: %u -- abort\n",
                        chk->rec.data.fsn,
                        control->top_fsn);
                    sctp_abort_in_reasm(stcb, control, chk,
                        abort_flag,
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
                    return;
                }
            }
            if (asoc->idata_supported || control->first_frag_seen) {
                /*
                 * For IDATA we always check since we know
                 * that the first fragment is 0. For old
                 * DATA we have to receive the first before
                 * we know the first FSN (which is the TSN).
                 */
                if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
                    /*
                     * We have already delivered up to
                     * this so its a dup
                     */
                    sctp_abort_in_reasm(stcb, control, chk,
                        abort_flag,
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
                    return;
                }
            }
        } else {
            if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
                /* Second last? huh? */
                SCTPDBG(SCTP_DEBUG_XXX,
                    "Duplicate last fsn: %u (top: %u) -- abort\n",
                    chk->rec.data.fsn, control->top_fsn);
                sctp_abort_in_reasm(stcb, control,
                    chk, abort_flag,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
                return;
            }
            if (asoc->idata_supported || control->first_frag_seen) {
                /*
                 * For IDATA we always check since we know
                 * that the first fragment is 0. For old
                 * DATA we have to receive the first before
                 * we know the first FSN (which is the TSN).
                 */

                if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
                    /*
                     * We have already delivered up to
                     * this so its a dup
                     */
                    SCTPDBG(SCTP_DEBUG_XXX,
                        "New fsn: %u is already seen in included_fsn: %u -- abort\n",
                        chk->rec.data.fsn, control->fsn_included);
                    sctp_abort_in_reasm(stcb, control, chk,
                        abort_flag,
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
                    return;
                }
            }
            /*
             * validate not beyond top FSN if we have seen last
             * one
             */
            if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
                SCTPDBG(SCTP_DEBUG_XXX,
                    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
                    chk->rec.data.fsn,
                    control->top_fsn);
                sctp_abort_in_reasm(stcb, control, chk,
                    abort_flag,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
                return;
            }
        }
        /*
         * If we reach here, we need to place the new chunk in the
         * reassembly for this control.
         */
        SCTPDBG(SCTP_DEBUG_XXX,
            "chunk is a not first fsn: %u needs to be inserted\n",
            chk->rec.data.fsn);
        TAILQ_FOREACH(at, &control->reasm, sctp_next) {
            if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
                if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
                    /* Last not at the end? huh? */
                    SCTPDBG(SCTP_DEBUG_XXX,
                        "Last fragment not last in list: -- abort\n");
                    sctp_abort_in_reasm(stcb, control,
                        chk, abort_flag,
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
                    return;
                }
                /*
                 * This one in queue is bigger than the new
                 * one, insert the new one before at.
                 */
                SCTPDBG(SCTP_DEBUG_XXX,
                    "Insert it before fsn: %u\n",
                    at->rec.data.fsn);
                asoc->size_on_reasm_queue += chk->send_size;
                sctp_ucount_incr(asoc->cnt_on_reasm_queue);
                TAILQ_INSERT_BEFORE(at, chk, sctp_next);
                inserted = 1;
                break;
            } else if (at->rec.data.fsn == chk->rec.data.fsn) {
                /*
                 * Gak, he sent me a duplicate str seq
                 * number
                 */
                /*
                 * foo bar, I guess I will just free this
                 * new guy, should we abort too? FIX ME
                 * MAYBE? Or it COULD be that the SSN's have
                 * wrapped. Maybe I should compare to TSN
                 * somehow... sigh for now just blow away
                 * the chunk!
                 */
                SCTPDBG(SCTP_DEBUG_XXX,
                    "Duplicate to fsn: %u -- abort\n",
                    at->rec.data.fsn);
                sctp_abort_in_reasm(stcb, control,
                    chk, abort_flag,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
                return;
            }
        }
        if (inserted == 0) {
            /* Goes on the end */
            SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
                chk->rec.data.fsn);
            asoc->size_on_reasm_queue += chk->send_size;
            sctp_ucount_incr(asoc->cnt_on_reasm_queue);
            TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
        }
    }
    /*
     * Ok lets see if we can suck any up into the control structure that
     * are in seq if it makes sense.
     */
    do_wakeup = 0;
    /*
     * If the first fragment has not been seen there is no sense in
     * looking.
     */
    if (control->first_frag_seen) {
        next_fsn = control->fsn_included + 1;
        TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
            if (at->rec.data.fsn == next_fsn) {
                /* We can add this one now to the control */
                SCTPDBG(SCTP_DEBUG_XXX,
                    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
                    control, at,
                    at->rec.data.fsn,
                    next_fsn, control->fsn_included);
                TAILQ_REMOVE(&control->reasm, at, sctp_next);
                lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
                if (control->on_read_q) {
                    do_wakeup = 1;
                } else {
                    /*
                     * We only add to the
                     * size-on-all-streams if its not on
                     * the read q. The read q flag will
                     * cause a sballoc so its accounted
                     * for there.
                     */
                    asoc->size_on_all_streams += lenadded;
                }
                next_fsn++;
                if (control->end_added && control->pdapi_started) {
                    if (strm->pd_api_started) {
                        strm->pd_api_started = 0;
                        control->pdapi_started = 0;
                    }
                    if (control->on_read_q == 0) {
                        sctp_add_to_readq(stcb->sctp_ep, stcb,
                            control,
                            &stcb->sctp_socket->so_rcv, control->end_added,
                            SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
                    }
                    break;
                }
            } else {
                break;
            }
        }
    }
    if (do_wakeup) {
        /* Need to wakeup the reader */
        sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
    }
}

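/*
 * Look up the reassembly control for a given MID on the stream: by MID
 * on the ordered queue, by MID on the unordered queue for I-DATA, or
 * simply the single head entry of the unordered queue for old DATA.
 */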
static struct sctp_queued_to_read *
sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
{
    struct sctp_queued_to_read *control;

    if (ordered) {
        TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
            if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
                break;
            }
        }
    } else {
        if (idata_supported) {
            TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
                if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
                    break;
                }
            }
        } else {
            control = TAILQ_FIRST(&strm->uno_inqueue);
        }
    }
    return (control);
}

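/*
 * Process one received DATA or I-DATA chunk: parse the header fields,
 * screen out duplicates and protocol violations (empty chunks, bad
 * stream ids, inconsistent fragment flags), update the TSN mapping
 * arrays, and hand the payload to the stream queues or the reassembly
 * code above. Abort paths set *abort_flag; rwnd overruns set
 * *break_flag so the caller stops processing the packet.
 */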
static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, int chk_length,
    struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
    int *break_flag, int last_chunk, uint8_t chk_type)
{
    struct sctp_tmit_chunk *chk = NULL;    /* make gcc happy */
    struct sctp_stream_in *strm;
    uint32_t tsn, fsn, gap, mid;
    struct mbuf *dmbuf;
    int the_len;
    int need_reasm_check = 0;
    uint16_t sid;
    struct mbuf *op_err;
    char msg[SCTP_DIAG_INFO_LEN];
    struct sctp_queued_to_read *control, *ncontrol;
    uint32_t ppid;
    uint8_t chk_flags;
    struct sctp_stream_reset_list *liste;
    int ordered;
    size_t clen;
    int created_control = 0;

    if (chk_type == SCTP_IDATA) {
        struct sctp_idata_chunk *chunk, chunk_buf;

        chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
            sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
        chk_flags = chunk->ch.chunk_flags;
        clen = sizeof(struct sctp_idata_chunk);
        tsn = ntohl(chunk->dp.tsn);
        sid = ntohs(chunk->dp.sid);
        mid = ntohl(chunk->dp.mid);
        if (chk_flags & SCTP_DATA_FIRST_FRAG) {
            fsn = 0;
            ppid = chunk->dp.ppid_fsn.ppid;
        } else {
            fsn = ntohl(chunk->dp.ppid_fsn.fsn);
            ppid = 0xffffffff;    /* Use as an invalid value. */
        }
    } else {
        struct sctp_data_chunk *chunk, chunk_buf;

        chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
            sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
        chk_flags = chunk->ch.chunk_flags;
        clen = sizeof(struct sctp_data_chunk);
        tsn = ntohl(chunk->dp.tsn);
        sid = ntohs(chunk->dp.sid);
        mid = (uint32_t)(ntohs(chunk->dp.ssn));
        fsn = tsn;
        ppid = chunk->dp.ppid;
    }
    if ((size_t)chk_length == clen) {
        /*
         * Need to send an abort since we had an empty data chunk.
         */
        op_err = sctp_generate_no_user_data_cause(tsn);
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
        sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
        *abort_flag = 1;
        return (0);
    }
    if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
        asoc->send_sack = 1;
    }
    ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
        sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
    }
    if (stcb == NULL) {
        return (0);
    }
    SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
    if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
        /* It is a duplicate */
        SCTP_STAT_INCR(sctps_recvdupdata);
        if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
            /* Record a dup for the next outbound sack */
            asoc->dup_tsns[asoc->numduptsns] = tsn;
            asoc->numduptsns++;
        }
        asoc->send_sack = 1;
        return (0);
    }
    /* Calculate the number of TSN's between the base and this TSN */
    SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
    if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
        /* Can't hold the bit in the mapping at max array, toss it */
        return (0);
    }
    if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
        SCTP_TCB_LOCK_ASSERT(stcb);
        if (sctp_expand_mapping_array(asoc, gap)) {
            /* Can't expand, drop it */
            return (0);
        }
    }
    if (SCTP_TSN_GT(tsn, *high_tsn)) {
        *high_tsn = tsn;
    }
    /* See if we have received this one already */
    if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
        SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
        SCTP_STAT_INCR(sctps_recvdupdata);
        if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
            /* Record a dup for the next outbound sack */
            asoc->dup_tsns[asoc->numduptsns] = tsn;
            asoc->numduptsns++;
        }
        asoc->send_sack = 1;
        return (0);
    }
    /*
     * Check to see about the GONE flag, duplicates would cause a sack
     * to be sent up above
     */
    if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
        (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
        (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
        /*
         * wait a minute, this guy is gone, there is no longer a
         * receiver. Send peer an ABORT!
         */
        op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
        sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
        *abort_flag = 1;
        return (0);
    }
    /*
     * Now before going further we see if there is room. If NOT then we
     * MAY let one through only IF this TSN is the one we are waiting
     * for on a partial delivery API.
     */

    /* Is the stream valid? */
    if (sid >= asoc->streamincnt) {
        struct sctp_error_invalid_stream *cause;

        op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
            0, M_NOWAIT, 1, MT_DATA);
        if (op_err != NULL) {
            /* add some space up front so prepend will work well */
            SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
            cause = mtod(op_err, struct sctp_error_invalid_stream *);
            /*
             * Error causes are just param's and this one has
             * two back to back phdr, one with the error type
             * and size, the other with the streamid and a rsvd
             */
            SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
            cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
            cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
            cause->stream_id = htons(sid);
            cause->reserved = htons(0);
            sctp_queue_op_err(stcb, op_err);
        }
        SCTP_STAT_INCR(sctps_badsid);
        SCTP_TCB_LOCK_ASSERT(stcb);
        SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
        if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
            asoc->highest_tsn_inside_nr_map = tsn;
        }
        if (tsn == (asoc->cumulative_tsn + 1)) {
            /* Update cum-ack */
            asoc->cumulative_tsn = tsn;
        }
        return (0);
    }
    /*
     * If its a fragmented message, lets see if we can find the control
     * on the reassembly queues.
     */
    if ((chk_type == SCTP_IDATA) &&
        ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
        (fsn == 0)) {
        /*
         * The first *must* be fsn 0, and other (middle/end) pieces
         * can *not* be fsn 0. XXX: This can happen in case of a
         * wrap around. Ignore is for now.
         */
        SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
        goto err_out;
    }
    control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
    SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
        chk_flags, control);
    if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
        /* See if we can find the re-assembly entity */
        if (control != NULL) {
            /* We found something, does it belong? */
            if (ordered && (mid != control->mid)) {
                SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
        err_out:
                op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
                stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
                sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
                *abort_flag = 1;
                return (0);
            }
            if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
                /*
                 * We can't have a switched order with an
                 * unordered chunk
                 */
                SCTP_SNPRINTF(msg, sizeof(msg),
                    "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
                    tsn);
                goto err_out;
            }
            if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
                /*
                 * We can't have a switched unordered with a
                 * ordered chunk
                 */
                SCTP_SNPRINTF(msg, sizeof(msg),
                    "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
                    tsn);
                goto err_out;
            }
        }
    } else {
        /*
         * Its a complete segment. Lets validate we don't have a
         * re-assembly going on with the same Stream/Seq (for
         * ordered) or in the same Stream for unordered.
         */
        if (control != NULL) {
            if (ordered || asoc->idata_supported) {
                SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
                    chk_flags, mid);
                SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
                goto err_out;
            } else {
                if ((tsn == control->fsn_included + 1) &&
                    (control->end_added == 0)) {
                    SCTP_SNPRINTF(msg, sizeof(msg),
                        "Illegal message sequence, missing end for MID: %8.8x",
                        control->fsn_included);
                    goto err_out;
                } else {
                    control = NULL;
                }
            }
        }
    }
*/ 1827 if (sid >= asoc->streamincnt) { 1828 struct sctp_error_invalid_stream *cause; 1829 1830 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream), 1831 0, M_NOWAIT, 1, MT_DATA); 1832 if (op_err != NULL) { 1833 /* add some space up front so prepend will work well */ 1834 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 1835 cause = mtod(op_err, struct sctp_error_invalid_stream *); 1836 /* 1837 * Error causes are just param's and this one has 1838 * two back to back phdr, one with the error type 1839 * and size, the other with the streamid and a rsvd 1840 */ 1841 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream); 1842 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM); 1843 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream)); 1844 cause->stream_id = htons(sid); 1845 cause->reserved = htons(0); 1846 sctp_queue_op_err(stcb, op_err); 1847 } 1848 SCTP_STAT_INCR(sctps_badsid); 1849 SCTP_TCB_LOCK_ASSERT(stcb); 1850 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 1851 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1852 asoc->highest_tsn_inside_nr_map = tsn; 1853 } 1854 if (tsn == (asoc->cumulative_tsn + 1)) { 1855 /* Update cum-ack */ 1856 asoc->cumulative_tsn = tsn; 1857 } 1858 return (0); 1859 } 1860 /* 1861 * If its a fragmented message, lets see if we can find the control 1862 * on the reassembly queues. 1863 */ 1864 if ((chk_type == SCTP_IDATA) && 1865 ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) && 1866 (fsn == 0)) { 1867 /* 1868 * The first *must* be fsn 0, and other (middle/end) pieces 1869 * can *not* be fsn 0. XXX: This can happen in case of a 1870 * wrap around. Ignore is for now. 1871 */ 1872 SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags); 1873 goto err_out; 1874 } 1875 control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported); 1876 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n", 1877 chk_flags, control); 1878 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 1879 /* See if we can find the re-assembly entity */ 1880 if (control != NULL) { 1881 /* We found something, does it belong? */ 1882 if (ordered && (mid != control->mid)) { 1883 SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid); 1884 err_out: 1885 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 1886 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17; 1887 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 1888 *abort_flag = 1; 1889 return (0); 1890 } 1891 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) { 1892 /* 1893 * We can't have a switched order with an 1894 * unordered chunk 1895 */ 1896 SCTP_SNPRINTF(msg, sizeof(msg), 1897 "All fragments of a user message must be ordered or unordered (TSN=%8.8x)", 1898 tsn); 1899 goto err_out; 1900 } 1901 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) { 1902 /* 1903 * We can't have a switched unordered with a 1904 * ordered chunk 1905 */ 1906 SCTP_SNPRINTF(msg, sizeof(msg), 1907 "All fragments of a user message must be ordered or unordered (TSN=%8.8x)", 1908 tsn); 1909 goto err_out; 1910 } 1911 } 1912 } else { 1913 /* 1914 * Its a complete segment. Lets validate we don't have a 1915 * re-assembly going on with the same Stream/Seq (for 1916 * ordered) or in the same Stream for unordered. 
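 * Decision sketch for a complete chunk that matched an
 * in-progress reassembly entry below:
 *
 *	ordered, or any I-DATA          -> duplicate MID, abort
 *	unordered old-style DATA whose
 *	TSN continues the partial
 *	message (fsn_included + 1) with
 *	no end fragment seen            -> missing end, abort
 *	any other unordered DATA match  -> unrelated entry, keep
 *	                                   going (control = NULL)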
1917 */ 1918 if (control != NULL) { 1919 if (ordered || asoc->idata_supported) { 1920 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n", 1921 chk_flags, mid); 1922 SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid); 1923 goto err_out; 1924 } else { 1925 if ((tsn == control->fsn_included + 1) && 1926 (control->end_added == 0)) { 1927 SCTP_SNPRINTF(msg, sizeof(msg), 1928 "Illegal message sequence, missing end for MID: %8.8x", 1929 control->fsn_included); 1930 goto err_out; 1931 } else { 1932 control = NULL; 1933 } 1934 } 1935 } 1936 } 1937 /* now do the tests */ 1938 if (((asoc->cnt_on_all_streams + 1939 asoc->cnt_on_reasm_queue + 1940 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) || 1941 (((int)asoc->my_rwnd) <= 0)) { 1942 /* 1943 * When we have NO room in the rwnd we check to make sure 1944 * the reader is doing its job... 1945 */ 1946 if (SCTP_SBAVAIL(&stcb->sctp_socket->so_rcv) > 0) { 1947 /* some to read, wake-up */ 1948 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 1949 } 1950 /* now is it in the mapping array of what we have accepted? */ 1951 if (chk_type == SCTP_DATA) { 1952 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) && 1953 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1954 /* Nope not in the valid range dump it */ 1955 dump_packet: 1956 sctp_set_rwnd(stcb, asoc); 1957 if ((asoc->cnt_on_all_streams + 1958 asoc->cnt_on_reasm_queue + 1959 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) { 1960 SCTP_STAT_INCR(sctps_datadropchklmt); 1961 } else { 1962 SCTP_STAT_INCR(sctps_datadroprwnd); 1963 } 1964 *break_flag = 1; 1965 return (0); 1966 } 1967 } else { 1968 if (control == NULL) { 1969 goto dump_packet; 1970 } 1971 if (SCTP_TSN_GT(fsn, control->top_fsn)) { 1972 goto dump_packet; 1973 } 1974 } 1975 } 1976 #ifdef SCTP_ASOCLOG_OF_TSNS 1977 SCTP_TCB_LOCK_ASSERT(stcb); 1978 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) { 1979 asoc->tsn_in_at = 0; 1980 asoc->tsn_in_wrapped = 1; 1981 } 1982 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn; 1983 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid; 1984 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid; 1985 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length; 1986 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags; 1987 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb; 1988 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at; 1989 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1; 1990 asoc->tsn_in_at++; 1991 #endif 1992 /* 1993 * Before we continue lets validate that we are not being fooled by 1994 * an evil attacker. We can only have Nk chunks based on our TSN 1995 * spread allowed by the mapping array N * 8 bits, so there is no 1996 * way our stream sequence numbers could have wrapped. We of course 1997 * only validate the FIRST fragment so the bit must be set. 1998 */ 1999 if ((chk_flags & SCTP_DATA_FIRST_FRAG) && 2000 (TAILQ_EMPTY(&asoc->resetHead)) && 2001 (chk_flags & SCTP_DATA_UNORDERED) == 0 && 2002 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) { 2003 /* The incoming sseq is behind where we last delivered? 
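 * ("Behind" is in serial-number arithmetic, so it really means
 * within the half of the sequence space at or below
 * last_mid_delivered. A minimal sketch of the comparison
 * assumed for the 32-bit I-DATA case (16-bit SSNs work the same
 * way with int16_t); mid_ge is a hypothetical name:
 *
 *	mid_ge(a, b) == ((int32_t)((a) - (b)) >= 0)
 *
 * Legitimate wraps are excluded by the mapping-array bound on
 * outstanding TSNs checked above.)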
*/ 2004 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n", 2005 mid, asoc->strmin[sid].last_mid_delivered); 2006 2007 if (asoc->idata_supported) { 2008 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x", 2009 asoc->strmin[sid].last_mid_delivered, 2010 tsn, 2011 sid, 2012 mid); 2013 } else { 2014 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", 2015 (uint16_t)asoc->strmin[sid].last_mid_delivered, 2016 tsn, 2017 sid, 2018 (uint16_t)mid); 2019 } 2020 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2021 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18; 2022 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 2023 *abort_flag = 1; 2024 return (0); 2025 } 2026 if (chk_type == SCTP_IDATA) { 2027 the_len = (chk_length - sizeof(struct sctp_idata_chunk)); 2028 } else { 2029 the_len = (chk_length - sizeof(struct sctp_data_chunk)); 2030 } 2031 if (last_chunk == 0) { 2032 if (chk_type == SCTP_IDATA) { 2033 dmbuf = SCTP_M_COPYM(*m, 2034 (offset + sizeof(struct sctp_idata_chunk)), 2035 the_len, M_NOWAIT); 2036 } else { 2037 dmbuf = SCTP_M_COPYM(*m, 2038 (offset + sizeof(struct sctp_data_chunk)), 2039 the_len, M_NOWAIT); 2040 } 2041 #ifdef SCTP_MBUF_LOGGING 2042 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 2043 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY); 2044 } 2045 #endif 2046 } else { 2047 /* We can steal the last chunk */ 2048 int l_len; 2049 2050 dmbuf = *m; 2051 /* lop off the top part */ 2052 if (chk_type == SCTP_IDATA) { 2053 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk))); 2054 } else { 2055 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk))); 2056 } 2057 if (SCTP_BUF_NEXT(dmbuf) == NULL) { 2058 l_len = SCTP_BUF_LEN(dmbuf); 2059 } else { 2060 /* 2061 * need to count up the size hopefully does not hit 2062 * this to often :-0 2063 */ 2064 struct mbuf *lat; 2065 2066 l_len = 0; 2067 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) { 2068 l_len += SCTP_BUF_LEN(lat); 2069 } 2070 } 2071 if (l_len > the_len) { 2072 /* Trim the end round bytes off too */ 2073 m_adj(dmbuf, -(l_len - the_len)); 2074 } 2075 } 2076 if (dmbuf == NULL) { 2077 SCTP_STAT_INCR(sctps_nomem); 2078 return (0); 2079 } 2080 /* 2081 * Now no matter what, we need a control, get one if we don't have 2082 * one (we may have gotten it above when we found the message was 2083 * fragmented 2084 */ 2085 if (control == NULL) { 2086 sctp_alloc_a_readq(stcb, control); 2087 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 2088 ppid, 2089 sid, 2090 chk_flags, 2091 NULL, fsn, mid); 2092 if (control == NULL) { 2093 SCTP_STAT_INCR(sctps_nomem); 2094 return (0); 2095 } 2096 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 2097 struct mbuf *mm; 2098 2099 control->data = dmbuf; 2100 control->tail_mbuf = NULL; 2101 for (mm = control->data; mm; mm = mm->m_next) { 2102 control->length += SCTP_BUF_LEN(mm); 2103 if (SCTP_BUF_NEXT(mm) == NULL) { 2104 control->tail_mbuf = mm; 2105 } 2106 } 2107 control->end_added = 1; 2108 control->last_frag_seen = 1; 2109 control->first_frag_seen = 1; 2110 control->fsn_included = fsn; 2111 control->top_fsn = fsn; 2112 } 2113 created_control = 1; 2114 } 2115 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n", 2116 chk_flags, ordered, mid, control); 2117 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG && 2118 TAILQ_EMPTY(&asoc->resetHead) 
&& 2119 ((ordered == 0) || 2120 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) && 2121 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) { 2122 /* Candidate for express delivery */ 2123 /* 2124 * Its not fragmented, No PD-API is up, Nothing in the 2125 * delivery queue, Its un-ordered OR ordered and the next to 2126 * deliver AND nothing else is stuck on the stream queue, 2127 * And there is room for it in the socket buffer. Lets just 2128 * stuff it up the buffer.... 2129 */ 2130 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 2131 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 2132 asoc->highest_tsn_inside_nr_map = tsn; 2133 } 2134 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n", 2135 control, mid); 2136 2137 sctp_add_to_readq(stcb->sctp_ep, stcb, 2138 control, &stcb->sctp_socket->so_rcv, 2139 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 2140 2141 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) { 2142 /* for ordered, bump what we delivered */ 2143 asoc->strmin[sid].last_mid_delivered++; 2144 } 2145 SCTP_STAT_INCR(sctps_recvexpress); 2146 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 2147 sctp_log_strm_del_alt(stcb, tsn, mid, sid, 2148 SCTP_STR_LOG_FROM_EXPRS_DEL); 2149 } 2150 control = NULL; 2151 goto finish_express_del; 2152 } 2153 2154 /* Now will we need a chunk too? */ 2155 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 2156 sctp_alloc_a_chunk(stcb, chk); 2157 if (chk == NULL) { 2158 /* No memory so we drop the chunk */ 2159 SCTP_STAT_INCR(sctps_nomem); 2160 if (last_chunk == 0) { 2161 /* we copied it, free the copy */ 2162 sctp_m_freem(dmbuf); 2163 } 2164 return (0); 2165 } 2166 chk->rec.data.tsn = tsn; 2167 chk->no_fr_allowed = 0; 2168 chk->rec.data.fsn = fsn; 2169 chk->rec.data.mid = mid; 2170 chk->rec.data.sid = sid; 2171 chk->rec.data.ppid = ppid; 2172 chk->rec.data.context = stcb->asoc.context; 2173 chk->rec.data.doing_fast_retransmit = 0; 2174 chk->rec.data.rcv_flags = chk_flags; 2175 chk->asoc = asoc; 2176 chk->send_size = the_len; 2177 chk->whoTo = net; 2178 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n", 2179 chk, 2180 control, mid); 2181 atomic_add_int(&net->ref_count, 1); 2182 chk->data = dmbuf; 2183 } 2184 /* Set the appropriate TSN mark */ 2185 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { 2186 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 2187 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 2188 asoc->highest_tsn_inside_nr_map = tsn; 2189 } 2190 } else { 2191 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); 2192 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) { 2193 asoc->highest_tsn_inside_map = tsn; 2194 } 2195 } 2196 /* Now is it complete (i.e. not fragmented)? */ 2197 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 2198 /* 2199 * Special check for when streams are resetting. We could be 2200 * more smart about this and check the actual stream to see 2201 * if it is not being reset.. that way we would not create a 2202 * HOLB when amongst streams being reset and those not being 2203 * reset. 2204 * 2205 */ 2206 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2207 SCTP_TSN_GT(tsn, liste->tsn)) { 2208 /* 2209 * yep its past where we need to reset... go ahead 2210 * and queue it. 
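 * The pending_reply_queue below is kept sorted by ascending
 * sinfo_tsn: walk until the first entry with a larger TSN and
 * insert before it, else append at the tail. Keeping TSN order
 * lets the post-reset drain hand the messages to the streams in
 * the order the peer sent them.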
2211 */ 2212 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) { 2213 /* first one on */ 2214 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2215 } else { 2216 struct sctp_queued_to_read *lcontrol, *nlcontrol; 2217 unsigned char inserted = 0; 2218 2219 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) { 2220 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) { 2221 continue; 2222 } else { 2223 /* found it */ 2224 TAILQ_INSERT_BEFORE(lcontrol, control, next); 2225 inserted = 1; 2226 break; 2227 } 2228 } 2229 if (inserted == 0) { 2230 /* 2231 * must be put at end, use prevP 2232 * (all setup from loop) to setup 2233 * nextP. 2234 */ 2235 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2236 } 2237 } 2238 goto finish_express_del; 2239 } 2240 if (chk_flags & SCTP_DATA_UNORDERED) { 2241 /* queue directly into socket buffer */ 2242 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n", 2243 control, mid); 2244 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 2245 sctp_add_to_readq(stcb->sctp_ep, stcb, 2246 control, 2247 &stcb->sctp_socket->so_rcv, 1, 2248 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 2249 2250 } else { 2251 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control, 2252 mid); 2253 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2254 if (*abort_flag) { 2255 if (last_chunk) { 2256 *m = NULL; 2257 } 2258 return (0); 2259 } 2260 } 2261 goto finish_express_del; 2262 } 2263 /* If we reach here its a reassembly */ 2264 need_reasm_check = 1; 2265 SCTPDBG(SCTP_DEBUG_XXX, 2266 "Queue data to stream for reasm control: %p MID: %u\n", 2267 control, mid); 2268 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn); 2269 if (*abort_flag) { 2270 /* 2271 * the assoc is now gone and chk was put onto the reasm 2272 * queue, which has all been freed. 2273 */ 2274 if (last_chunk) { 2275 *m = NULL; 2276 } 2277 return (0); 2278 } 2279 finish_express_del: 2280 /* Here we tidy up things */ 2281 if (tsn == (asoc->cumulative_tsn + 1)) { 2282 /* Update cum-ack */ 2283 asoc->cumulative_tsn = tsn; 2284 } 2285 if (last_chunk) { 2286 *m = NULL; 2287 } 2288 if (ordered) { 2289 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); 2290 } else { 2291 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); 2292 } 2293 SCTP_STAT_INCR(sctps_recvdata); 2294 /* Set it present please */ 2295 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 2296 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN); 2297 } 2298 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2299 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2300 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); 2301 } 2302 if (need_reasm_check) { 2303 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD); 2304 need_reasm_check = 0; 2305 } 2306 /* check the special flag for stream resets */ 2307 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2308 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) { 2309 /* 2310 * we have finished working through the backlogged TSN's now 2311 * time to reset streams. 1: call reset function. 2: free 2312 * pending_reply space 3: distribute any chunks in 2313 * pending_reply_queue. 
2314 */ 2315 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams); 2316 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); 2317 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED); 2318 SCTP_FREE(liste, SCTP_M_STRESET); 2319 /* sa_ignore FREED_MEMORY */ 2320 liste = TAILQ_FIRST(&asoc->resetHead); 2321 if (TAILQ_EMPTY(&asoc->resetHead)) { 2322 /* All can be removed */ 2323 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) { 2324 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next); 2325 strm = &asoc->strmin[control->sinfo_stream]; 2326 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2327 if (*abort_flag) { 2328 return (0); 2329 } 2330 if (need_reasm_check) { 2331 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD); 2332 need_reasm_check = 0; 2333 } 2334 } 2335 } else { 2336 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) { 2337 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) { 2338 break; 2339 } 2340 /* 2341 * if control->sinfo_tsn is <= liste->tsn we 2342 * can process it which is the NOT of 2343 * control->sinfo_tsn > liste->tsn 2344 */ 2345 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next); 2346 strm = &asoc->strmin[control->sinfo_stream]; 2347 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2348 if (*abort_flag) { 2349 return (0); 2350 } 2351 if (need_reasm_check) { 2352 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD); 2353 need_reasm_check = 0; 2354 } 2355 } 2356 } 2357 } 2358 return (1); 2359 } 2360 2361 static const int8_t sctp_map_lookup_tab[256] = { 2362 0, 1, 0, 2, 0, 1, 0, 3, 2363 0, 1, 0, 2, 0, 1, 0, 4, 2364 0, 1, 0, 2, 0, 1, 0, 3, 2365 0, 1, 0, 2, 0, 1, 0, 5, 2366 0, 1, 0, 2, 0, 1, 0, 3, 2367 0, 1, 0, 2, 0, 1, 0, 4, 2368 0, 1, 0, 2, 0, 1, 0, 3, 2369 0, 1, 0, 2, 0, 1, 0, 6, 2370 0, 1, 0, 2, 0, 1, 0, 3, 2371 0, 1, 0, 2, 0, 1, 0, 4, 2372 0, 1, 0, 2, 0, 1, 0, 3, 2373 0, 1, 0, 2, 0, 1, 0, 5, 2374 0, 1, 0, 2, 0, 1, 0, 3, 2375 0, 1, 0, 2, 0, 1, 0, 4, 2376 0, 1, 0, 2, 0, 1, 0, 3, 2377 0, 1, 0, 2, 0, 1, 0, 7, 2378 0, 1, 0, 2, 0, 1, 0, 3, 2379 0, 1, 0, 2, 0, 1, 0, 4, 2380 0, 1, 0, 2, 0, 1, 0, 3, 2381 0, 1, 0, 2, 0, 1, 0, 5, 2382 0, 1, 0, 2, 0, 1, 0, 3, 2383 0, 1, 0, 2, 0, 1, 0, 4, 2384 0, 1, 0, 2, 0, 1, 0, 3, 2385 0, 1, 0, 2, 0, 1, 0, 6, 2386 0, 1, 0, 2, 0, 1, 0, 3, 2387 0, 1, 0, 2, 0, 1, 0, 4, 2388 0, 1, 0, 2, 0, 1, 0, 3, 2389 0, 1, 0, 2, 0, 1, 0, 5, 2390 0, 1, 0, 2, 0, 1, 0, 3, 2391 0, 1, 0, 2, 0, 1, 0, 4, 2392 0, 1, 0, 2, 0, 1, 0, 3, 2393 0, 1, 0, 2, 0, 1, 0, 8 2394 }; 2395 2396 void 2397 sctp_slide_mapping_arrays(struct sctp_tcb *stcb) 2398 { 2399 /* 2400 * Now we also need to check the mapping array in a couple of ways. 2401 * 1) Did we move the cum-ack point? 2402 * 2403 * When you first glance at this you might think that all entries 2404 * that make up the position of the cum-ack would be in the 2405 * nr-mapping array only.. i.e. things up to the cum-ack are always 2406 * deliverable. Thats true with one exception, when its a fragmented 2407 * message we may not deliver the data until some threshold (or all 2408 * of it) is in place. So we must OR the nr_mapping_array and 2409 * mapping_array to get a true picture of the cum-ack. 
2410 */ 2411 struct sctp_association *asoc; 2412 int at; 2413 uint8_t val; 2414 int slide_from, slide_end, lgap, distance; 2415 uint32_t old_cumack, old_base, old_highest, highest_tsn; 2416 2417 asoc = &stcb->asoc; 2418 2419 old_cumack = asoc->cumulative_tsn; 2420 old_base = asoc->mapping_array_base_tsn; 2421 old_highest = asoc->highest_tsn_inside_map; 2422 /* 2423 * We could probably improve this a small bit by calculating the 2424 * offset of the current cum-ack as the starting point. 2425 */ 2426 at = 0; 2427 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) { 2428 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from]; 2429 if (val == 0xff) { 2430 at += 8; 2431 } else { 2432 /* there is a 0 bit */ 2433 at += sctp_map_lookup_tab[val]; 2434 break; 2435 } 2436 } 2437 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1); 2438 2439 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) && 2440 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) { 2441 #ifdef INVARIANTS 2442 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map", 2443 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2444 #else 2445 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n", 2446 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2447 sctp_print_mapping_array(asoc); 2448 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2449 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 2450 } 2451 asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2452 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn; 2453 #endif 2454 } 2455 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2456 highest_tsn = asoc->highest_tsn_inside_nr_map; 2457 } else { 2458 highest_tsn = asoc->highest_tsn_inside_map; 2459 } 2460 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) { 2461 /* The complete array was completed by a single FR */ 2462 /* highest becomes the cum-ack */ 2463 int clr; 2464 #ifdef INVARIANTS 2465 unsigned int i; 2466 #endif 2467 2468 /* clear the array */ 2469 clr = ((at + 7) >> 3); 2470 if (clr > asoc->mapping_array_size) { 2471 clr = asoc->mapping_array_size; 2472 } 2473 memset(asoc->mapping_array, 0, clr); 2474 memset(asoc->nr_mapping_array, 0, clr); 2475 #ifdef INVARIANTS 2476 for (i = 0; i < asoc->mapping_array_size; i++) { 2477 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) { 2478 SCTP_PRINTF("Error Mapping array's not clean at clear\n"); 2479 sctp_print_mapping_array(asoc); 2480 } 2481 } 2482 #endif 2483 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; 2484 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2485 } else if (at >= 8) { 2486 /* we can slide the mapping array down */ 2487 /* slide_from holds where we hit the first NON 0xff byte */ 2488 2489 /* 2490 * now calculate the ceiling of the move using our highest 2491 * TSN value 2492 */ 2493 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn); 2494 slide_end = (lgap >> 3); 2495 if (slide_end < slide_from) { 2496 sctp_print_mapping_array(asoc); 2497 #ifdef INVARIANTS 2498 panic("impossible slide"); 2499 #else 2500 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? 
at: %d\n",
2501 lgap, slide_end, slide_from, at);
2502 return;
2503 #endif
2504 }
2505 if (slide_end > asoc->mapping_array_size) {
2506 #ifdef INVARIANTS
2507 panic("would overrun buffer");
2508 #else
2509 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2510 asoc->mapping_array_size, slide_end);
2511 slide_end = asoc->mapping_array_size;
2512 #endif
2513 }
2514 distance = (slide_end - slide_from) + 1;
2515 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2516 sctp_log_map(old_base, old_cumack, old_highest,
2517 SCTP_MAP_PREPARE_SLIDE);
2518 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2519 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2520 }
2521 if (distance + slide_from > asoc->mapping_array_size ||
2522 distance < 0) {
2523 /*
2524 * Here we do NOT slide forward the array so that
2525 * hopefully when more data comes in to fill it up
2526 * we will be able to slide it forward. Really I
2527 * don't think this should happen :-0
2528 */
2529 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2530 sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2531 (uint32_t)asoc->mapping_array_size,
2532 SCTP_MAP_SLIDE_NONE);
2533 }
2534 } else {
2535 int ii;
2536
2537 for (ii = 0; ii < distance; ii++) {
2538 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2539 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2540 }
2541 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2542 asoc->mapping_array[ii] = 0;
2543 asoc->nr_mapping_array[ii] = 0;
2544 }
2545 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2546 asoc->highest_tsn_inside_map += (slide_from << 3);
2547 }
2548 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2549 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2550 }
2551 asoc->mapping_array_base_tsn += (slide_from << 3);
2552 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2553 sctp_log_map(asoc->mapping_array_base_tsn,
2554 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2555 SCTP_MAP_SLIDE_RESULT);
2556 }
2557 }
2558 }
2559 }
2560
2561 void
2562 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2563 {
2564 struct sctp_association *asoc;
2565 uint32_t highest_tsn;
2566 int is_a_gap;
2567
2568 sctp_slide_mapping_arrays(stcb);
2569 asoc = &stcb->asoc;
2570 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2571 highest_tsn = asoc->highest_tsn_inside_nr_map;
2572 } else {
2573 highest_tsn = asoc->highest_tsn_inside_map;
2574 }
2575 /* Is there a gap now? */
2576 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2577
2578 /*
2579 * Now we need to see if we need to queue a sack or just start the
2580 * timer (if allowed).
2581 */
2582 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2583 /*
2584 * Ok, special case: in the SHUTDOWN-SENT state we make
2585 * sure the SACK timer is off and instead send a SHUTDOWN
2586 * and a SACK.
2587 */
2588 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2589 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2590 stcb->sctp_ep, stcb, NULL,
2591 SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2592 }
2593 sctp_send_shutdown(stcb,
2594 ((stcb->asoc.alternate) ?
stcb->asoc.alternate : stcb->asoc.primary_destination)); 2595 if (is_a_gap) { 2596 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 2597 } 2598 } else { 2599 /* 2600 * CMT DAC algorithm: increase number of packets received 2601 * since last ack 2602 */ 2603 stcb->asoc.cmt_dac_pkts_rcvd++; 2604 2605 if ((stcb->asoc.send_sack == 1) || /* We need to send a 2606 * SACK */ 2607 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no 2608 * longer is one */ 2609 (stcb->asoc.numduptsns) || /* we have dup's */ 2610 (is_a_gap) || /* is still a gap */ 2611 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */ 2612 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)) { /* hit limit of pkts */ 2613 if ((stcb->asoc.sctp_cmt_on_off > 0) && 2614 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) && 2615 (stcb->asoc.send_sack == 0) && 2616 (stcb->asoc.numduptsns == 0) && 2617 (stcb->asoc.delayed_ack) && 2618 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) { 2619 /* 2620 * CMT DAC algorithm: With CMT, delay acks 2621 * even in the face of reordering. 2622 * Therefore, if acks that do not have to be 2623 * sent because of the above reasons, will 2624 * be delayed. That is, acks that would have 2625 * been sent due to gap reports will be 2626 * delayed with DAC. Start the delayed ack 2627 * timer. 2628 */ 2629 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2630 stcb->sctp_ep, stcb, NULL); 2631 } else { 2632 /* 2633 * Ok we must build a SACK since the timer 2634 * is pending, we got our first packet OR 2635 * there are gaps or duplicates. 2636 */ 2637 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, 2638 SCTP_FROM_SCTP_INDATA + SCTP_LOC_20); 2639 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 2640 } 2641 } else { 2642 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2643 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2644 stcb->sctp_ep, stcb, NULL); 2645 } 2646 } 2647 } 2648 } 2649 2650 int 2651 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, 2652 struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2653 struct sctp_nets *net, uint32_t *high_tsn) 2654 { 2655 struct sctp_chunkhdr *ch, chunk_buf; 2656 struct sctp_association *asoc; 2657 int num_chunks = 0; /* number of control chunks processed */ 2658 int stop_proc = 0; 2659 int break_flag, last_chunk; 2660 int abort_flag = 0, was_a_gap; 2661 struct mbuf *m; 2662 uint32_t highest_tsn; 2663 uint16_t chk_length; 2664 2665 /* set the rwnd */ 2666 sctp_set_rwnd(stcb, &stcb->asoc); 2667 2668 m = *mm; 2669 SCTP_TCB_LOCK_ASSERT(stcb); 2670 asoc = &stcb->asoc; 2671 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2672 highest_tsn = asoc->highest_tsn_inside_nr_map; 2673 } else { 2674 highest_tsn = asoc->highest_tsn_inside_map; 2675 } 2676 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 2677 /* 2678 * setup where we got the last DATA packet from for any SACK that 2679 * may need to go out. Don't bump the net. This is done ONLY when a 2680 * chunk is assigned. 2681 */ 2682 asoc->last_data_chunk_from = net; 2683 2684 /*- 2685 * Now before we proceed we must figure out if this is a wasted 2686 * cluster... i.e. it is a small packet sent in and yet the driver 2687 * underneath allocated a full cluster for it. If so we must copy it 2688 * to a smaller mbuf and free up the cluster mbuf. This will help 2689 * with cluster starvation. 2690 */ 2691 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) { 2692 /* we only handle mbufs that are singletons.. 
not chains */
2693 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2694 if (m) {
2695 /* ok lets see if we can copy the data up */
2696 caddr_t *from, *to;
2697
2698 /* get the pointers and copy */
2699 to = mtod(m, caddr_t *);
2700 from = mtod((*mm), caddr_t *);
2701 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2702 /* copy the length and free up the old */
2703 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2704 sctp_m_freem(*mm);
2705 /* success, back copy */
2706 *mm = m;
2707 } else {
2708 /* We are in trouble in the mbuf world .. yikes */
2709 m = *mm;
2710 }
2711 }
2712 /* get pointer to the first chunk header */
2713 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2714 sizeof(struct sctp_chunkhdr),
2715 (uint8_t *)&chunk_buf);
2716 if (ch == NULL) {
2717 return (1);
2718 }
2719 /*
2720 * process all DATA chunks...
2721 */
2722 *high_tsn = asoc->cumulative_tsn;
2723 break_flag = 0;
2724 asoc->data_pkts_seen++;
2725 while (stop_proc == 0) {
2726 /* validate chunk length */
2727 chk_length = ntohs(ch->chunk_length);
2728 if (length - *offset < chk_length) {
2729 /* all done, mutilated chunk */
2730 stop_proc = 1;
2731 continue;
2732 }
2733 if ((asoc->idata_supported == 1) &&
2734 (ch->chunk_type == SCTP_DATA)) {
2735 struct mbuf *op_err;
2736 char msg[SCTP_DIAG_INFO_LEN];
2737
2738 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2739 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2740 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2741 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2742 return (2);
2743 }
2744 if ((asoc->idata_supported == 0) &&
2745 (ch->chunk_type == SCTP_IDATA)) {
2746 struct mbuf *op_err;
2747 char msg[SCTP_DIAG_INFO_LEN];
2748
2749 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2750 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2751 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2752 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2753 return (2);
2754 }
2755 if ((ch->chunk_type == SCTP_DATA) ||
2756 (ch->chunk_type == SCTP_IDATA)) {
2757 uint16_t clen;
2758
2759 if (ch->chunk_type == SCTP_DATA) {
2760 clen = sizeof(struct sctp_data_chunk);
2761 } else {
2762 clen = sizeof(struct sctp_idata_chunk);
2763 }
2764 if (chk_length < clen) {
2765 /*
2766 * Need to send an abort since we had an
2767 * invalid data chunk.
2768 */
2769 struct mbuf *op_err;
2770 char msg[SCTP_DIAG_INFO_LEN];
2771
2772 SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
2773 ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2774 chk_length);
2775 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2776 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
2777 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2778 return (2);
2779 }
2780 #ifdef SCTP_AUDITING_ENABLED
2781 sctp_audit_log(0xB1, 0);
2782 #endif
2783 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2784 last_chunk = 1;
2785 } else {
2786 last_chunk = 0;
2787 }
2788 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2789 chk_length, net, high_tsn, &abort_flag, &break_flag,
2790 last_chunk, ch->chunk_type)) {
2791 num_chunks++;
2792 }
2793 if (abort_flag)
2794 return (2);
2795
2796 if (break_flag) {
2797 /*
2798 * Set because of out of rwnd space and no
2799 * drop rep space left.
2800 */ 2801 stop_proc = 1; 2802 continue; 2803 } 2804 } else { 2805 /* not a data chunk in the data region */ 2806 switch (ch->chunk_type) { 2807 case SCTP_INITIATION: 2808 case SCTP_INITIATION_ACK: 2809 case SCTP_SELECTIVE_ACK: 2810 case SCTP_NR_SELECTIVE_ACK: 2811 case SCTP_HEARTBEAT_REQUEST: 2812 case SCTP_HEARTBEAT_ACK: 2813 case SCTP_ABORT_ASSOCIATION: 2814 case SCTP_SHUTDOWN: 2815 case SCTP_SHUTDOWN_ACK: 2816 case SCTP_OPERATION_ERROR: 2817 case SCTP_COOKIE_ECHO: 2818 case SCTP_COOKIE_ACK: 2819 case SCTP_ECN_ECHO: 2820 case SCTP_ECN_CWR: 2821 case SCTP_SHUTDOWN_COMPLETE: 2822 case SCTP_AUTHENTICATION: 2823 case SCTP_ASCONF_ACK: 2824 case SCTP_PACKET_DROPPED: 2825 case SCTP_STREAM_RESET: 2826 case SCTP_FORWARD_CUM_TSN: 2827 case SCTP_ASCONF: 2828 { 2829 /* 2830 * Now, what do we do with KNOWN 2831 * chunks that are NOT in the right 2832 * place? 2833 * 2834 * For now, I do nothing but ignore 2835 * them. We may later want to add 2836 * sysctl stuff to switch out and do 2837 * either an ABORT() or possibly 2838 * process them. 2839 */ 2840 struct mbuf *op_err; 2841 char msg[SCTP_DIAG_INFO_LEN]; 2842 2843 SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x", 2844 ch->chunk_type); 2845 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2846 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 2847 return (2); 2848 } 2849 default: 2850 /* 2851 * Unknown chunk type: use bit rules after 2852 * checking length 2853 */ 2854 if (chk_length < sizeof(struct sctp_chunkhdr)) { 2855 /* 2856 * Need to send an abort since we 2857 * had a invalid chunk. 2858 */ 2859 struct mbuf *op_err; 2860 char msg[SCTP_DIAG_INFO_LEN]; 2861 2862 SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length); 2863 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2864 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; 2865 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 2866 return (2); 2867 } 2868 if (ch->chunk_type & 0x40) { 2869 /* Add a error report to the queue */ 2870 struct mbuf *op_err; 2871 struct sctp_gen_error_cause *cause; 2872 2873 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause), 2874 0, M_NOWAIT, 1, MT_DATA); 2875 if (op_err != NULL) { 2876 cause = mtod(op_err, struct sctp_gen_error_cause *); 2877 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK); 2878 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause))); 2879 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause); 2880 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT); 2881 if (SCTP_BUF_NEXT(op_err) != NULL) { 2882 sctp_queue_op_err(stcb, op_err); 2883 } else { 2884 sctp_m_freem(op_err); 2885 } 2886 } 2887 } 2888 if ((ch->chunk_type & 0x80) == 0) { 2889 /* discard the rest of this packet */ 2890 stop_proc = 1; 2891 } /* else skip this bad chunk and 2892 * continue... */ 2893 break; 2894 } /* switch of chunk type */ 2895 } 2896 *offset += SCTP_SIZE32(chk_length); 2897 if ((*offset >= length) || stop_proc) { 2898 /* no more data left in the mbuf chain */ 2899 stop_proc = 1; 2900 continue; 2901 } 2902 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 2903 sizeof(struct sctp_chunkhdr), 2904 (uint8_t *)&chunk_buf); 2905 if (ch == NULL) { 2906 *offset = length; 2907 stop_proc = 1; 2908 continue; 2909 } 2910 } 2911 if (break_flag) { 2912 /* 2913 * we need to report rwnd overrun drops. 
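 * break_flag was set above because data had to be dropped for
 * rwnd or chunk-limit reasons; the PACKET-DROPPED report sent
 * below tells the peer which data did not make it, so it can
 * retransmit without waiting for a full timeout.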
2914 */ 2915 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0); 2916 } 2917 if (num_chunks) { 2918 /* 2919 * Did we get data, if so update the time for auto-close and 2920 * give peer credit for being alive. 2921 */ 2922 SCTP_STAT_INCR(sctps_recvpktwithdata); 2923 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 2924 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 2925 stcb->asoc.overall_error_count, 2926 0, 2927 SCTP_FROM_SCTP_INDATA, 2928 __LINE__); 2929 } 2930 stcb->asoc.overall_error_count = 0; 2931 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd); 2932 } 2933 /* now service all of the reassm queue if needed */ 2934 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) { 2935 /* Assure that we ack right away */ 2936 stcb->asoc.send_sack = 1; 2937 } 2938 /* Start a sack timer or QUEUE a SACK for sending */ 2939 sctp_sack_check(stcb, was_a_gap); 2940 return (0); 2941 } 2942 2943 static int 2944 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn, 2945 uint16_t frag_strt, uint16_t frag_end, int nr_sacking, 2946 int *num_frs, 2947 uint32_t *biggest_newly_acked_tsn, 2948 uint32_t *this_sack_lowest_newack, 2949 int *rto_ok) 2950 { 2951 struct sctp_tmit_chunk *tp1; 2952 unsigned int theTSN; 2953 int j, wake_him = 0, circled = 0; 2954 2955 /* Recover the tp1 we last saw */ 2956 tp1 = *p_tp1; 2957 if (tp1 == NULL) { 2958 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 2959 } 2960 for (j = frag_strt; j <= frag_end; j++) { 2961 theTSN = j + last_tsn; 2962 while (tp1) { 2963 if (tp1->rec.data.doing_fast_retransmit) 2964 (*num_frs) += 1; 2965 2966 /*- 2967 * CMT: CUCv2 algorithm. For each TSN being 2968 * processed from the sent queue, track the 2969 * next expected pseudo-cumack, or 2970 * rtx_pseudo_cumack, if required. Separate 2971 * cumack trackers for first transmissions, 2972 * and retransmissions. 2973 */ 2974 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 2975 (tp1->whoTo->find_pseudo_cumack == 1) && 2976 (tp1->snd_count == 1)) { 2977 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn; 2978 tp1->whoTo->find_pseudo_cumack = 0; 2979 } 2980 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 2981 (tp1->whoTo->find_rtx_pseudo_cumack == 1) && 2982 (tp1->snd_count > 1)) { 2983 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn; 2984 tp1->whoTo->find_rtx_pseudo_cumack = 0; 2985 } 2986 if (tp1->rec.data.tsn == theTSN) { 2987 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 2988 /*- 2989 * must be held until 2990 * cum-ack passes 2991 */ 2992 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 2993 /*- 2994 * If it is less than RESEND, it is 2995 * now no-longer in flight. 2996 * Higher values may already be set 2997 * via previous Gap Ack Blocks... 2998 * i.e. ACKED or RESEND. 2999 */ 3000 if (SCTP_TSN_GT(tp1->rec.data.tsn, 3001 *biggest_newly_acked_tsn)) { 3002 *biggest_newly_acked_tsn = tp1->rec.data.tsn; 3003 } 3004 /*- 3005 * CMT: SFR algo (and HTNA) - set 3006 * saw_newack to 1 for dest being 3007 * newly acked. update 3008 * this_sack_highest_newack if 3009 * appropriate. 
3010 */ 3011 if (tp1->rec.data.chunk_was_revoked == 0) 3012 tp1->whoTo->saw_newack = 1; 3013 3014 if (SCTP_TSN_GT(tp1->rec.data.tsn, 3015 tp1->whoTo->this_sack_highest_newack)) { 3016 tp1->whoTo->this_sack_highest_newack = 3017 tp1->rec.data.tsn; 3018 } 3019 /*- 3020 * CMT DAC algo: also update 3021 * this_sack_lowest_newack 3022 */ 3023 if (*this_sack_lowest_newack == 0) { 3024 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3025 sctp_log_sack(*this_sack_lowest_newack, 3026 last_tsn, 3027 tp1->rec.data.tsn, 3028 0, 3029 0, 3030 SCTP_LOG_TSN_ACKED); 3031 } 3032 *this_sack_lowest_newack = tp1->rec.data.tsn; 3033 } 3034 /*- 3035 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp 3036 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set 3037 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be 3038 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack. 3039 * Separate pseudo_cumack trackers for first transmissions and 3040 * retransmissions. 3041 */ 3042 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) { 3043 if (tp1->rec.data.chunk_was_revoked == 0) { 3044 tp1->whoTo->new_pseudo_cumack = 1; 3045 } 3046 tp1->whoTo->find_pseudo_cumack = 1; 3047 } 3048 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 3049 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); 3050 } 3051 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) { 3052 if (tp1->rec.data.chunk_was_revoked == 0) { 3053 tp1->whoTo->new_pseudo_cumack = 1; 3054 } 3055 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3056 } 3057 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3058 sctp_log_sack(*biggest_newly_acked_tsn, 3059 last_tsn, 3060 tp1->rec.data.tsn, 3061 frag_strt, 3062 frag_end, 3063 SCTP_LOG_TSN_ACKED); 3064 } 3065 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3066 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP, 3067 tp1->whoTo->flight_size, 3068 tp1->book_size, 3069 (uint32_t)(uintptr_t)tp1->whoTo, 3070 tp1->rec.data.tsn); 3071 } 3072 sctp_flight_size_decrease(tp1); 3073 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3074 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3075 tp1); 3076 } 3077 sctp_total_flight_decrease(stcb, tp1); 3078 3079 tp1->whoTo->net_ack += tp1->send_size; 3080 if (tp1->snd_count < 2) { 3081 /*- 3082 * True non-retransmitted chunk 3083 */ 3084 tp1->whoTo->net_ack2 += tp1->send_size; 3085 3086 /*- 3087 * update RTO too ? 
3088 */ 3089 if (tp1->do_rtt) { 3090 if (*rto_ok && 3091 sctp_calculate_rto(stcb, 3092 &stcb->asoc, 3093 tp1->whoTo, 3094 &tp1->sent_rcv_time, 3095 SCTP_RTT_FROM_DATA)) { 3096 *rto_ok = 0; 3097 } 3098 if (tp1->whoTo->rto_needed == 0) { 3099 tp1->whoTo->rto_needed = 1; 3100 } 3101 tp1->do_rtt = 0; 3102 } 3103 } 3104 } 3105 if (tp1->sent <= SCTP_DATAGRAM_RESEND) { 3106 if (SCTP_TSN_GT(tp1->rec.data.tsn, 3107 stcb->asoc.this_sack_highest_gap)) { 3108 stcb->asoc.this_sack_highest_gap = 3109 tp1->rec.data.tsn; 3110 } 3111 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3112 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt); 3113 #ifdef SCTP_AUDITING_ENABLED 3114 sctp_audit_log(0xB2, 3115 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff)); 3116 #endif 3117 } 3118 } 3119 /*- 3120 * All chunks NOT UNSENT fall through here and are marked 3121 * (leave PR-SCTP ones that are to skip alone though) 3122 */ 3123 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) && 3124 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { 3125 tp1->sent = SCTP_DATAGRAM_MARKED; 3126 } 3127 if (tp1->rec.data.chunk_was_revoked) { 3128 /* deflate the cwnd */ 3129 tp1->whoTo->cwnd -= tp1->book_size; 3130 tp1->rec.data.chunk_was_revoked = 0; 3131 } 3132 /* NR Sack code here */ 3133 if (nr_sacking && 3134 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { 3135 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) { 3136 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--; 3137 #ifdef INVARIANTS 3138 } else { 3139 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); 3140 #endif 3141 } 3142 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) && 3143 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && 3144 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) { 3145 stcb->asoc.trigger_reset = 1; 3146 } 3147 tp1->sent = SCTP_DATAGRAM_NR_ACKED; 3148 if (tp1->data) { 3149 /* 3150 * sa_ignore 3151 * NO_NULL_CHK 3152 */ 3153 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 3154 sctp_m_freem(tp1->data); 3155 tp1->data = NULL; 3156 } 3157 wake_him++; 3158 } 3159 } 3160 break; 3161 } /* if (tp1->tsn == theTSN) */ 3162 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) { 3163 break; 3164 } 3165 tp1 = TAILQ_NEXT(tp1, sctp_next); 3166 if ((tp1 == NULL) && (circled == 0)) { 3167 circled++; 3168 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 3169 } 3170 } /* end while (tp1) */ 3171 if (tp1 == NULL) { 3172 circled = 0; 3173 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 3174 } 3175 /* In case the fragments were not in order we must reset */ 3176 } /* end for (j = fragStart */ 3177 *p_tp1 = tp1; 3178 return (wake_him); /* Return value only used for nr-sack */ 3179 } 3180 3181 static int 3182 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc, 3183 uint32_t last_tsn, uint32_t *biggest_tsn_acked, 3184 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack, 3185 int num_seg, int num_nr_seg, int *rto_ok) 3186 { 3187 struct sctp_gap_ack_block *frag, block; 3188 struct sctp_tmit_chunk *tp1; 3189 int i; 3190 int num_frs = 0; 3191 int chunk_freed; 3192 int non_revocable; 3193 uint16_t frag_strt, frag_end, prev_frag_end; 3194 3195 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3196 prev_frag_end = 0; 3197 chunk_freed = 0; 3198 3199 for (i = 0; i < (num_seg + num_nr_seg); i++) { 3200 if (i == num_seg) { 3201 prev_frag_end = 0; 3202 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3203 } 3204 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset, 3205 sizeof(struct sctp_gap_ack_block), 
(uint8_t *)&block); 3206 *offset += sizeof(block); 3207 if (frag == NULL) { 3208 return (chunk_freed); 3209 } 3210 frag_strt = ntohs(frag->start); 3211 frag_end = ntohs(frag->end); 3212 3213 if (frag_strt > frag_end) { 3214 /* This gap report is malformed, skip it. */ 3215 continue; 3216 } 3217 if (frag_strt <= prev_frag_end) { 3218 /* This gap report is not in order, so restart. */ 3219 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3220 } 3221 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) { 3222 *biggest_tsn_acked = last_tsn + frag_end; 3223 } 3224 if (i < num_seg) { 3225 non_revocable = 0; 3226 } else { 3227 non_revocable = 1; 3228 } 3229 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end, 3230 non_revocable, &num_frs, biggest_newly_acked_tsn, 3231 this_sack_lowest_newack, rto_ok)) { 3232 chunk_freed = 1; 3233 } 3234 prev_frag_end = frag_end; 3235 } 3236 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3237 if (num_frs) 3238 sctp_log_fr(*biggest_tsn_acked, 3239 *biggest_newly_acked_tsn, 3240 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS); 3241 } 3242 return (chunk_freed); 3243 } 3244 3245 static void 3246 sctp_check_for_revoked(struct sctp_tcb *stcb, 3247 struct sctp_association *asoc, uint32_t cumack, 3248 uint32_t biggest_tsn_acked) 3249 { 3250 struct sctp_tmit_chunk *tp1; 3251 3252 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3253 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) { 3254 /* 3255 * ok this guy is either ACK or MARKED. If it is 3256 * ACKED it has been previously acked but not this 3257 * time i.e. revoked. If it is MARKED it was ACK'ed 3258 * again. 3259 */ 3260 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) { 3261 break; 3262 } 3263 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 3264 /* it has been revoked */ 3265 tp1->sent = SCTP_DATAGRAM_SENT; 3266 tp1->rec.data.chunk_was_revoked = 1; 3267 /* 3268 * We must add this stuff back in to assure 3269 * timers and such get started. 3270 */ 3271 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3272 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 3273 tp1->whoTo->flight_size, 3274 tp1->book_size, 3275 (uint32_t)(uintptr_t)tp1->whoTo, 3276 tp1->rec.data.tsn); 3277 } 3278 sctp_flight_size_increase(tp1); 3279 sctp_total_flight_increase(stcb, tp1); 3280 /* 3281 * We inflate the cwnd to compensate for our 3282 * artificial inflation of the flight_size. 3283 */ 3284 tp1->whoTo->cwnd += tp1->book_size; 3285 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3286 sctp_log_sack(asoc->last_acked_seq, 3287 cumack, 3288 tp1->rec.data.tsn, 3289 0, 3290 0, 3291 SCTP_LOG_TSN_REVOKED); 3292 } 3293 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) { 3294 /* it has been re-acked in this SACK */ 3295 tp1->sent = SCTP_DATAGRAM_ACKED; 3296 } 3297 } 3298 if (tp1->sent == SCTP_DATAGRAM_UNSENT) 3299 break; 3300 } 3301 } 3302 3303 static void 3304 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, 3305 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved) 3306 { 3307 struct sctp_tmit_chunk *tp1; 3308 int strike_flag = 0; 3309 struct timeval now; 3310 uint32_t sending_seq; 3311 struct sctp_nets *net; 3312 int num_dests_sacked = 0; 3313 3314 /* 3315 * select the sending_seq, this is either the next thing ready to be 3316 * sent but not transmitted, OR, the next seq we assign. 
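 * (sending_seq is what gets recorded in
 * rec.data.fast_retran_tsn when a chunk is first marked for
 * fast retransmit; a chunk already FR'd is only struck again
 * once biggest_tsn_newly_acked reaches that point, so one loss
 * event cannot trigger back-to-back FRs of the same TSN.)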
3317 */ 3318 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 3319 if (tp1 == NULL) { 3320 sending_seq = asoc->sending_seq; 3321 } else { 3322 sending_seq = tp1->rec.data.tsn; 3323 } 3324 3325 /* CMT DAC algo: finding out if SACK is a mixed SACK */ 3326 if ((asoc->sctp_cmt_on_off > 0) && 3327 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3328 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3329 if (net->saw_newack) 3330 num_dests_sacked++; 3331 } 3332 } 3333 if (stcb->asoc.prsctp_supported) { 3334 (void)SCTP_GETTIME_TIMEVAL(&now); 3335 } 3336 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3337 strike_flag = 0; 3338 if (tp1->no_fr_allowed) { 3339 /* this one had a timeout or something */ 3340 continue; 3341 } 3342 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3343 if (tp1->sent < SCTP_DATAGRAM_RESEND) 3344 sctp_log_fr(biggest_tsn_newly_acked, 3345 tp1->rec.data.tsn, 3346 tp1->sent, 3347 SCTP_FR_LOG_CHECK_STRIKE); 3348 } 3349 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) || 3350 tp1->sent == SCTP_DATAGRAM_UNSENT) { 3351 /* done */ 3352 break; 3353 } 3354 if (stcb->asoc.prsctp_supported) { 3355 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3356 /* Is it expired? */ 3357 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3358 /* Yes so drop it */ 3359 if (tp1->data != NULL) { 3360 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, 3361 SCTP_SO_NOT_LOCKED); 3362 } 3363 continue; 3364 } 3365 } 3366 } 3367 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) && 3368 !(accum_moved && asoc->fast_retran_loss_recovery)) { 3369 /* we are beyond the tsn in the sack */ 3370 break; 3371 } 3372 if (tp1->sent >= SCTP_DATAGRAM_RESEND) { 3373 /* either a RESEND, ACKED, or MARKED */ 3374 /* skip */ 3375 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) { 3376 /* Continue strikin FWD-TSN chunks */ 3377 tp1->rec.data.fwd_tsn_cnt++; 3378 } 3379 continue; 3380 } 3381 /* 3382 * CMT : SFR algo (covers part of DAC and HTNA as well) 3383 */ 3384 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) { 3385 /* 3386 * No new acks were received for data sent to this 3387 * dest. Therefore, according to the SFR algo for 3388 * CMT, no data sent to this dest can be marked for 3389 * FR using this SACK. 3390 */ 3391 continue; 3392 } else if (tp1->whoTo && 3393 SCTP_TSN_GT(tp1->rec.data.tsn, 3394 tp1->whoTo->this_sack_highest_newack) && 3395 !(accum_moved && asoc->fast_retran_loss_recovery)) { 3396 /* 3397 * CMT: New acks were received for data sent to this 3398 * dest. But no new acks were seen for data sent 3399 * after tp1. Therefore, according to the SFR algo 3400 * for CMT, tp1 cannot be marked for FR using this 3401 * SACK. This step covers part of the DAC algo and 3402 * the HTNA algo as well. 3403 */ 3404 continue; 3405 } 3406 /* 3407 * Here we check to see if we were have already done a FR 3408 * and if so we see if the biggest TSN we saw in the sack is 3409 * smaller than the recovery point. If so we don't strike 3410 * the tsn... otherwise we CAN strike the TSN. 3411 */ 3412 /* 3413 * @@@ JRI: Check for CMT if (accum_moved && 3414 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 3415 * 0)) { 3416 */ 3417 if (accum_moved && asoc->fast_retran_loss_recovery) { 3418 /* 3419 * Strike the TSN if in fast-recovery and cum-ack 3420 * moved. 
3421 */ 3422 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3423 sctp_log_fr(biggest_tsn_newly_acked, 3424 tp1->rec.data.tsn, 3425 tp1->sent, 3426 SCTP_FR_LOG_STRIKE_CHUNK); 3427 } 3428 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3429 tp1->sent++; 3430 } 3431 if ((asoc->sctp_cmt_on_off > 0) && 3432 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3433 /* 3434 * CMT DAC algorithm: If SACK flag is set to 3435 * 0, then lowest_newack test will not pass 3436 * because it would have been set to the 3437 * cumack earlier. If not already to be 3438 * rtx'd, If not a mixed sack and if tp1 is 3439 * not between two sacked TSNs, then mark by 3440 * one more. NOTE that we are marking by one 3441 * additional time since the SACK DAC flag 3442 * indicates that two packets have been 3443 * received after this missing TSN. 3444 */ 3445 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3446 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) { 3447 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3448 sctp_log_fr(16 + num_dests_sacked, 3449 tp1->rec.data.tsn, 3450 tp1->sent, 3451 SCTP_FR_LOG_STRIKE_CHUNK); 3452 } 3453 tp1->sent++; 3454 } 3455 } 3456 } else if ((tp1->rec.data.doing_fast_retransmit) && 3457 (asoc->sctp_cmt_on_off == 0)) { 3458 /* 3459 * For those that have done a FR we must take 3460 * special consideration if we strike. I.e the 3461 * biggest_newly_acked must be higher than the 3462 * sending_seq at the time we did the FR. 3463 */ 3464 if ( 3465 #ifdef SCTP_FR_TO_ALTERNATE 3466 /* 3467 * If FR's go to new networks, then we must only do 3468 * this for singly homed asoc's. However if the FR's 3469 * go to the same network (Armando's work) then its 3470 * ok to FR multiple times. 3471 */ 3472 (asoc->numnets < 2) 3473 #else 3474 (1) 3475 #endif 3476 ) { 3477 if (SCTP_TSN_GE(biggest_tsn_newly_acked, 3478 tp1->rec.data.fast_retran_tsn)) { 3479 /* 3480 * Strike the TSN, since this ack is 3481 * beyond where things were when we 3482 * did a FR. 3483 */ 3484 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3485 sctp_log_fr(biggest_tsn_newly_acked, 3486 tp1->rec.data.tsn, 3487 tp1->sent, 3488 SCTP_FR_LOG_STRIKE_CHUNK); 3489 } 3490 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3491 tp1->sent++; 3492 } 3493 strike_flag = 1; 3494 if ((asoc->sctp_cmt_on_off > 0) && 3495 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3496 /* 3497 * CMT DAC algorithm: If 3498 * SACK flag is set to 0, 3499 * then lowest_newack test 3500 * will not pass because it 3501 * would have been set to 3502 * the cumack earlier. If 3503 * not already to be rtx'd, 3504 * If not a mixed sack and 3505 * if tp1 is not between two 3506 * sacked TSNs, then mark by 3507 * one more. NOTE that we 3508 * are marking by one 3509 * additional time since the 3510 * SACK DAC flag indicates 3511 * that two packets have 3512 * been received after this 3513 * missing TSN. 3514 */ 3515 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 3516 (num_dests_sacked == 1) && 3517 SCTP_TSN_GT(this_sack_lowest_newack, 3518 tp1->rec.data.tsn)) { 3519 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3520 sctp_log_fr(32 + num_dests_sacked, 3521 tp1->rec.data.tsn, 3522 tp1->sent, 3523 SCTP_FR_LOG_STRIKE_CHUNK); 3524 } 3525 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3526 tp1->sent++; 3527 } 3528 } 3529 } 3530 } 3531 } 3532 /* 3533 * JRI: TODO: remove code for HTNA algo. CMT's SFR 3534 * algo covers HTNA. 
3535 */ 3536 } else if (SCTP_TSN_GT(tp1->rec.data.tsn, 3537 biggest_tsn_newly_acked)) { 3538 /* 3539 * We don't strike these: This is the HTNA 3540 * algorithm i.e. we don't strike If our TSN is 3541 * larger than the Highest TSN Newly Acked. 3542 */ 3543 ; 3544 } else { 3545 /* Strike the TSN */ 3546 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3547 sctp_log_fr(biggest_tsn_newly_acked, 3548 tp1->rec.data.tsn, 3549 tp1->sent, 3550 SCTP_FR_LOG_STRIKE_CHUNK); 3551 } 3552 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3553 tp1->sent++; 3554 } 3555 if ((asoc->sctp_cmt_on_off > 0) && 3556 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3557 /* 3558 * CMT DAC algorithm: If SACK flag is set to 3559 * 0, then lowest_newack test will not pass 3560 * because it would have been set to the 3561 * cumack earlier. If not already to be 3562 * rtx'd, If not a mixed sack and if tp1 is 3563 * not between two sacked TSNs, then mark by 3564 * one more. NOTE that we are marking by one 3565 * additional time since the SACK DAC flag 3566 * indicates that two packets have been 3567 * received after this missing TSN. 3568 */ 3569 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3570 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) { 3571 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3572 sctp_log_fr(48 + num_dests_sacked, 3573 tp1->rec.data.tsn, 3574 tp1->sent, 3575 SCTP_FR_LOG_STRIKE_CHUNK); 3576 } 3577 tp1->sent++; 3578 } 3579 } 3580 } 3581 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3582 struct sctp_nets *alt; 3583 3584 /* fix counts and things */ 3585 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3586 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND, 3587 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0), 3588 tp1->book_size, 3589 (uint32_t)(uintptr_t)tp1->whoTo, 3590 tp1->rec.data.tsn); 3591 } 3592 if (tp1->whoTo) { 3593 tp1->whoTo->net_ack++; 3594 sctp_flight_size_decrease(tp1); 3595 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3596 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3597 tp1); 3598 } 3599 } 3600 3601 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 3602 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, 3603 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3604 } 3605 /* add back to the rwnd */ 3606 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3607 3608 /* remove from the total flight */ 3609 sctp_total_flight_decrease(stcb, tp1); 3610 3611 if ((stcb->asoc.prsctp_supported) && 3612 (PR_SCTP_RTX_ENABLED(tp1->flags))) { 3613 /* 3614 * Has it been retransmitted tv_sec times? - 3615 * we store the retran count there. 
 */
				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
					/* Yes, so drop it */
					if (tp1->data != NULL) {
						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
						    SCTP_SO_NOT_LOCKED);
					}
					/* Make sure to flag we had a FR */
					if (tp1->whoTo != NULL) {
						tp1->whoTo->net_ack++;
					}
					continue;
				}
			}
			/*
			 * SCTP_PRINTF("OK, we are now ready to FR this
			 * guy\n");
			 */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
				    0, SCTP_FR_MARKED);
			}
			if (strike_flag) {
				/* This is a subsequent FR */
				SCTP_STAT_INCR(sctps_sendmultfastretrans);
			}
			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			if (asoc->sctp_cmt_on_off > 0) {
				/*
				 * CMT: Using RTX_SSTHRESH policy for CMT.
				 * If CMT is being used, then pick dest with
				 * largest ssthresh for any retransmission.
				 */
				tp1->no_fr_allowed = 1;
				alt = tp1->whoTo;
				/* sa_ignore NO_NULL_CHK */
				if (asoc->sctp_cmt_pf > 0) {
					/*
					 * JRS 5/18/07 - If CMT PF is on,
					 * use the PF version of
					 * find_alt_net()
					 */
					alt = sctp_find_alternate_net(stcb, alt, 2);
				} else {
					/*
					 * JRS 5/18/07 - If only CMT is on,
					 * use the CMT version of
					 * find_alt_net()
					 */
					/* sa_ignore NO_NULL_CHK */
					alt = sctp_find_alternate_net(stcb, alt, 1);
				}
				if (alt == NULL) {
					alt = tp1->whoTo;
				}
				/*
				 * CUCv2: If a different dest is picked for
				 * the retransmission, then new
				 * (rtx-)pseudo_cumack needs to be tracked
				 * for orig dest. Let CUCv2 track new (rtx-)
				 * pseudo-cumack always.
				 */
				if (tp1->whoTo) {
					tp1->whoTo->find_pseudo_cumack = 1;
					tp1->whoTo->find_rtx_pseudo_cumack = 1;
				}
			} else {	/* CMT is OFF */
#ifdef SCTP_FR_TO_ALTERNATE
				/* Can we find an alternate? */
				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
#else
				/*
				 * The default behavior is to NOT retransmit
				 * FR's to an alternate. Armando Caro's
				 * paper details why.
				 */
				alt = tp1->whoTo;
#endif
			}

			tp1->rec.data.doing_fast_retransmit = 1;
			/* mark the sending seq for possible subsequent FR's */
			/*
			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
			 * (uint32_t)tpi->rec.data.tsn);
			 */
			if (TAILQ_EMPTY(&asoc->send_queue)) {
				/*
				 * If the send queue is empty, sending_seq
				 * is the next sequence number that will be
				 * assigned, so anything newly acked at or
				 * beyond it must have been sent after this
				 * FR.
				 */
				tp1->rec.data.fast_retran_tsn = sending_seq;
			} else {
				/*
				 * If there are chunks on the send queue
				 * (unsent data that has made it from the
				 * stream queues but not out the door), we
				 * take the first one, which will have the
				 * lowest TSN not yet sent at the time of
				 * this FR.
				 */
				struct sctp_tmit_chunk *ttt;

				ttt = TAILQ_FIRST(&asoc->send_queue);
				tp1->rec.data.fast_retran_tsn =
				    ttt->rec.data.tsn;
			}

			if (tp1->do_rtt) {
				/*
				 * An RTO calculation was pending on this
				 * chunk; cancel it.
				 */
				if ((tp1->whoTo != NULL) &&
				    (tp1->whoTo->rto_needed == 0)) {
					tp1->whoTo->rto_needed = 1;
				}
				tp1->do_rtt = 0;
			}
			if (alt != tp1->whoTo) {
				/* yes, there is an alternate.
 */
				sctp_free_remote_addr(tp1->whoTo);
				/* sa_ignore FREED_MEMORY */
				tp1->whoTo = alt;
				atomic_add_int(&alt->ref_count, 1);
			}
		}
	}
}

struct sctp_tmit_chunk *
sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
	struct timeval now;
	int now_filled = 0;

	if (asoc->prsctp_supported == 0) {
		return (NULL);
	}
	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
		    tp1->sent != SCTP_DATAGRAM_RESEND &&
		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
			/* no chance to advance, out of here */
			break;
		}
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
				    asoc->advanced_peer_ack_point,
				    tp1->rec.data.tsn, 0, 0);
			}
		}
		if (!PR_SCTP_ENABLED(tp1->flags)) {
			/*
			 * We can't fwd-tsn past any chunk that is
			 * reliable, i.e. one that will be retransmitted
			 * until the asoc fails.
			 */
			break;
		}
		if (!now_filled) {
			(void)SCTP_GETTIME_TIMEVAL(&now);
			now_filled = 1;
		}
		/*
		 * Now we have a chunk which is marked for another
		 * retransmission to a PR-stream but which has either run
		 * out of its chances already or has been marked to skip.
		 * Can we skip it if it is a resend?
		 */
		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
			/*
			 * Now is this one marked for resend and its time is
			 * now up?
			 */
			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
				/* Yes, so drop it */
				if (tp1->data) {
					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
					    1, SCTP_SO_NOT_LOCKED);
				}
			} else {
				/*
				 * No, we are done when we hit one marked
				 * for resend whose time has not expired.
				 */
				break;
			}
		}
		/*
		 * OK, now if this chunk is marked to be dropped, we can
		 * clean up the chunk, advance our peer ack point, and
		 * check the next chunk.
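 *
 * Worked example (values assumed): the cum-ack is 10 and the sent
 * queue holds TSN 11 (SCTP_FORWARD_TSN_SKIP), TSN 12
 * (SCTP_DATAGRAM_NR_ACKED) and TSN 13 (SCTP_DATAGRAM_RESEND, not
 * yet timed out). The loop advances advanced_peer_ack_point to 12
 * and returns the chunk for TSN 12; TSN 13 stops any further
 * advance.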
3815 */ 3816 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || 3817 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { 3818 /* advance PeerAckPoint goes forward */ 3819 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) { 3820 asoc->advanced_peer_ack_point = tp1->rec.data.tsn; 3821 a_adv = tp1; 3822 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) { 3823 /* No update but we do save the chk */ 3824 a_adv = tp1; 3825 } 3826 } else { 3827 /* 3828 * If it is still in RESEND we can advance no 3829 * further 3830 */ 3831 break; 3832 } 3833 } 3834 return (a_adv); 3835 } 3836 3837 static int 3838 sctp_fs_audit(struct sctp_association *asoc) 3839 { 3840 struct sctp_tmit_chunk *chk; 3841 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0; 3842 int ret; 3843 #ifndef INVARIANTS 3844 int entry_flight, entry_cnt; 3845 #endif 3846 3847 ret = 0; 3848 #ifndef INVARIANTS 3849 entry_flight = asoc->total_flight; 3850 entry_cnt = asoc->total_flight_count; 3851 #endif 3852 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt) 3853 return (0); 3854 3855 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 3856 if (chk->sent < SCTP_DATAGRAM_RESEND) { 3857 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n", 3858 chk->rec.data.tsn, 3859 chk->send_size, 3860 chk->snd_count); 3861 inflight++; 3862 } else if (chk->sent == SCTP_DATAGRAM_RESEND) { 3863 resend++; 3864 } else if (chk->sent < SCTP_DATAGRAM_ACKED) { 3865 inbetween++; 3866 } else if (chk->sent > SCTP_DATAGRAM_ACKED) { 3867 above++; 3868 } else { 3869 acked++; 3870 } 3871 } 3872 3873 if ((inflight > 0) || (inbetween > 0)) { 3874 #ifdef INVARIANTS 3875 panic("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d", 3876 inflight, inbetween, resend, above, acked); 3877 #else 3878 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n", 3879 entry_flight, entry_cnt); 3880 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n", 3881 inflight, inbetween, resend, above, acked); 3882 ret = 1; 3883 #endif 3884 } 3885 return (ret); 3886 } 3887 3888 static void 3889 sctp_window_probe_recovery(struct sctp_tcb *stcb, 3890 struct sctp_association *asoc, 3891 struct sctp_tmit_chunk *tp1) 3892 { 3893 tp1->window_probe = 0; 3894 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) { 3895 /* TSN's skipped we do NOT move back. */ 3896 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD, 3897 tp1->whoTo ? 
tp1->whoTo->flight_size : 0, 3898 tp1->book_size, 3899 (uint32_t)(uintptr_t)tp1->whoTo, 3900 tp1->rec.data.tsn); 3901 return; 3902 } 3903 /* First setup this by shrinking flight */ 3904 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3905 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3906 tp1); 3907 } 3908 sctp_flight_size_decrease(tp1); 3909 sctp_total_flight_decrease(stcb, tp1); 3910 /* Now mark for resend */ 3911 tp1->sent = SCTP_DATAGRAM_RESEND; 3912 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 3913 3914 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3915 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP, 3916 tp1->whoTo->flight_size, 3917 tp1->book_size, 3918 (uint32_t)(uintptr_t)tp1->whoTo, 3919 tp1->rec.data.tsn); 3920 } 3921 } 3922 3923 void 3924 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, 3925 uint32_t rwnd, int *abort_now, int ecne_seen) 3926 { 3927 struct sctp_nets *net; 3928 struct sctp_association *asoc; 3929 struct sctp_tmit_chunk *tp1, *tp2; 3930 uint32_t old_rwnd; 3931 int win_probe_recovery = 0; 3932 int win_probe_recovered = 0; 3933 int j, done_once = 0; 3934 int rto_ok = 1; 3935 uint32_t send_s; 3936 3937 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 3938 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack, 3939 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 3940 } 3941 SCTP_TCB_LOCK_ASSERT(stcb); 3942 #ifdef SCTP_ASOCLOG_OF_TSNS 3943 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack; 3944 stcb->asoc.cumack_log_at++; 3945 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 3946 stcb->asoc.cumack_log_at = 0; 3947 } 3948 #endif 3949 asoc = &stcb->asoc; 3950 old_rwnd = asoc->peers_rwnd; 3951 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) { 3952 /* old ack */ 3953 return; 3954 } else if (asoc->last_acked_seq == cumack) { 3955 /* Window update sack */ 3956 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 3957 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 3958 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 3959 /* SWS sender side engages */ 3960 asoc->peers_rwnd = 0; 3961 } 3962 if (asoc->peers_rwnd > old_rwnd) { 3963 goto again; 3964 } 3965 return; 3966 } 3967 3968 /* First setup for CC stuff */ 3969 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3970 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) { 3971 /* Drag along the window_tsn for cwr's */ 3972 net->cwr_window_tsn = cumack; 3973 } 3974 net->prev_cwnd = net->cwnd; 3975 net->net_ack = 0; 3976 net->net_ack2 = 0; 3977 3978 /* 3979 * CMT: Reset CUC and Fast recovery algo variables before 3980 * SACK processing 3981 */ 3982 net->new_pseudo_cumack = 0; 3983 net->will_exit_fast_recovery = 0; 3984 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 3985 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 3986 } 3987 } 3988 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 3989 tp1 = TAILQ_LAST(&asoc->sent_queue, 3990 sctpchunk_listhead); 3991 send_s = tp1->rec.data.tsn + 1; 3992 } else { 3993 send_s = asoc->sending_seq; 3994 } 3995 if (SCTP_TSN_GE(cumack, send_s)) { 3996 struct mbuf *op_err; 3997 char msg[SCTP_DIAG_INFO_LEN]; 3998 3999 *abort_now = 1; 4000 /* XXX */ 4001 SCTP_SNPRINTF(msg, sizeof(msg), 4002 "Cum ack %8.8x greater or equal than TSN %8.8x", 4003 cumack, send_s); 4004 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 4005 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 4006 
sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 4007 return; 4008 } 4009 asoc->this_sack_highest_gap = cumack; 4010 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4011 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4012 stcb->asoc.overall_error_count, 4013 0, 4014 SCTP_FROM_SCTP_INDATA, 4015 __LINE__); 4016 } 4017 stcb->asoc.overall_error_count = 0; 4018 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) { 4019 /* process the new consecutive TSN first */ 4020 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 4021 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) { 4022 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 4023 SCTP_PRINTF("Warning, an unsent is now acked?\n"); 4024 } 4025 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4026 /* 4027 * If it is less than ACKED, it is 4028 * now no-longer in flight. Higher 4029 * values may occur during marking 4030 */ 4031 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4032 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4033 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4034 tp1->whoTo->flight_size, 4035 tp1->book_size, 4036 (uint32_t)(uintptr_t)tp1->whoTo, 4037 tp1->rec.data.tsn); 4038 } 4039 sctp_flight_size_decrease(tp1); 4040 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 4041 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 4042 tp1); 4043 } 4044 /* sa_ignore NO_NULL_CHK */ 4045 sctp_total_flight_decrease(stcb, tp1); 4046 } 4047 tp1->whoTo->net_ack += tp1->send_size; 4048 if (tp1->snd_count < 2) { 4049 /* 4050 * True non-retransmitted 4051 * chunk 4052 */ 4053 tp1->whoTo->net_ack2 += 4054 tp1->send_size; 4055 4056 /* update RTO too? */ 4057 if (tp1->do_rtt) { 4058 if (rto_ok && 4059 sctp_calculate_rto(stcb, 4060 &stcb->asoc, 4061 tp1->whoTo, 4062 &tp1->sent_rcv_time, 4063 SCTP_RTT_FROM_DATA)) { 4064 rto_ok = 0; 4065 } 4066 if (tp1->whoTo->rto_needed == 0) { 4067 tp1->whoTo->rto_needed = 1; 4068 } 4069 tp1->do_rtt = 0; 4070 } 4071 } 4072 /* 4073 * CMT: CUCv2 algorithm. From the 4074 * cumack'd TSNs, for each TSN being 4075 * acked for the first time, set the 4076 * following variables for the 4077 * corresp destination. 4078 * new_pseudo_cumack will trigger a 4079 * cwnd update. 4080 * find_(rtx_)pseudo_cumack will 4081 * trigger search for the next 4082 * expected (rtx-)pseudo-cumack. 
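 *
 * (E.g. with two destinations A and B, a cum-ack that newly covers
 * a TSN sent to A sets A->new_pseudo_cumack, so this SACK opens
 * A's cwnd; B's cwnd waits until B's own pseudo-cumack advances.)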
4083 */ 4084 tp1->whoTo->new_pseudo_cumack = 1; 4085 tp1->whoTo->find_pseudo_cumack = 1; 4086 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4087 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 4088 /* sa_ignore NO_NULL_CHK */ 4089 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); 4090 } 4091 } 4092 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4093 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4094 } 4095 if (tp1->rec.data.chunk_was_revoked) { 4096 /* deflate the cwnd */ 4097 tp1->whoTo->cwnd -= tp1->book_size; 4098 tp1->rec.data.chunk_was_revoked = 0; 4099 } 4100 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4101 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) { 4102 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--; 4103 #ifdef INVARIANTS 4104 } else { 4105 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); 4106 #endif 4107 } 4108 } 4109 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) && 4110 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && 4111 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) { 4112 asoc->trigger_reset = 1; 4113 } 4114 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4115 if (tp1->data) { 4116 /* sa_ignore NO_NULL_CHK */ 4117 sctp_free_bufspace(stcb, asoc, tp1, 1); 4118 sctp_m_freem(tp1->data); 4119 tp1->data = NULL; 4120 } 4121 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4122 sctp_log_sack(asoc->last_acked_seq, 4123 cumack, 4124 tp1->rec.data.tsn, 4125 0, 4126 0, 4127 SCTP_LOG_FREE_SENT); 4128 } 4129 asoc->sent_queue_cnt--; 4130 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 4131 } else { 4132 break; 4133 } 4134 } 4135 } 4136 /* sa_ignore NO_NULL_CHK */ 4137 if (stcb->sctp_socket) { 4138 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4139 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4140 /* sa_ignore NO_NULL_CHK */ 4141 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK); 4142 } 4143 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4144 } else { 4145 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4146 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK); 4147 } 4148 } 4149 4150 /* JRS - Use the congestion control given in the CC module */ 4151 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) { 4152 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4153 if (net->net_ack2 > 0) { 4154 /* 4155 * Karn's rule applies to clearing error 4156 * count, this is optional. 
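 * (net_ack2 only counts bytes from chunks that were never
 * retransmitted, so the ack is unambiguous in Karn's sense; only
 * then are error_count cleared and the reachability/PF checks
 * below applied.)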
4157 */ 4158 net->error_count = 0; 4159 if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) { 4160 /* addr came good */ 4161 net->dest_state |= SCTP_ADDR_REACHABLE; 4162 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 4163 0, (void *)net, SCTP_SO_NOT_LOCKED); 4164 } 4165 if (net == stcb->asoc.primary_destination) { 4166 if (stcb->asoc.alternate) { 4167 /* 4168 * release the alternate, 4169 * primary is good 4170 */ 4171 sctp_free_remote_addr(stcb->asoc.alternate); 4172 stcb->asoc.alternate = NULL; 4173 } 4174 } 4175 if (net->dest_state & SCTP_ADDR_PF) { 4176 net->dest_state &= ~SCTP_ADDR_PF; 4177 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, 4178 stcb->sctp_ep, stcb, net, 4179 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); 4180 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 4181 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 4182 /* Done with this net */ 4183 net->net_ack = 0; 4184 } 4185 /* restore any doubled timers */ 4186 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 4187 if (net->RTO < stcb->asoc.minrto) { 4188 net->RTO = stcb->asoc.minrto; 4189 } 4190 if (net->RTO > stcb->asoc.maxrto) { 4191 net->RTO = stcb->asoc.maxrto; 4192 } 4193 } 4194 } 4195 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0); 4196 } 4197 asoc->last_acked_seq = cumack; 4198 4199 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4200 /* nothing left in-flight */ 4201 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4202 net->flight_size = 0; 4203 net->partial_bytes_acked = 0; 4204 } 4205 asoc->total_flight = 0; 4206 asoc->total_flight_count = 0; 4207 } 4208 4209 /* RWND update */ 4210 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 4211 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 4212 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4213 /* SWS sender side engages */ 4214 asoc->peers_rwnd = 0; 4215 } 4216 if (asoc->peers_rwnd > old_rwnd) { 4217 win_probe_recovery = 1; 4218 } 4219 /* Now assure a timer where data is queued at */ 4220 again: 4221 j = 0; 4222 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4223 if (win_probe_recovery && (net->window_probe)) { 4224 win_probe_recovered = 1; 4225 /* 4226 * Find first chunk that was used with window probe 4227 * and clear the sent 4228 */ 4229 /* sa_ignore FREED_MEMORY */ 4230 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4231 if (tp1->window_probe) { 4232 /* move back to data send queue */ 4233 sctp_window_probe_recovery(stcb, asoc, tp1); 4234 break; 4235 } 4236 } 4237 } 4238 if (net->flight_size) { 4239 j++; 4240 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net); 4241 if (net->window_probe) { 4242 net->window_probe = 0; 4243 } 4244 } else { 4245 if (net->window_probe) { 4246 /* 4247 * In window probes we must assure a timer 4248 * is still running there 4249 */ 4250 net->window_probe = 0; 4251 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4252 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net); 4253 } 4254 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4255 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4256 stcb, net, 4257 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27); 4258 } 4259 } 4260 } 4261 if ((j == 0) && 4262 (!TAILQ_EMPTY(&asoc->sent_queue)) && 4263 (asoc->sent_queue_retran_cnt == 0) && 4264 (win_probe_recovered == 0) && 4265 (done_once == 0)) { 4266 /* 4267 * huh, this should not happen unless all packets are 4268 * PR-SCTP and marked to skip of course. 
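 * If the audit below finds such stragglers, the counters are
 * rebuilt from scratch: each net's flight_size is zeroed and the
 * flight and retransmit counts are re-accumulated from the sent
 * queue.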
4269 */ 4270 if (sctp_fs_audit(asoc)) { 4271 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4272 net->flight_size = 0; 4273 } 4274 asoc->total_flight = 0; 4275 asoc->total_flight_count = 0; 4276 asoc->sent_queue_retran_cnt = 0; 4277 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4278 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4279 sctp_flight_size_increase(tp1); 4280 sctp_total_flight_increase(stcb, tp1); 4281 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4282 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 4283 } 4284 } 4285 } 4286 done_once = 1; 4287 goto again; 4288 } 4289 /**********************************/ 4290 /* Now what about shutdown issues */ 4291 /**********************************/ 4292 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4293 /* nothing left on sendqueue.. consider done */ 4294 /* clean up */ 4295 if ((asoc->stream_queue_cnt == 1) && 4296 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4297 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 4298 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) { 4299 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT); 4300 } 4301 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4302 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 4303 (asoc->stream_queue_cnt == 1) && 4304 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 4305 struct mbuf *op_err; 4306 4307 *abort_now = 1; 4308 /* XXX */ 4309 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 4310 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28; 4311 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 4312 return; 4313 } 4314 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4315 (asoc->stream_queue_cnt == 0)) { 4316 struct sctp_nets *netp; 4317 4318 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4319 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4320 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4321 } 4322 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT); 4323 sctp_stop_timers_for_shutdown(stcb); 4324 if (asoc->alternate) { 4325 netp = asoc->alternate; 4326 } else { 4327 netp = asoc->primary_destination; 4328 } 4329 sctp_send_shutdown(stcb, netp); 4330 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4331 stcb->sctp_ep, stcb, netp); 4332 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4333 stcb->sctp_ep, stcb, NULL); 4334 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4335 (asoc->stream_queue_cnt == 0)) { 4336 struct sctp_nets *netp; 4337 4338 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4339 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT); 4340 sctp_stop_timers_for_shutdown(stcb); 4341 if (asoc->alternate) { 4342 netp = asoc->alternate; 4343 } else { 4344 netp = asoc->primary_destination; 4345 } 4346 sctp_send_shutdown_ack(stcb, netp); 4347 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4348 stcb->sctp_ep, stcb, netp); 4349 } 4350 } 4351 /*********************************************/ 4352 /* Here we perform PR-SCTP procedures */ 4353 /* (section 4.2) */ 4354 /*********************************************/ 4355 /* C1. 
update advancedPeerAckPoint */ 4356 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) { 4357 asoc->advanced_peer_ack_point = cumack; 4358 } 4359 /* PR-Sctp issues need to be addressed too */ 4360 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { 4361 struct sctp_tmit_chunk *lchk; 4362 uint32_t old_adv_peer_ack_point; 4363 4364 old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 4365 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 4366 /* C3. See if we need to send a Fwd-TSN */ 4367 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) { 4368 /* 4369 * ISSUE with ECN, see FWD-TSN processing. 4370 */ 4371 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 4372 send_forward_tsn(stcb, asoc); 4373 } else if (lchk) { 4374 /* try to FR fwd-tsn's that get lost too */ 4375 if (lchk->rec.data.fwd_tsn_cnt >= 3) { 4376 send_forward_tsn(stcb, asoc); 4377 } 4378 } 4379 } 4380 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) { 4381 if (lchk->whoTo != NULL) { 4382 break; 4383 } 4384 } 4385 if (lchk != NULL) { 4386 /* Assure a timer is up */ 4387 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4388 stcb->sctp_ep, stcb, lchk->whoTo); 4389 } 4390 } 4391 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 4392 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 4393 rwnd, 4394 stcb->asoc.peers_rwnd, 4395 stcb->asoc.total_flight, 4396 stcb->asoc.total_output_queue_size); 4397 } 4398 } 4399 4400 void 4401 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, 4402 struct sctp_tcb *stcb, 4403 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup, 4404 int *abort_now, uint8_t flags, 4405 uint32_t cum_ack, uint32_t rwnd, int ecne_seen) 4406 { 4407 struct sctp_association *asoc; 4408 struct sctp_tmit_chunk *tp1, *tp2; 4409 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack; 4410 uint16_t wake_him = 0; 4411 uint32_t send_s = 0; 4412 long j; 4413 int accum_moved = 0; 4414 int will_exit_fast_recovery = 0; 4415 uint32_t a_rwnd, old_rwnd; 4416 int win_probe_recovery = 0; 4417 int win_probe_recovered = 0; 4418 struct sctp_nets *net = NULL; 4419 int done_once; 4420 int rto_ok = 1; 4421 uint8_t reneged_all = 0; 4422 uint8_t cmt_dac_flag; 4423 4424 /* 4425 * we take any chance we can to service our queues since we cannot 4426 * get awoken when the socket is read from :< 4427 */ 4428 /* 4429 * Now perform the actual SACK handling: 1) Verify that it is not an 4430 * old sack, if so discard. 2) If there is nothing left in the send 4431 * queue (cum-ack is equal to last acked) then you have a duplicate 4432 * too, update any rwnd change and verify no timers are running. 4433 * then return. 3) Process any new consecutive data i.e. cum-ack 4434 * moved process these first and note that it moved. 4) Process any 4435 * sack blocks. 5) Drop any acked from the queue. 6) Check for any 4436 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left, 4437 * sync up flightsizes and things, stop all timers and also check 4438 * for shutdown_pending state. If so then go ahead and send off the 4439 * shutdown. If in shutdown recv, send off the shutdown-ack and 4440 * start that timer, Ret. 9) Strike any non-acked things and do FR 4441 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp 4442 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK 4443 * if in shutdown_recv state. 
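 *
 * (This is the slow path, counted in sctps_slowpath_sack below; a
 * SACK that only moves the cum-ack, with no gap-ack or duplicate
 * reports, can be handled by sctp_express_handle_sack() above, as
 * sctp_update_acked() does for SHUTDOWN chunks.)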
4444 */ 4445 SCTP_TCB_LOCK_ASSERT(stcb); 4446 /* CMT DAC algo */ 4447 this_sack_lowest_newack = 0; 4448 SCTP_STAT_INCR(sctps_slowpath_sack); 4449 last_tsn = cum_ack; 4450 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC; 4451 #ifdef SCTP_ASOCLOG_OF_TSNS 4452 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack; 4453 stcb->asoc.cumack_log_at++; 4454 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 4455 stcb->asoc.cumack_log_at = 0; 4456 } 4457 #endif 4458 a_rwnd = rwnd; 4459 4460 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 4461 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack, 4462 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 4463 } 4464 4465 old_rwnd = stcb->asoc.peers_rwnd; 4466 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4467 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4468 stcb->asoc.overall_error_count, 4469 0, 4470 SCTP_FROM_SCTP_INDATA, 4471 __LINE__); 4472 } 4473 stcb->asoc.overall_error_count = 0; 4474 asoc = &stcb->asoc; 4475 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4476 sctp_log_sack(asoc->last_acked_seq, 4477 cum_ack, 4478 0, 4479 num_seg, 4480 num_dup, 4481 SCTP_LOG_NEW_SACK); 4482 } 4483 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) { 4484 uint16_t i; 4485 uint32_t *dupdata, dblock; 4486 4487 for (i = 0; i < num_dup; i++) { 4488 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t), 4489 sizeof(uint32_t), (uint8_t *)&dblock); 4490 if (dupdata == NULL) { 4491 break; 4492 } 4493 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); 4494 } 4495 } 4496 /* reality check */ 4497 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 4498 tp1 = TAILQ_LAST(&asoc->sent_queue, 4499 sctpchunk_listhead); 4500 send_s = tp1->rec.data.tsn + 1; 4501 } else { 4502 tp1 = NULL; 4503 send_s = asoc->sending_seq; 4504 } 4505 if (SCTP_TSN_GE(cum_ack, send_s)) { 4506 struct mbuf *op_err; 4507 char msg[SCTP_DIAG_INFO_LEN]; 4508 4509 /* 4510 * no way, we have not even sent this TSN out yet. Peer is 4511 * hopelessly messed up with us. 
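 *
 * (E.g. with an empty sent queue and sending_seq 0x100, send_s is
 * 0x100; a SACK carrying cum_ack 0x100 satisfies
 * SCTP_TSN_GE(cum_ack, send_s) and claims a TSN that was never
 * sent, so the association is aborted below.)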
4512 */ 4513 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n", 4514 cum_ack, send_s); 4515 if (tp1) { 4516 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n", 4517 tp1->rec.data.tsn, (void *)tp1); 4518 } 4519 hopeless_peer: 4520 *abort_now = 1; 4521 /* XXX */ 4522 SCTP_SNPRINTF(msg, sizeof(msg), 4523 "Cum ack %8.8x greater or equal than TSN %8.8x", 4524 cum_ack, send_s); 4525 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 4526 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_29; 4527 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 4528 return; 4529 } 4530 /**********************/ 4531 /* 1) check the range */ 4532 /**********************/ 4533 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) { 4534 /* acking something behind */ 4535 return; 4536 } 4537 4538 /* update the Rwnd of the peer */ 4539 if (TAILQ_EMPTY(&asoc->sent_queue) && 4540 TAILQ_EMPTY(&asoc->send_queue) && 4541 (asoc->stream_queue_cnt == 0)) { 4542 /* nothing left on send/sent and strmq */ 4543 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4544 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4545 asoc->peers_rwnd, 0, 0, a_rwnd); 4546 } 4547 asoc->peers_rwnd = a_rwnd; 4548 if (asoc->sent_queue_retran_cnt) { 4549 asoc->sent_queue_retran_cnt = 0; 4550 } 4551 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4552 /* SWS sender side engages */ 4553 asoc->peers_rwnd = 0; 4554 } 4555 /* stop any timers */ 4556 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4557 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4558 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30); 4559 net->partial_bytes_acked = 0; 4560 net->flight_size = 0; 4561 } 4562 asoc->total_flight = 0; 4563 asoc->total_flight_count = 0; 4564 return; 4565 } 4566 /* 4567 * We init netAckSz and netAckSz2 to 0. These are used to track 2 4568 * things. The total byte count acked is tracked in netAckSz AND 4569 * netAck2 is used to track the total bytes acked that are un- 4570 * ambiguous and were never retransmitted. We track these on a per 4571 * destination address basis. 4572 */ 4573 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4574 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) { 4575 /* Drag along the window_tsn for cwr's */ 4576 net->cwr_window_tsn = cum_ack; 4577 } 4578 net->prev_cwnd = net->cwnd; 4579 net->net_ack = 0; 4580 net->net_ack2 = 0; 4581 4582 /* 4583 * CMT: Reset CUC and Fast recovery algo variables before 4584 * SACK processing 4585 */ 4586 net->new_pseudo_cumack = 0; 4587 net->will_exit_fast_recovery = 0; 4588 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 4589 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 4590 } 4591 4592 /* 4593 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has 4594 * to be greater than the cumack. Also reset saw_newack to 0 4595 * for all dests. 4596 */ 4597 net->saw_newack = 0; 4598 net->this_sack_highest_newack = last_tsn; 4599 } 4600 /* process the new consecutive TSN first */ 4601 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4602 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) { 4603 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 4604 accum_moved = 1; 4605 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4606 /* 4607 * If it is less than ACKED, it is 4608 * now no-longer in flight. 
Higher 4609 * values may occur during marking 4610 */ 4611 if ((tp1->whoTo->dest_state & 4612 SCTP_ADDR_UNCONFIRMED) && 4613 (tp1->snd_count < 2)) { 4614 /* 4615 * If there was no retran 4616 * and the address is 4617 * un-confirmed and we sent 4618 * there and are now 4619 * sacked.. its confirmed, 4620 * mark it so. 4621 */ 4622 tp1->whoTo->dest_state &= 4623 ~SCTP_ADDR_UNCONFIRMED; 4624 } 4625 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4626 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4627 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4628 tp1->whoTo->flight_size, 4629 tp1->book_size, 4630 (uint32_t)(uintptr_t)tp1->whoTo, 4631 tp1->rec.data.tsn); 4632 } 4633 sctp_flight_size_decrease(tp1); 4634 sctp_total_flight_decrease(stcb, tp1); 4635 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 4636 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 4637 tp1); 4638 } 4639 } 4640 tp1->whoTo->net_ack += tp1->send_size; 4641 4642 /* CMT SFR and DAC algos */ 4643 this_sack_lowest_newack = tp1->rec.data.tsn; 4644 tp1->whoTo->saw_newack = 1; 4645 4646 if (tp1->snd_count < 2) { 4647 /* 4648 * True non-retransmitted 4649 * chunk 4650 */ 4651 tp1->whoTo->net_ack2 += 4652 tp1->send_size; 4653 4654 /* update RTO too? */ 4655 if (tp1->do_rtt) { 4656 if (rto_ok && 4657 sctp_calculate_rto(stcb, 4658 &stcb->asoc, 4659 tp1->whoTo, 4660 &tp1->sent_rcv_time, 4661 SCTP_RTT_FROM_DATA)) { 4662 rto_ok = 0; 4663 } 4664 if (tp1->whoTo->rto_needed == 0) { 4665 tp1->whoTo->rto_needed = 1; 4666 } 4667 tp1->do_rtt = 0; 4668 } 4669 } 4670 /* 4671 * CMT: CUCv2 algorithm. From the 4672 * cumack'd TSNs, for each TSN being 4673 * acked for the first time, set the 4674 * following variables for the 4675 * corresp destination. 4676 * new_pseudo_cumack will trigger a 4677 * cwnd update. 4678 * find_(rtx_)pseudo_cumack will 4679 * trigger search for the next 4680 * expected (rtx-)pseudo-cumack. 4681 */ 4682 tp1->whoTo->new_pseudo_cumack = 1; 4683 tp1->whoTo->find_pseudo_cumack = 1; 4684 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4685 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4686 sctp_log_sack(asoc->last_acked_seq, 4687 cum_ack, 4688 tp1->rec.data.tsn, 4689 0, 4690 0, 4691 SCTP_LOG_TSN_ACKED); 4692 } 4693 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 4694 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); 4695 } 4696 } 4697 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4698 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4699 #ifdef SCTP_AUDITING_ENABLED 4700 sctp_audit_log(0xB3, 4701 (asoc->sent_queue_retran_cnt & 0x000000ff)); 4702 #endif 4703 } 4704 if (tp1->rec.data.chunk_was_revoked) { 4705 /* deflate the cwnd */ 4706 tp1->whoTo->cwnd -= tp1->book_size; 4707 tp1->rec.data.chunk_was_revoked = 0; 4708 } 4709 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4710 tp1->sent = SCTP_DATAGRAM_ACKED; 4711 } 4712 } 4713 } else { 4714 break; 4715 } 4716 } 4717 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; 4718 /* always set this up to cum-ack */ 4719 asoc->this_sack_highest_gap = last_tsn; 4720 4721 if ((num_seg > 0) || (num_nr_seg > 0)) { 4722 /* 4723 * thisSackHighestGap will increase while handling NEW 4724 * segments this_sack_highest_newack will increase while 4725 * handling NEWLY ACKED chunks. this_sack_lowest_newack is 4726 * used for CMT DAC algo. saw_newack will also change. 
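 * (Note the distinction: biggest_tsn_acked tracks the largest TSN
 * covered by any gap block, while biggest_tsn_newly_acked only
 * grows for TSNs not previously marked acked; the strike logic in
 * sctp_strike_gap_ack_chunks() keys off the latter.)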
4727 */ 4728 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked, 4729 &biggest_tsn_newly_acked, &this_sack_lowest_newack, 4730 num_seg, num_nr_seg, &rto_ok)) { 4731 wake_him++; 4732 } 4733 /* 4734 * validate the biggest_tsn_acked in the gap acks if strict 4735 * adherence is wanted. 4736 */ 4737 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) { 4738 /* 4739 * peer is either confused or we are under attack. 4740 * We must abort. 4741 */ 4742 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n", 4743 biggest_tsn_acked, send_s); 4744 goto hopeless_peer; 4745 } 4746 } 4747 /*******************************************/ 4748 /* cancel ALL T3-send timer if accum moved */ 4749 /*******************************************/ 4750 if (asoc->sctp_cmt_on_off > 0) { 4751 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4752 if (net->new_pseudo_cumack) 4753 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4754 stcb, net, 4755 SCTP_FROM_SCTP_INDATA + SCTP_LOC_31); 4756 } 4757 } else { 4758 if (accum_moved) { 4759 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4760 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4761 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32); 4762 } 4763 } 4764 } 4765 /********************************************/ 4766 /* drop the acked chunks from the sentqueue */ 4767 /********************************************/ 4768 asoc->last_acked_seq = cum_ack; 4769 4770 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 4771 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) { 4772 break; 4773 } 4774 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4775 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) { 4776 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--; 4777 #ifdef INVARIANTS 4778 } else { 4779 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); 4780 #endif 4781 } 4782 } 4783 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) && 4784 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && 4785 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) { 4786 asoc->trigger_reset = 1; 4787 } 4788 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4789 if (PR_SCTP_ENABLED(tp1->flags)) { 4790 if (asoc->pr_sctp_cnt != 0) 4791 asoc->pr_sctp_cnt--; 4792 } 4793 asoc->sent_queue_cnt--; 4794 if (tp1->data) { 4795 /* sa_ignore NO_NULL_CHK */ 4796 sctp_free_bufspace(stcb, asoc, tp1, 1); 4797 sctp_m_freem(tp1->data); 4798 tp1->data = NULL; 4799 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) { 4800 asoc->sent_queue_cnt_removeable--; 4801 } 4802 } 4803 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4804 sctp_log_sack(asoc->last_acked_seq, 4805 cum_ack, 4806 tp1->rec.data.tsn, 4807 0, 4808 0, 4809 SCTP_LOG_FREE_SENT); 4810 } 4811 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 4812 wake_him++; 4813 } 4814 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) { 4815 #ifdef INVARIANTS 4816 panic("Warning flight size is positive and should be 0"); 4817 #else 4818 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n", 4819 asoc->total_flight); 4820 #endif 4821 asoc->total_flight = 0; 4822 } 4823 4824 /* sa_ignore NO_NULL_CHK */ 4825 if ((wake_him) && (stcb->sctp_socket)) { 4826 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4827 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4828 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK); 4829 } 4830 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4831 } else { 4832 if 
(SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
		}
	}

	if (asoc->fast_retran_loss_recovery && accum_moved) {
		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
			/* Setup so we will exit RFC2582 fast recovery */
			will_exit_fast_recovery = 1;
		}
	}
	/*
	 * Check for revoked fragments:
	 *
	 * - If the previous SACK had no frags, then nothing can have been
	 *   revoked.
	 * - If the previous SACK had frags and we now have frags too (aka
	 *   num_seg > 0), call sctp_check_for_revoked() to tell whether
	 *   the peer revoked some of them.
	 * - Otherwise the peer revoked all ACKED fragments, since we had
	 *   some before and now we have NONE.
	 */

	if (num_seg) {
		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
		asoc->saw_sack_with_frags = 1;
	} else if (asoc->saw_sack_with_frags) {
		int cnt_revoked = 0;

		/* Peer revoked all dg's marked or acked */
		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
				tp1->sent = SCTP_DATAGRAM_SENT;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
					    tp1->whoTo->flight_size,
					    tp1->book_size,
					    (uint32_t)(uintptr_t)tp1->whoTo,
					    tp1->rec.data.tsn);
				}
				sctp_flight_size_increase(tp1);
				sctp_total_flight_increase(stcb, tp1);
				tp1->rec.data.chunk_was_revoked = 1;
				/*
				 * To ensure that this increase in
				 * flightsize, which is artificial, does not
				 * throttle the sender, we also increase the
				 * cwnd artificially.
				 */
				tp1->whoTo->cwnd += tp1->book_size;
				cnt_revoked++;
			}
		}
		if (cnt_revoked) {
			reneged_all = 1;
		}
		asoc->saw_sack_with_frags = 0;
	}
	if (num_nr_seg > 0)
		asoc->saw_sack_with_nr_frags = 1;
	else
		asoc->saw_sack_with_nr_frags = 0;

	/* JRS - Use the congestion control given in the CC module */
	if (ecne_seen == 0) {
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			if (net->net_ack2 > 0) {
				/*
				 * Karn's rule applies to clearing error
				 * count, this is optional.
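 * Once cleared, the (possibly doubled) RTO is also restored below
 * from the smoothed estimates, i.e.
 * net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv,
 * clamped to [minrto, maxrto].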
4900 */ 4901 net->error_count = 0; 4902 if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) { 4903 /* addr came good */ 4904 net->dest_state |= SCTP_ADDR_REACHABLE; 4905 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 4906 0, (void *)net, SCTP_SO_NOT_LOCKED); 4907 } 4908 4909 if (net == stcb->asoc.primary_destination) { 4910 if (stcb->asoc.alternate) { 4911 /* 4912 * release the alternate, 4913 * primary is good 4914 */ 4915 sctp_free_remote_addr(stcb->asoc.alternate); 4916 stcb->asoc.alternate = NULL; 4917 } 4918 } 4919 4920 if (net->dest_state & SCTP_ADDR_PF) { 4921 net->dest_state &= ~SCTP_ADDR_PF; 4922 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, 4923 stcb->sctp_ep, stcb, net, 4924 SCTP_FROM_SCTP_INDATA + SCTP_LOC_33); 4925 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 4926 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 4927 /* Done with this net */ 4928 net->net_ack = 0; 4929 } 4930 /* restore any doubled timers */ 4931 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 4932 if (net->RTO < stcb->asoc.minrto) { 4933 net->RTO = stcb->asoc.minrto; 4934 } 4935 if (net->RTO > stcb->asoc.maxrto) { 4936 net->RTO = stcb->asoc.maxrto; 4937 } 4938 } 4939 } 4940 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery); 4941 } 4942 4943 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4944 /* nothing left in-flight */ 4945 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4946 /* stop all timers */ 4947 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4948 stcb, net, 4949 SCTP_FROM_SCTP_INDATA + SCTP_LOC_34); 4950 net->flight_size = 0; 4951 net->partial_bytes_acked = 0; 4952 } 4953 asoc->total_flight = 0; 4954 asoc->total_flight_count = 0; 4955 } 4956 4957 /**********************************/ 4958 /* Now what about shutdown issues */ 4959 /**********************************/ 4960 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4961 /* nothing left on sendqueue.. 
consider done */ 4962 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4963 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4964 asoc->peers_rwnd, 0, 0, a_rwnd); 4965 } 4966 asoc->peers_rwnd = a_rwnd; 4967 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4968 /* SWS sender side engages */ 4969 asoc->peers_rwnd = 0; 4970 } 4971 /* clean up */ 4972 if ((asoc->stream_queue_cnt == 1) && 4973 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4974 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 4975 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) { 4976 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT); 4977 } 4978 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4979 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 4980 (asoc->stream_queue_cnt == 1) && 4981 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 4982 struct mbuf *op_err; 4983 4984 *abort_now = 1; 4985 /* XXX */ 4986 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 4987 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35; 4988 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 4989 return; 4990 } 4991 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4992 (asoc->stream_queue_cnt == 0)) { 4993 struct sctp_nets *netp; 4994 4995 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4996 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4997 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4998 } 4999 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT); 5000 sctp_stop_timers_for_shutdown(stcb); 5001 if (asoc->alternate) { 5002 netp = asoc->alternate; 5003 } else { 5004 netp = asoc->primary_destination; 5005 } 5006 sctp_send_shutdown(stcb, netp); 5007 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 5008 stcb->sctp_ep, stcb, netp); 5009 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 5010 stcb->sctp_ep, stcb, NULL); 5011 return; 5012 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) && 5013 (asoc->stream_queue_cnt == 0)) { 5014 struct sctp_nets *netp; 5015 5016 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 5017 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT); 5018 sctp_stop_timers_for_shutdown(stcb); 5019 if (asoc->alternate) { 5020 netp = asoc->alternate; 5021 } else { 5022 netp = asoc->primary_destination; 5023 } 5024 sctp_send_shutdown_ack(stcb, netp); 5025 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 5026 stcb->sctp_ep, stcb, netp); 5027 return; 5028 } 5029 } 5030 /* 5031 * Now here we are going to recycle net_ack for a different use... 5032 * HEADS UP. 5033 */ 5034 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5035 net->net_ack = 0; 5036 } 5037 5038 /* 5039 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking 5040 * to be done. Setting this_sack_lowest_newack to the cum_ack will 5041 * automatically ensure that. 5042 */ 5043 if ((asoc->sctp_cmt_on_off > 0) && 5044 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && 5045 (cmt_dac_flag == 0)) { 5046 this_sack_lowest_newack = cum_ack; 5047 } 5048 if ((num_seg > 0) || (num_nr_seg > 0)) { 5049 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, 5050 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved); 5051 } 5052 /* JRS - Use the congestion control given in the CC module */ 5053 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc); 5054 5055 /* Now are we exiting loss recovery ? 
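 * (RFC 2582 style: will_exit_fast_recovery was set above once the
 * cum-ack reached fast_recovery_tsn, the mark recorded when
 * recovery was entered.)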
*/ 5056 if (will_exit_fast_recovery) { 5057 /* Ok, we must exit fast recovery */ 5058 asoc->fast_retran_loss_recovery = 0; 5059 } 5060 if ((asoc->sat_t3_loss_recovery) && 5061 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) { 5062 /* end satellite t3 loss recovery */ 5063 asoc->sat_t3_loss_recovery = 0; 5064 } 5065 /* 5066 * CMT Fast recovery 5067 */ 5068 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5069 if (net->will_exit_fast_recovery) { 5070 /* Ok, we must exit fast recovery */ 5071 net->fast_retran_loss_recovery = 0; 5072 } 5073 } 5074 5075 /* Adjust and set the new rwnd value */ 5076 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 5077 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 5078 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd); 5079 } 5080 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd, 5081 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 5082 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 5083 /* SWS sender side engages */ 5084 asoc->peers_rwnd = 0; 5085 } 5086 if (asoc->peers_rwnd > old_rwnd) { 5087 win_probe_recovery = 1; 5088 } 5089 5090 /* 5091 * Now we must setup so we have a timer up for anyone with 5092 * outstanding data. 5093 */ 5094 done_once = 0; 5095 again: 5096 j = 0; 5097 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5098 if (win_probe_recovery && (net->window_probe)) { 5099 win_probe_recovered = 1; 5100 /*- 5101 * Find first chunk that was used with 5102 * window probe and clear the event. Put 5103 * it back into the send queue as if has 5104 * not been sent. 5105 */ 5106 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5107 if (tp1->window_probe) { 5108 sctp_window_probe_recovery(stcb, asoc, tp1); 5109 break; 5110 } 5111 } 5112 } 5113 if (net->flight_size) { 5114 j++; 5115 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5116 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5117 stcb->sctp_ep, stcb, net); 5118 } 5119 if (net->window_probe) { 5120 net->window_probe = 0; 5121 } 5122 } else { 5123 if (net->window_probe) { 5124 /* 5125 * In window probes we must assure a timer 5126 * is still running there 5127 */ 5128 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5129 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5130 stcb->sctp_ep, stcb, net); 5131 } 5132 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5133 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 5134 stcb, net, 5135 SCTP_FROM_SCTP_INDATA + SCTP_LOC_36); 5136 } 5137 } 5138 } 5139 if ((j == 0) && 5140 (!TAILQ_EMPTY(&asoc->sent_queue)) && 5141 (asoc->sent_queue_retran_cnt == 0) && 5142 (win_probe_recovered == 0) && 5143 (done_once == 0)) { 5144 /* 5145 * huh, this should not happen unless all packets are 5146 * PR-SCTP and marked to skip of course. 
5147 */ 5148 if (sctp_fs_audit(asoc)) { 5149 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5150 net->flight_size = 0; 5151 } 5152 asoc->total_flight = 0; 5153 asoc->total_flight_count = 0; 5154 asoc->sent_queue_retran_cnt = 0; 5155 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5156 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5157 sctp_flight_size_increase(tp1); 5158 sctp_total_flight_increase(stcb, tp1); 5159 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 5160 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 5161 } 5162 } 5163 } 5164 done_once = 1; 5165 goto again; 5166 } 5167 /*********************************************/ 5168 /* Here we perform PR-SCTP procedures */ 5169 /* (section 4.2) */ 5170 /*********************************************/ 5171 /* C1. update advancedPeerAckPoint */ 5172 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) { 5173 asoc->advanced_peer_ack_point = cum_ack; 5174 } 5175 /* C2. try to further move advancedPeerAckPoint ahead */ 5176 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { 5177 struct sctp_tmit_chunk *lchk; 5178 uint32_t old_adv_peer_ack_point; 5179 5180 old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 5181 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 5182 /* C3. See if we need to send a Fwd-TSN */ 5183 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) { 5184 /* 5185 * ISSUE with ECN, see FWD-TSN processing. 5186 */ 5187 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 5188 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 5189 0xee, cum_ack, asoc->advanced_peer_ack_point, 5190 old_adv_peer_ack_point); 5191 } 5192 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 5193 send_forward_tsn(stcb, asoc); 5194 } else if (lchk) { 5195 /* try to FR fwd-tsn's that get lost too */ 5196 if (lchk->rec.data.fwd_tsn_cnt >= 3) { 5197 send_forward_tsn(stcb, asoc); 5198 } 5199 } 5200 } 5201 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) { 5202 if (lchk->whoTo != NULL) { 5203 break; 5204 } 5205 } 5206 if (lchk != NULL) { 5207 /* Assure a timer is up */ 5208 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5209 stcb->sctp_ep, stcb, lchk->whoTo); 5210 } 5211 } 5212 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 5213 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 5214 a_rwnd, 5215 stcb->asoc.peers_rwnd, 5216 stcb->asoc.total_flight, 5217 stcb->asoc.total_output_queue_size); 5218 } 5219 } 5220 5221 void 5222 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag) 5223 { 5224 /* Copy cum-ack */ 5225 uint32_t cum_ack, a_rwnd; 5226 5227 cum_ack = ntohl(cp->cumulative_tsn_ack); 5228 /* Arrange so a_rwnd does NOT change */ 5229 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight; 5230 5231 /* Now call the express sack handling */ 5232 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0); 5233 } 5234 5235 static void 5236 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, 5237 struct sctp_stream_in *strmin) 5238 { 5239 struct sctp_queued_to_read *control, *ncontrol; 5240 struct sctp_association *asoc; 5241 uint32_t mid; 5242 int need_reasm_check = 0; 5243 5244 KASSERT(stcb != NULL, ("stcb == NULL")); 5245 SCTP_TCB_LOCK_ASSERT(stcb); 5246 SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep); 5247 5248 asoc = &stcb->asoc; 5249 mid = strmin->last_mid_delivered; 5250 /* 5251 * First deliver anything prior to and including the stream no that 5252 * came in. 
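 *
 * (E.g. after a FORWARD-TSN has moved last_mid_delivered to 7, any
 * complete message still queued with a MID of at most 7 is pushed
 * to the read queue here; a fragmented one only adjusts the
 * delivery cursor and is left to the reassembly check below.)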
5253 */ 5254 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) { 5255 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) { 5256 /* this is deliverable now */ 5257 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 5258 if (control->on_strm_q) { 5259 if (control->on_strm_q == SCTP_ON_ORDERED) { 5260 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm); 5261 } else if (control->on_strm_q == SCTP_ON_UNORDERED) { 5262 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm); 5263 #ifdef INVARIANTS 5264 } else { 5265 panic("strmin: %p ctl: %p unknown %d", 5266 strmin, control, control->on_strm_q); 5267 #endif 5268 } 5269 control->on_strm_q = 0; 5270 } 5271 /* subtract pending on streams */ 5272 if (asoc->size_on_all_streams >= control->length) { 5273 asoc->size_on_all_streams -= control->length; 5274 } else { 5275 #ifdef INVARIANTS 5276 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 5277 #else 5278 asoc->size_on_all_streams = 0; 5279 #endif 5280 } 5281 sctp_ucount_decr(asoc->cnt_on_all_streams); 5282 /* deliver it to at least the delivery-q */ 5283 if (stcb->sctp_socket) { 5284 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 5285 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 5286 &stcb->sctp_socket->so_rcv, 1, 5287 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED); 5288 } 5289 } else { 5290 /* Its a fragmented message */ 5291 if (control->first_frag_seen) { 5292 /* 5293 * Make it so this is next to 5294 * deliver, we restore later 5295 */ 5296 strmin->last_mid_delivered = control->mid - 1; 5297 need_reasm_check = 1; 5298 break; 5299 } 5300 } 5301 } else { 5302 /* no more delivery now. */ 5303 break; 5304 } 5305 } 5306 if (need_reasm_check) { 5307 int ret; 5308 5309 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD); 5310 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) { 5311 /* Restore the next to deliver unless we are ahead */ 5312 strmin->last_mid_delivered = mid; 5313 } 5314 if (ret == 0) { 5315 /* Left the front Partial one on */ 5316 return; 5317 } 5318 need_reasm_check = 0; 5319 } 5320 /* 5321 * now we must deliver things in queue the normal way if any are 5322 * now ready. 
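 *
 * (In-order delivery resumes at last_mid_delivered + 1: with the
 * cursor at 7, a complete message with MID 8 is delivered and the
 * cursor moves to 8; a gap at MID 9 stops the loop.)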
5323 */ 5324 mid = strmin->last_mid_delivered + 1; 5325 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) { 5326 if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) { 5327 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 5328 /* this is deliverable now */ 5329 if (control->on_strm_q) { 5330 if (control->on_strm_q == SCTP_ON_ORDERED) { 5331 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm); 5332 } else if (control->on_strm_q == SCTP_ON_UNORDERED) { 5333 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm); 5334 #ifdef INVARIANTS 5335 } else { 5336 panic("strmin: %p ctl: %p unknown %d", 5337 strmin, control, control->on_strm_q); 5338 #endif 5339 } 5340 control->on_strm_q = 0; 5341 } 5342 /* subtract pending on streams */ 5343 if (asoc->size_on_all_streams >= control->length) { 5344 asoc->size_on_all_streams -= control->length; 5345 } else { 5346 #ifdef INVARIANTS 5347 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 5348 #else 5349 asoc->size_on_all_streams = 0; 5350 #endif 5351 } 5352 sctp_ucount_decr(asoc->cnt_on_all_streams); 5353 /* deliver it to at least the delivery-q */ 5354 strmin->last_mid_delivered = control->mid; 5355 if (stcb->sctp_socket) { 5356 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 5357 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 5358 &stcb->sctp_socket->so_rcv, 1, 5359 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED); 5360 } 5361 mid = strmin->last_mid_delivered + 1; 5362 } else { 5363 /* Its a fragmented message */ 5364 if (control->first_frag_seen) { 5365 /* 5366 * Make it so this is next to 5367 * deliver 5368 */ 5369 strmin->last_mid_delivered = control->mid - 1; 5370 need_reasm_check = 1; 5371 break; 5372 } 5373 } 5374 } else { 5375 break; 5376 } 5377 } 5378 if (need_reasm_check) { 5379 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD); 5380 } 5381 } 5382 5383 static void 5384 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb, 5385 struct sctp_association *asoc, struct sctp_stream_in *strm, 5386 struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn) 5387 { 5388 struct sctp_tmit_chunk *chk, *nchk; 5389 5390 /* 5391 * For now large messages held on the stream reasm that are complete 5392 * will be tossed too. We could in theory do more work to spin 5393 * through and stop after dumping one msg aka seeing the start of a 5394 * new msg at the head, and call the delivery function... to see if 5395 * it can be delivered... But for now we just dump everything on the 5396 * queue. 
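 *
 * (For old-style unordered data the purge below is bounded by the
 * new cum-TSN: fragments with a TSN beyond cumtsn stay on the
 * reassembly list and are re-anchored as a fresh message.)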

	KASSERT(stcb != NULL, ("stcb == NULL"));
	SCTP_TCB_LOCK_ASSERT(stcb);
	SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep);

	if (!asoc->idata_supported && !ordered &&
	    control->first_frag_seen &&
	    SCTP_TSN_GT(control->fsn_included, cumtsn)) {
		return;
	}
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		/* Purge hanging chunks */
		if (!asoc->idata_supported && !ordered) {
			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
				break;
			}
		}
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (asoc->size_on_reasm_queue >= chk->send_size) {
			asoc->size_on_reasm_queue -= chk->send_size;
		} else {
#ifdef INVARIANTS
			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
#else
			asoc->size_on_reasm_queue = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	if (!TAILQ_EMPTY(&control->reasm)) {
		/* This has to be old data, unordered */
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
		chk = TAILQ_FIRST(&control->reasm);
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc,
			    chk, SCTP_READ_LOCK_HELD);
		}
		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
		return;
	}
	if (control->on_strm_q == SCTP_ON_ORDERED) {
		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		control->on_strm_q = 0;
	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
		control->on_strm_q = 0;
#ifdef INVARIANTS
	} else if (control->on_strm_q) {
		panic("strm: %p ctl: %p unknown %d",
		    strm, control, control->on_strm_q);
#endif
	}
	control->on_strm_q = 0;
	if (control->on_read_q == 0) {
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_free_a_readq(stcb, control);
	}
}

void
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
    struct sctp_forward_tsn_chunk *fwd,
    int *abort_flag, struct mbuf *m, int offset)
{
	/* The pr-sctp fwd tsn */
	/*
	 * Here we will perform all the data-receiver-side steps for
	 * processing a FwdTSN, as required by the PR-SCTP draft:
	 *
	 * Assume we get FwdTSN(x):
	 *
	 * 1) update local cumTSN to x
	 * 2) try to further advance cumTSN to x + others we have
	 * 3) examine and update the re-ordering queue on pr-in-streams
	 * 4) clean up the re-assembly queue
	 * 5) Send a sack to report where we are.
	 */
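	/*
	 * A hedged worked example of step 1 (values illustrative only):
	 * with mapping_array_base_tsn at 0xfffffffe and a FwdTSN carrying
	 * new_cumulative_tsn 0x00000003, serial arithmetic wraps and
	 * SCTP_CALC_TSN_TO_GAP() yields gap = 5, so slots 0 through 5 of
	 * the mapping window are marked as received below; a gap at or
	 * beyond 8 * mapping_array_size instead resets both maps and
	 * rebases them at new_cum_tsn + 1.
	 */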
	struct sctp_association *asoc;
	uint32_t new_cum_tsn, gap;
	unsigned int i, fwd_sz, m_size;
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *control, *ncontrol;

	asoc = &stcb->asoc;
	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
		SCTPDBG(SCTP_DEBUG_INDATA1,
		    "Bad size too small/big fwd-tsn\n");
		return;
	}
	m_size = (stcb->asoc.mapping_array_size << 3);
	/*************************************************************/
	/* 1. Here we update local cumTSN and shift the bitmap array */
	/*************************************************************/
	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);

	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
		/* Already got there ... */
		return;
	}
	/*
	 * Now we know the new TSN is more advanced; let's find the
	 * actual gap.
	 */
	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
	asoc->cumulative_tsn = new_cum_tsn;
	if (gap >= m_size) {
		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
			struct mbuf *op_err;
			char msg[SCTP_DIAG_INFO_LEN];

			/*
			 * Out of range (of the single-byte chunks in the
			 * rwnd I give out).  This must be an attacker.
			 */
			*abort_flag = 1;
			SCTP_SNPRINTF(msg, sizeof(msg),
			    "New cum ack %8.8x too high, highest TSN %8.8x",
			    new_cum_tsn, asoc->highest_tsn_inside_map);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_37;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
			return;
		}
		SCTP_STAT_INCR(sctps_fwdtsn_map_over);

		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
		asoc->highest_tsn_inside_map = new_cum_tsn;

		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->highest_tsn_inside_nr_map = new_cum_tsn;

		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
	} else {
		SCTP_TCB_LOCK_ASSERT(stcb);
		for (i = 0; i <= gap; i++) {
			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
				}
			}
		}
	}
	/*************************************************************/
	/* 2. Clear up re-assembly queue                             */
	/*************************************************************/

	/* This is now done as part of clearing up the stream/seq */
	if (asoc->idata_supported == 0) {
		uint16_t sid;

		/* Flush all the un-ordered data based on cum-tsn */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		for (sid = 0; sid < asoc->streamincnt; sid++) {
			strm = &asoc->strmin[sid];
			if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
				sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn);
			}
		}
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	/*******************************************************/
	/* 3. Update the PR-stream re-ordering queues and fix  */
	/* delivery issues as needed.                          */
	/*******************************************************/
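	/*
	 * A hedged sizing example (lengths illustrative only): each entry
	 * after the 8-byte fixed chunk header names one stream and the
	 * highest message skipped on it, so a chunk_length of 20 leaves
	 * fwd_sz = 12 below, which holds 12 / 4 = 3 sctp_strseq entries
	 * without I-DATA but only 12 / 8 = 1 sctp_strseq_mid entry with
	 * I-DATA.
	 */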
	fwd_sz -= sizeof(*fwd);
	if (m && fwd_sz) {
		/* New method. */
		unsigned int num_str;
		uint32_t mid;
		uint16_t sid;
		uint16_t ordered, flags;
		struct sctp_strseq *stseq, strseqbuf;
		struct sctp_strseq_mid *stseq_m, strseqbuf_m;

		offset += sizeof(*fwd);

		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		if (asoc->idata_supported) {
			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
		} else {
			num_str = fwd_sz / sizeof(struct sctp_strseq);
		}
		for (i = 0; i < num_str; i++) {
			if (asoc->idata_supported) {
				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
				    sizeof(struct sctp_strseq_mid),
				    (uint8_t *)&strseqbuf_m);
				offset += sizeof(struct sctp_strseq_mid);
				if (stseq_m == NULL) {
					break;
				}
				sid = ntohs(stseq_m->sid);
				mid = ntohl(stseq_m->mid);
				flags = ntohs(stseq_m->flags);
				if (flags & PR_SCTP_UNORDERED_FLAG) {
					ordered = 0;
				} else {
					ordered = 1;
				}
			} else {
				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
				    sizeof(struct sctp_strseq),
				    (uint8_t *)&strseqbuf);
				offset += sizeof(struct sctp_strseq);
				if (stseq == NULL) {
					break;
				}
				sid = ntohs(stseq->sid);
				mid = (uint32_t)ntohs(stseq->ssn);
				ordered = 1;
			}
			/*
			 * Ok, we now look for the stream/seq on the read
			 * queue where it's not all delivered.  If we find
			 * it we transmute the read entry into a
			 * PDI_ABORTED.
			 */
			if (sid >= asoc->streamincnt) {
				/* screwed up streams, stop! */
				break;
			}
			if ((asoc->str_of_pdapi == sid) &&
			    (asoc->ssn_of_pdapi == mid)) {
				/*
				 * If this is the one we were partially
				 * delivering now, then we no longer are.
				 * Note this will change with the
				 * reassembly re-write.
				 */
				asoc->fragmented_delivery_inprogress = 0;
			}
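			/*
			 * A hedged flush example (MIDs illustrative only):
			 * a forwarded MID of 7 on an ordered stream
			 * holding partial messages 5, 6, and 9 flushes 5
			 * and 6 via SCTP_MID_GE() below and leaves 9;
			 * without I-DATA all unordered fragments sit on a
			 * single queue head, so only that head is flushed
			 * against the new cumulative TSN.
			 */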
			strm = &asoc->strmin[sid];
			if (ordered) {
				TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, ncontrol) {
					if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
						sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
					}
				}
			} else {
				if (asoc->idata_supported) {
					TAILQ_FOREACH_SAFE(control, &strm->uno_inqueue, next_instrm, ncontrol) {
						if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
							sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
						}
					}
				} else {
					if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
						sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn);
					}
				}
			}
			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
				if ((control->sinfo_stream == sid) &&
				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
					control->pdapi_aborted = 1;
					control->end_added = 1;
					if (control->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
						if (asoc->size_on_all_streams >= control->length) {
							asoc->size_on_all_streams -= control->length;
						} else {
#ifdef INVARIANTS
							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
							asoc->size_on_all_streams = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_all_streams);
					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
					} else if (control->on_strm_q) {
						panic("strm: %p ctl: %p unknown %d",
						    strm, control, control->on_strm_q);
#endif
					}
					control->on_strm_q = 0;
					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
					    stcb,
					    SCTP_PARTIAL_DELIVERY_ABORTED,
					    (void *)control,
					    SCTP_SO_NOT_LOCKED);
					break;
				} else if ((control->sinfo_stream == sid) &&
				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
					/* We are past our victim SSN */
					break;
				}
			}
			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
				/* Update the sequence number */
				strm->last_mid_delivered = mid;
			}
			/* now kick the stream the new way */
			/* sa_ignore NO_NULL_CHK */
			sctp_kick_prsctp_reorder_queue(stcb, strm);
		}
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	/*
	 * Now slide things forward.
	 */
	sctp_slide_mapping_arrays(stcb);
}
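
#if 0
/*
 * A minimal, hedged sketch (not compiled in; example_tsn_gt() is a
 * hypothetical helper, not part of this file): the serial-number
 * comparison that SCTP_TSN_GT() and the MID comparison macros used
 * above implement, following RFC 1982 arithmetic modulo 2^32.  For
 * example, TSN 0x00000001 compares greater than 0xfffffffe because
 * the unsigned difference, 3, is below 2^31.
 */
static int
example_tsn_gt(uint32_t a, uint32_t b)
{
	/* a > b in serial-number space iff 0 < (uint32_t)(a - b) < 2^31 */
	return (a != b && (uint32_t)(a - b) < (1U << 31));
}
#endif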