1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. 5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. 6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * a) Redistributions of source code must retain the above copyright notice, 12 * this list of conditions and the following disclaimer. 13 * 14 * b) Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the distribution. 17 * 18 * c) Neither the name of Cisco Systems, Inc. nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 #include <netinet/sctp_os.h> 36 #include <sys/proc.h> 37 #include <netinet/sctp_var.h> 38 #include <netinet/sctp_sysctl.h> 39 #include <netinet/sctp_header.h> 40 #include <netinet/sctp_pcb.h> 41 #include <netinet/sctputil.h> 42 #include <netinet/sctp_output.h> 43 #include <netinet/sctp_uio.h> 44 #include <netinet/sctp_auth.h> 45 #include <netinet/sctp_timer.h> 46 #include <netinet/sctp_asconf.h> 47 #include <netinet/sctp_indata.h> 48 #include <netinet/sctp_bsd_addr.h> 49 #include <netinet/sctp_input.h> 50 #include <netinet/sctp_crc32.h> 51 #include <netinet/sctp_lock_bsd.h> 52 /* 53 * NOTES: On the outbound side of things I need to check the sack timer to 54 * see if I should generate a sack into the chunk queue (if I have data to 55 * send that is and will be sending it .. for bundling. 56 * 57 * The callback in sctp_usrreq.c will get called when the socket is read from. 58 * This will cause sctp_service_queues() to get called on the top entry in 59 * the list. 60 */ 61 static uint32_t 62 sctp_add_chk_to_control(struct sctp_queued_to_read *control, 63 struct sctp_stream_in *strm, 64 struct sctp_tcb *stcb, 65 struct sctp_association *asoc, 66 struct sctp_tmit_chunk *chk, int hold_rlock); 67 68 void 69 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc) 70 { 71 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc); 72 } 73 74 /* Calculate what the rwnd would be */ 75 uint32_t 76 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc) 77 { 78 uint32_t calc = 0; 79 80 /* 81 * This is really set wrong with respect to a 1-2-m socket. Since 82 * the sb_cc is the count that everyone as put up. When we re-write 83 * sctp_soreceive then we will fix this so that ONLY this 84 * associations data is taken into account. 85 */ 86 if (stcb->sctp_socket == NULL) { 87 return (calc); 88 } 89 90 KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0, 91 ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue)); 92 KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0, 93 ("size_on_all_streams is %u", asoc->size_on_all_streams)); 94 if (stcb->asoc.sb_cc == 0 && 95 asoc->cnt_on_reasm_queue == 0 && 96 asoc->cnt_on_all_streams == 0) { 97 /* Full rwnd granted */ 98 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND); 99 return (calc); 100 } 101 /* get actual space */ 102 calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv); 103 /* 104 * take out what has NOT been put on socket queue and we yet hold 105 * for putting up. 106 */ 107 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue + 108 asoc->cnt_on_reasm_queue * MSIZE)); 109 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams + 110 asoc->cnt_on_all_streams * MSIZE)); 111 if (calc == 0) { 112 /* out of space */ 113 return (calc); 114 } 115 116 /* what is the overhead of all these rwnd's */ 117 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len); 118 /* 119 * If the window gets too small due to ctrl-stuff, reduce it to 1, 120 * even it is 0. SWS engaged 121 */ 122 if (calc < stcb->asoc.my_rwnd_control_len) { 123 calc = 1; 124 } 125 return (calc); 126 } 127 128 /* 129 * Build out our readq entry based on the incoming packet. 130 */ 131 struct sctp_queued_to_read * 132 sctp_build_readq_entry(struct sctp_tcb *stcb, 133 struct sctp_nets *net, 134 uint32_t tsn, uint32_t ppid, 135 uint32_t context, uint16_t sid, 136 uint32_t mid, uint8_t flags, 137 struct mbuf *dm) 138 { 139 struct sctp_queued_to_read *read_queue_e = NULL; 140 141 sctp_alloc_a_readq(stcb, read_queue_e); 142 if (read_queue_e == NULL) { 143 goto failed_build; 144 } 145 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read)); 146 read_queue_e->sinfo_stream = sid; 147 read_queue_e->sinfo_flags = (flags << 8); 148 read_queue_e->sinfo_ppid = ppid; 149 read_queue_e->sinfo_context = context; 150 read_queue_e->sinfo_tsn = tsn; 151 read_queue_e->sinfo_cumtsn = tsn; 152 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb); 153 read_queue_e->mid = mid; 154 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff; 155 TAILQ_INIT(&read_queue_e->reasm); 156 read_queue_e->whoFrom = net; 157 atomic_add_int(&net->ref_count, 1); 158 read_queue_e->data = dm; 159 read_queue_e->stcb = stcb; 160 read_queue_e->port_from = stcb->rport; 161 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 162 read_queue_e->do_not_ref_stcb = 1; 163 } 164 failed_build: 165 return (read_queue_e); 166 } 167 168 struct mbuf * 169 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo) 170 { 171 struct sctp_extrcvinfo *seinfo; 172 struct sctp_sndrcvinfo *outinfo; 173 struct sctp_rcvinfo *rcvinfo; 174 struct sctp_nxtinfo *nxtinfo; 175 struct cmsghdr *cmh; 176 struct mbuf *ret; 177 int len; 178 int use_extended; 179 int provide_nxt; 180 181 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 182 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 183 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 184 /* user does not want any ancillary data */ 185 return (NULL); 186 } 187 188 len = 0; 189 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) { 190 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo)); 191 } 192 seinfo = (struct sctp_extrcvinfo *)sinfo; 193 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) && 194 (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) { 195 provide_nxt = 1; 196 len += CMSG_SPACE(sizeof(struct sctp_nxtinfo)); 197 } else { 198 provide_nxt = 0; 199 } 200 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) { 201 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) { 202 use_extended = 1; 203 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo)); 204 } else { 205 use_extended = 0; 206 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo)); 207 } 208 } else { 209 use_extended = 0; 210 } 211 212 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 213 if (ret == NULL) { 214 /* No space */ 215 return (ret); 216 } 217 SCTP_BUF_LEN(ret) = 0; 218 219 /* We need a CMSG header followed by the struct */ 220 cmh = mtod(ret, struct cmsghdr *); 221 /* 222 * Make sure that there is no un-initialized padding between the 223 * cmsg header and cmsg data and after the cmsg data. 224 */ 225 memset(cmh, 0, len); 226 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) { 227 cmh->cmsg_level = IPPROTO_SCTP; 228 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo)); 229 cmh->cmsg_type = SCTP_RCVINFO; 230 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh); 231 rcvinfo->rcv_sid = sinfo->sinfo_stream; 232 rcvinfo->rcv_ssn = sinfo->sinfo_ssn; 233 rcvinfo->rcv_flags = sinfo->sinfo_flags; 234 rcvinfo->rcv_ppid = sinfo->sinfo_ppid; 235 rcvinfo->rcv_tsn = sinfo->sinfo_tsn; 236 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn; 237 rcvinfo->rcv_context = sinfo->sinfo_context; 238 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id; 239 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo))); 240 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo)); 241 } 242 if (provide_nxt) { 243 cmh->cmsg_level = IPPROTO_SCTP; 244 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo)); 245 cmh->cmsg_type = SCTP_NXTINFO; 246 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh); 247 nxtinfo->nxt_sid = seinfo->serinfo_next_stream; 248 nxtinfo->nxt_flags = 0; 249 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) { 250 nxtinfo->nxt_flags |= SCTP_UNORDERED; 251 } 252 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) { 253 nxtinfo->nxt_flags |= SCTP_NOTIFICATION; 254 } 255 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) { 256 nxtinfo->nxt_flags |= SCTP_COMPLETE; 257 } 258 nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid; 259 nxtinfo->nxt_length = seinfo->serinfo_next_length; 260 nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid; 261 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo))); 262 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo)); 263 } 264 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) { 265 cmh->cmsg_level = IPPROTO_SCTP; 266 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh); 267 if (use_extended) { 268 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo)); 269 cmh->cmsg_type = SCTP_EXTRCV; 270 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo)); 271 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo)); 272 } else { 273 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo)); 274 cmh->cmsg_type = SCTP_SNDRCV; 275 *outinfo = *sinfo; 276 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo)); 277 } 278 } 279 return (ret); 280 } 281 282 static void 283 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn) 284 { 285 uint32_t gap, i; 286 int in_r, in_nr; 287 288 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { 289 return; 290 } 291 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) { 292 /* 293 * This tsn is behind the cum ack and thus we don't need to 294 * worry about it being moved from one to the other. 295 */ 296 return; 297 } 298 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn); 299 in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap); 300 in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap); 301 KASSERT(in_r || in_nr, ("%s: Things are really messed up now", __func__)); 302 if (!in_nr) { 303 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 304 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 305 asoc->highest_tsn_inside_nr_map = tsn; 306 } 307 } 308 if (in_r) { 309 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap); 310 if (tsn == asoc->highest_tsn_inside_map) { 311 /* We must back down to see what the new highest is. */ 312 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) { 313 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn); 314 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) { 315 asoc->highest_tsn_inside_map = i; 316 break; 317 } 318 } 319 if (!SCTP_TSN_GE(i, asoc->mapping_array_base_tsn)) { 320 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1; 321 } 322 } 323 } 324 } 325 326 static int 327 sctp_place_control_in_stream(struct sctp_stream_in *strm, 328 struct sctp_association *asoc, 329 struct sctp_queued_to_read *control) 330 { 331 struct sctp_queued_to_read *at; 332 struct sctp_readhead *q; 333 uint8_t flags, unordered; 334 335 flags = (control->sinfo_flags >> 8); 336 unordered = flags & SCTP_DATA_UNORDERED; 337 if (unordered) { 338 q = &strm->uno_inqueue; 339 if (asoc->idata_supported == 0) { 340 if (!TAILQ_EMPTY(q)) { 341 /* 342 * Only one stream can be here in old style 343 * -- abort 344 */ 345 return (-1); 346 } 347 TAILQ_INSERT_TAIL(q, control, next_instrm); 348 control->on_strm_q = SCTP_ON_UNORDERED; 349 return (0); 350 } 351 } else { 352 q = &strm->inqueue; 353 } 354 if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 355 control->end_added = 1; 356 control->first_frag_seen = 1; 357 control->last_frag_seen = 1; 358 } 359 if (TAILQ_EMPTY(q)) { 360 /* Empty queue */ 361 TAILQ_INSERT_HEAD(q, control, next_instrm); 362 if (unordered) { 363 control->on_strm_q = SCTP_ON_UNORDERED; 364 } else { 365 control->on_strm_q = SCTP_ON_ORDERED; 366 } 367 return (0); 368 } else { 369 TAILQ_FOREACH(at, q, next_instrm) { 370 if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) { 371 /* 372 * one in queue is bigger than the new one, 373 * insert before this one 374 */ 375 TAILQ_INSERT_BEFORE(at, control, next_instrm); 376 if (unordered) { 377 control->on_strm_q = SCTP_ON_UNORDERED; 378 } else { 379 control->on_strm_q = SCTP_ON_ORDERED; 380 } 381 break; 382 } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) { 383 /* 384 * Gak, He sent me a duplicate msg id 385 * number?? return -1 to abort. 386 */ 387 return (-1); 388 } else { 389 if (TAILQ_NEXT(at, next_instrm) == NULL) { 390 /* 391 * We are at the end, insert it 392 * after this one 393 */ 394 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 395 sctp_log_strm_del(control, at, 396 SCTP_STR_LOG_FROM_INSERT_TL); 397 } 398 TAILQ_INSERT_AFTER(q, at, control, next_instrm); 399 if (unordered) { 400 control->on_strm_q = SCTP_ON_UNORDERED; 401 } else { 402 control->on_strm_q = SCTP_ON_ORDERED; 403 } 404 break; 405 } 406 } 407 } 408 } 409 return (0); 410 } 411 412 static void 413 sctp_abort_in_reasm(struct sctp_tcb *stcb, 414 struct sctp_queued_to_read *control, 415 struct sctp_tmit_chunk *chk, 416 int *abort_flag, int opspot) 417 { 418 char msg[SCTP_DIAG_INFO_LEN]; 419 struct mbuf *oper; 420 421 if (stcb->asoc.idata_supported) { 422 SCTP_SNPRINTF(msg, sizeof(msg), 423 "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x", 424 opspot, 425 control->fsn_included, 426 chk->rec.data.tsn, 427 chk->rec.data.sid, 428 chk->rec.data.fsn, chk->rec.data.mid); 429 } else { 430 SCTP_SNPRINTF(msg, sizeof(msg), 431 "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x", 432 opspot, 433 control->fsn_included, 434 chk->rec.data.tsn, 435 chk->rec.data.sid, 436 chk->rec.data.fsn, 437 (uint16_t)chk->rec.data.mid); 438 } 439 oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 440 sctp_m_freem(chk->data); 441 chk->data = NULL; 442 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 443 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1; 444 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, false, SCTP_SO_NOT_LOCKED); 445 *abort_flag = 1; 446 } 447 448 static void 449 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control) 450 { 451 /* 452 * The control could not be placed and must be cleaned. 453 */ 454 struct sctp_tmit_chunk *chk, *nchk; 455 456 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) { 457 TAILQ_REMOVE(&control->reasm, chk, sctp_next); 458 if (chk->data) 459 sctp_m_freem(chk->data); 460 chk->data = NULL; 461 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 462 } 463 sctp_free_remote_addr(control->whoFrom); 464 if (control->data) { 465 sctp_m_freem(control->data); 466 control->data = NULL; 467 } 468 sctp_free_a_readq(stcb, control); 469 } 470 471 /* 472 * Queue the chunk either right into the socket buffer if it is the next one 473 * to go OR put it in the correct place in the delivery queue. If we do 474 * append to the so_buf, keep doing so until we are out of order as 475 * long as the control's entered are non-fragmented. 476 */ 477 static void 478 sctp_queue_data_to_stream(struct sctp_tcb *stcb, 479 struct sctp_association *asoc, 480 struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm) 481 { 482 /* 483 * FIX-ME maybe? What happens when the ssn wraps? If we are getting 484 * all the data in one stream this could happen quite rapidly. One 485 * could use the TSN to keep track of things, but this scheme breaks 486 * down in the other type of stream usage that could occur. Send a 487 * single msg to stream 0, send 4Billion messages to stream 1, now 488 * send a message to stream 0. You have a situation where the TSN 489 * has wrapped but not in the stream. Is this worth worrying about 490 * or should we just change our queue sort at the bottom to be by 491 * TSN. 492 * 493 * Could it also be legal for a peer to send ssn 1 with TSN 2 and 494 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN 495 * assignment this could happen... and I don't see how this would be 496 * a violation. So for now I am undecided an will leave the sort by 497 * SSN alone. Maybe a hybrid approach is the answer 498 * 499 */ 500 struct sctp_queued_to_read *at; 501 int queue_needed; 502 uint32_t nxt_todel; 503 struct mbuf *op_err; 504 struct sctp_stream_in *strm; 505 char msg[SCTP_DIAG_INFO_LEN]; 506 507 strm = &asoc->strmin[control->sinfo_stream]; 508 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 509 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD); 510 } 511 if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) { 512 /* The incoming sseq is behind where we last delivered? */ 513 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n", 514 strm->last_mid_delivered, control->mid); 515 /* 516 * throw it in the stream so it gets cleaned up in 517 * association destruction 518 */ 519 TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm); 520 if (asoc->idata_supported) { 521 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x", 522 strm->last_mid_delivered, control->sinfo_tsn, 523 control->sinfo_stream, control->mid); 524 } else { 525 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", 526 (uint16_t)strm->last_mid_delivered, 527 control->sinfo_tsn, 528 control->sinfo_stream, 529 (uint16_t)control->mid); 530 } 531 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 532 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2; 533 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 534 *abort_flag = 1; 535 return; 536 } 537 queue_needed = 1; 538 asoc->size_on_all_streams += control->length; 539 sctp_ucount_incr(asoc->cnt_on_all_streams); 540 nxt_todel = strm->last_mid_delivered + 1; 541 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) { 542 /* can be delivered right away? */ 543 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 544 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL); 545 } 546 /* EY it wont be queued if it could be delivered directly */ 547 queue_needed = 0; 548 if (asoc->size_on_all_streams >= control->length) { 549 asoc->size_on_all_streams -= control->length; 550 } else { 551 #ifdef INVARIANTS 552 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 553 #else 554 asoc->size_on_all_streams = 0; 555 #endif 556 } 557 sctp_ucount_decr(asoc->cnt_on_all_streams); 558 strm->last_mid_delivered++; 559 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 560 sctp_add_to_readq(stcb->sctp_ep, stcb, 561 control, 562 &stcb->sctp_socket->so_rcv, 1, 563 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED); 564 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) { 565 /* all delivered */ 566 nxt_todel = strm->last_mid_delivered + 1; 567 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) && 568 (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) { 569 if (control->on_strm_q == SCTP_ON_ORDERED) { 570 TAILQ_REMOVE(&strm->inqueue, control, next_instrm); 571 if (asoc->size_on_all_streams >= control->length) { 572 asoc->size_on_all_streams -= control->length; 573 } else { 574 #ifdef INVARIANTS 575 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 576 #else 577 asoc->size_on_all_streams = 0; 578 #endif 579 } 580 sctp_ucount_decr(asoc->cnt_on_all_streams); 581 #ifdef INVARIANTS 582 } else { 583 panic("Huh control: %p is on_strm_q: %d", 584 control, control->on_strm_q); 585 #endif 586 } 587 control->on_strm_q = 0; 588 strm->last_mid_delivered++; 589 /* 590 * We ignore the return of deliver_data here 591 * since we always can hold the chunk on the 592 * d-queue. And we have a finite number that 593 * can be delivered from the strq. 594 */ 595 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 596 sctp_log_strm_del(control, NULL, 597 SCTP_STR_LOG_FROM_IMMED_DEL); 598 } 599 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 600 sctp_add_to_readq(stcb->sctp_ep, stcb, 601 control, 602 &stcb->sctp_socket->so_rcv, 1, 603 SCTP_READ_LOCK_NOT_HELD, 604 SCTP_SO_LOCKED); 605 continue; 606 } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) { 607 *need_reasm = 1; 608 } 609 break; 610 } 611 } 612 if (queue_needed) { 613 /* 614 * Ok, we did not deliver this guy, find the correct place 615 * to put it on the queue. 616 */ 617 if (sctp_place_control_in_stream(strm, asoc, control)) { 618 SCTP_SNPRINTF(msg, sizeof(msg), 619 "Queue to str MID: %u duplicate", control->mid); 620 sctp_clean_up_control(stcb, control); 621 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 622 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3; 623 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 624 *abort_flag = 1; 625 } 626 } 627 } 628 629 static void 630 sctp_setup_tail_pointer(struct sctp_queued_to_read *control) 631 { 632 struct mbuf *m, *prev = NULL; 633 struct sctp_tcb *stcb; 634 635 stcb = control->stcb; 636 control->held_length = 0; 637 control->length = 0; 638 m = control->data; 639 while (m) { 640 if (SCTP_BUF_LEN(m) == 0) { 641 /* Skip mbufs with NO length */ 642 if (prev == NULL) { 643 /* First one */ 644 control->data = sctp_m_free(m); 645 m = control->data; 646 } else { 647 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 648 m = SCTP_BUF_NEXT(prev); 649 } 650 if (m == NULL) { 651 control->tail_mbuf = prev; 652 } 653 continue; 654 } 655 prev = m; 656 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 657 if (control->on_read_q) { 658 /* 659 * On read queue so we must increment the SB stuff, 660 * we assume caller has done any locks of SB. 661 */ 662 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m); 663 } 664 m = SCTP_BUF_NEXT(m); 665 } 666 if (prev) { 667 control->tail_mbuf = prev; 668 } 669 } 670 671 static void 672 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added) 673 { 674 struct mbuf *prev = NULL; 675 struct sctp_tcb *stcb; 676 677 stcb = control->stcb; 678 if (stcb == NULL) { 679 #ifdef INVARIANTS 680 panic("Control broken"); 681 #else 682 return; 683 #endif 684 } 685 if (control->tail_mbuf == NULL) { 686 /* TSNH */ 687 sctp_m_freem(control->data); 688 control->data = m; 689 sctp_setup_tail_pointer(control); 690 return; 691 } 692 control->tail_mbuf->m_next = m; 693 while (m) { 694 if (SCTP_BUF_LEN(m) == 0) { 695 /* Skip mbufs with NO length */ 696 if (prev == NULL) { 697 /* First one */ 698 control->tail_mbuf->m_next = sctp_m_free(m); 699 m = control->tail_mbuf->m_next; 700 } else { 701 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 702 m = SCTP_BUF_NEXT(prev); 703 } 704 if (m == NULL) { 705 control->tail_mbuf = prev; 706 } 707 continue; 708 } 709 prev = m; 710 if (control->on_read_q) { 711 /* 712 * On read queue so we must increment the SB stuff, 713 * we assume caller has done any locks of SB. 714 */ 715 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m); 716 } 717 *added += SCTP_BUF_LEN(m); 718 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 719 m = SCTP_BUF_NEXT(m); 720 } 721 if (prev) { 722 control->tail_mbuf = prev; 723 } 724 } 725 726 static void 727 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control) 728 { 729 memset(nc, 0, sizeof(struct sctp_queued_to_read)); 730 nc->sinfo_stream = control->sinfo_stream; 731 nc->mid = control->mid; 732 TAILQ_INIT(&nc->reasm); 733 nc->top_fsn = control->top_fsn; 734 nc->mid = control->mid; 735 nc->sinfo_flags = control->sinfo_flags; 736 nc->sinfo_ppid = control->sinfo_ppid; 737 nc->sinfo_context = control->sinfo_context; 738 nc->fsn_included = 0xffffffff; 739 nc->sinfo_tsn = control->sinfo_tsn; 740 nc->sinfo_cumtsn = control->sinfo_cumtsn; 741 nc->sinfo_assoc_id = control->sinfo_assoc_id; 742 nc->whoFrom = control->whoFrom; 743 atomic_add_int(&nc->whoFrom->ref_count, 1); 744 nc->stcb = control->stcb; 745 nc->port_from = control->port_from; 746 nc->do_not_ref_stcb = control->do_not_ref_stcb; 747 } 748 749 static void 750 sctp_reset_a_control(struct sctp_queued_to_read *control, 751 struct sctp_inpcb *inp, uint32_t tsn) 752 { 753 control->fsn_included = tsn; 754 if (control->on_read_q) { 755 /* 756 * We have to purge it from there, hopefully this will work 757 * :-) 758 */ 759 TAILQ_REMOVE(&inp->read_queue, control, next); 760 control->on_read_q = 0; 761 } 762 } 763 764 static int 765 sctp_handle_old_unordered_data(struct sctp_tcb *stcb, 766 struct sctp_association *asoc, 767 struct sctp_stream_in *strm, 768 struct sctp_queued_to_read *control, 769 uint32_t pd_point, 770 int inp_read_lock_held) 771 { 772 /* 773 * Special handling for the old un-ordered data chunk. All the 774 * chunks/TSN's go to mid 0. So we have to do the old style watching 775 * to see if we have it all. If you return one, no other control 776 * entries on the un-ordered queue will be looked at. In theory 777 * there should be no others entries in reality, unless the guy is 778 * sending both unordered NDATA and unordered DATA... 779 */ 780 struct sctp_tmit_chunk *chk, *lchk, *tchk; 781 uint32_t fsn; 782 struct sctp_queued_to_read *nc; 783 int cnt_added; 784 785 if (control->first_frag_seen == 0) { 786 /* Nothing we can do, we have not seen the first piece yet */ 787 return (1); 788 } 789 /* Collapse any we can */ 790 cnt_added = 0; 791 restart: 792 fsn = control->fsn_included + 1; 793 /* Now what can we add? */ 794 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) { 795 if (chk->rec.data.fsn == fsn) { 796 /* Ok lets add it */ 797 sctp_alloc_a_readq(stcb, nc); 798 if (nc == NULL) { 799 break; 800 } 801 memset(nc, 0, sizeof(struct sctp_queued_to_read)); 802 TAILQ_REMOVE(&control->reasm, chk, sctp_next); 803 sctp_add_chk_to_control(control, strm, stcb, asoc, chk, inp_read_lock_held); 804 fsn++; 805 cnt_added++; 806 chk = NULL; 807 if (control->end_added) { 808 /* We are done */ 809 if (!TAILQ_EMPTY(&control->reasm)) { 810 /* 811 * Ok we have to move anything left 812 * on the control queue to a new 813 * control. 814 */ 815 sctp_build_readq_entry_from_ctl(nc, control); 816 tchk = TAILQ_FIRST(&control->reasm); 817 if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 818 TAILQ_REMOVE(&control->reasm, tchk, sctp_next); 819 if (asoc->size_on_reasm_queue >= tchk->send_size) { 820 asoc->size_on_reasm_queue -= tchk->send_size; 821 } else { 822 #ifdef INVARIANTS 823 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size); 824 #else 825 asoc->size_on_reasm_queue = 0; 826 #endif 827 } 828 sctp_ucount_decr(asoc->cnt_on_reasm_queue); 829 nc->first_frag_seen = 1; 830 nc->fsn_included = tchk->rec.data.fsn; 831 nc->data = tchk->data; 832 nc->sinfo_ppid = tchk->rec.data.ppid; 833 nc->sinfo_tsn = tchk->rec.data.tsn; 834 sctp_mark_non_revokable(asoc, tchk->rec.data.tsn); 835 tchk->data = NULL; 836 sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED); 837 sctp_setup_tail_pointer(nc); 838 tchk = TAILQ_FIRST(&control->reasm); 839 } 840 /* Spin the rest onto the queue */ 841 while (tchk) { 842 TAILQ_REMOVE(&control->reasm, tchk, sctp_next); 843 TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next); 844 tchk = TAILQ_FIRST(&control->reasm); 845 } 846 /* 847 * Now lets add it to the queue 848 * after removing control 849 */ 850 TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm); 851 nc->on_strm_q = SCTP_ON_UNORDERED; 852 if (control->on_strm_q) { 853 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); 854 control->on_strm_q = 0; 855 } 856 } 857 if (control->pdapi_started) { 858 strm->pd_api_started = 0; 859 control->pdapi_started = 0; 860 } 861 if (control->on_strm_q) { 862 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); 863 control->on_strm_q = 0; 864 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); 865 } 866 if (control->on_read_q == 0) { 867 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 868 &stcb->sctp_socket->so_rcv, control->end_added, 869 inp_read_lock_held, SCTP_SO_NOT_LOCKED); 870 } 871 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 872 if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) { 873 /* 874 * Switch to the new guy and 875 * continue 876 */ 877 control = nc; 878 goto restart; 879 } else { 880 if (nc->on_strm_q == 0) { 881 sctp_free_a_readq(stcb, nc); 882 } 883 } 884 return (1); 885 } else { 886 sctp_free_a_readq(stcb, nc); 887 } 888 } else { 889 /* Can't add more */ 890 break; 891 } 892 } 893 if (cnt_added && strm->pd_api_started) { 894 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 895 } 896 if ((control->length > pd_point) && (strm->pd_api_started == 0)) { 897 strm->pd_api_started = 1; 898 control->pdapi_started = 1; 899 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 900 &stcb->sctp_socket->so_rcv, control->end_added, 901 inp_read_lock_held, SCTP_SO_NOT_LOCKED); 902 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 903 return (0); 904 } else { 905 return (1); 906 } 907 } 908 909 static void 910 sctp_inject_old_unordered_data(struct sctp_tcb *stcb, 911 struct sctp_association *asoc, 912 struct sctp_queued_to_read *control, 913 struct sctp_tmit_chunk *chk, 914 int *abort_flag) 915 { 916 struct sctp_tmit_chunk *at; 917 int inserted; 918 919 /* 920 * Here we need to place the chunk into the control structure sorted 921 * in the correct order. 922 */ 923 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 924 /* Its the very first one. */ 925 SCTPDBG(SCTP_DEBUG_XXX, 926 "chunk is a first fsn: %u becomes fsn_included\n", 927 chk->rec.data.fsn); 928 at = TAILQ_FIRST(&control->reasm); 929 if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) { 930 /* 931 * The first chunk in the reassembly is a smaller 932 * TSN than this one, even though this has a first, 933 * it must be from a subsequent msg. 934 */ 935 goto place_chunk; 936 } 937 if (control->first_frag_seen) { 938 /* 939 * In old un-ordered we can reassembly on one 940 * control multiple messages. As long as the next 941 * FIRST is greater then the old first (TSN i.e. FSN 942 * wise) 943 */ 944 struct mbuf *tdata; 945 uint32_t tmp; 946 947 if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) { 948 /* 949 * Easy way the start of a new guy beyond 950 * the lowest 951 */ 952 goto place_chunk; 953 } 954 if ((chk->rec.data.fsn == control->fsn_included) || 955 (control->pdapi_started)) { 956 /* 957 * Ok this should not happen, if it does we 958 * started the pd-api on the higher TSN 959 * (since the equals part is a TSN failure 960 * it must be that). 961 * 962 * We are completely hosed in that case 963 * since I have no way to recover. This 964 * really will only happen if we can get 965 * more TSN's higher before the 966 * pd-api-point. 967 */ 968 sctp_abort_in_reasm(stcb, control, chk, 969 abort_flag, 970 SCTP_FROM_SCTP_INDATA + SCTP_LOC_4); 971 972 return; 973 } 974 /* 975 * Ok we have two firsts and the one we just got is 976 * smaller than the one we previously placed.. yuck! 977 * We must swap them out. 978 */ 979 /* swap the mbufs */ 980 tdata = control->data; 981 control->data = chk->data; 982 chk->data = tdata; 983 /* Save the lengths */ 984 chk->send_size = control->length; 985 /* Recompute length of control and tail pointer */ 986 sctp_setup_tail_pointer(control); 987 /* Fix the FSN included */ 988 tmp = control->fsn_included; 989 control->fsn_included = chk->rec.data.fsn; 990 chk->rec.data.fsn = tmp; 991 /* Fix the TSN included */ 992 tmp = control->sinfo_tsn; 993 control->sinfo_tsn = chk->rec.data.tsn; 994 chk->rec.data.tsn = tmp; 995 /* Fix the PPID included */ 996 tmp = control->sinfo_ppid; 997 control->sinfo_ppid = chk->rec.data.ppid; 998 chk->rec.data.ppid = tmp; 999 /* Fix tail pointer */ 1000 goto place_chunk; 1001 } 1002 control->first_frag_seen = 1; 1003 control->fsn_included = chk->rec.data.fsn; 1004 control->top_fsn = chk->rec.data.fsn; 1005 control->sinfo_tsn = chk->rec.data.tsn; 1006 control->sinfo_ppid = chk->rec.data.ppid; 1007 control->data = chk->data; 1008 sctp_mark_non_revokable(asoc, chk->rec.data.tsn); 1009 chk->data = NULL; 1010 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 1011 sctp_setup_tail_pointer(control); 1012 return; 1013 } 1014 place_chunk: 1015 inserted = 0; 1016 TAILQ_FOREACH(at, &control->reasm, sctp_next) { 1017 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) { 1018 /* 1019 * This one in queue is bigger than the new one, 1020 * insert the new one before at. 1021 */ 1022 asoc->size_on_reasm_queue += chk->send_size; 1023 sctp_ucount_incr(asoc->cnt_on_reasm_queue); 1024 inserted = 1; 1025 TAILQ_INSERT_BEFORE(at, chk, sctp_next); 1026 break; 1027 } else if (at->rec.data.fsn == chk->rec.data.fsn) { 1028 /* 1029 * They sent a duplicate fsn number. This really 1030 * should not happen since the FSN is a TSN and it 1031 * should have been dropped earlier. 1032 */ 1033 sctp_abort_in_reasm(stcb, control, chk, 1034 abort_flag, 1035 SCTP_FROM_SCTP_INDATA + SCTP_LOC_5); 1036 return; 1037 } 1038 } 1039 if (inserted == 0) { 1040 /* Its at the end */ 1041 asoc->size_on_reasm_queue += chk->send_size; 1042 sctp_ucount_incr(asoc->cnt_on_reasm_queue); 1043 control->top_fsn = chk->rec.data.fsn; 1044 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next); 1045 } 1046 } 1047 1048 static int 1049 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc, 1050 struct sctp_stream_in *strm, int inp_read_lock_held) 1051 { 1052 /* 1053 * Given a stream, strm, see if any of the SSN's on it that are 1054 * fragmented are ready to deliver. If so go ahead and place them on 1055 * the read queue. In so placing if we have hit the end, then we 1056 * need to remove them from the stream's queue. 1057 */ 1058 struct sctp_queued_to_read *control, *nctl = NULL; 1059 uint32_t next_to_del; 1060 uint32_t pd_point; 1061 int ret = 0; 1062 1063 if (stcb->sctp_socket) { 1064 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT, 1065 stcb->sctp_ep->partial_delivery_point); 1066 } else { 1067 pd_point = stcb->sctp_ep->partial_delivery_point; 1068 } 1069 control = TAILQ_FIRST(&strm->uno_inqueue); 1070 1071 if ((control != NULL) && 1072 (asoc->idata_supported == 0)) { 1073 /* Special handling needed for "old" data format */ 1074 if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) { 1075 goto done_un; 1076 } 1077 } 1078 if (strm->pd_api_started) { 1079 /* Can't add more */ 1080 return (0); 1081 } 1082 while (control) { 1083 SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n", 1084 control, control->end_added, control->mid, control->top_fsn, control->fsn_included); 1085 nctl = TAILQ_NEXT(control, next_instrm); 1086 if (control->end_added) { 1087 /* We just put the last bit on */ 1088 if (control->on_strm_q) { 1089 #ifdef INVARIANTS 1090 if (control->on_strm_q != SCTP_ON_UNORDERED) { 1091 panic("Huh control: %p on_q: %d -- not unordered?", 1092 control, control->on_strm_q); 1093 } 1094 #endif 1095 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); 1096 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); 1097 if (asoc->size_on_all_streams >= control->length) { 1098 asoc->size_on_all_streams -= control->length; 1099 } else { 1100 #ifdef INVARIANTS 1101 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 1102 #else 1103 asoc->size_on_all_streams = 0; 1104 #endif 1105 } 1106 sctp_ucount_decr(asoc->cnt_on_all_streams); 1107 control->on_strm_q = 0; 1108 } 1109 if (control->on_read_q == 0) { 1110 sctp_add_to_readq(stcb->sctp_ep, stcb, 1111 control, 1112 &stcb->sctp_socket->so_rcv, control->end_added, 1113 inp_read_lock_held, SCTP_SO_NOT_LOCKED); 1114 } 1115 } else { 1116 /* Can we do a PD-API for this un-ordered guy? */ 1117 if ((control->length >= pd_point) && (strm->pd_api_started == 0)) { 1118 strm->pd_api_started = 1; 1119 control->pdapi_started = 1; 1120 sctp_add_to_readq(stcb->sctp_ep, stcb, 1121 control, 1122 &stcb->sctp_socket->so_rcv, control->end_added, 1123 inp_read_lock_held, SCTP_SO_NOT_LOCKED); 1124 1125 break; 1126 } 1127 } 1128 control = nctl; 1129 } 1130 done_un: 1131 control = TAILQ_FIRST(&strm->inqueue); 1132 if (strm->pd_api_started) { 1133 /* Can't add more */ 1134 return (0); 1135 } 1136 if (control == NULL) { 1137 return (ret); 1138 } 1139 if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) { 1140 /* 1141 * Ok the guy at the top was being partially delivered 1142 * completed, so we remove it. Note the pd_api flag was 1143 * taken off when the chunk was merged on in 1144 * sctp_queue_data_for_reasm below. 1145 */ 1146 nctl = TAILQ_NEXT(control, next_instrm); 1147 SCTPDBG(SCTP_DEBUG_XXX, 1148 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n", 1149 control, control->end_added, control->mid, 1150 control->top_fsn, control->fsn_included, 1151 strm->last_mid_delivered); 1152 if (control->end_added) { 1153 if (control->on_strm_q) { 1154 #ifdef INVARIANTS 1155 if (control->on_strm_q != SCTP_ON_ORDERED) { 1156 panic("Huh control: %p on_q: %d -- not ordered?", 1157 control, control->on_strm_q); 1158 } 1159 #endif 1160 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); 1161 TAILQ_REMOVE(&strm->inqueue, control, next_instrm); 1162 if (asoc->size_on_all_streams >= control->length) { 1163 asoc->size_on_all_streams -= control->length; 1164 } else { 1165 #ifdef INVARIANTS 1166 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 1167 #else 1168 asoc->size_on_all_streams = 0; 1169 #endif 1170 } 1171 sctp_ucount_decr(asoc->cnt_on_all_streams); 1172 control->on_strm_q = 0; 1173 } 1174 if (strm->pd_api_started && control->pdapi_started) { 1175 control->pdapi_started = 0; 1176 strm->pd_api_started = 0; 1177 } 1178 if (control->on_read_q == 0) { 1179 sctp_add_to_readq(stcb->sctp_ep, stcb, 1180 control, 1181 &stcb->sctp_socket->so_rcv, control->end_added, 1182 inp_read_lock_held, SCTP_SO_NOT_LOCKED); 1183 } 1184 control = nctl; 1185 } 1186 } 1187 if (strm->pd_api_started) { 1188 /* 1189 * Can't add more must have gotten an un-ordered above being 1190 * partially delivered. 1191 */ 1192 return (0); 1193 } 1194 deliver_more: 1195 next_to_del = strm->last_mid_delivered + 1; 1196 if (control) { 1197 SCTPDBG(SCTP_DEBUG_XXX, 1198 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n", 1199 control, control->end_added, control->mid, control->top_fsn, control->fsn_included, 1200 next_to_del); 1201 nctl = TAILQ_NEXT(control, next_instrm); 1202 if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) && 1203 (control->first_frag_seen)) { 1204 int done; 1205 1206 /* Ok we can deliver it onto the stream. */ 1207 if (control->end_added) { 1208 /* We are done with it afterwards */ 1209 if (control->on_strm_q) { 1210 #ifdef INVARIANTS 1211 if (control->on_strm_q != SCTP_ON_ORDERED) { 1212 panic("Huh control: %p on_q: %d -- not ordered?", 1213 control, control->on_strm_q); 1214 } 1215 #endif 1216 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); 1217 TAILQ_REMOVE(&strm->inqueue, control, next_instrm); 1218 if (asoc->size_on_all_streams >= control->length) { 1219 asoc->size_on_all_streams -= control->length; 1220 } else { 1221 #ifdef INVARIANTS 1222 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 1223 #else 1224 asoc->size_on_all_streams = 0; 1225 #endif 1226 } 1227 sctp_ucount_decr(asoc->cnt_on_all_streams); 1228 control->on_strm_q = 0; 1229 } 1230 ret++; 1231 } 1232 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 1233 /* 1234 * A singleton now slipping through - mark 1235 * it non-revokable too 1236 */ 1237 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 1238 } else if (control->end_added == 0) { 1239 /* 1240 * Check if we can defer adding until its 1241 * all there 1242 */ 1243 if ((control->length < pd_point) || (strm->pd_api_started)) { 1244 /* 1245 * Don't need it or cannot add more 1246 * (one being delivered that way) 1247 */ 1248 goto out; 1249 } 1250 } 1251 done = (control->end_added) && (control->last_frag_seen); 1252 if (control->on_read_q == 0) { 1253 if (!done) { 1254 if (asoc->size_on_all_streams >= control->length) { 1255 asoc->size_on_all_streams -= control->length; 1256 } else { 1257 #ifdef INVARIANTS 1258 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 1259 #else 1260 asoc->size_on_all_streams = 0; 1261 #endif 1262 } 1263 strm->pd_api_started = 1; 1264 control->pdapi_started = 1; 1265 } 1266 sctp_add_to_readq(stcb->sctp_ep, stcb, 1267 control, 1268 &stcb->sctp_socket->so_rcv, control->end_added, 1269 inp_read_lock_held, SCTP_SO_NOT_LOCKED); 1270 } 1271 strm->last_mid_delivered = next_to_del; 1272 if (done) { 1273 control = nctl; 1274 goto deliver_more; 1275 } 1276 } 1277 } 1278 out: 1279 return (ret); 1280 } 1281 1282 uint32_t 1283 sctp_add_chk_to_control(struct sctp_queued_to_read *control, 1284 struct sctp_stream_in *strm, 1285 struct sctp_tcb *stcb, struct sctp_association *asoc, 1286 struct sctp_tmit_chunk *chk, int hold_rlock) 1287 { 1288 /* 1289 * Given a control and a chunk, merge the data from the chk onto the 1290 * control and free up the chunk resources. 1291 */ 1292 uint32_t added = 0; 1293 int i_locked = 0; 1294 1295 if (control->on_read_q && (hold_rlock == 0)) { 1296 /* 1297 * Its being pd-api'd so we must do some locks. 1298 */ 1299 SCTP_INP_READ_LOCK(stcb->sctp_ep); 1300 i_locked = 1; 1301 } 1302 if (control->data == NULL) { 1303 control->data = chk->data; 1304 sctp_setup_tail_pointer(control); 1305 } else { 1306 sctp_add_to_tail_pointer(control, chk->data, &added); 1307 } 1308 control->fsn_included = chk->rec.data.fsn; 1309 asoc->size_on_reasm_queue -= chk->send_size; 1310 sctp_ucount_decr(asoc->cnt_on_reasm_queue); 1311 sctp_mark_non_revokable(asoc, chk->rec.data.tsn); 1312 chk->data = NULL; 1313 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 1314 control->first_frag_seen = 1; 1315 control->sinfo_tsn = chk->rec.data.tsn; 1316 control->sinfo_ppid = chk->rec.data.ppid; 1317 } 1318 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 1319 /* Its complete */ 1320 if ((control->on_strm_q) && (control->on_read_q)) { 1321 if (control->pdapi_started) { 1322 control->pdapi_started = 0; 1323 strm->pd_api_started = 0; 1324 } 1325 if (control->on_strm_q == SCTP_ON_UNORDERED) { 1326 /* Unordered */ 1327 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); 1328 control->on_strm_q = 0; 1329 } else if (control->on_strm_q == SCTP_ON_ORDERED) { 1330 /* Ordered */ 1331 TAILQ_REMOVE(&strm->inqueue, control, next_instrm); 1332 /* 1333 * Don't need to decrement 1334 * size_on_all_streams, since control is on 1335 * the read queue. 1336 */ 1337 sctp_ucount_decr(asoc->cnt_on_all_streams); 1338 control->on_strm_q = 0; 1339 #ifdef INVARIANTS 1340 } else if (control->on_strm_q) { 1341 panic("Unknown state on ctrl: %p on_strm_q: %d", control, 1342 control->on_strm_q); 1343 #endif 1344 } 1345 } 1346 control->end_added = 1; 1347 control->last_frag_seen = 1; 1348 } 1349 if (i_locked) { 1350 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 1351 } 1352 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 1353 return (added); 1354 } 1355 1356 /* 1357 * Dump onto the re-assembly queue, in its proper place. After dumping on the 1358 * queue, see if anthing can be delivered. If so pull it off (or as much as 1359 * we can. If we run out of space then we must dump what we can and set the 1360 * appropriate flag to say we queued what we could. 1361 */ 1362 static void 1363 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc, 1364 struct sctp_queued_to_read *control, 1365 struct sctp_tmit_chunk *chk, 1366 int created_control, 1367 int *abort_flag, uint32_t tsn) 1368 { 1369 uint32_t next_fsn; 1370 struct sctp_tmit_chunk *at, *nat; 1371 struct sctp_stream_in *strm; 1372 int do_wakeup, unordered; 1373 uint32_t lenadded; 1374 1375 strm = &asoc->strmin[control->sinfo_stream]; 1376 /* 1377 * For old un-ordered data chunks. 1378 */ 1379 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 1380 unordered = 1; 1381 } else { 1382 unordered = 0; 1383 } 1384 /* Must be added to the stream-in queue */ 1385 if (created_control) { 1386 if ((unordered == 0) || (asoc->idata_supported)) { 1387 sctp_ucount_incr(asoc->cnt_on_all_streams); 1388 } 1389 if (sctp_place_control_in_stream(strm, asoc, control)) { 1390 /* Duplicate SSN? */ 1391 sctp_abort_in_reasm(stcb, control, chk, 1392 abort_flag, 1393 SCTP_FROM_SCTP_INDATA + SCTP_LOC_6); 1394 sctp_clean_up_control(stcb, control); 1395 return; 1396 } 1397 if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) { 1398 /* 1399 * Ok we created this control and now lets validate 1400 * that its legal i.e. there is a B bit set, if not 1401 * and we have up to the cum-ack then its invalid. 1402 */ 1403 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) { 1404 sctp_abort_in_reasm(stcb, control, chk, 1405 abort_flag, 1406 SCTP_FROM_SCTP_INDATA + SCTP_LOC_7); 1407 return; 1408 } 1409 } 1410 } 1411 if ((asoc->idata_supported == 0) && (unordered == 1)) { 1412 sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag); 1413 return; 1414 } 1415 /* 1416 * Ok we must queue the chunk into the reasembly portion: o if its 1417 * the first it goes to the control mbuf. o if its not first but the 1418 * next in sequence it goes to the control, and each succeeding one 1419 * in order also goes. o if its not in order we place it on the list 1420 * in its place. 1421 */ 1422 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 1423 /* Its the very first one. */ 1424 SCTPDBG(SCTP_DEBUG_XXX, 1425 "chunk is a first fsn: %u becomes fsn_included\n", 1426 chk->rec.data.fsn); 1427 if (control->first_frag_seen) { 1428 /* 1429 * Error on senders part, they either sent us two 1430 * data chunks with FIRST, or they sent two 1431 * un-ordered chunks that were fragmented at the 1432 * same time in the same stream. 1433 */ 1434 sctp_abort_in_reasm(stcb, control, chk, 1435 abort_flag, 1436 SCTP_FROM_SCTP_INDATA + SCTP_LOC_8); 1437 return; 1438 } 1439 control->first_frag_seen = 1; 1440 control->sinfo_ppid = chk->rec.data.ppid; 1441 control->sinfo_tsn = chk->rec.data.tsn; 1442 control->fsn_included = chk->rec.data.fsn; 1443 control->data = chk->data; 1444 sctp_mark_non_revokable(asoc, chk->rec.data.tsn); 1445 chk->data = NULL; 1446 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 1447 sctp_setup_tail_pointer(control); 1448 asoc->size_on_all_streams += control->length; 1449 } else { 1450 /* Place the chunk in our list */ 1451 int inserted = 0; 1452 1453 if (control->last_frag_seen == 0) { 1454 /* Still willing to raise highest FSN seen */ 1455 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) { 1456 SCTPDBG(SCTP_DEBUG_XXX, 1457 "We have a new top_fsn: %u\n", 1458 chk->rec.data.fsn); 1459 control->top_fsn = chk->rec.data.fsn; 1460 } 1461 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 1462 SCTPDBG(SCTP_DEBUG_XXX, 1463 "The last fsn is now in place fsn: %u\n", 1464 chk->rec.data.fsn); 1465 control->last_frag_seen = 1; 1466 if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) { 1467 SCTPDBG(SCTP_DEBUG_XXX, 1468 "New fsn: %u is not at top_fsn: %u -- abort\n", 1469 chk->rec.data.fsn, 1470 control->top_fsn); 1471 sctp_abort_in_reasm(stcb, control, chk, 1472 abort_flag, 1473 SCTP_FROM_SCTP_INDATA + SCTP_LOC_9); 1474 return; 1475 } 1476 } 1477 if (asoc->idata_supported || control->first_frag_seen) { 1478 /* 1479 * For IDATA we always check since we know 1480 * that the first fragment is 0. For old 1481 * DATA we have to receive the first before 1482 * we know the first FSN (which is the TSN). 1483 */ 1484 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) { 1485 /* 1486 * We have already delivered up to 1487 * this so its a dup 1488 */ 1489 sctp_abort_in_reasm(stcb, control, chk, 1490 abort_flag, 1491 SCTP_FROM_SCTP_INDATA + SCTP_LOC_10); 1492 return; 1493 } 1494 } 1495 } else { 1496 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 1497 /* Second last? huh? */ 1498 SCTPDBG(SCTP_DEBUG_XXX, 1499 "Duplicate last fsn: %u (top: %u) -- abort\n", 1500 chk->rec.data.fsn, control->top_fsn); 1501 sctp_abort_in_reasm(stcb, control, 1502 chk, abort_flag, 1503 SCTP_FROM_SCTP_INDATA + SCTP_LOC_11); 1504 return; 1505 } 1506 if (asoc->idata_supported || control->first_frag_seen) { 1507 /* 1508 * For IDATA we always check since we know 1509 * that the first fragment is 0. For old 1510 * DATA we have to receive the first before 1511 * we know the first FSN (which is the TSN). 1512 */ 1513 1514 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) { 1515 /* 1516 * We have already delivered up to 1517 * this so its a dup 1518 */ 1519 SCTPDBG(SCTP_DEBUG_XXX, 1520 "New fsn: %u is already seen in included_fsn: %u -- abort\n", 1521 chk->rec.data.fsn, control->fsn_included); 1522 sctp_abort_in_reasm(stcb, control, chk, 1523 abort_flag, 1524 SCTP_FROM_SCTP_INDATA + SCTP_LOC_12); 1525 return; 1526 } 1527 } 1528 /* 1529 * validate not beyond top FSN if we have seen last 1530 * one 1531 */ 1532 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) { 1533 SCTPDBG(SCTP_DEBUG_XXX, 1534 "New fsn: %u is beyond or at top_fsn: %u -- abort\n", 1535 chk->rec.data.fsn, 1536 control->top_fsn); 1537 sctp_abort_in_reasm(stcb, control, chk, 1538 abort_flag, 1539 SCTP_FROM_SCTP_INDATA + SCTP_LOC_13); 1540 return; 1541 } 1542 } 1543 /* 1544 * If we reach here, we need to place the new chunk in the 1545 * reassembly for this control. 1546 */ 1547 SCTPDBG(SCTP_DEBUG_XXX, 1548 "chunk is a not first fsn: %u needs to be inserted\n", 1549 chk->rec.data.fsn); 1550 TAILQ_FOREACH(at, &control->reasm, sctp_next) { 1551 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) { 1552 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 1553 /* Last not at the end? huh? */ 1554 SCTPDBG(SCTP_DEBUG_XXX, 1555 "Last fragment not last in list: -- abort\n"); 1556 sctp_abort_in_reasm(stcb, control, 1557 chk, abort_flag, 1558 SCTP_FROM_SCTP_INDATA + SCTP_LOC_14); 1559 return; 1560 } 1561 /* 1562 * This one in queue is bigger than the new 1563 * one, insert the new one before at. 1564 */ 1565 SCTPDBG(SCTP_DEBUG_XXX, 1566 "Insert it before fsn: %u\n", 1567 at->rec.data.fsn); 1568 asoc->size_on_reasm_queue += chk->send_size; 1569 sctp_ucount_incr(asoc->cnt_on_reasm_queue); 1570 TAILQ_INSERT_BEFORE(at, chk, sctp_next); 1571 inserted = 1; 1572 break; 1573 } else if (at->rec.data.fsn == chk->rec.data.fsn) { 1574 /* 1575 * Gak, He sent me a duplicate str seq 1576 * number 1577 */ 1578 /* 1579 * foo bar, I guess I will just free this 1580 * new guy, should we abort too? FIX ME 1581 * MAYBE? Or it COULD be that the SSN's have 1582 * wrapped. Maybe I should compare to TSN 1583 * somehow... sigh for now just blow away 1584 * the chunk! 1585 */ 1586 SCTPDBG(SCTP_DEBUG_XXX, 1587 "Duplicate to fsn: %u -- abort\n", 1588 at->rec.data.fsn); 1589 sctp_abort_in_reasm(stcb, control, 1590 chk, abort_flag, 1591 SCTP_FROM_SCTP_INDATA + SCTP_LOC_15); 1592 return; 1593 } 1594 } 1595 if (inserted == 0) { 1596 /* Goes on the end */ 1597 SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n", 1598 chk->rec.data.fsn); 1599 asoc->size_on_reasm_queue += chk->send_size; 1600 sctp_ucount_incr(asoc->cnt_on_reasm_queue); 1601 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next); 1602 } 1603 } 1604 /* 1605 * Ok lets see if we can suck any up into the control structure that 1606 * are in seq if it makes sense. 1607 */ 1608 do_wakeup = 0; 1609 /* 1610 * If the first fragment has not been seen there is no sense in 1611 * looking. 1612 */ 1613 if (control->first_frag_seen) { 1614 next_fsn = control->fsn_included + 1; 1615 TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) { 1616 if (at->rec.data.fsn == next_fsn) { 1617 /* We can add this one now to the control */ 1618 SCTPDBG(SCTP_DEBUG_XXX, 1619 "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n", 1620 control, at, 1621 at->rec.data.fsn, 1622 next_fsn, control->fsn_included); 1623 TAILQ_REMOVE(&control->reasm, at, sctp_next); 1624 lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD); 1625 if (control->on_read_q) { 1626 do_wakeup = 1; 1627 } else { 1628 /* 1629 * We only add to the 1630 * size-on-all-streams if its not on 1631 * the read q. The read q flag will 1632 * cause a sballoc so its accounted 1633 * for there. 1634 */ 1635 asoc->size_on_all_streams += lenadded; 1636 } 1637 next_fsn++; 1638 if (control->end_added && control->pdapi_started) { 1639 if (strm->pd_api_started) { 1640 strm->pd_api_started = 0; 1641 control->pdapi_started = 0; 1642 } 1643 if (control->on_read_q == 0) { 1644 sctp_add_to_readq(stcb->sctp_ep, stcb, 1645 control, 1646 &stcb->sctp_socket->so_rcv, control->end_added, 1647 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 1648 } 1649 break; 1650 } 1651 } else { 1652 break; 1653 } 1654 } 1655 } 1656 if (do_wakeup) { 1657 /* Need to wakeup the reader */ 1658 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 1659 } 1660 } 1661 1662 static struct sctp_queued_to_read * 1663 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported) 1664 { 1665 struct sctp_queued_to_read *control; 1666 1667 if (ordered) { 1668 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) { 1669 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) { 1670 break; 1671 } 1672 } 1673 } else { 1674 if (idata_supported) { 1675 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) { 1676 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) { 1677 break; 1678 } 1679 } 1680 } else { 1681 control = TAILQ_FIRST(&strm->uno_inqueue); 1682 } 1683 } 1684 return (control); 1685 } 1686 1687 static int 1688 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc, 1689 struct mbuf **m, int offset, int chk_length, 1690 struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag, 1691 int *break_flag, int last_chunk, uint8_t chk_type) 1692 { 1693 struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */ 1694 struct sctp_stream_in *strm; 1695 uint32_t tsn, fsn, gap, mid; 1696 struct mbuf *dmbuf; 1697 int the_len; 1698 int need_reasm_check = 0; 1699 uint16_t sid; 1700 struct mbuf *op_err; 1701 char msg[SCTP_DIAG_INFO_LEN]; 1702 struct sctp_queued_to_read *control, *ncontrol; 1703 uint32_t ppid; 1704 uint8_t chk_flags; 1705 struct sctp_stream_reset_list *liste; 1706 int ordered; 1707 size_t clen; 1708 int created_control = 0; 1709 1710 if (chk_type == SCTP_IDATA) { 1711 struct sctp_idata_chunk *chunk, chunk_buf; 1712 1713 chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset, 1714 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf); 1715 chk_flags = chunk->ch.chunk_flags; 1716 clen = sizeof(struct sctp_idata_chunk); 1717 tsn = ntohl(chunk->dp.tsn); 1718 sid = ntohs(chunk->dp.sid); 1719 mid = ntohl(chunk->dp.mid); 1720 if (chk_flags & SCTP_DATA_FIRST_FRAG) { 1721 fsn = 0; 1722 ppid = chunk->dp.ppid_fsn.ppid; 1723 } else { 1724 fsn = ntohl(chunk->dp.ppid_fsn.fsn); 1725 ppid = 0xffffffff; /* Use as an invalid value. */ 1726 } 1727 } else { 1728 struct sctp_data_chunk *chunk, chunk_buf; 1729 1730 chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset, 1731 sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf); 1732 chk_flags = chunk->ch.chunk_flags; 1733 clen = sizeof(struct sctp_data_chunk); 1734 tsn = ntohl(chunk->dp.tsn); 1735 sid = ntohs(chunk->dp.sid); 1736 mid = (uint32_t)(ntohs(chunk->dp.ssn)); 1737 fsn = tsn; 1738 ppid = chunk->dp.ppid; 1739 } 1740 if ((size_t)chk_length == clen) { 1741 /* 1742 * Need to send an abort since we had a empty data chunk. 1743 */ 1744 op_err = sctp_generate_no_user_data_cause(tsn); 1745 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16; 1746 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 1747 *abort_flag = 1; 1748 return (0); 1749 } 1750 if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) { 1751 asoc->send_sack = 1; 1752 } 1753 ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0); 1754 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 1755 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS); 1756 } 1757 if (stcb == NULL) { 1758 return (0); 1759 } 1760 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn); 1761 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) { 1762 /* It is a duplicate */ 1763 SCTP_STAT_INCR(sctps_recvdupdata); 1764 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { 1765 /* Record a dup for the next outbound sack */ 1766 asoc->dup_tsns[asoc->numduptsns] = tsn; 1767 asoc->numduptsns++; 1768 } 1769 asoc->send_sack = 1; 1770 return (0); 1771 } 1772 /* Calculate the number of TSN's between the base and this TSN */ 1773 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn); 1774 if (gap >= (SCTP_MAPPING_ARRAY << 3)) { 1775 /* Can't hold the bit in the mapping at max array, toss it */ 1776 return (0); 1777 } 1778 if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) { 1779 SCTP_TCB_LOCK_ASSERT(stcb); 1780 if (sctp_expand_mapping_array(asoc, gap)) { 1781 /* Can't expand, drop it */ 1782 return (0); 1783 } 1784 } 1785 if (SCTP_TSN_GT(tsn, *high_tsn)) { 1786 *high_tsn = tsn; 1787 } 1788 /* See if we have received this one already */ 1789 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) || 1790 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) { 1791 SCTP_STAT_INCR(sctps_recvdupdata); 1792 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { 1793 /* Record a dup for the next outbound sack */ 1794 asoc->dup_tsns[asoc->numduptsns] = tsn; 1795 asoc->numduptsns++; 1796 } 1797 asoc->send_sack = 1; 1798 return (0); 1799 } 1800 /* 1801 * Check to see about the GONE flag, duplicates would cause a sack 1802 * to be sent up above 1803 */ 1804 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 1805 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 1806 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) { 1807 /* 1808 * wait a minute, this guy is gone, there is no longer a 1809 * receiver. Send peer an ABORT! 1810 */ 1811 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 1812 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 1813 *abort_flag = 1; 1814 return (0); 1815 } 1816 /* 1817 * Now before going further we see if there is room. If NOT then we 1818 * MAY let one through only IF this TSN is the one we are waiting 1819 * for on a partial delivery API. 1820 */ 1821 1822 /* Is the stream valid? */ 1823 if (sid >= asoc->streamincnt) { 1824 struct sctp_error_invalid_stream *cause; 1825 1826 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream), 1827 0, M_NOWAIT, 1, MT_DATA); 1828 if (op_err != NULL) { 1829 /* add some space up front so prepend will work well */ 1830 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 1831 cause = mtod(op_err, struct sctp_error_invalid_stream *); 1832 /* 1833 * Error causes are just param's and this one has 1834 * two back to back phdr, one with the error type 1835 * and size, the other with the streamid and a rsvd 1836 */ 1837 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream); 1838 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM); 1839 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream)); 1840 cause->stream_id = htons(sid); 1841 cause->reserved = htons(0); 1842 sctp_queue_op_err(stcb, op_err); 1843 } 1844 SCTP_STAT_INCR(sctps_badsid); 1845 SCTP_TCB_LOCK_ASSERT(stcb); 1846 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 1847 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1848 asoc->highest_tsn_inside_nr_map = tsn; 1849 } 1850 if (tsn == (asoc->cumulative_tsn + 1)) { 1851 /* Update cum-ack */ 1852 asoc->cumulative_tsn = tsn; 1853 } 1854 return (0); 1855 } 1856 /* 1857 * If its a fragmented message, lets see if we can find the control 1858 * on the reassembly queues. 1859 */ 1860 if ((chk_type == SCTP_IDATA) && 1861 ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) && 1862 (fsn == 0)) { 1863 /* 1864 * The first *must* be fsn 0, and other (middle/end) pieces 1865 * can *not* be fsn 0. XXX: This can happen in case of a 1866 * wrap around. Ignore is for now. 1867 */ 1868 SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags); 1869 goto err_out; 1870 } 1871 control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported); 1872 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n", 1873 chk_flags, control); 1874 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 1875 /* See if we can find the re-assembly entity */ 1876 if (control != NULL) { 1877 /* We found something, does it belong? */ 1878 if (ordered && (mid != control->mid)) { 1879 SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid); 1880 err_out: 1881 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 1882 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17; 1883 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 1884 *abort_flag = 1; 1885 return (0); 1886 } 1887 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) { 1888 /* 1889 * We can't have a switched order with an 1890 * unordered chunk 1891 */ 1892 SCTP_SNPRINTF(msg, sizeof(msg), 1893 "All fragments of a user message must be ordered or unordered (TSN=%8.8x)", 1894 tsn); 1895 goto err_out; 1896 } 1897 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) { 1898 /* 1899 * We can't have a switched unordered with a 1900 * ordered chunk 1901 */ 1902 SCTP_SNPRINTF(msg, sizeof(msg), 1903 "All fragments of a user message must be ordered or unordered (TSN=%8.8x)", 1904 tsn); 1905 goto err_out; 1906 } 1907 } 1908 } else { 1909 /* 1910 * Its a complete segment. Lets validate we don't have a 1911 * re-assembly going on with the same Stream/Seq (for 1912 * ordered) or in the same Stream for unordered. 1913 */ 1914 if (control != NULL) { 1915 if (ordered || asoc->idata_supported) { 1916 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n", 1917 chk_flags, mid); 1918 SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid); 1919 goto err_out; 1920 } else { 1921 if ((tsn == control->fsn_included + 1) && 1922 (control->end_added == 0)) { 1923 SCTP_SNPRINTF(msg, sizeof(msg), 1924 "Illegal message sequence, missing end for MID: %8.8x", 1925 control->fsn_included); 1926 goto err_out; 1927 } else { 1928 control = NULL; 1929 } 1930 } 1931 } 1932 } 1933 /* now do the tests */ 1934 if (((asoc->cnt_on_all_streams + 1935 asoc->cnt_on_reasm_queue + 1936 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) || 1937 (((int)asoc->my_rwnd) <= 0)) { 1938 /* 1939 * When we have NO room in the rwnd we check to make sure 1940 * the reader is doing its job... 1941 */ 1942 if (SCTP_SBAVAIL(&stcb->sctp_socket->so_rcv) > 0) { 1943 /* some to read, wake-up */ 1944 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 1945 } 1946 /* now is it in the mapping array of what we have accepted? */ 1947 if (chk_type == SCTP_DATA) { 1948 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) && 1949 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1950 /* Nope not in the valid range dump it */ 1951 dump_packet: 1952 sctp_set_rwnd(stcb, asoc); 1953 if ((asoc->cnt_on_all_streams + 1954 asoc->cnt_on_reasm_queue + 1955 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) { 1956 SCTP_STAT_INCR(sctps_datadropchklmt); 1957 } else { 1958 SCTP_STAT_INCR(sctps_datadroprwnd); 1959 } 1960 *break_flag = 1; 1961 return (0); 1962 } 1963 } else { 1964 if (control == NULL) { 1965 goto dump_packet; 1966 } 1967 if (SCTP_TSN_GT(fsn, control->top_fsn)) { 1968 goto dump_packet; 1969 } 1970 } 1971 } 1972 #ifdef SCTP_ASOCLOG_OF_TSNS 1973 SCTP_TCB_LOCK_ASSERT(stcb); 1974 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) { 1975 asoc->tsn_in_at = 0; 1976 asoc->tsn_in_wrapped = 1; 1977 } 1978 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn; 1979 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid; 1980 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid; 1981 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length; 1982 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags; 1983 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb; 1984 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at; 1985 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1; 1986 asoc->tsn_in_at++; 1987 #endif 1988 /* 1989 * Before we continue lets validate that we are not being fooled by 1990 * an evil attacker. We can only have Nk chunks based on our TSN 1991 * spread allowed by the mapping array N * 8 bits, so there is no 1992 * way our stream sequence numbers could have wrapped. We of course 1993 * only validate the FIRST fragment so the bit must be set. 1994 */ 1995 if ((chk_flags & SCTP_DATA_FIRST_FRAG) && 1996 (TAILQ_EMPTY(&asoc->resetHead)) && 1997 (chk_flags & SCTP_DATA_UNORDERED) == 0 && 1998 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) { 1999 /* The incoming sseq is behind where we last delivered? */ 2000 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n", 2001 mid, asoc->strmin[sid].last_mid_delivered); 2002 2003 if (asoc->idata_supported) { 2004 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x", 2005 asoc->strmin[sid].last_mid_delivered, 2006 tsn, 2007 sid, 2008 mid); 2009 } else { 2010 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", 2011 (uint16_t)asoc->strmin[sid].last_mid_delivered, 2012 tsn, 2013 sid, 2014 (uint16_t)mid); 2015 } 2016 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2017 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18; 2018 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 2019 *abort_flag = 1; 2020 return (0); 2021 } 2022 if (chk_type == SCTP_IDATA) { 2023 the_len = (chk_length - sizeof(struct sctp_idata_chunk)); 2024 } else { 2025 the_len = (chk_length - sizeof(struct sctp_data_chunk)); 2026 } 2027 if (last_chunk == 0) { 2028 if (chk_type == SCTP_IDATA) { 2029 dmbuf = SCTP_M_COPYM(*m, 2030 (offset + sizeof(struct sctp_idata_chunk)), 2031 the_len, M_NOWAIT); 2032 } else { 2033 dmbuf = SCTP_M_COPYM(*m, 2034 (offset + sizeof(struct sctp_data_chunk)), 2035 the_len, M_NOWAIT); 2036 } 2037 #ifdef SCTP_MBUF_LOGGING 2038 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 2039 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY); 2040 } 2041 #endif 2042 } else { 2043 /* We can steal the last chunk */ 2044 int l_len; 2045 2046 dmbuf = *m; 2047 /* lop off the top part */ 2048 if (chk_type == SCTP_IDATA) { 2049 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk))); 2050 } else { 2051 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk))); 2052 } 2053 if (SCTP_BUF_NEXT(dmbuf) == NULL) { 2054 l_len = SCTP_BUF_LEN(dmbuf); 2055 } else { 2056 /* 2057 * need to count up the size hopefully does not hit 2058 * this to often :-0 2059 */ 2060 struct mbuf *lat; 2061 2062 l_len = 0; 2063 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) { 2064 l_len += SCTP_BUF_LEN(lat); 2065 } 2066 } 2067 if (l_len > the_len) { 2068 /* Trim the end round bytes off too */ 2069 m_adj(dmbuf, -(l_len - the_len)); 2070 } 2071 } 2072 if (dmbuf == NULL) { 2073 SCTP_STAT_INCR(sctps_nomem); 2074 return (0); 2075 } 2076 /* 2077 * Now no matter what, we need a control, get one if we don't have 2078 * one (we may have gotten it above when we found the message was 2079 * fragmented 2080 */ 2081 if (control == NULL) { 2082 sctp_alloc_a_readq(stcb, control); 2083 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 2084 ppid, 2085 sid, 2086 chk_flags, 2087 NULL, fsn, mid); 2088 if (control == NULL) { 2089 SCTP_STAT_INCR(sctps_nomem); 2090 return (0); 2091 } 2092 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 2093 struct mbuf *mm; 2094 2095 control->data = dmbuf; 2096 control->tail_mbuf = NULL; 2097 for (mm = control->data; mm; mm = mm->m_next) { 2098 control->length += SCTP_BUF_LEN(mm); 2099 if (SCTP_BUF_NEXT(mm) == NULL) { 2100 control->tail_mbuf = mm; 2101 } 2102 } 2103 control->end_added = 1; 2104 control->last_frag_seen = 1; 2105 control->first_frag_seen = 1; 2106 control->fsn_included = fsn; 2107 control->top_fsn = fsn; 2108 } 2109 created_control = 1; 2110 } 2111 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n", 2112 chk_flags, ordered, mid, control); 2113 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG && 2114 TAILQ_EMPTY(&asoc->resetHead) && 2115 ((ordered == 0) || 2116 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) && 2117 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) { 2118 /* Candidate for express delivery */ 2119 /* 2120 * Its not fragmented, No PD-API is up, Nothing in the 2121 * delivery queue, Its un-ordered OR ordered and the next to 2122 * deliver AND nothing else is stuck on the stream queue, 2123 * And there is room for it in the socket buffer. Lets just 2124 * stuff it up the buffer.... 2125 */ 2126 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 2127 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 2128 asoc->highest_tsn_inside_nr_map = tsn; 2129 } 2130 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n", 2131 control, mid); 2132 2133 sctp_add_to_readq(stcb->sctp_ep, stcb, 2134 control, &stcb->sctp_socket->so_rcv, 2135 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 2136 2137 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) { 2138 /* for ordered, bump what we delivered */ 2139 asoc->strmin[sid].last_mid_delivered++; 2140 } 2141 SCTP_STAT_INCR(sctps_recvexpress); 2142 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 2143 sctp_log_strm_del_alt(stcb, tsn, mid, sid, 2144 SCTP_STR_LOG_FROM_EXPRS_DEL); 2145 } 2146 control = NULL; 2147 goto finish_express_del; 2148 } 2149 2150 /* Now will we need a chunk too? */ 2151 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 2152 sctp_alloc_a_chunk(stcb, chk); 2153 if (chk == NULL) { 2154 /* No memory so we drop the chunk */ 2155 SCTP_STAT_INCR(sctps_nomem); 2156 if (last_chunk == 0) { 2157 /* we copied it, free the copy */ 2158 sctp_m_freem(dmbuf); 2159 } 2160 return (0); 2161 } 2162 chk->rec.data.tsn = tsn; 2163 chk->no_fr_allowed = 0; 2164 chk->rec.data.fsn = fsn; 2165 chk->rec.data.mid = mid; 2166 chk->rec.data.sid = sid; 2167 chk->rec.data.ppid = ppid; 2168 chk->rec.data.context = stcb->asoc.context; 2169 chk->rec.data.doing_fast_retransmit = 0; 2170 chk->rec.data.rcv_flags = chk_flags; 2171 chk->asoc = asoc; 2172 chk->send_size = the_len; 2173 chk->whoTo = net; 2174 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n", 2175 chk, 2176 control, mid); 2177 atomic_add_int(&net->ref_count, 1); 2178 chk->data = dmbuf; 2179 } 2180 /* Set the appropriate TSN mark */ 2181 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { 2182 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 2183 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 2184 asoc->highest_tsn_inside_nr_map = tsn; 2185 } 2186 } else { 2187 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); 2188 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) { 2189 asoc->highest_tsn_inside_map = tsn; 2190 } 2191 } 2192 /* Now is it complete (i.e. not fragmented)? */ 2193 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 2194 /* 2195 * Special check for when streams are resetting. We could be 2196 * more smart about this and check the actual stream to see 2197 * if it is not being reset.. that way we would not create a 2198 * HOLB when amongst streams being reset and those not being 2199 * reset. 2200 * 2201 */ 2202 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2203 SCTP_TSN_GT(tsn, liste->tsn)) { 2204 /* 2205 * yep its past where we need to reset... go ahead 2206 * and queue it. 2207 */ 2208 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) { 2209 /* first one on */ 2210 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2211 } else { 2212 struct sctp_queued_to_read *lcontrol, *nlcontrol; 2213 unsigned char inserted = 0; 2214 2215 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) { 2216 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) { 2217 continue; 2218 } else { 2219 /* found it */ 2220 TAILQ_INSERT_BEFORE(lcontrol, control, next); 2221 inserted = 1; 2222 break; 2223 } 2224 } 2225 if (inserted == 0) { 2226 /* 2227 * must be put at end, use prevP 2228 * (all setup from loop) to setup 2229 * nextP. 2230 */ 2231 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2232 } 2233 } 2234 goto finish_express_del; 2235 } 2236 if (chk_flags & SCTP_DATA_UNORDERED) { 2237 /* queue directly into socket buffer */ 2238 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n", 2239 control, mid); 2240 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 2241 sctp_add_to_readq(stcb->sctp_ep, stcb, 2242 control, 2243 &stcb->sctp_socket->so_rcv, 1, 2244 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 2245 2246 } else { 2247 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control, 2248 mid); 2249 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2250 if (*abort_flag) { 2251 if (last_chunk) { 2252 *m = NULL; 2253 } 2254 return (0); 2255 } 2256 } 2257 goto finish_express_del; 2258 } 2259 /* If we reach here its a reassembly */ 2260 need_reasm_check = 1; 2261 SCTPDBG(SCTP_DEBUG_XXX, 2262 "Queue data to stream for reasm control: %p MID: %u\n", 2263 control, mid); 2264 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn); 2265 if (*abort_flag) { 2266 /* 2267 * the assoc is now gone and chk was put onto the reasm 2268 * queue, which has all been freed. 2269 */ 2270 if (last_chunk) { 2271 *m = NULL; 2272 } 2273 return (0); 2274 } 2275 finish_express_del: 2276 /* Here we tidy up things */ 2277 if (tsn == (asoc->cumulative_tsn + 1)) { 2278 /* Update cum-ack */ 2279 asoc->cumulative_tsn = tsn; 2280 } 2281 if (last_chunk) { 2282 *m = NULL; 2283 } 2284 if (ordered) { 2285 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); 2286 } else { 2287 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); 2288 } 2289 SCTP_STAT_INCR(sctps_recvdata); 2290 /* Set it present please */ 2291 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 2292 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN); 2293 } 2294 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2295 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2296 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); 2297 } 2298 if (need_reasm_check) { 2299 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD); 2300 need_reasm_check = 0; 2301 } 2302 /* check the special flag for stream resets */ 2303 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2304 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) { 2305 /* 2306 * we have finished working through the backlogged TSN's now 2307 * time to reset streams. 1: call reset function. 2: free 2308 * pending_reply space 3: distribute any chunks in 2309 * pending_reply_queue. 2310 */ 2311 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams); 2312 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); 2313 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED); 2314 SCTP_FREE(liste, SCTP_M_STRESET); 2315 /* sa_ignore FREED_MEMORY */ 2316 liste = TAILQ_FIRST(&asoc->resetHead); 2317 if (TAILQ_EMPTY(&asoc->resetHead)) { 2318 /* All can be removed */ 2319 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) { 2320 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next); 2321 strm = &asoc->strmin[control->sinfo_stream]; 2322 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2323 if (*abort_flag) { 2324 return (0); 2325 } 2326 if (need_reasm_check) { 2327 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD); 2328 need_reasm_check = 0; 2329 } 2330 } 2331 } else { 2332 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) { 2333 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) { 2334 break; 2335 } 2336 /* 2337 * if control->sinfo_tsn is <= liste->tsn we 2338 * can process it which is the NOT of 2339 * control->sinfo_tsn > liste->tsn 2340 */ 2341 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next); 2342 strm = &asoc->strmin[control->sinfo_stream]; 2343 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2344 if (*abort_flag) { 2345 return (0); 2346 } 2347 if (need_reasm_check) { 2348 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD); 2349 need_reasm_check = 0; 2350 } 2351 } 2352 } 2353 } 2354 return (1); 2355 } 2356 2357 static const int8_t sctp_map_lookup_tab[256] = { 2358 0, 1, 0, 2, 0, 1, 0, 3, 2359 0, 1, 0, 2, 0, 1, 0, 4, 2360 0, 1, 0, 2, 0, 1, 0, 3, 2361 0, 1, 0, 2, 0, 1, 0, 5, 2362 0, 1, 0, 2, 0, 1, 0, 3, 2363 0, 1, 0, 2, 0, 1, 0, 4, 2364 0, 1, 0, 2, 0, 1, 0, 3, 2365 0, 1, 0, 2, 0, 1, 0, 6, 2366 0, 1, 0, 2, 0, 1, 0, 3, 2367 0, 1, 0, 2, 0, 1, 0, 4, 2368 0, 1, 0, 2, 0, 1, 0, 3, 2369 0, 1, 0, 2, 0, 1, 0, 5, 2370 0, 1, 0, 2, 0, 1, 0, 3, 2371 0, 1, 0, 2, 0, 1, 0, 4, 2372 0, 1, 0, 2, 0, 1, 0, 3, 2373 0, 1, 0, 2, 0, 1, 0, 7, 2374 0, 1, 0, 2, 0, 1, 0, 3, 2375 0, 1, 0, 2, 0, 1, 0, 4, 2376 0, 1, 0, 2, 0, 1, 0, 3, 2377 0, 1, 0, 2, 0, 1, 0, 5, 2378 0, 1, 0, 2, 0, 1, 0, 3, 2379 0, 1, 0, 2, 0, 1, 0, 4, 2380 0, 1, 0, 2, 0, 1, 0, 3, 2381 0, 1, 0, 2, 0, 1, 0, 6, 2382 0, 1, 0, 2, 0, 1, 0, 3, 2383 0, 1, 0, 2, 0, 1, 0, 4, 2384 0, 1, 0, 2, 0, 1, 0, 3, 2385 0, 1, 0, 2, 0, 1, 0, 5, 2386 0, 1, 0, 2, 0, 1, 0, 3, 2387 0, 1, 0, 2, 0, 1, 0, 4, 2388 0, 1, 0, 2, 0, 1, 0, 3, 2389 0, 1, 0, 2, 0, 1, 0, 8 2390 }; 2391 2392 void 2393 sctp_slide_mapping_arrays(struct sctp_tcb *stcb) 2394 { 2395 /* 2396 * Now we also need to check the mapping array in a couple of ways. 2397 * 1) Did we move the cum-ack point? 2398 * 2399 * When you first glance at this you might think that all entries 2400 * that make up the position of the cum-ack would be in the 2401 * nr-mapping array only.. i.e. things up to the cum-ack are always 2402 * deliverable. Thats true with one exception, when its a fragmented 2403 * message we may not deliver the data until some threshold (or all 2404 * of it) is in place. So we must OR the nr_mapping_array and 2405 * mapping_array to get a true picture of the cum-ack. 2406 */ 2407 struct sctp_association *asoc; 2408 int at; 2409 uint8_t val; 2410 int slide_from, slide_end, lgap, distance; 2411 uint32_t old_cumack, old_base, old_highest, highest_tsn; 2412 2413 asoc = &stcb->asoc; 2414 2415 old_cumack = asoc->cumulative_tsn; 2416 old_base = asoc->mapping_array_base_tsn; 2417 old_highest = asoc->highest_tsn_inside_map; 2418 /* 2419 * We could probably improve this a small bit by calculating the 2420 * offset of the current cum-ack as the starting point. 2421 */ 2422 at = 0; 2423 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) { 2424 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from]; 2425 if (val == 0xff) { 2426 at += 8; 2427 } else { 2428 /* there is a 0 bit */ 2429 at += sctp_map_lookup_tab[val]; 2430 break; 2431 } 2432 } 2433 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1); 2434 2435 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) && 2436 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) { 2437 #ifdef INVARIANTS 2438 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map", 2439 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2440 #else 2441 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n", 2442 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2443 sctp_print_mapping_array(asoc); 2444 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2445 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 2446 } 2447 asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2448 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn; 2449 #endif 2450 } 2451 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2452 highest_tsn = asoc->highest_tsn_inside_nr_map; 2453 } else { 2454 highest_tsn = asoc->highest_tsn_inside_map; 2455 } 2456 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) { 2457 /* The complete array was completed by a single FR */ 2458 /* highest becomes the cum-ack */ 2459 int clr; 2460 #ifdef INVARIANTS 2461 unsigned int i; 2462 #endif 2463 2464 /* clear the array */ 2465 clr = ((at + 7) >> 3); 2466 if (clr > asoc->mapping_array_size) { 2467 clr = asoc->mapping_array_size; 2468 } 2469 memset(asoc->mapping_array, 0, clr); 2470 memset(asoc->nr_mapping_array, 0, clr); 2471 #ifdef INVARIANTS 2472 for (i = 0; i < asoc->mapping_array_size; i++) { 2473 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) { 2474 SCTP_PRINTF("Error Mapping array's not clean at clear\n"); 2475 sctp_print_mapping_array(asoc); 2476 } 2477 } 2478 #endif 2479 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; 2480 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2481 } else if (at >= 8) { 2482 /* we can slide the mapping array down */ 2483 /* slide_from holds where we hit the first NON 0xff byte */ 2484 2485 /* 2486 * now calculate the ceiling of the move using our highest 2487 * TSN value 2488 */ 2489 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn); 2490 slide_end = (lgap >> 3); 2491 if (slide_end < slide_from) { 2492 sctp_print_mapping_array(asoc); 2493 #ifdef INVARIANTS 2494 panic("impossible slide"); 2495 #else 2496 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n", 2497 lgap, slide_end, slide_from, at); 2498 return; 2499 #endif 2500 } 2501 if (slide_end > asoc->mapping_array_size) { 2502 #ifdef INVARIANTS 2503 panic("would overrun buffer"); 2504 #else 2505 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n", 2506 asoc->mapping_array_size, slide_end); 2507 slide_end = asoc->mapping_array_size; 2508 #endif 2509 } 2510 distance = (slide_end - slide_from) + 1; 2511 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2512 sctp_log_map(old_base, old_cumack, old_highest, 2513 SCTP_MAP_PREPARE_SLIDE); 2514 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end, 2515 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM); 2516 } 2517 if (distance + slide_from > asoc->mapping_array_size || 2518 distance < 0) { 2519 /* 2520 * Here we do NOT slide forward the array so that 2521 * hopefully when more data comes in to fill it up 2522 * we will be able to slide it forward. Really I 2523 * don't think this should happen :-0 2524 */ 2525 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2526 sctp_log_map((uint32_t)distance, (uint32_t)slide_from, 2527 (uint32_t)asoc->mapping_array_size, 2528 SCTP_MAP_SLIDE_NONE); 2529 } 2530 } else { 2531 int ii; 2532 2533 for (ii = 0; ii < distance; ii++) { 2534 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii]; 2535 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii]; 2536 } 2537 for (ii = distance; ii < asoc->mapping_array_size; ii++) { 2538 asoc->mapping_array[ii] = 0; 2539 asoc->nr_mapping_array[ii] = 0; 2540 } 2541 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) { 2542 asoc->highest_tsn_inside_map += (slide_from << 3); 2543 } 2544 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) { 2545 asoc->highest_tsn_inside_nr_map += (slide_from << 3); 2546 } 2547 asoc->mapping_array_base_tsn += (slide_from << 3); 2548 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2549 sctp_log_map(asoc->mapping_array_base_tsn, 2550 asoc->cumulative_tsn, asoc->highest_tsn_inside_map, 2551 SCTP_MAP_SLIDE_RESULT); 2552 } 2553 } 2554 } 2555 } 2556 2557 void 2558 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap) 2559 { 2560 struct sctp_association *asoc; 2561 uint32_t highest_tsn; 2562 int is_a_gap; 2563 2564 sctp_slide_mapping_arrays(stcb); 2565 asoc = &stcb->asoc; 2566 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2567 highest_tsn = asoc->highest_tsn_inside_nr_map; 2568 } else { 2569 highest_tsn = asoc->highest_tsn_inside_map; 2570 } 2571 /* Is there a gap now? */ 2572 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 2573 2574 /* 2575 * Now we need to see if we need to queue a sack or just start the 2576 * timer (if allowed). 2577 */ 2578 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) { 2579 /* 2580 * Ok special case, in SHUTDOWN-SENT case. here we maker 2581 * sure SACK timer is off and instead send a SHUTDOWN and a 2582 * SACK 2583 */ 2584 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2585 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 2586 stcb->sctp_ep, stcb, NULL, 2587 SCTP_FROM_SCTP_INDATA + SCTP_LOC_19); 2588 } 2589 sctp_send_shutdown(stcb, 2590 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination)); 2591 if (is_a_gap) { 2592 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 2593 } 2594 } else { 2595 /* 2596 * CMT DAC algorithm: increase number of packets received 2597 * since last ack 2598 */ 2599 stcb->asoc.cmt_dac_pkts_rcvd++; 2600 2601 if ((stcb->asoc.send_sack == 1) || /* We need to send a 2602 * SACK */ 2603 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no 2604 * longer is one */ 2605 (stcb->asoc.numduptsns) || /* we have dup's */ 2606 (is_a_gap) || /* is still a gap */ 2607 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */ 2608 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)) { /* hit limit of pkts */ 2609 if ((stcb->asoc.sctp_cmt_on_off > 0) && 2610 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) && 2611 (stcb->asoc.send_sack == 0) && 2612 (stcb->asoc.numduptsns == 0) && 2613 (stcb->asoc.delayed_ack) && 2614 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) { 2615 /* 2616 * CMT DAC algorithm: With CMT, delay acks 2617 * even in the face of reordering. 2618 * Therefore, if acks that do not have to be 2619 * sent because of the above reasons, will 2620 * be delayed. That is, acks that would have 2621 * been sent due to gap reports will be 2622 * delayed with DAC. Start the delayed ack 2623 * timer. 2624 */ 2625 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2626 stcb->sctp_ep, stcb, NULL); 2627 } else { 2628 /* 2629 * Ok we must build a SACK since the timer 2630 * is pending, we got our first packet OR 2631 * there are gaps or duplicates. 2632 */ 2633 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, 2634 SCTP_FROM_SCTP_INDATA + SCTP_LOC_20); 2635 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 2636 } 2637 } else { 2638 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2639 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2640 stcb->sctp_ep, stcb, NULL); 2641 } 2642 } 2643 } 2644 } 2645 2646 int 2647 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, 2648 struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2649 struct sctp_nets *net, uint32_t *high_tsn) 2650 { 2651 struct sctp_chunkhdr *ch, chunk_buf; 2652 struct sctp_association *asoc; 2653 int num_chunks = 0; /* number of control chunks processed */ 2654 int stop_proc = 0; 2655 int break_flag, last_chunk; 2656 int abort_flag = 0, was_a_gap; 2657 struct mbuf *m; 2658 uint32_t highest_tsn; 2659 uint16_t chk_length; 2660 2661 /* set the rwnd */ 2662 sctp_set_rwnd(stcb, &stcb->asoc); 2663 2664 m = *mm; 2665 SCTP_TCB_LOCK_ASSERT(stcb); 2666 asoc = &stcb->asoc; 2667 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2668 highest_tsn = asoc->highest_tsn_inside_nr_map; 2669 } else { 2670 highest_tsn = asoc->highest_tsn_inside_map; 2671 } 2672 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 2673 /* 2674 * setup where we got the last DATA packet from for any SACK that 2675 * may need to go out. Don't bump the net. This is done ONLY when a 2676 * chunk is assigned. 2677 */ 2678 asoc->last_data_chunk_from = net; 2679 2680 /*- 2681 * Now before we proceed we must figure out if this is a wasted 2682 * cluster... i.e. it is a small packet sent in and yet the driver 2683 * underneath allocated a full cluster for it. If so we must copy it 2684 * to a smaller mbuf and free up the cluster mbuf. This will help 2685 * with cluster starvation. 2686 */ 2687 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) { 2688 /* we only handle mbufs that are singletons.. not chains */ 2689 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA); 2690 if (m) { 2691 /* ok lets see if we can copy the data up */ 2692 caddr_t *from, *to; 2693 2694 /* get the pointers and copy */ 2695 to = mtod(m, caddr_t *); 2696 from = mtod((*mm), caddr_t *); 2697 memcpy(to, from, SCTP_BUF_LEN((*mm))); 2698 /* copy the length and free up the old */ 2699 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm)); 2700 sctp_m_freem(*mm); 2701 /* success, back copy */ 2702 *mm = m; 2703 } else { 2704 /* We are in trouble in the mbuf world .. yikes */ 2705 m = *mm; 2706 } 2707 } 2708 /* get pointer to the first chunk header */ 2709 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 2710 sizeof(struct sctp_chunkhdr), 2711 (uint8_t *)&chunk_buf); 2712 if (ch == NULL) { 2713 return (1); 2714 } 2715 /* 2716 * process all DATA chunks... 2717 */ 2718 *high_tsn = asoc->cumulative_tsn; 2719 break_flag = 0; 2720 asoc->data_pkts_seen++; 2721 while (stop_proc == 0) { 2722 /* validate chunk length */ 2723 chk_length = ntohs(ch->chunk_length); 2724 if (length - *offset < chk_length) { 2725 /* all done, mutulated chunk */ 2726 stop_proc = 1; 2727 continue; 2728 } 2729 if ((asoc->idata_supported == 1) && 2730 (ch->chunk_type == SCTP_DATA)) { 2731 struct mbuf *op_err; 2732 char msg[SCTP_DIAG_INFO_LEN]; 2733 2734 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated"); 2735 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2736 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21; 2737 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 2738 return (2); 2739 } 2740 if ((asoc->idata_supported == 0) && 2741 (ch->chunk_type == SCTP_IDATA)) { 2742 struct mbuf *op_err; 2743 char msg[SCTP_DIAG_INFO_LEN]; 2744 2745 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated"); 2746 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2747 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22; 2748 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 2749 return (2); 2750 } 2751 if ((ch->chunk_type == SCTP_DATA) || 2752 (ch->chunk_type == SCTP_IDATA)) { 2753 uint16_t clen; 2754 2755 if (ch->chunk_type == SCTP_DATA) { 2756 clen = sizeof(struct sctp_data_chunk); 2757 } else { 2758 clen = sizeof(struct sctp_idata_chunk); 2759 } 2760 if (chk_length < clen) { 2761 /* 2762 * Need to send an abort since we had a 2763 * invalid data chunk. 2764 */ 2765 struct mbuf *op_err; 2766 char msg[SCTP_DIAG_INFO_LEN]; 2767 2768 SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u", 2769 ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA", 2770 chk_length); 2771 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2772 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23; 2773 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 2774 return (2); 2775 } 2776 #ifdef SCTP_AUDITING_ENABLED 2777 sctp_audit_log(0xB1, 0); 2778 #endif 2779 if (SCTP_SIZE32(chk_length) == (length - *offset)) { 2780 last_chunk = 1; 2781 } else { 2782 last_chunk = 0; 2783 } 2784 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, 2785 chk_length, net, high_tsn, &abort_flag, &break_flag, 2786 last_chunk, ch->chunk_type)) { 2787 num_chunks++; 2788 } 2789 if (abort_flag) 2790 return (2); 2791 2792 if (break_flag) { 2793 /* 2794 * Set because of out of rwnd space and no 2795 * drop rep space left. 2796 */ 2797 stop_proc = 1; 2798 continue; 2799 } 2800 } else { 2801 /* not a data chunk in the data region */ 2802 switch (ch->chunk_type) { 2803 case SCTP_INITIATION: 2804 case SCTP_INITIATION_ACK: 2805 case SCTP_SELECTIVE_ACK: 2806 case SCTP_NR_SELECTIVE_ACK: 2807 case SCTP_HEARTBEAT_REQUEST: 2808 case SCTP_HEARTBEAT_ACK: 2809 case SCTP_ABORT_ASSOCIATION: 2810 case SCTP_SHUTDOWN: 2811 case SCTP_SHUTDOWN_ACK: 2812 case SCTP_OPERATION_ERROR: 2813 case SCTP_COOKIE_ECHO: 2814 case SCTP_COOKIE_ACK: 2815 case SCTP_ECN_ECHO: 2816 case SCTP_ECN_CWR: 2817 case SCTP_SHUTDOWN_COMPLETE: 2818 case SCTP_AUTHENTICATION: 2819 case SCTP_ASCONF_ACK: 2820 case SCTP_PACKET_DROPPED: 2821 case SCTP_STREAM_RESET: 2822 case SCTP_FORWARD_CUM_TSN: 2823 case SCTP_ASCONF: 2824 { 2825 /* 2826 * Now, what do we do with KNOWN 2827 * chunks that are NOT in the right 2828 * place? 2829 * 2830 * For now, I do nothing but ignore 2831 * them. We may later want to add 2832 * sysctl stuff to switch out and do 2833 * either an ABORT() or possibly 2834 * process them. 2835 */ 2836 struct mbuf *op_err; 2837 char msg[SCTP_DIAG_INFO_LEN]; 2838 2839 SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x", 2840 ch->chunk_type); 2841 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2842 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 2843 return (2); 2844 } 2845 default: 2846 /* 2847 * Unknown chunk type: use bit rules after 2848 * checking length 2849 */ 2850 if (chk_length < sizeof(struct sctp_chunkhdr)) { 2851 /* 2852 * Need to send an abort since we 2853 * had a invalid chunk. 2854 */ 2855 struct mbuf *op_err; 2856 char msg[SCTP_DIAG_INFO_LEN]; 2857 2858 SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length); 2859 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2860 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; 2861 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 2862 return (2); 2863 } 2864 if (ch->chunk_type & 0x40) { 2865 /* Add a error report to the queue */ 2866 struct mbuf *op_err; 2867 struct sctp_gen_error_cause *cause; 2868 2869 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause), 2870 0, M_NOWAIT, 1, MT_DATA); 2871 if (op_err != NULL) { 2872 cause = mtod(op_err, struct sctp_gen_error_cause *); 2873 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK); 2874 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause))); 2875 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause); 2876 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT); 2877 if (SCTP_BUF_NEXT(op_err) != NULL) { 2878 sctp_queue_op_err(stcb, op_err); 2879 } else { 2880 sctp_m_freem(op_err); 2881 } 2882 } 2883 } 2884 if ((ch->chunk_type & 0x80) == 0) { 2885 /* discard the rest of this packet */ 2886 stop_proc = 1; 2887 } /* else skip this bad chunk and 2888 * continue... */ 2889 break; 2890 } /* switch of chunk type */ 2891 } 2892 *offset += SCTP_SIZE32(chk_length); 2893 if ((*offset >= length) || stop_proc) { 2894 /* no more data left in the mbuf chain */ 2895 stop_proc = 1; 2896 continue; 2897 } 2898 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 2899 sizeof(struct sctp_chunkhdr), 2900 (uint8_t *)&chunk_buf); 2901 if (ch == NULL) { 2902 *offset = length; 2903 stop_proc = 1; 2904 continue; 2905 } 2906 } 2907 if (break_flag) { 2908 /* 2909 * we need to report rwnd overrun drops. 2910 */ 2911 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0); 2912 } 2913 if (num_chunks) { 2914 /* 2915 * Did we get data, if so update the time for auto-close and 2916 * give peer credit for being alive. 2917 */ 2918 SCTP_STAT_INCR(sctps_recvpktwithdata); 2919 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 2920 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 2921 stcb->asoc.overall_error_count, 2922 0, 2923 SCTP_FROM_SCTP_INDATA, 2924 __LINE__); 2925 } 2926 stcb->asoc.overall_error_count = 0; 2927 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd); 2928 } 2929 /* now service all of the reassm queue if needed */ 2930 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) { 2931 /* Assure that we ack right away */ 2932 stcb->asoc.send_sack = 1; 2933 } 2934 /* Start a sack timer or QUEUE a SACK for sending */ 2935 sctp_sack_check(stcb, was_a_gap); 2936 return (0); 2937 } 2938 2939 static int 2940 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn, 2941 uint16_t frag_strt, uint16_t frag_end, int nr_sacking, 2942 int *num_frs, 2943 uint32_t *biggest_newly_acked_tsn, 2944 uint32_t *this_sack_lowest_newack, 2945 int *rto_ok) 2946 { 2947 struct sctp_tmit_chunk *tp1; 2948 unsigned int theTSN; 2949 int j, wake_him = 0, circled = 0; 2950 2951 /* Recover the tp1 we last saw */ 2952 tp1 = *p_tp1; 2953 if (tp1 == NULL) { 2954 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 2955 } 2956 for (j = frag_strt; j <= frag_end; j++) { 2957 theTSN = j + last_tsn; 2958 while (tp1) { 2959 if (tp1->rec.data.doing_fast_retransmit) 2960 (*num_frs) += 1; 2961 2962 /*- 2963 * CMT: CUCv2 algorithm. For each TSN being 2964 * processed from the sent queue, track the 2965 * next expected pseudo-cumack, or 2966 * rtx_pseudo_cumack, if required. Separate 2967 * cumack trackers for first transmissions, 2968 * and retransmissions. 2969 */ 2970 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 2971 (tp1->whoTo->find_pseudo_cumack == 1) && 2972 (tp1->snd_count == 1)) { 2973 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn; 2974 tp1->whoTo->find_pseudo_cumack = 0; 2975 } 2976 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 2977 (tp1->whoTo->find_rtx_pseudo_cumack == 1) && 2978 (tp1->snd_count > 1)) { 2979 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn; 2980 tp1->whoTo->find_rtx_pseudo_cumack = 0; 2981 } 2982 if (tp1->rec.data.tsn == theTSN) { 2983 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 2984 /*- 2985 * must be held until 2986 * cum-ack passes 2987 */ 2988 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 2989 /*- 2990 * If it is less than RESEND, it is 2991 * now no-longer in flight. 2992 * Higher values may already be set 2993 * via previous Gap Ack Blocks... 2994 * i.e. ACKED or RESEND. 2995 */ 2996 if (SCTP_TSN_GT(tp1->rec.data.tsn, 2997 *biggest_newly_acked_tsn)) { 2998 *biggest_newly_acked_tsn = tp1->rec.data.tsn; 2999 } 3000 /*- 3001 * CMT: SFR algo (and HTNA) - set 3002 * saw_newack to 1 for dest being 3003 * newly acked. update 3004 * this_sack_highest_newack if 3005 * appropriate. 3006 */ 3007 if (tp1->rec.data.chunk_was_revoked == 0) 3008 tp1->whoTo->saw_newack = 1; 3009 3010 if (SCTP_TSN_GT(tp1->rec.data.tsn, 3011 tp1->whoTo->this_sack_highest_newack)) { 3012 tp1->whoTo->this_sack_highest_newack = 3013 tp1->rec.data.tsn; 3014 } 3015 /*- 3016 * CMT DAC algo: also update 3017 * this_sack_lowest_newack 3018 */ 3019 if (*this_sack_lowest_newack == 0) { 3020 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3021 sctp_log_sack(*this_sack_lowest_newack, 3022 last_tsn, 3023 tp1->rec.data.tsn, 3024 0, 3025 0, 3026 SCTP_LOG_TSN_ACKED); 3027 } 3028 *this_sack_lowest_newack = tp1->rec.data.tsn; 3029 } 3030 /*- 3031 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp 3032 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set 3033 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be 3034 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack. 3035 * Separate pseudo_cumack trackers for first transmissions and 3036 * retransmissions. 3037 */ 3038 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) { 3039 if (tp1->rec.data.chunk_was_revoked == 0) { 3040 tp1->whoTo->new_pseudo_cumack = 1; 3041 } 3042 tp1->whoTo->find_pseudo_cumack = 1; 3043 } 3044 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 3045 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); 3046 } 3047 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) { 3048 if (tp1->rec.data.chunk_was_revoked == 0) { 3049 tp1->whoTo->new_pseudo_cumack = 1; 3050 } 3051 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3052 } 3053 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3054 sctp_log_sack(*biggest_newly_acked_tsn, 3055 last_tsn, 3056 tp1->rec.data.tsn, 3057 frag_strt, 3058 frag_end, 3059 SCTP_LOG_TSN_ACKED); 3060 } 3061 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3062 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP, 3063 tp1->whoTo->flight_size, 3064 tp1->book_size, 3065 (uint32_t)(uintptr_t)tp1->whoTo, 3066 tp1->rec.data.tsn); 3067 } 3068 sctp_flight_size_decrease(tp1); 3069 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3070 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3071 tp1); 3072 } 3073 sctp_total_flight_decrease(stcb, tp1); 3074 3075 tp1->whoTo->net_ack += tp1->send_size; 3076 if (tp1->snd_count < 2) { 3077 /*- 3078 * True non-retransmitted chunk 3079 */ 3080 tp1->whoTo->net_ack2 += tp1->send_size; 3081 3082 /*- 3083 * update RTO too ? 3084 */ 3085 if (tp1->do_rtt) { 3086 if (*rto_ok && 3087 sctp_calculate_rto(stcb, 3088 &stcb->asoc, 3089 tp1->whoTo, 3090 &tp1->sent_rcv_time, 3091 SCTP_RTT_FROM_DATA)) { 3092 *rto_ok = 0; 3093 } 3094 if (tp1->whoTo->rto_needed == 0) { 3095 tp1->whoTo->rto_needed = 1; 3096 } 3097 tp1->do_rtt = 0; 3098 } 3099 } 3100 } 3101 if (tp1->sent <= SCTP_DATAGRAM_RESEND) { 3102 if (SCTP_TSN_GT(tp1->rec.data.tsn, 3103 stcb->asoc.this_sack_highest_gap)) { 3104 stcb->asoc.this_sack_highest_gap = 3105 tp1->rec.data.tsn; 3106 } 3107 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3108 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt); 3109 #ifdef SCTP_AUDITING_ENABLED 3110 sctp_audit_log(0xB2, 3111 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff)); 3112 #endif 3113 } 3114 } 3115 /*- 3116 * All chunks NOT UNSENT fall through here and are marked 3117 * (leave PR-SCTP ones that are to skip alone though) 3118 */ 3119 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) && 3120 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { 3121 tp1->sent = SCTP_DATAGRAM_MARKED; 3122 } 3123 if (tp1->rec.data.chunk_was_revoked) { 3124 /* deflate the cwnd */ 3125 tp1->whoTo->cwnd -= tp1->book_size; 3126 tp1->rec.data.chunk_was_revoked = 0; 3127 } 3128 /* NR Sack code here */ 3129 if (nr_sacking && 3130 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { 3131 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) { 3132 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--; 3133 #ifdef INVARIANTS 3134 } else { 3135 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); 3136 #endif 3137 } 3138 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) && 3139 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && 3140 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) { 3141 stcb->asoc.trigger_reset = 1; 3142 } 3143 tp1->sent = SCTP_DATAGRAM_NR_ACKED; 3144 if (tp1->data) { 3145 /* 3146 * sa_ignore 3147 * NO_NULL_CHK 3148 */ 3149 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 3150 sctp_m_freem(tp1->data); 3151 tp1->data = NULL; 3152 } 3153 wake_him++; 3154 } 3155 } 3156 break; 3157 } /* if (tp1->tsn == theTSN) */ 3158 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) { 3159 break; 3160 } 3161 tp1 = TAILQ_NEXT(tp1, sctp_next); 3162 if ((tp1 == NULL) && (circled == 0)) { 3163 circled++; 3164 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 3165 } 3166 } /* end while (tp1) */ 3167 if (tp1 == NULL) { 3168 circled = 0; 3169 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 3170 } 3171 /* In case the fragments were not in order we must reset */ 3172 } /* end for (j = fragStart */ 3173 *p_tp1 = tp1; 3174 return (wake_him); /* Return value only used for nr-sack */ 3175 } 3176 3177 static int 3178 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc, 3179 uint32_t last_tsn, uint32_t *biggest_tsn_acked, 3180 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack, 3181 int num_seg, int num_nr_seg, int *rto_ok) 3182 { 3183 struct sctp_gap_ack_block *frag, block; 3184 struct sctp_tmit_chunk *tp1; 3185 int i; 3186 int num_frs = 0; 3187 int chunk_freed; 3188 int non_revocable; 3189 uint16_t frag_strt, frag_end, prev_frag_end; 3190 3191 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3192 prev_frag_end = 0; 3193 chunk_freed = 0; 3194 3195 for (i = 0; i < (num_seg + num_nr_seg); i++) { 3196 if (i == num_seg) { 3197 prev_frag_end = 0; 3198 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3199 } 3200 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset, 3201 sizeof(struct sctp_gap_ack_block), (uint8_t *)&block); 3202 *offset += sizeof(block); 3203 if (frag == NULL) { 3204 return (chunk_freed); 3205 } 3206 frag_strt = ntohs(frag->start); 3207 frag_end = ntohs(frag->end); 3208 3209 if (frag_strt > frag_end) { 3210 /* This gap report is malformed, skip it. */ 3211 continue; 3212 } 3213 if (frag_strt <= prev_frag_end) { 3214 /* This gap report is not in order, so restart. */ 3215 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3216 } 3217 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) { 3218 *biggest_tsn_acked = last_tsn + frag_end; 3219 } 3220 if (i < num_seg) { 3221 non_revocable = 0; 3222 } else { 3223 non_revocable = 1; 3224 } 3225 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end, 3226 non_revocable, &num_frs, biggest_newly_acked_tsn, 3227 this_sack_lowest_newack, rto_ok)) { 3228 chunk_freed = 1; 3229 } 3230 prev_frag_end = frag_end; 3231 } 3232 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3233 if (num_frs) 3234 sctp_log_fr(*biggest_tsn_acked, 3235 *biggest_newly_acked_tsn, 3236 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS); 3237 } 3238 return (chunk_freed); 3239 } 3240 3241 static void 3242 sctp_check_for_revoked(struct sctp_tcb *stcb, 3243 struct sctp_association *asoc, uint32_t cumack, 3244 uint32_t biggest_tsn_acked) 3245 { 3246 struct sctp_tmit_chunk *tp1; 3247 3248 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3249 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) { 3250 /* 3251 * ok this guy is either ACK or MARKED. If it is 3252 * ACKED it has been previously acked but not this 3253 * time i.e. revoked. If it is MARKED it was ACK'ed 3254 * again. 3255 */ 3256 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) { 3257 break; 3258 } 3259 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 3260 /* it has been revoked */ 3261 tp1->sent = SCTP_DATAGRAM_SENT; 3262 tp1->rec.data.chunk_was_revoked = 1; 3263 /* 3264 * We must add this stuff back in to assure 3265 * timers and such get started. 3266 */ 3267 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3268 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 3269 tp1->whoTo->flight_size, 3270 tp1->book_size, 3271 (uint32_t)(uintptr_t)tp1->whoTo, 3272 tp1->rec.data.tsn); 3273 } 3274 sctp_flight_size_increase(tp1); 3275 sctp_total_flight_increase(stcb, tp1); 3276 /* 3277 * We inflate the cwnd to compensate for our 3278 * artificial inflation of the flight_size. 3279 */ 3280 tp1->whoTo->cwnd += tp1->book_size; 3281 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3282 sctp_log_sack(asoc->last_acked_seq, 3283 cumack, 3284 tp1->rec.data.tsn, 3285 0, 3286 0, 3287 SCTP_LOG_TSN_REVOKED); 3288 } 3289 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) { 3290 /* it has been re-acked in this SACK */ 3291 tp1->sent = SCTP_DATAGRAM_ACKED; 3292 } 3293 } 3294 if (tp1->sent == SCTP_DATAGRAM_UNSENT) 3295 break; 3296 } 3297 } 3298 3299 static void 3300 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, 3301 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved) 3302 { 3303 struct sctp_tmit_chunk *tp1; 3304 int strike_flag = 0; 3305 struct timeval now; 3306 uint32_t sending_seq; 3307 struct sctp_nets *net; 3308 int num_dests_sacked = 0; 3309 3310 /* 3311 * select the sending_seq, this is either the next thing ready to be 3312 * sent but not transmitted, OR, the next seq we assign. 3313 */ 3314 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 3315 if (tp1 == NULL) { 3316 sending_seq = asoc->sending_seq; 3317 } else { 3318 sending_seq = tp1->rec.data.tsn; 3319 } 3320 3321 /* CMT DAC algo: finding out if SACK is a mixed SACK */ 3322 if ((asoc->sctp_cmt_on_off > 0) && 3323 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3324 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3325 if (net->saw_newack) 3326 num_dests_sacked++; 3327 } 3328 } 3329 if (stcb->asoc.prsctp_supported) { 3330 (void)SCTP_GETTIME_TIMEVAL(&now); 3331 } 3332 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3333 strike_flag = 0; 3334 if (tp1->no_fr_allowed) { 3335 /* this one had a timeout or something */ 3336 continue; 3337 } 3338 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3339 if (tp1->sent < SCTP_DATAGRAM_RESEND) 3340 sctp_log_fr(biggest_tsn_newly_acked, 3341 tp1->rec.data.tsn, 3342 tp1->sent, 3343 SCTP_FR_LOG_CHECK_STRIKE); 3344 } 3345 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) || 3346 tp1->sent == SCTP_DATAGRAM_UNSENT) { 3347 /* done */ 3348 break; 3349 } 3350 if (stcb->asoc.prsctp_supported) { 3351 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3352 /* Is it expired? */ 3353 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3354 /* Yes so drop it */ 3355 if (tp1->data != NULL) { 3356 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, 3357 SCTP_SO_NOT_LOCKED); 3358 } 3359 continue; 3360 } 3361 } 3362 } 3363 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) && 3364 !(accum_moved && asoc->fast_retran_loss_recovery)) { 3365 /* we are beyond the tsn in the sack */ 3366 break; 3367 } 3368 if (tp1->sent >= SCTP_DATAGRAM_RESEND) { 3369 /* either a RESEND, ACKED, or MARKED */ 3370 /* skip */ 3371 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) { 3372 /* Continue strikin FWD-TSN chunks */ 3373 tp1->rec.data.fwd_tsn_cnt++; 3374 } 3375 continue; 3376 } 3377 /* 3378 * CMT : SFR algo (covers part of DAC and HTNA as well) 3379 */ 3380 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) { 3381 /* 3382 * No new acks were received for data sent to this 3383 * dest. Therefore, according to the SFR algo for 3384 * CMT, no data sent to this dest can be marked for 3385 * FR using this SACK. 3386 */ 3387 continue; 3388 } else if (tp1->whoTo && 3389 SCTP_TSN_GT(tp1->rec.data.tsn, 3390 tp1->whoTo->this_sack_highest_newack) && 3391 !(accum_moved && asoc->fast_retran_loss_recovery)) { 3392 /* 3393 * CMT: New acks were received for data sent to this 3394 * dest. But no new acks were seen for data sent 3395 * after tp1. Therefore, according to the SFR algo 3396 * for CMT, tp1 cannot be marked for FR using this 3397 * SACK. This step covers part of the DAC algo and 3398 * the HTNA algo as well. 3399 */ 3400 continue; 3401 } 3402 /* 3403 * Here we check to see if we were have already done a FR 3404 * and if so we see if the biggest TSN we saw in the sack is 3405 * smaller than the recovery point. If so we don't strike 3406 * the tsn... otherwise we CAN strike the TSN. 3407 */ 3408 /* 3409 * @@@ JRI: Check for CMT if (accum_moved && 3410 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 3411 * 0)) { 3412 */ 3413 if (accum_moved && asoc->fast_retran_loss_recovery) { 3414 /* 3415 * Strike the TSN if in fast-recovery and cum-ack 3416 * moved. 3417 */ 3418 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3419 sctp_log_fr(biggest_tsn_newly_acked, 3420 tp1->rec.data.tsn, 3421 tp1->sent, 3422 SCTP_FR_LOG_STRIKE_CHUNK); 3423 } 3424 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3425 tp1->sent++; 3426 } 3427 if ((asoc->sctp_cmt_on_off > 0) && 3428 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3429 /* 3430 * CMT DAC algorithm: If SACK flag is set to 3431 * 0, then lowest_newack test will not pass 3432 * because it would have been set to the 3433 * cumack earlier. If not already to be 3434 * rtx'd, If not a mixed sack and if tp1 is 3435 * not between two sacked TSNs, then mark by 3436 * one more. NOTE that we are marking by one 3437 * additional time since the SACK DAC flag 3438 * indicates that two packets have been 3439 * received after this missing TSN. 3440 */ 3441 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3442 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) { 3443 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3444 sctp_log_fr(16 + num_dests_sacked, 3445 tp1->rec.data.tsn, 3446 tp1->sent, 3447 SCTP_FR_LOG_STRIKE_CHUNK); 3448 } 3449 tp1->sent++; 3450 } 3451 } 3452 } else if ((tp1->rec.data.doing_fast_retransmit) && 3453 (asoc->sctp_cmt_on_off == 0)) { 3454 /* 3455 * For those that have done a FR we must take 3456 * special consideration if we strike. I.e the 3457 * biggest_newly_acked must be higher than the 3458 * sending_seq at the time we did the FR. 3459 */ 3460 if ( 3461 #ifdef SCTP_FR_TO_ALTERNATE 3462 /* 3463 * If FR's go to new networks, then we must only do 3464 * this for singly homed asoc's. However if the FR's 3465 * go to the same network (Armando's work) then its 3466 * ok to FR multiple times. 3467 */ 3468 (asoc->numnets < 2) 3469 #else 3470 (1) 3471 #endif 3472 ) { 3473 if (SCTP_TSN_GE(biggest_tsn_newly_acked, 3474 tp1->rec.data.fast_retran_tsn)) { 3475 /* 3476 * Strike the TSN, since this ack is 3477 * beyond where things were when we 3478 * did a FR. 3479 */ 3480 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3481 sctp_log_fr(biggest_tsn_newly_acked, 3482 tp1->rec.data.tsn, 3483 tp1->sent, 3484 SCTP_FR_LOG_STRIKE_CHUNK); 3485 } 3486 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3487 tp1->sent++; 3488 } 3489 strike_flag = 1; 3490 if ((asoc->sctp_cmt_on_off > 0) && 3491 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3492 /* 3493 * CMT DAC algorithm: If 3494 * SACK flag is set to 0, 3495 * then lowest_newack test 3496 * will not pass because it 3497 * would have been set to 3498 * the cumack earlier. If 3499 * not already to be rtx'd, 3500 * If not a mixed sack and 3501 * if tp1 is not between two 3502 * sacked TSNs, then mark by 3503 * one more. NOTE that we 3504 * are marking by one 3505 * additional time since the 3506 * SACK DAC flag indicates 3507 * that two packets have 3508 * been received after this 3509 * missing TSN. 3510 */ 3511 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 3512 (num_dests_sacked == 1) && 3513 SCTP_TSN_GT(this_sack_lowest_newack, 3514 tp1->rec.data.tsn)) { 3515 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3516 sctp_log_fr(32 + num_dests_sacked, 3517 tp1->rec.data.tsn, 3518 tp1->sent, 3519 SCTP_FR_LOG_STRIKE_CHUNK); 3520 } 3521 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3522 tp1->sent++; 3523 } 3524 } 3525 } 3526 } 3527 } 3528 /* 3529 * JRI: TODO: remove code for HTNA algo. CMT's SFR 3530 * algo covers HTNA. 3531 */ 3532 } else if (SCTP_TSN_GT(tp1->rec.data.tsn, 3533 biggest_tsn_newly_acked)) { 3534 /* 3535 * We don't strike these: This is the HTNA 3536 * algorithm i.e. we don't strike If our TSN is 3537 * larger than the Highest TSN Newly Acked. 3538 */ 3539 ; 3540 } else { 3541 /* Strike the TSN */ 3542 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3543 sctp_log_fr(biggest_tsn_newly_acked, 3544 tp1->rec.data.tsn, 3545 tp1->sent, 3546 SCTP_FR_LOG_STRIKE_CHUNK); 3547 } 3548 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3549 tp1->sent++; 3550 } 3551 if ((asoc->sctp_cmt_on_off > 0) && 3552 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3553 /* 3554 * CMT DAC algorithm: If SACK flag is set to 3555 * 0, then lowest_newack test will not pass 3556 * because it would have been set to the 3557 * cumack earlier. If not already to be 3558 * rtx'd, If not a mixed sack and if tp1 is 3559 * not between two sacked TSNs, then mark by 3560 * one more. NOTE that we are marking by one 3561 * additional time since the SACK DAC flag 3562 * indicates that two packets have been 3563 * received after this missing TSN. 3564 */ 3565 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3566 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) { 3567 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3568 sctp_log_fr(48 + num_dests_sacked, 3569 tp1->rec.data.tsn, 3570 tp1->sent, 3571 SCTP_FR_LOG_STRIKE_CHUNK); 3572 } 3573 tp1->sent++; 3574 } 3575 } 3576 } 3577 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3578 struct sctp_nets *alt; 3579 3580 /* fix counts and things */ 3581 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3582 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND, 3583 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0), 3584 tp1->book_size, 3585 (uint32_t)(uintptr_t)tp1->whoTo, 3586 tp1->rec.data.tsn); 3587 } 3588 if (tp1->whoTo) { 3589 tp1->whoTo->net_ack++; 3590 sctp_flight_size_decrease(tp1); 3591 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3592 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3593 tp1); 3594 } 3595 } 3596 3597 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 3598 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, 3599 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3600 } 3601 /* add back to the rwnd */ 3602 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3603 3604 /* remove from the total flight */ 3605 sctp_total_flight_decrease(stcb, tp1); 3606 3607 if ((stcb->asoc.prsctp_supported) && 3608 (PR_SCTP_RTX_ENABLED(tp1->flags))) { 3609 /* 3610 * Has it been retransmitted tv_sec times? - 3611 * we store the retran count there. 3612 */ 3613 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) { 3614 /* Yes, so drop it */ 3615 if (tp1->data != NULL) { 3616 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, 3617 SCTP_SO_NOT_LOCKED); 3618 } 3619 /* Make sure to flag we had a FR */ 3620 if (tp1->whoTo != NULL) { 3621 tp1->whoTo->net_ack++; 3622 } 3623 continue; 3624 } 3625 } 3626 /* 3627 * SCTP_PRINTF("OK, we are now ready to FR this 3628 * guy\n"); 3629 */ 3630 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3631 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count, 3632 0, SCTP_FR_MARKED); 3633 } 3634 if (strike_flag) { 3635 /* This is a subsequent FR */ 3636 SCTP_STAT_INCR(sctps_sendmultfastretrans); 3637 } 3638 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3639 if (asoc->sctp_cmt_on_off > 0) { 3640 /* 3641 * CMT: Using RTX_SSTHRESH policy for CMT. 3642 * If CMT is being used, then pick dest with 3643 * largest ssthresh for any retransmission. 3644 */ 3645 tp1->no_fr_allowed = 1; 3646 alt = tp1->whoTo; 3647 /* sa_ignore NO_NULL_CHK */ 3648 if (asoc->sctp_cmt_pf > 0) { 3649 /* 3650 * JRS 5/18/07 - If CMT PF is on, 3651 * use the PF version of 3652 * find_alt_net() 3653 */ 3654 alt = sctp_find_alternate_net(stcb, alt, 2); 3655 } else { 3656 /* 3657 * JRS 5/18/07 - If only CMT is on, 3658 * use the CMT version of 3659 * find_alt_net() 3660 */ 3661 /* sa_ignore NO_NULL_CHK */ 3662 alt = sctp_find_alternate_net(stcb, alt, 1); 3663 } 3664 if (alt == NULL) { 3665 alt = tp1->whoTo; 3666 } 3667 /* 3668 * CUCv2: If a different dest is picked for 3669 * the retransmission, then new 3670 * (rtx-)pseudo_cumack needs to be tracked 3671 * for orig dest. Let CUCv2 track new (rtx-) 3672 * pseudo-cumack always. 3673 */ 3674 if (tp1->whoTo) { 3675 tp1->whoTo->find_pseudo_cumack = 1; 3676 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3677 } 3678 } else { /* CMT is OFF */ 3679 #ifdef SCTP_FR_TO_ALTERNATE 3680 /* Can we find an alternate? */ 3681 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); 3682 #else 3683 /* 3684 * default behavior is to NOT retransmit 3685 * FR's to an alternate. Armando Caro's 3686 * paper details why. 3687 */ 3688 alt = tp1->whoTo; 3689 #endif 3690 } 3691 3692 tp1->rec.data.doing_fast_retransmit = 1; 3693 /* mark the sending seq for possible subsequent FR's */ 3694 /* 3695 * SCTP_PRINTF("Marking TSN for FR new value %x\n", 3696 * (uint32_t)tpi->rec.data.tsn); 3697 */ 3698 if (TAILQ_EMPTY(&asoc->send_queue)) { 3699 /* 3700 * If the queue of send is empty then its 3701 * the next sequence number that will be 3702 * assigned so we subtract one from this to 3703 * get the one we last sent. 3704 */ 3705 tp1->rec.data.fast_retran_tsn = sending_seq; 3706 } else { 3707 /* 3708 * If there are chunks on the send queue 3709 * (unsent data that has made it from the 3710 * stream queues but not out the door, we 3711 * take the first one (which will have the 3712 * lowest TSN) and subtract one to get the 3713 * one we last sent. 3714 */ 3715 struct sctp_tmit_chunk *ttt; 3716 3717 ttt = TAILQ_FIRST(&asoc->send_queue); 3718 tp1->rec.data.fast_retran_tsn = 3719 ttt->rec.data.tsn; 3720 } 3721 3722 if (tp1->do_rtt) { 3723 /* 3724 * this guy had a RTO calculation pending on 3725 * it, cancel it 3726 */ 3727 if ((tp1->whoTo != NULL) && 3728 (tp1->whoTo->rto_needed == 0)) { 3729 tp1->whoTo->rto_needed = 1; 3730 } 3731 tp1->do_rtt = 0; 3732 } 3733 if (alt != tp1->whoTo) { 3734 /* yes, there is an alternate. */ 3735 sctp_free_remote_addr(tp1->whoTo); 3736 /* sa_ignore FREED_MEMORY */ 3737 tp1->whoTo = alt; 3738 atomic_add_int(&alt->ref_count, 1); 3739 } 3740 } 3741 } 3742 } 3743 3744 struct sctp_tmit_chunk * 3745 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb, 3746 struct sctp_association *asoc) 3747 { 3748 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL; 3749 struct timeval now; 3750 int now_filled = 0; 3751 3752 if (asoc->prsctp_supported == 0) { 3753 return (NULL); 3754 } 3755 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 3756 if (tp1->sent != SCTP_FORWARD_TSN_SKIP && 3757 tp1->sent != SCTP_DATAGRAM_RESEND && 3758 tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 3759 /* no chance to advance, out of here */ 3760 break; 3761 } 3762 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 3763 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || 3764 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { 3765 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 3766 asoc->advanced_peer_ack_point, 3767 tp1->rec.data.tsn, 0, 0); 3768 } 3769 } 3770 if (!PR_SCTP_ENABLED(tp1->flags)) { 3771 /* 3772 * We can't fwd-tsn past any that are reliable aka 3773 * retransmitted until the asoc fails. 3774 */ 3775 break; 3776 } 3777 if (!now_filled) { 3778 (void)SCTP_GETTIME_TIMEVAL(&now); 3779 now_filled = 1; 3780 } 3781 /* 3782 * now we got a chunk which is marked for another 3783 * retransmission to a PR-stream but has run out its chances 3784 * already maybe OR has been marked to skip now. Can we skip 3785 * it if its a resend? 3786 */ 3787 if (tp1->sent == SCTP_DATAGRAM_RESEND && 3788 (PR_SCTP_TTL_ENABLED(tp1->flags))) { 3789 /* 3790 * Now is this one marked for resend and its time is 3791 * now up? 3792 */ 3793 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3794 /* Yes so drop it */ 3795 if (tp1->data) { 3796 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 3797 1, SCTP_SO_NOT_LOCKED); 3798 } 3799 } else { 3800 /* 3801 * No, we are done when hit one for resend 3802 * whos time as not expired. 3803 */ 3804 break; 3805 } 3806 } 3807 /* 3808 * Ok now if this chunk is marked to drop it we can clean up 3809 * the chunk, advance our peer ack point and we can check 3810 * the next chunk. 3811 */ 3812 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || 3813 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { 3814 /* advance PeerAckPoint goes forward */ 3815 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) { 3816 asoc->advanced_peer_ack_point = tp1->rec.data.tsn; 3817 a_adv = tp1; 3818 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) { 3819 /* No update but we do save the chk */ 3820 a_adv = tp1; 3821 } 3822 } else { 3823 /* 3824 * If it is still in RESEND we can advance no 3825 * further 3826 */ 3827 break; 3828 } 3829 } 3830 return (a_adv); 3831 } 3832 3833 static int 3834 sctp_fs_audit(struct sctp_association *asoc) 3835 { 3836 struct sctp_tmit_chunk *chk; 3837 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0; 3838 int ret; 3839 #ifndef INVARIANTS 3840 int entry_flight, entry_cnt; 3841 #endif 3842 3843 ret = 0; 3844 #ifndef INVARIANTS 3845 entry_flight = asoc->total_flight; 3846 entry_cnt = asoc->total_flight_count; 3847 #endif 3848 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt) 3849 return (0); 3850 3851 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 3852 if (chk->sent < SCTP_DATAGRAM_RESEND) { 3853 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n", 3854 chk->rec.data.tsn, 3855 chk->send_size, 3856 chk->snd_count); 3857 inflight++; 3858 } else if (chk->sent == SCTP_DATAGRAM_RESEND) { 3859 resend++; 3860 } else if (chk->sent < SCTP_DATAGRAM_ACKED) { 3861 inbetween++; 3862 } else if (chk->sent > SCTP_DATAGRAM_ACKED) { 3863 above++; 3864 } else { 3865 acked++; 3866 } 3867 } 3868 3869 if ((inflight > 0) || (inbetween > 0)) { 3870 #ifdef INVARIANTS 3871 panic("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d", 3872 inflight, inbetween, resend, above, acked); 3873 #else 3874 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n", 3875 entry_flight, entry_cnt); 3876 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n", 3877 inflight, inbetween, resend, above, acked); 3878 ret = 1; 3879 #endif 3880 } 3881 return (ret); 3882 } 3883 3884 static void 3885 sctp_window_probe_recovery(struct sctp_tcb *stcb, 3886 struct sctp_association *asoc, 3887 struct sctp_tmit_chunk *tp1) 3888 { 3889 tp1->window_probe = 0; 3890 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) { 3891 /* TSN's skipped we do NOT move back. */ 3892 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD, 3893 tp1->whoTo ? tp1->whoTo->flight_size : 0, 3894 tp1->book_size, 3895 (uint32_t)(uintptr_t)tp1->whoTo, 3896 tp1->rec.data.tsn); 3897 return; 3898 } 3899 /* First setup this by shrinking flight */ 3900 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3901 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3902 tp1); 3903 } 3904 sctp_flight_size_decrease(tp1); 3905 sctp_total_flight_decrease(stcb, tp1); 3906 /* Now mark for resend */ 3907 tp1->sent = SCTP_DATAGRAM_RESEND; 3908 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 3909 3910 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3911 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP, 3912 tp1->whoTo->flight_size, 3913 tp1->book_size, 3914 (uint32_t)(uintptr_t)tp1->whoTo, 3915 tp1->rec.data.tsn); 3916 } 3917 } 3918 3919 void 3920 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, 3921 uint32_t rwnd, int *abort_now, int ecne_seen) 3922 { 3923 struct sctp_nets *net; 3924 struct sctp_association *asoc; 3925 struct sctp_tmit_chunk *tp1, *tp2; 3926 uint32_t old_rwnd; 3927 int win_probe_recovery = 0; 3928 int win_probe_recovered = 0; 3929 int j, done_once = 0; 3930 int rto_ok = 1; 3931 uint32_t send_s; 3932 3933 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 3934 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack, 3935 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 3936 } 3937 SCTP_TCB_LOCK_ASSERT(stcb); 3938 #ifdef SCTP_ASOCLOG_OF_TSNS 3939 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack; 3940 stcb->asoc.cumack_log_at++; 3941 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 3942 stcb->asoc.cumack_log_at = 0; 3943 } 3944 #endif 3945 asoc = &stcb->asoc; 3946 old_rwnd = asoc->peers_rwnd; 3947 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) { 3948 /* old ack */ 3949 return; 3950 } else if (asoc->last_acked_seq == cumack) { 3951 /* Window update sack */ 3952 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 3953 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 3954 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 3955 /* SWS sender side engages */ 3956 asoc->peers_rwnd = 0; 3957 } 3958 if (asoc->peers_rwnd > old_rwnd) { 3959 goto again; 3960 } 3961 return; 3962 } 3963 3964 /* First setup for CC stuff */ 3965 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3966 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) { 3967 /* Drag along the window_tsn for cwr's */ 3968 net->cwr_window_tsn = cumack; 3969 } 3970 net->prev_cwnd = net->cwnd; 3971 net->net_ack = 0; 3972 net->net_ack2 = 0; 3973 3974 /* 3975 * CMT: Reset CUC and Fast recovery algo variables before 3976 * SACK processing 3977 */ 3978 net->new_pseudo_cumack = 0; 3979 net->will_exit_fast_recovery = 0; 3980 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 3981 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 3982 } 3983 } 3984 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 3985 tp1 = TAILQ_LAST(&asoc->sent_queue, 3986 sctpchunk_listhead); 3987 send_s = tp1->rec.data.tsn + 1; 3988 } else { 3989 send_s = asoc->sending_seq; 3990 } 3991 if (SCTP_TSN_GE(cumack, send_s)) { 3992 struct mbuf *op_err; 3993 char msg[SCTP_DIAG_INFO_LEN]; 3994 3995 *abort_now = 1; 3996 /* XXX */ 3997 SCTP_SNPRINTF(msg, sizeof(msg), 3998 "Cum ack %8.8x greater or equal than TSN %8.8x", 3999 cumack, send_s); 4000 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 4001 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 4002 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 4003 return; 4004 } 4005 asoc->this_sack_highest_gap = cumack; 4006 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4007 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4008 stcb->asoc.overall_error_count, 4009 0, 4010 SCTP_FROM_SCTP_INDATA, 4011 __LINE__); 4012 } 4013 stcb->asoc.overall_error_count = 0; 4014 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) { 4015 /* process the new consecutive TSN first */ 4016 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 4017 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) { 4018 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 4019 SCTP_PRINTF("Warning, an unsent is now acked?\n"); 4020 } 4021 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4022 /* 4023 * If it is less than ACKED, it is 4024 * now no-longer in flight. Higher 4025 * values may occur during marking 4026 */ 4027 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4028 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4029 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4030 tp1->whoTo->flight_size, 4031 tp1->book_size, 4032 (uint32_t)(uintptr_t)tp1->whoTo, 4033 tp1->rec.data.tsn); 4034 } 4035 sctp_flight_size_decrease(tp1); 4036 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 4037 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 4038 tp1); 4039 } 4040 /* sa_ignore NO_NULL_CHK */ 4041 sctp_total_flight_decrease(stcb, tp1); 4042 } 4043 tp1->whoTo->net_ack += tp1->send_size; 4044 if (tp1->snd_count < 2) { 4045 /* 4046 * True non-retransmitted 4047 * chunk 4048 */ 4049 tp1->whoTo->net_ack2 += 4050 tp1->send_size; 4051 4052 /* update RTO too? */ 4053 if (tp1->do_rtt) { 4054 if (rto_ok && 4055 sctp_calculate_rto(stcb, 4056 &stcb->asoc, 4057 tp1->whoTo, 4058 &tp1->sent_rcv_time, 4059 SCTP_RTT_FROM_DATA)) { 4060 rto_ok = 0; 4061 } 4062 if (tp1->whoTo->rto_needed == 0) { 4063 tp1->whoTo->rto_needed = 1; 4064 } 4065 tp1->do_rtt = 0; 4066 } 4067 } 4068 /* 4069 * CMT: CUCv2 algorithm. From the 4070 * cumack'd TSNs, for each TSN being 4071 * acked for the first time, set the 4072 * following variables for the 4073 * corresp destination. 4074 * new_pseudo_cumack will trigger a 4075 * cwnd update. 4076 * find_(rtx_)pseudo_cumack will 4077 * trigger search for the next 4078 * expected (rtx-)pseudo-cumack. 4079 */ 4080 tp1->whoTo->new_pseudo_cumack = 1; 4081 tp1->whoTo->find_pseudo_cumack = 1; 4082 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4083 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 4084 /* sa_ignore NO_NULL_CHK */ 4085 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); 4086 } 4087 } 4088 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4089 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4090 } 4091 if (tp1->rec.data.chunk_was_revoked) { 4092 /* deflate the cwnd */ 4093 tp1->whoTo->cwnd -= tp1->book_size; 4094 tp1->rec.data.chunk_was_revoked = 0; 4095 } 4096 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4097 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) { 4098 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--; 4099 #ifdef INVARIANTS 4100 } else { 4101 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); 4102 #endif 4103 } 4104 } 4105 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) && 4106 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && 4107 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) { 4108 asoc->trigger_reset = 1; 4109 } 4110 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4111 if (tp1->data) { 4112 /* sa_ignore NO_NULL_CHK */ 4113 sctp_free_bufspace(stcb, asoc, tp1, 1); 4114 sctp_m_freem(tp1->data); 4115 tp1->data = NULL; 4116 } 4117 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4118 sctp_log_sack(asoc->last_acked_seq, 4119 cumack, 4120 tp1->rec.data.tsn, 4121 0, 4122 0, 4123 SCTP_LOG_FREE_SENT); 4124 } 4125 asoc->sent_queue_cnt--; 4126 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 4127 } else { 4128 break; 4129 } 4130 } 4131 } 4132 /* sa_ignore NO_NULL_CHK */ 4133 if (stcb->sctp_socket) { 4134 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4135 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4136 /* sa_ignore NO_NULL_CHK */ 4137 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK); 4138 } 4139 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4140 } else { 4141 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4142 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK); 4143 } 4144 } 4145 4146 /* JRS - Use the congestion control given in the CC module */ 4147 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) { 4148 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4149 if (net->net_ack2 > 0) { 4150 /* 4151 * Karn's rule applies to clearing error 4152 * count, this is optional. 4153 */ 4154 net->error_count = 0; 4155 if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) { 4156 /* addr came good */ 4157 net->dest_state |= SCTP_ADDR_REACHABLE; 4158 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 4159 0, (void *)net, SCTP_SO_NOT_LOCKED); 4160 } 4161 if (net == stcb->asoc.primary_destination) { 4162 if (stcb->asoc.alternate) { 4163 /* 4164 * release the alternate, 4165 * primary is good 4166 */ 4167 sctp_free_remote_addr(stcb->asoc.alternate); 4168 stcb->asoc.alternate = NULL; 4169 } 4170 } 4171 if (net->dest_state & SCTP_ADDR_PF) { 4172 net->dest_state &= ~SCTP_ADDR_PF; 4173 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, 4174 stcb->sctp_ep, stcb, net, 4175 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); 4176 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 4177 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 4178 /* Done with this net */ 4179 net->net_ack = 0; 4180 } 4181 /* restore any doubled timers */ 4182 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 4183 if (net->RTO < stcb->asoc.minrto) { 4184 net->RTO = stcb->asoc.minrto; 4185 } 4186 if (net->RTO > stcb->asoc.maxrto) { 4187 net->RTO = stcb->asoc.maxrto; 4188 } 4189 } 4190 } 4191 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0); 4192 } 4193 asoc->last_acked_seq = cumack; 4194 4195 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4196 /* nothing left in-flight */ 4197 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4198 net->flight_size = 0; 4199 net->partial_bytes_acked = 0; 4200 } 4201 asoc->total_flight = 0; 4202 asoc->total_flight_count = 0; 4203 } 4204 4205 /* RWND update */ 4206 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 4207 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 4208 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4209 /* SWS sender side engages */ 4210 asoc->peers_rwnd = 0; 4211 } 4212 if (asoc->peers_rwnd > old_rwnd) { 4213 win_probe_recovery = 1; 4214 } 4215 /* Now assure a timer where data is queued at */ 4216 again: 4217 j = 0; 4218 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4219 if (win_probe_recovery && (net->window_probe)) { 4220 win_probe_recovered = 1; 4221 /* 4222 * Find first chunk that was used with window probe 4223 * and clear the sent 4224 */ 4225 /* sa_ignore FREED_MEMORY */ 4226 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4227 if (tp1->window_probe) { 4228 /* move back to data send queue */ 4229 sctp_window_probe_recovery(stcb, asoc, tp1); 4230 break; 4231 } 4232 } 4233 } 4234 if (net->flight_size) { 4235 j++; 4236 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net); 4237 if (net->window_probe) { 4238 net->window_probe = 0; 4239 } 4240 } else { 4241 if (net->window_probe) { 4242 /* 4243 * In window probes we must assure a timer 4244 * is still running there 4245 */ 4246 net->window_probe = 0; 4247 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4248 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net); 4249 } 4250 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4251 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4252 stcb, net, 4253 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27); 4254 } 4255 } 4256 } 4257 if ((j == 0) && 4258 (!TAILQ_EMPTY(&asoc->sent_queue)) && 4259 (asoc->sent_queue_retran_cnt == 0) && 4260 (win_probe_recovered == 0) && 4261 (done_once == 0)) { 4262 /* 4263 * huh, this should not happen unless all packets are 4264 * PR-SCTP and marked to skip of course. 4265 */ 4266 if (sctp_fs_audit(asoc)) { 4267 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4268 net->flight_size = 0; 4269 } 4270 asoc->total_flight = 0; 4271 asoc->total_flight_count = 0; 4272 asoc->sent_queue_retran_cnt = 0; 4273 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4274 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4275 sctp_flight_size_increase(tp1); 4276 sctp_total_flight_increase(stcb, tp1); 4277 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4278 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 4279 } 4280 } 4281 } 4282 done_once = 1; 4283 goto again; 4284 } 4285 /**********************************/ 4286 /* Now what about shutdown issues */ 4287 /**********************************/ 4288 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4289 /* nothing left on sendqueue.. consider done */ 4290 /* clean up */ 4291 if ((asoc->stream_queue_cnt == 1) && 4292 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4293 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 4294 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) { 4295 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT); 4296 } 4297 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4298 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 4299 (asoc->stream_queue_cnt == 1) && 4300 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 4301 struct mbuf *op_err; 4302 4303 *abort_now = 1; 4304 /* XXX */ 4305 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 4306 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28; 4307 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 4308 return; 4309 } 4310 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4311 (asoc->stream_queue_cnt == 0)) { 4312 struct sctp_nets *netp; 4313 4314 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4315 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4316 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4317 } 4318 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT); 4319 sctp_stop_timers_for_shutdown(stcb); 4320 if (asoc->alternate) { 4321 netp = asoc->alternate; 4322 } else { 4323 netp = asoc->primary_destination; 4324 } 4325 sctp_send_shutdown(stcb, netp); 4326 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4327 stcb->sctp_ep, stcb, netp); 4328 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4329 stcb->sctp_ep, stcb, NULL); 4330 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4331 (asoc->stream_queue_cnt == 0)) { 4332 struct sctp_nets *netp; 4333 4334 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4335 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT); 4336 sctp_stop_timers_for_shutdown(stcb); 4337 if (asoc->alternate) { 4338 netp = asoc->alternate; 4339 } else { 4340 netp = asoc->primary_destination; 4341 } 4342 sctp_send_shutdown_ack(stcb, netp); 4343 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4344 stcb->sctp_ep, stcb, netp); 4345 } 4346 } 4347 /*********************************************/ 4348 /* Here we perform PR-SCTP procedures */ 4349 /* (section 4.2) */ 4350 /*********************************************/ 4351 /* C1. update advancedPeerAckPoint */ 4352 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) { 4353 asoc->advanced_peer_ack_point = cumack; 4354 } 4355 /* PR-Sctp issues need to be addressed too */ 4356 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { 4357 struct sctp_tmit_chunk *lchk; 4358 uint32_t old_adv_peer_ack_point; 4359 4360 old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 4361 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 4362 /* C3. See if we need to send a Fwd-TSN */ 4363 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) { 4364 /* 4365 * ISSUE with ECN, see FWD-TSN processing. 4366 */ 4367 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 4368 send_forward_tsn(stcb, asoc); 4369 } else if (lchk) { 4370 /* try to FR fwd-tsn's that get lost too */ 4371 if (lchk->rec.data.fwd_tsn_cnt >= 3) { 4372 send_forward_tsn(stcb, asoc); 4373 } 4374 } 4375 } 4376 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) { 4377 if (lchk->whoTo != NULL) { 4378 break; 4379 } 4380 } 4381 if (lchk != NULL) { 4382 /* Assure a timer is up */ 4383 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4384 stcb->sctp_ep, stcb, lchk->whoTo); 4385 } 4386 } 4387 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 4388 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 4389 rwnd, 4390 stcb->asoc.peers_rwnd, 4391 stcb->asoc.total_flight, 4392 stcb->asoc.total_output_queue_size); 4393 } 4394 } 4395 4396 void 4397 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, 4398 struct sctp_tcb *stcb, 4399 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup, 4400 int *abort_now, uint8_t flags, 4401 uint32_t cum_ack, uint32_t rwnd, int ecne_seen) 4402 { 4403 struct sctp_association *asoc; 4404 struct sctp_tmit_chunk *tp1, *tp2; 4405 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack; 4406 uint16_t wake_him = 0; 4407 uint32_t send_s = 0; 4408 long j; 4409 int accum_moved = 0; 4410 int will_exit_fast_recovery = 0; 4411 uint32_t a_rwnd, old_rwnd; 4412 int win_probe_recovery = 0; 4413 int win_probe_recovered = 0; 4414 struct sctp_nets *net = NULL; 4415 int done_once; 4416 int rto_ok = 1; 4417 uint8_t reneged_all = 0; 4418 uint8_t cmt_dac_flag; 4419 4420 /* 4421 * we take any chance we can to service our queues since we cannot 4422 * get awoken when the socket is read from :< 4423 */ 4424 /* 4425 * Now perform the actual SACK handling: 1) Verify that it is not an 4426 * old sack, if so discard. 2) If there is nothing left in the send 4427 * queue (cum-ack is equal to last acked) then you have a duplicate 4428 * too, update any rwnd change and verify no timers are running. 4429 * then return. 3) Process any new consecutive data i.e. cum-ack 4430 * moved process these first and note that it moved. 4) Process any 4431 * sack blocks. 5) Drop any acked from the queue. 6) Check for any 4432 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left, 4433 * sync up flightsizes and things, stop all timers and also check 4434 * for shutdown_pending state. If so then go ahead and send off the 4435 * shutdown. If in shutdown recv, send off the shutdown-ack and 4436 * start that timer, Ret. 9) Strike any non-acked things and do FR 4437 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp 4438 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK 4439 * if in shutdown_recv state. 4440 */ 4441 SCTP_TCB_LOCK_ASSERT(stcb); 4442 /* CMT DAC algo */ 4443 this_sack_lowest_newack = 0; 4444 SCTP_STAT_INCR(sctps_slowpath_sack); 4445 last_tsn = cum_ack; 4446 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC; 4447 #ifdef SCTP_ASOCLOG_OF_TSNS 4448 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack; 4449 stcb->asoc.cumack_log_at++; 4450 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 4451 stcb->asoc.cumack_log_at = 0; 4452 } 4453 #endif 4454 a_rwnd = rwnd; 4455 4456 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 4457 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack, 4458 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 4459 } 4460 4461 old_rwnd = stcb->asoc.peers_rwnd; 4462 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4463 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4464 stcb->asoc.overall_error_count, 4465 0, 4466 SCTP_FROM_SCTP_INDATA, 4467 __LINE__); 4468 } 4469 stcb->asoc.overall_error_count = 0; 4470 asoc = &stcb->asoc; 4471 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4472 sctp_log_sack(asoc->last_acked_seq, 4473 cum_ack, 4474 0, 4475 num_seg, 4476 num_dup, 4477 SCTP_LOG_NEW_SACK); 4478 } 4479 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) { 4480 uint16_t i; 4481 uint32_t *dupdata, dblock; 4482 4483 for (i = 0; i < num_dup; i++) { 4484 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t), 4485 sizeof(uint32_t), (uint8_t *)&dblock); 4486 if (dupdata == NULL) { 4487 break; 4488 } 4489 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); 4490 } 4491 } 4492 /* reality check */ 4493 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 4494 tp1 = TAILQ_LAST(&asoc->sent_queue, 4495 sctpchunk_listhead); 4496 send_s = tp1->rec.data.tsn + 1; 4497 } else { 4498 tp1 = NULL; 4499 send_s = asoc->sending_seq; 4500 } 4501 if (SCTP_TSN_GE(cum_ack, send_s)) { 4502 struct mbuf *op_err; 4503 char msg[SCTP_DIAG_INFO_LEN]; 4504 4505 /* 4506 * no way, we have not even sent this TSN out yet. Peer is 4507 * hopelessly messed up with us. 4508 */ 4509 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n", 4510 cum_ack, send_s); 4511 if (tp1) { 4512 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n", 4513 tp1->rec.data.tsn, (void *)tp1); 4514 } 4515 hopeless_peer: 4516 *abort_now = 1; 4517 /* XXX */ 4518 SCTP_SNPRINTF(msg, sizeof(msg), 4519 "Cum ack %8.8x greater or equal than TSN %8.8x", 4520 cum_ack, send_s); 4521 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 4522 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_29; 4523 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 4524 return; 4525 } 4526 /**********************/ 4527 /* 1) check the range */ 4528 /**********************/ 4529 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) { 4530 /* acking something behind */ 4531 return; 4532 } 4533 4534 /* update the Rwnd of the peer */ 4535 if (TAILQ_EMPTY(&asoc->sent_queue) && 4536 TAILQ_EMPTY(&asoc->send_queue) && 4537 (asoc->stream_queue_cnt == 0)) { 4538 /* nothing left on send/sent and strmq */ 4539 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4540 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4541 asoc->peers_rwnd, 0, 0, a_rwnd); 4542 } 4543 asoc->peers_rwnd = a_rwnd; 4544 if (asoc->sent_queue_retran_cnt) { 4545 asoc->sent_queue_retran_cnt = 0; 4546 } 4547 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4548 /* SWS sender side engages */ 4549 asoc->peers_rwnd = 0; 4550 } 4551 /* stop any timers */ 4552 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4553 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4554 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30); 4555 net->partial_bytes_acked = 0; 4556 net->flight_size = 0; 4557 } 4558 asoc->total_flight = 0; 4559 asoc->total_flight_count = 0; 4560 return; 4561 } 4562 /* 4563 * We init netAckSz and netAckSz2 to 0. These are used to track 2 4564 * things. The total byte count acked is tracked in netAckSz AND 4565 * netAck2 is used to track the total bytes acked that are un- 4566 * ambiguous and were never retransmitted. We track these on a per 4567 * destination address basis. 4568 */ 4569 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4570 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) { 4571 /* Drag along the window_tsn for cwr's */ 4572 net->cwr_window_tsn = cum_ack; 4573 } 4574 net->prev_cwnd = net->cwnd; 4575 net->net_ack = 0; 4576 net->net_ack2 = 0; 4577 4578 /* 4579 * CMT: Reset CUC and Fast recovery algo variables before 4580 * SACK processing 4581 */ 4582 net->new_pseudo_cumack = 0; 4583 net->will_exit_fast_recovery = 0; 4584 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 4585 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 4586 } 4587 4588 /* 4589 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has 4590 * to be greater than the cumack. Also reset saw_newack to 0 4591 * for all dests. 4592 */ 4593 net->saw_newack = 0; 4594 net->this_sack_highest_newack = last_tsn; 4595 } 4596 /* process the new consecutive TSN first */ 4597 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4598 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) { 4599 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 4600 accum_moved = 1; 4601 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4602 /* 4603 * If it is less than ACKED, it is 4604 * now no-longer in flight. Higher 4605 * values may occur during marking 4606 */ 4607 if ((tp1->whoTo->dest_state & 4608 SCTP_ADDR_UNCONFIRMED) && 4609 (tp1->snd_count < 2)) { 4610 /* 4611 * If there was no retran 4612 * and the address is 4613 * un-confirmed and we sent 4614 * there and are now 4615 * sacked.. its confirmed, 4616 * mark it so. 4617 */ 4618 tp1->whoTo->dest_state &= 4619 ~SCTP_ADDR_UNCONFIRMED; 4620 } 4621 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4622 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4623 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4624 tp1->whoTo->flight_size, 4625 tp1->book_size, 4626 (uint32_t)(uintptr_t)tp1->whoTo, 4627 tp1->rec.data.tsn); 4628 } 4629 sctp_flight_size_decrease(tp1); 4630 sctp_total_flight_decrease(stcb, tp1); 4631 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 4632 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 4633 tp1); 4634 } 4635 } 4636 tp1->whoTo->net_ack += tp1->send_size; 4637 4638 /* CMT SFR and DAC algos */ 4639 this_sack_lowest_newack = tp1->rec.data.tsn; 4640 tp1->whoTo->saw_newack = 1; 4641 4642 if (tp1->snd_count < 2) { 4643 /* 4644 * True non-retransmitted 4645 * chunk 4646 */ 4647 tp1->whoTo->net_ack2 += 4648 tp1->send_size; 4649 4650 /* update RTO too? */ 4651 if (tp1->do_rtt) { 4652 if (rto_ok && 4653 sctp_calculate_rto(stcb, 4654 &stcb->asoc, 4655 tp1->whoTo, 4656 &tp1->sent_rcv_time, 4657 SCTP_RTT_FROM_DATA)) { 4658 rto_ok = 0; 4659 } 4660 if (tp1->whoTo->rto_needed == 0) { 4661 tp1->whoTo->rto_needed = 1; 4662 } 4663 tp1->do_rtt = 0; 4664 } 4665 } 4666 /* 4667 * CMT: CUCv2 algorithm. From the 4668 * cumack'd TSNs, for each TSN being 4669 * acked for the first time, set the 4670 * following variables for the 4671 * corresp destination. 4672 * new_pseudo_cumack will trigger a 4673 * cwnd update. 4674 * find_(rtx_)pseudo_cumack will 4675 * trigger search for the next 4676 * expected (rtx-)pseudo-cumack. 4677 */ 4678 tp1->whoTo->new_pseudo_cumack = 1; 4679 tp1->whoTo->find_pseudo_cumack = 1; 4680 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4681 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4682 sctp_log_sack(asoc->last_acked_seq, 4683 cum_ack, 4684 tp1->rec.data.tsn, 4685 0, 4686 0, 4687 SCTP_LOG_TSN_ACKED); 4688 } 4689 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 4690 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); 4691 } 4692 } 4693 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4694 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4695 #ifdef SCTP_AUDITING_ENABLED 4696 sctp_audit_log(0xB3, 4697 (asoc->sent_queue_retran_cnt & 0x000000ff)); 4698 #endif 4699 } 4700 if (tp1->rec.data.chunk_was_revoked) { 4701 /* deflate the cwnd */ 4702 tp1->whoTo->cwnd -= tp1->book_size; 4703 tp1->rec.data.chunk_was_revoked = 0; 4704 } 4705 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4706 tp1->sent = SCTP_DATAGRAM_ACKED; 4707 } 4708 } 4709 } else { 4710 break; 4711 } 4712 } 4713 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; 4714 /* always set this up to cum-ack */ 4715 asoc->this_sack_highest_gap = last_tsn; 4716 4717 if ((num_seg > 0) || (num_nr_seg > 0)) { 4718 /* 4719 * thisSackHighestGap will increase while handling NEW 4720 * segments this_sack_highest_newack will increase while 4721 * handling NEWLY ACKED chunks. this_sack_lowest_newack is 4722 * used for CMT DAC algo. saw_newack will also change. 4723 */ 4724 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked, 4725 &biggest_tsn_newly_acked, &this_sack_lowest_newack, 4726 num_seg, num_nr_seg, &rto_ok)) { 4727 wake_him++; 4728 } 4729 /* 4730 * validate the biggest_tsn_acked in the gap acks if strict 4731 * adherence is wanted. 4732 */ 4733 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) { 4734 /* 4735 * peer is either confused or we are under attack. 4736 * We must abort. 4737 */ 4738 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n", 4739 biggest_tsn_acked, send_s); 4740 goto hopeless_peer; 4741 } 4742 } 4743 /*******************************************/ 4744 /* cancel ALL T3-send timer if accum moved */ 4745 /*******************************************/ 4746 if (asoc->sctp_cmt_on_off > 0) { 4747 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4748 if (net->new_pseudo_cumack) 4749 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4750 stcb, net, 4751 SCTP_FROM_SCTP_INDATA + SCTP_LOC_31); 4752 } 4753 } else { 4754 if (accum_moved) { 4755 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4756 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4757 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32); 4758 } 4759 } 4760 } 4761 /********************************************/ 4762 /* drop the acked chunks from the sentqueue */ 4763 /********************************************/ 4764 asoc->last_acked_seq = cum_ack; 4765 4766 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 4767 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) { 4768 break; 4769 } 4770 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4771 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) { 4772 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--; 4773 #ifdef INVARIANTS 4774 } else { 4775 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); 4776 #endif 4777 } 4778 } 4779 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) && 4780 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && 4781 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) { 4782 asoc->trigger_reset = 1; 4783 } 4784 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4785 if (PR_SCTP_ENABLED(tp1->flags)) { 4786 if (asoc->pr_sctp_cnt != 0) 4787 asoc->pr_sctp_cnt--; 4788 } 4789 asoc->sent_queue_cnt--; 4790 if (tp1->data) { 4791 /* sa_ignore NO_NULL_CHK */ 4792 sctp_free_bufspace(stcb, asoc, tp1, 1); 4793 sctp_m_freem(tp1->data); 4794 tp1->data = NULL; 4795 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) { 4796 asoc->sent_queue_cnt_removeable--; 4797 } 4798 } 4799 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4800 sctp_log_sack(asoc->last_acked_seq, 4801 cum_ack, 4802 tp1->rec.data.tsn, 4803 0, 4804 0, 4805 SCTP_LOG_FREE_SENT); 4806 } 4807 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 4808 wake_him++; 4809 } 4810 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) { 4811 #ifdef INVARIANTS 4812 panic("Warning flight size is positive and should be 0"); 4813 #else 4814 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n", 4815 asoc->total_flight); 4816 #endif 4817 asoc->total_flight = 0; 4818 } 4819 4820 /* sa_ignore NO_NULL_CHK */ 4821 if ((wake_him) && (stcb->sctp_socket)) { 4822 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4823 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4824 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK); 4825 } 4826 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4827 } else { 4828 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4829 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK); 4830 } 4831 } 4832 4833 if (asoc->fast_retran_loss_recovery && accum_moved) { 4834 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) { 4835 /* Setup so we will exit RFC2582 fast recovery */ 4836 will_exit_fast_recovery = 1; 4837 } 4838 } 4839 /* 4840 * Check for revoked fragments: 4841 * 4842 * if Previous sack - Had no frags then we can't have any revoked if 4843 * Previous sack - Had frag's then - If we now have frags aka 4844 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked 4845 * some of them. else - The peer revoked all ACKED fragments, since 4846 * we had some before and now we have NONE. 4847 */ 4848 4849 if (num_seg) { 4850 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked); 4851 asoc->saw_sack_with_frags = 1; 4852 } else if (asoc->saw_sack_with_frags) { 4853 int cnt_revoked = 0; 4854 4855 /* Peer revoked all dg's marked or acked */ 4856 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4857 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 4858 tp1->sent = SCTP_DATAGRAM_SENT; 4859 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4860 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 4861 tp1->whoTo->flight_size, 4862 tp1->book_size, 4863 (uint32_t)(uintptr_t)tp1->whoTo, 4864 tp1->rec.data.tsn); 4865 } 4866 sctp_flight_size_increase(tp1); 4867 sctp_total_flight_increase(stcb, tp1); 4868 tp1->rec.data.chunk_was_revoked = 1; 4869 /* 4870 * To ensure that this increase in 4871 * flightsize, which is artificial, does not 4872 * throttle the sender, we also increase the 4873 * cwnd artificially. 4874 */ 4875 tp1->whoTo->cwnd += tp1->book_size; 4876 cnt_revoked++; 4877 } 4878 } 4879 if (cnt_revoked) { 4880 reneged_all = 1; 4881 } 4882 asoc->saw_sack_with_frags = 0; 4883 } 4884 if (num_nr_seg > 0) 4885 asoc->saw_sack_with_nr_frags = 1; 4886 else 4887 asoc->saw_sack_with_nr_frags = 0; 4888 4889 /* JRS - Use the congestion control given in the CC module */ 4890 if (ecne_seen == 0) { 4891 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4892 if (net->net_ack2 > 0) { 4893 /* 4894 * Karn's rule applies to clearing error 4895 * count, this is optional. 4896 */ 4897 net->error_count = 0; 4898 if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) { 4899 /* addr came good */ 4900 net->dest_state |= SCTP_ADDR_REACHABLE; 4901 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 4902 0, (void *)net, SCTP_SO_NOT_LOCKED); 4903 } 4904 4905 if (net == stcb->asoc.primary_destination) { 4906 if (stcb->asoc.alternate) { 4907 /* 4908 * release the alternate, 4909 * primary is good 4910 */ 4911 sctp_free_remote_addr(stcb->asoc.alternate); 4912 stcb->asoc.alternate = NULL; 4913 } 4914 } 4915 4916 if (net->dest_state & SCTP_ADDR_PF) { 4917 net->dest_state &= ~SCTP_ADDR_PF; 4918 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, 4919 stcb->sctp_ep, stcb, net, 4920 SCTP_FROM_SCTP_INDATA + SCTP_LOC_33); 4921 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 4922 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 4923 /* Done with this net */ 4924 net->net_ack = 0; 4925 } 4926 /* restore any doubled timers */ 4927 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 4928 if (net->RTO < stcb->asoc.minrto) { 4929 net->RTO = stcb->asoc.minrto; 4930 } 4931 if (net->RTO > stcb->asoc.maxrto) { 4932 net->RTO = stcb->asoc.maxrto; 4933 } 4934 } 4935 } 4936 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery); 4937 } 4938 4939 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4940 /* nothing left in-flight */ 4941 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4942 /* stop all timers */ 4943 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4944 stcb, net, 4945 SCTP_FROM_SCTP_INDATA + SCTP_LOC_34); 4946 net->flight_size = 0; 4947 net->partial_bytes_acked = 0; 4948 } 4949 asoc->total_flight = 0; 4950 asoc->total_flight_count = 0; 4951 } 4952 4953 /**********************************/ 4954 /* Now what about shutdown issues */ 4955 /**********************************/ 4956 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4957 /* nothing left on sendqueue.. consider done */ 4958 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4959 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4960 asoc->peers_rwnd, 0, 0, a_rwnd); 4961 } 4962 asoc->peers_rwnd = a_rwnd; 4963 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4964 /* SWS sender side engages */ 4965 asoc->peers_rwnd = 0; 4966 } 4967 /* clean up */ 4968 if ((asoc->stream_queue_cnt == 1) && 4969 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4970 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 4971 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) { 4972 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT); 4973 } 4974 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4975 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 4976 (asoc->stream_queue_cnt == 1) && 4977 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 4978 struct mbuf *op_err; 4979 4980 *abort_now = 1; 4981 /* XXX */ 4982 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 4983 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35; 4984 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 4985 return; 4986 } 4987 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4988 (asoc->stream_queue_cnt == 0)) { 4989 struct sctp_nets *netp; 4990 4991 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4992 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4993 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4994 } 4995 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT); 4996 sctp_stop_timers_for_shutdown(stcb); 4997 if (asoc->alternate) { 4998 netp = asoc->alternate; 4999 } else { 5000 netp = asoc->primary_destination; 5001 } 5002 sctp_send_shutdown(stcb, netp); 5003 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 5004 stcb->sctp_ep, stcb, netp); 5005 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 5006 stcb->sctp_ep, stcb, NULL); 5007 return; 5008 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) && 5009 (asoc->stream_queue_cnt == 0)) { 5010 struct sctp_nets *netp; 5011 5012 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 5013 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT); 5014 sctp_stop_timers_for_shutdown(stcb); 5015 if (asoc->alternate) { 5016 netp = asoc->alternate; 5017 } else { 5018 netp = asoc->primary_destination; 5019 } 5020 sctp_send_shutdown_ack(stcb, netp); 5021 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 5022 stcb->sctp_ep, stcb, netp); 5023 return; 5024 } 5025 } 5026 /* 5027 * Now here we are going to recycle net_ack for a different use... 5028 * HEADS UP. 5029 */ 5030 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5031 net->net_ack = 0; 5032 } 5033 5034 /* 5035 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking 5036 * to be done. Setting this_sack_lowest_newack to the cum_ack will 5037 * automatically ensure that. 5038 */ 5039 if ((asoc->sctp_cmt_on_off > 0) && 5040 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && 5041 (cmt_dac_flag == 0)) { 5042 this_sack_lowest_newack = cum_ack; 5043 } 5044 if ((num_seg > 0) || (num_nr_seg > 0)) { 5045 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, 5046 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved); 5047 } 5048 /* JRS - Use the congestion control given in the CC module */ 5049 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc); 5050 5051 /* Now are we exiting loss recovery ? */ 5052 if (will_exit_fast_recovery) { 5053 /* Ok, we must exit fast recovery */ 5054 asoc->fast_retran_loss_recovery = 0; 5055 } 5056 if ((asoc->sat_t3_loss_recovery) && 5057 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) { 5058 /* end satellite t3 loss recovery */ 5059 asoc->sat_t3_loss_recovery = 0; 5060 } 5061 /* 5062 * CMT Fast recovery 5063 */ 5064 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5065 if (net->will_exit_fast_recovery) { 5066 /* Ok, we must exit fast recovery */ 5067 net->fast_retran_loss_recovery = 0; 5068 } 5069 } 5070 5071 /* Adjust and set the new rwnd value */ 5072 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 5073 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 5074 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd); 5075 } 5076 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd, 5077 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 5078 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 5079 /* SWS sender side engages */ 5080 asoc->peers_rwnd = 0; 5081 } 5082 if (asoc->peers_rwnd > old_rwnd) { 5083 win_probe_recovery = 1; 5084 } 5085 5086 /* 5087 * Now we must setup so we have a timer up for anyone with 5088 * outstanding data. 5089 */ 5090 done_once = 0; 5091 again: 5092 j = 0; 5093 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5094 if (win_probe_recovery && (net->window_probe)) { 5095 win_probe_recovered = 1; 5096 /*- 5097 * Find first chunk that was used with 5098 * window probe and clear the event. Put 5099 * it back into the send queue as if has 5100 * not been sent. 5101 */ 5102 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5103 if (tp1->window_probe) { 5104 sctp_window_probe_recovery(stcb, asoc, tp1); 5105 break; 5106 } 5107 } 5108 } 5109 if (net->flight_size) { 5110 j++; 5111 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5112 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5113 stcb->sctp_ep, stcb, net); 5114 } 5115 if (net->window_probe) { 5116 net->window_probe = 0; 5117 } 5118 } else { 5119 if (net->window_probe) { 5120 /* 5121 * In window probes we must assure a timer 5122 * is still running there 5123 */ 5124 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5125 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5126 stcb->sctp_ep, stcb, net); 5127 } 5128 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5129 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 5130 stcb, net, 5131 SCTP_FROM_SCTP_INDATA + SCTP_LOC_36); 5132 } 5133 } 5134 } 5135 if ((j == 0) && 5136 (!TAILQ_EMPTY(&asoc->sent_queue)) && 5137 (asoc->sent_queue_retran_cnt == 0) && 5138 (win_probe_recovered == 0) && 5139 (done_once == 0)) { 5140 /* 5141 * huh, this should not happen unless all packets are 5142 * PR-SCTP and marked to skip of course. 5143 */ 5144 if (sctp_fs_audit(asoc)) { 5145 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5146 net->flight_size = 0; 5147 } 5148 asoc->total_flight = 0; 5149 asoc->total_flight_count = 0; 5150 asoc->sent_queue_retran_cnt = 0; 5151 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5152 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5153 sctp_flight_size_increase(tp1); 5154 sctp_total_flight_increase(stcb, tp1); 5155 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 5156 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 5157 } 5158 } 5159 } 5160 done_once = 1; 5161 goto again; 5162 } 5163 /*********************************************/ 5164 /* Here we perform PR-SCTP procedures */ 5165 /* (section 4.2) */ 5166 /*********************************************/ 5167 /* C1. update advancedPeerAckPoint */ 5168 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) { 5169 asoc->advanced_peer_ack_point = cum_ack; 5170 } 5171 /* C2. try to further move advancedPeerAckPoint ahead */ 5172 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { 5173 struct sctp_tmit_chunk *lchk; 5174 uint32_t old_adv_peer_ack_point; 5175 5176 old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 5177 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 5178 /* C3. See if we need to send a Fwd-TSN */ 5179 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) { 5180 /* 5181 * ISSUE with ECN, see FWD-TSN processing. 5182 */ 5183 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 5184 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 5185 0xee, cum_ack, asoc->advanced_peer_ack_point, 5186 old_adv_peer_ack_point); 5187 } 5188 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 5189 send_forward_tsn(stcb, asoc); 5190 } else if (lchk) { 5191 /* try to FR fwd-tsn's that get lost too */ 5192 if (lchk->rec.data.fwd_tsn_cnt >= 3) { 5193 send_forward_tsn(stcb, asoc); 5194 } 5195 } 5196 } 5197 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) { 5198 if (lchk->whoTo != NULL) { 5199 break; 5200 } 5201 } 5202 if (lchk != NULL) { 5203 /* Assure a timer is up */ 5204 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5205 stcb->sctp_ep, stcb, lchk->whoTo); 5206 } 5207 } 5208 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 5209 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 5210 a_rwnd, 5211 stcb->asoc.peers_rwnd, 5212 stcb->asoc.total_flight, 5213 stcb->asoc.total_output_queue_size); 5214 } 5215 } 5216 5217 void 5218 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag) 5219 { 5220 /* Copy cum-ack */ 5221 uint32_t cum_ack, a_rwnd; 5222 5223 cum_ack = ntohl(cp->cumulative_tsn_ack); 5224 /* Arrange so a_rwnd does NOT change */ 5225 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight; 5226 5227 /* Now call the express sack handling */ 5228 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0); 5229 } 5230 5231 static void 5232 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, 5233 struct sctp_stream_in *strmin) 5234 { 5235 struct sctp_queued_to_read *control, *ncontrol; 5236 struct sctp_association *asoc; 5237 uint32_t mid; 5238 int need_reasm_check = 0; 5239 5240 asoc = &stcb->asoc; 5241 mid = strmin->last_mid_delivered; 5242 /* 5243 * First deliver anything prior to and including the stream no that 5244 * came in. 5245 */ 5246 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) { 5247 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) { 5248 /* this is deliverable now */ 5249 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 5250 if (control->on_strm_q) { 5251 if (control->on_strm_q == SCTP_ON_ORDERED) { 5252 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm); 5253 } else if (control->on_strm_q == SCTP_ON_UNORDERED) { 5254 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm); 5255 #ifdef INVARIANTS 5256 } else { 5257 panic("strmin: %p ctl: %p unknown %d", 5258 strmin, control, control->on_strm_q); 5259 #endif 5260 } 5261 control->on_strm_q = 0; 5262 } 5263 /* subtract pending on streams */ 5264 if (asoc->size_on_all_streams >= control->length) { 5265 asoc->size_on_all_streams -= control->length; 5266 } else { 5267 #ifdef INVARIANTS 5268 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 5269 #else 5270 asoc->size_on_all_streams = 0; 5271 #endif 5272 } 5273 sctp_ucount_decr(asoc->cnt_on_all_streams); 5274 /* deliver it to at least the delivery-q */ 5275 if (stcb->sctp_socket) { 5276 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 5277 sctp_add_to_readq(stcb->sctp_ep, stcb, 5278 control, 5279 &stcb->sctp_socket->so_rcv, 5280 1, SCTP_READ_LOCK_HELD, 5281 SCTP_SO_NOT_LOCKED); 5282 } 5283 } else { 5284 /* Its a fragmented message */ 5285 if (control->first_frag_seen) { 5286 /* 5287 * Make it so this is next to 5288 * deliver, we restore later 5289 */ 5290 strmin->last_mid_delivered = control->mid - 1; 5291 need_reasm_check = 1; 5292 break; 5293 } 5294 } 5295 } else { 5296 /* no more delivery now. */ 5297 break; 5298 } 5299 } 5300 if (need_reasm_check) { 5301 int ret; 5302 5303 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD); 5304 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) { 5305 /* Restore the next to deliver unless we are ahead */ 5306 strmin->last_mid_delivered = mid; 5307 } 5308 if (ret == 0) { 5309 /* Left the front Partial one on */ 5310 return; 5311 } 5312 need_reasm_check = 0; 5313 } 5314 /* 5315 * now we must deliver things in queue the normal way if any are 5316 * now ready. 5317 */ 5318 mid = strmin->last_mid_delivered + 1; 5319 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) { 5320 if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) { 5321 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 5322 /* this is deliverable now */ 5323 if (control->on_strm_q) { 5324 if (control->on_strm_q == SCTP_ON_ORDERED) { 5325 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm); 5326 } else if (control->on_strm_q == SCTP_ON_UNORDERED) { 5327 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm); 5328 #ifdef INVARIANTS 5329 } else { 5330 panic("strmin: %p ctl: %p unknown %d", 5331 strmin, control, control->on_strm_q); 5332 #endif 5333 } 5334 control->on_strm_q = 0; 5335 } 5336 /* subtract pending on streams */ 5337 if (asoc->size_on_all_streams >= control->length) { 5338 asoc->size_on_all_streams -= control->length; 5339 } else { 5340 #ifdef INVARIANTS 5341 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 5342 #else 5343 asoc->size_on_all_streams = 0; 5344 #endif 5345 } 5346 sctp_ucount_decr(asoc->cnt_on_all_streams); 5347 /* deliver it to at least the delivery-q */ 5348 strmin->last_mid_delivered = control->mid; 5349 if (stcb->sctp_socket) { 5350 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 5351 sctp_add_to_readq(stcb->sctp_ep, stcb, 5352 control, 5353 &stcb->sctp_socket->so_rcv, 1, 5354 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED); 5355 } 5356 mid = strmin->last_mid_delivered + 1; 5357 } else { 5358 /* Its a fragmented message */ 5359 if (control->first_frag_seen) { 5360 /* 5361 * Make it so this is next to 5362 * deliver 5363 */ 5364 strmin->last_mid_delivered = control->mid - 1; 5365 need_reasm_check = 1; 5366 break; 5367 } 5368 } 5369 } else { 5370 break; 5371 } 5372 } 5373 if (need_reasm_check) { 5374 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD); 5375 } 5376 } 5377 5378 static void 5379 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb, 5380 struct sctp_association *asoc, struct sctp_stream_in *strm, 5381 struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn) 5382 { 5383 struct sctp_tmit_chunk *chk, *nchk; 5384 5385 /* 5386 * For now large messages held on the stream reasm that are complete 5387 * will be tossed too. We could in theory do more work to spin 5388 * through and stop after dumping one msg aka seeing the start of a 5389 * new msg at the head, and call the delivery function... to see if 5390 * it can be delivered... But for now we just dump everything on the 5391 * queue. 5392 */ 5393 if (!asoc->idata_supported && !ordered && 5394 control->first_frag_seen && 5395 SCTP_TSN_GT(control->fsn_included, cumtsn)) { 5396 return; 5397 } 5398 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) { 5399 /* Purge hanging chunks */ 5400 if (!asoc->idata_supported && !ordered) { 5401 if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) { 5402 break; 5403 } 5404 } 5405 TAILQ_REMOVE(&control->reasm, chk, sctp_next); 5406 if (asoc->size_on_reasm_queue >= chk->send_size) { 5407 asoc->size_on_reasm_queue -= chk->send_size; 5408 } else { 5409 #ifdef INVARIANTS 5410 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size); 5411 #else 5412 asoc->size_on_reasm_queue = 0; 5413 #endif 5414 } 5415 sctp_ucount_decr(asoc->cnt_on_reasm_queue); 5416 if (chk->data) { 5417 sctp_m_freem(chk->data); 5418 chk->data = NULL; 5419 } 5420 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 5421 } 5422 if (!TAILQ_EMPTY(&control->reasm)) { 5423 /* This has to be old data, unordered */ 5424 if (control->data) { 5425 sctp_m_freem(control->data); 5426 control->data = NULL; 5427 } 5428 sctp_reset_a_control(control, stcb->sctp_ep, cumtsn); 5429 chk = TAILQ_FIRST(&control->reasm); 5430 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 5431 TAILQ_REMOVE(&control->reasm, chk, sctp_next); 5432 sctp_add_chk_to_control(control, strm, stcb, asoc, 5433 chk, SCTP_READ_LOCK_HELD); 5434 } 5435 sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD); 5436 return; 5437 } 5438 if (control->on_strm_q == SCTP_ON_ORDERED) { 5439 TAILQ_REMOVE(&strm->inqueue, control, next_instrm); 5440 if (asoc->size_on_all_streams >= control->length) { 5441 asoc->size_on_all_streams -= control->length; 5442 } else { 5443 #ifdef INVARIANTS 5444 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 5445 #else 5446 asoc->size_on_all_streams = 0; 5447 #endif 5448 } 5449 sctp_ucount_decr(asoc->cnt_on_all_streams); 5450 control->on_strm_q = 0; 5451 } else if (control->on_strm_q == SCTP_ON_UNORDERED) { 5452 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); 5453 control->on_strm_q = 0; 5454 #ifdef INVARIANTS 5455 } else if (control->on_strm_q) { 5456 panic("strm: %p ctl: %p unknown %d", 5457 strm, control, control->on_strm_q); 5458 #endif 5459 } 5460 control->on_strm_q = 0; 5461 if (control->on_read_q == 0) { 5462 sctp_free_remote_addr(control->whoFrom); 5463 if (control->data) { 5464 sctp_m_freem(control->data); 5465 control->data = NULL; 5466 } 5467 sctp_free_a_readq(stcb, control); 5468 } 5469 } 5470 5471 void 5472 sctp_handle_forward_tsn(struct sctp_tcb *stcb, 5473 struct sctp_forward_tsn_chunk *fwd, 5474 int *abort_flag, struct mbuf *m, int offset) 5475 { 5476 /* The pr-sctp fwd tsn */ 5477 /* 5478 * here we will perform all the data receiver side steps for 5479 * processing FwdTSN, as required in by pr-sctp draft: 5480 * 5481 * Assume we get FwdTSN(x): 5482 * 5483 * 1) update local cumTSN to x 2) try to further advance cumTSN to x 5484 * + others we have 3) examine and update re-ordering queue on 5485 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to 5486 * report where we are. 5487 */ 5488 struct sctp_association *asoc; 5489 uint32_t new_cum_tsn, gap; 5490 unsigned int i, fwd_sz, m_size; 5491 struct sctp_stream_in *strm; 5492 struct sctp_queued_to_read *control, *ncontrol; 5493 5494 asoc = &stcb->asoc; 5495 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) { 5496 SCTPDBG(SCTP_DEBUG_INDATA1, 5497 "Bad size too small/big fwd-tsn\n"); 5498 return; 5499 } 5500 m_size = (stcb->asoc.mapping_array_size << 3); 5501 /*************************************************************/ 5502 /* 1. Here we update local cumTSN and shift the bitmap array */ 5503 /*************************************************************/ 5504 new_cum_tsn = ntohl(fwd->new_cumulative_tsn); 5505 5506 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) { 5507 /* Already got there ... */ 5508 return; 5509 } 5510 /* 5511 * now we know the new TSN is more advanced, let's find the actual 5512 * gap 5513 */ 5514 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn); 5515 asoc->cumulative_tsn = new_cum_tsn; 5516 if (gap >= m_size) { 5517 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) { 5518 struct mbuf *op_err; 5519 char msg[SCTP_DIAG_INFO_LEN]; 5520 5521 /* 5522 * out of range (of single byte chunks in the rwnd I 5523 * give out). This must be an attacker. 5524 */ 5525 *abort_flag = 1; 5526 SCTP_SNPRINTF(msg, sizeof(msg), 5527 "New cum ack %8.8x too high, highest TSN %8.8x", 5528 new_cum_tsn, asoc->highest_tsn_inside_map); 5529 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 5530 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_37; 5531 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); 5532 return; 5533 } 5534 SCTP_STAT_INCR(sctps_fwdtsn_map_over); 5535 5536 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); 5537 asoc->mapping_array_base_tsn = new_cum_tsn + 1; 5538 asoc->highest_tsn_inside_map = new_cum_tsn; 5539 5540 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size); 5541 asoc->highest_tsn_inside_nr_map = new_cum_tsn; 5542 5543 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 5544 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 5545 } 5546 } else { 5547 SCTP_TCB_LOCK_ASSERT(stcb); 5548 for (i = 0; i <= gap; i++) { 5549 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) && 5550 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) { 5551 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i); 5552 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) { 5553 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i; 5554 } 5555 } 5556 } 5557 } 5558 /*************************************************************/ 5559 /* 2. Clear up re-assembly queue */ 5560 /*************************************************************/ 5561 5562 /* This is now done as part of clearing up the stream/seq */ 5563 if (asoc->idata_supported == 0) { 5564 uint16_t sid; 5565 5566 /* Flush all the un-ordered data based on cum-tsn */ 5567 SCTP_INP_READ_LOCK(stcb->sctp_ep); 5568 for (sid = 0; sid < asoc->streamincnt; sid++) { 5569 strm = &asoc->strmin[sid]; 5570 if (!TAILQ_EMPTY(&strm->uno_inqueue)) { 5571 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn); 5572 } 5573 } 5574 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5575 } 5576 /*******************************************************/ 5577 /* 3. Update the PR-stream re-ordering queues and fix */ 5578 /* delivery issues as needed. */ 5579 /*******************************************************/ 5580 fwd_sz -= sizeof(*fwd); 5581 if (m && fwd_sz) { 5582 /* New method. */ 5583 unsigned int num_str; 5584 uint32_t mid; 5585 uint16_t sid; 5586 uint16_t ordered, flags; 5587 struct sctp_strseq *stseq, strseqbuf; 5588 struct sctp_strseq_mid *stseq_m, strseqbuf_m; 5589 5590 offset += sizeof(*fwd); 5591 5592 SCTP_INP_READ_LOCK(stcb->sctp_ep); 5593 if (asoc->idata_supported) { 5594 num_str = fwd_sz / sizeof(struct sctp_strseq_mid); 5595 } else { 5596 num_str = fwd_sz / sizeof(struct sctp_strseq); 5597 } 5598 for (i = 0; i < num_str; i++) { 5599 if (asoc->idata_supported) { 5600 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset, 5601 sizeof(struct sctp_strseq_mid), 5602 (uint8_t *)&strseqbuf_m); 5603 offset += sizeof(struct sctp_strseq_mid); 5604 if (stseq_m == NULL) { 5605 break; 5606 } 5607 sid = ntohs(stseq_m->sid); 5608 mid = ntohl(stseq_m->mid); 5609 flags = ntohs(stseq_m->flags); 5610 if (flags & PR_SCTP_UNORDERED_FLAG) { 5611 ordered = 0; 5612 } else { 5613 ordered = 1; 5614 } 5615 } else { 5616 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset, 5617 sizeof(struct sctp_strseq), 5618 (uint8_t *)&strseqbuf); 5619 offset += sizeof(struct sctp_strseq); 5620 if (stseq == NULL) { 5621 break; 5622 } 5623 sid = ntohs(stseq->sid); 5624 mid = (uint32_t)ntohs(stseq->ssn); 5625 ordered = 1; 5626 } 5627 /* Convert */ 5628 5629 /* now process */ 5630 5631 /* 5632 * Ok we now look for the stream/seq on the read 5633 * queue where its not all delivered. If we find it 5634 * we transmute the read entry into a PDI_ABORTED. 5635 */ 5636 if (sid >= asoc->streamincnt) { 5637 /* screwed up streams, stop! */ 5638 break; 5639 } 5640 if ((asoc->str_of_pdapi == sid) && 5641 (asoc->ssn_of_pdapi == mid)) { 5642 /* 5643 * If this is the one we were partially 5644 * delivering now then we no longer are. 5645 * Note this will change with the reassembly 5646 * re-write. 5647 */ 5648 asoc->fragmented_delivery_inprogress = 0; 5649 } 5650 strm = &asoc->strmin[sid]; 5651 if (ordered) { 5652 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, ncontrol) { 5653 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) { 5654 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn); 5655 } 5656 } 5657 } else { 5658 if (asoc->idata_supported) { 5659 TAILQ_FOREACH_SAFE(control, &strm->uno_inqueue, next_instrm, ncontrol) { 5660 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) { 5661 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn); 5662 } 5663 } 5664 } else { 5665 if (!TAILQ_EMPTY(&strm->uno_inqueue)) { 5666 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn); 5667 } 5668 } 5669 } 5670 TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) { 5671 if ((control->sinfo_stream == sid) && 5672 (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) { 5673 control->pdapi_aborted = 1; 5674 control->end_added = 1; 5675 if (control->on_strm_q == SCTP_ON_ORDERED) { 5676 TAILQ_REMOVE(&strm->inqueue, control, next_instrm); 5677 if (asoc->size_on_all_streams >= control->length) { 5678 asoc->size_on_all_streams -= control->length; 5679 } else { 5680 #ifdef INVARIANTS 5681 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 5682 #else 5683 asoc->size_on_all_streams = 0; 5684 #endif 5685 } 5686 sctp_ucount_decr(asoc->cnt_on_all_streams); 5687 } else if (control->on_strm_q == SCTP_ON_UNORDERED) { 5688 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); 5689 #ifdef INVARIANTS 5690 } else if (control->on_strm_q) { 5691 panic("strm: %p ctl: %p unknown %d", 5692 strm, control, control->on_strm_q); 5693 #endif 5694 } 5695 control->on_strm_q = 0; 5696 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION, 5697 stcb, 5698 SCTP_PARTIAL_DELIVERY_ABORTED, 5699 (void *)control, 5700 SCTP_SO_NOT_LOCKED); 5701 break; 5702 } else if ((control->sinfo_stream == sid) && 5703 SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) { 5704 /* We are past our victim SSN */ 5705 break; 5706 } 5707 } 5708 if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) { 5709 /* Update the sequence number */ 5710 strm->last_mid_delivered = mid; 5711 } 5712 /* now kick the stream the new way */ 5713 /* sa_ignore NO_NULL_CHK */ 5714 sctp_kick_prsctp_reorder_queue(stcb, strm); 5715 } 5716 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5717 } 5718 /* 5719 * Now slide thing forward. 5720 */ 5721 sctp_slide_mapping_arrays(stcb); 5722 } 5723