/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_lock_bsd.h>

/*
 * NOTES: On the outbound side of things I need to check the SACK timer to
 * see if I should generate a SACK into the chunk queue (if I have data to
 * send, that is, and will be sending it) for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */
static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock);

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}
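/*
 * Worked example for the receive-window computation below (illustrative
 * numbers, not taken from a real trace): assume SCTP_SB_LIMIT_RCV() is
 * 64 KB, sctp_sbspace() currently reports 60000 bytes, 4000 bytes sit on
 * the reassembly queue spread over 4 chunks, nothing is queued on the
 * streams, and MSIZE (per-mbuf overhead) is 256 bytes.  Then
 *
 *     calc = 60000 - (4000 + 4 * 256) = 54976
 *
 * and my_rwnd_control_len is subtracted from that.  sctp_sbspace_sub()
 * clamps each subtraction at 0, and the final silly-window-syndrome
 * check keeps the advertised window at 1 byte rather than reopening it
 * in tiny increments.
 */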
/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket,
	 * since sb_cc is the count that everyone has put up.  When we
	 * rewrite sctp_soreceive we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL) {
		return (calc);
	}

	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
	    ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
	    ("size_on_all_streams is %u", asoc->size_on_all_streams));
	if (stcb->asoc.sb_cc == 0 &&
	    asoc->cnt_on_reasm_queue == 0 &&
	    asoc->cnt_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
	/*
	 * Take out what has NOT been put on the socket queue and that we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));
	if (calc == 0) {
		/* out of space */
		return (calc);
	}

	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0.  SWS engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}

/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t sid,
    uint32_t mid, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
	read_queue_e->sinfo_stream = sid;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->mid = mid;
	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
	TAILQ_INIT(&read_queue_e->reasm);
	read_queue_e->whoFrom = net;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		read_queue_e->do_not_ref_stcb = 1;
	}
failed_build:
	return (read_queue_e);
}
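/*
 * The function below assembles the ancillary data that a userland
 * receiver sees.  A minimal sketch of the consuming side (illustrative,
 * following the RFC 6458 socket API; data iovec and error handling
 * omitted):
 *
 *     struct msghdr msg;
 *     char cbuf[CMSG_SPACE(sizeof(struct sctp_rcvinfo))];
 *     struct cmsghdr *cmsg;
 *
 *     msg.msg_control = cbuf;
 *     msg.msg_controllen = sizeof(cbuf);
 *     recvmsg(fd, &msg, 0);
 *     for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
 *         cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *             if (cmsg->cmsg_level == IPPROTO_SCTP &&
 *                 cmsg->cmsg_type == SCTP_RCVINFO) {
 *                     struct sctp_rcvinfo *r;
 *
 *                     r = (struct sctp_rcvinfo *)CMSG_DATA(cmsg);
 *                     use r->rcv_sid, r->rcv_ppid, r->rcv_tsn, ...
 *             }
 *     }
 */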
struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}

	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	/*
	 * Make sure that there is no un-initialized padding between the
	 * cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->serinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
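/*
 * Illustrative example for the TSN bookkeeping below (made-up numbers):
 * with mapping_array_base_tsn = 0x1000 and tsn = 0x1003,
 * SCTP_CALC_TSN_TO_GAP() yields gap = 3, i.e. bit 3 in the mapping
 * arrays.  A received TSN is tracked in exactly one of two arrays:
 * mapping_array for data that is still revokable, nr_mapping_array for
 * data that can no longer be renege'd on.  Marking a TSN non-revokable
 * therefore moves its bit from the former to the latter.
 */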
static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;
	int in_r = 0, in_nr = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * This TSN is behind the cum ack and thus we don't need to
		 * worry about it being moved from one to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if ((in_r == 0) && (in_nr == 0)) {
#ifdef INVARIANTS
		panic("Things are really messed up now");
#else
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#endif
	}
	if (in_nr == 0)
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if (in_r)
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
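/*
 * Place a newly built control on the stream's ordered or un-ordered
 * in-queue, keeping the queue sorted by MID.  Returns 0 on success and
 * -1 when the peer sent a duplicate MID, or tried to queue a second
 * message on the old-style un-ordered queue; the callers turn -1 into
 * an association abort.
 */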
static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;
	struct sctp_readhead *q;
	uint8_t flags, unordered;

	flags = (control->sinfo_flags >> 8);
	unordered = flags & SCTP_DATA_UNORDERED;
	if (unordered) {
		q = &strm->uno_inqueue;
		if (asoc->idata_supported == 0) {
			if (!TAILQ_EMPTY(q)) {
				/*
				 * Only one stream can be here in old style
				 * -- abort.
				 */
				return (-1);
			}
			TAILQ_INSERT_TAIL(q, control, next_instrm);
			control->on_strm_q = SCTP_ON_UNORDERED;
			return (0);
		}
	} else {
		q = &strm->inqueue;
	}
	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		control->end_added = 1;
		control->first_frag_seen = 1;
		control->last_frag_seen = 1;
	}
	if (TAILQ_EMPTY(q)) {
		/* Empty queue */
		TAILQ_INSERT_HEAD(q, control, next_instrm);
		if (unordered) {
			control->on_strm_q = SCTP_ON_UNORDERED;
		} else {
			control->on_strm_q = SCTP_ON_ORDERED;
		}
		return (0);
	} else {
		TAILQ_FOREACH(at, q, next_instrm) {
			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * One in queue is bigger than the new one,
				 * insert before this one.
				 */
				TAILQ_INSERT_BEFORE(at, control, next_instrm);
				if (unordered) {
					control->on_strm_q = SCTP_ON_UNORDERED;
				} else {
					control->on_strm_q = SCTP_ON_ORDERED;
				}
				break;
			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * Gak, he sent me a duplicate msg id
				 * number?  Return -1 to abort.
				 */
				return (-1);
			} else {
				if (TAILQ_NEXT(at, next_instrm) == NULL) {
					/*
					 * We are at the end, insert it
					 * after this one.
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
					}
					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
					if (unordered) {
						control->on_strm_q = SCTP_ON_UNORDERED;
					} else {
						control->on_strm_q = SCTP_ON_ORDERED;
					}
					break;
				}
			}
		}
	}
	return (0);
}

static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
	char msg[SCTP_DIAG_INFO_LEN];
	struct mbuf *oper;

	if (stcb->asoc.idata_supported) {
		SCTP_SNPRINTF(msg, sizeof(msg),
		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn, chk->rec.data.mid);
	} else {
		SCTP_SNPRINTF(msg, sizeof(msg),
		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn,
		    (uint16_t)chk->rec.data.mid);
	}
	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
	sctp_m_freem(chk->data);
	chk->data = NULL;
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
	*abort_flag = 1;
}

static void
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
	/*
	 * The control could not be placed and must be cleaned.
	 */
	struct sctp_tmit_chunk *chk, *nchk;

	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (chk->data)
			sctp_m_freem(chk->data);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	sctp_free_remote_addr(control->whoFrom);
	if (control->data) {
		sctp_m_freem(control->data);
		control->data = NULL;
	}
	sctp_free_a_readq(stcb, control);
}
/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order, as long
 * as the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe?  What happens when the ssn wraps?  If we are getting
	 * all the data in one stream this could happen quite rapidly.  One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur.  Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0.  You have a situation where the TSN
	 * has wrapped but not in the stream.  Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
	 * ssn 2 with TSN 1?  If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation.  So for now I am undecided and will leave the sort by
	 * SSN alone.  Maybe a hybrid approach is the answer.
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	struct sctp_stream_in *strm;
	char msg[SCTP_DIAG_INFO_LEN];

	strm = &asoc->strmin[control->sinfo_stream];
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
		    strm->last_mid_delivered, control->mid);
		/*
		 * Throw it in the stream so it gets cleaned up in
		 * association destruction.
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		if (asoc->idata_supported) {
			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			    strm->last_mid_delivered, control->sinfo_tsn,
			    control->sinfo_stream, control->mid);
		} else {
			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			    (uint16_t)strm->last_mid_delivered,
			    control->sinfo_tsn,
			    control->sinfo_stream,
			    (uint16_t)control->mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;
	}
	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_mid_delivered + 1;
	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
		/* Can it be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* It won't be queued if it can be delivered directly. */
		queue_needed = 0;
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_mid_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_mid_delivered + 1;
			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
				} else {
					panic("Huh control: %p is on_strm_q: %d",
					    control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_mid_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue.  And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_LOCKED);
				continue;
			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
				*need_reasm = 1;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			SCTP_SNPRINTF(msg, sizeof(msg),
			    "Queue to str MID: %u duplicate", control->mid);
			sctp_clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}

static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
	struct mbuf *m, *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	control->held_length = 0;
	control->length = 0;
	m = control->data;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		if (control->on_read_q) {
			/*
			 * On read queue so we must increment the SB stuff,
			 * we assume caller has done any locks of SB.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}
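/*
 * Append the mbuf chain m to the end of control's data.  Keeping
 * tail_mbuf up to date makes these appends O(1) instead of walking the
 * whole chain on every merged fragment.  Zero-length mbufs are freed on
 * the way, control->length is updated, socket-buffer accounting is
 * charged when the control is already on the read queue, and *added
 * returns the number of bytes actually appended.
 */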
static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
{
	struct mbuf *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	if (stcb == NULL) {
#ifdef INVARIANTS
		panic("Control broken");
#else
		return;
#endif
	}
	if (control->tail_mbuf == NULL) {
		/* TSNH */
		sctp_m_freem(control->data);
		control->data = m;
		sctp_setup_tail_pointer(control);
		return;
	}
	control->tail_mbuf->m_next = m;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->tail_mbuf->m_next = sctp_m_free(m);
				m = control->tail_mbuf->m_next;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (control->on_read_q) {
			/*
			 * On read queue so we must increment the SB stuff,
			 * we assume caller has done any locks of SB.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		*added += SCTP_BUF_LEN(m);
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
	memset(nc, 0, sizeof(struct sctp_queued_to_read));
	nc->sinfo_stream = control->sinfo_stream;
	nc->mid = control->mid;
	TAILQ_INIT(&nc->reasm);
	nc->top_fsn = control->top_fsn;
	nc->mid = control->mid;
	nc->sinfo_flags = control->sinfo_flags;
	nc->sinfo_ppid = control->sinfo_ppid;
	nc->sinfo_context = control->sinfo_context;
	nc->fsn_included = 0xffffffff;
	nc->sinfo_tsn = control->sinfo_tsn;
	nc->sinfo_cumtsn = control->sinfo_cumtsn;
	nc->sinfo_assoc_id = control->sinfo_assoc_id;
	nc->whoFrom = control->whoFrom;
	atomic_add_int(&nc->whoFrom->ref_count, 1);
	nc->stcb = control->stcb;
	nc->port_from = control->port_from;
	nc->do_not_ref_stcb = control->do_not_ref_stcb;
}

static void
sctp_reset_a_control(struct sctp_queued_to_read *control,
    struct sctp_inpcb *inp, uint32_t tsn)
{
	control->fsn_included = tsn;
	if (control->on_read_q) {
		/*
		 * We have to purge it from there, hopefully this will work
		 * :-)
		 */
		TAILQ_REMOVE(&inp->read_queue, control, next);
		control->on_read_q = 0;
	}
}
static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
	/*
	 * Special handling for the old un-ordered data chunk.  All the
	 * chunks/TSNs go to mid 0.  So we have to do the old style watching
	 * to see if we have it all.  If you return one, no other control
	 * entries on the un-ordered queue will be looked at.  In theory
	 * there should be no other entries in reality, unless the guy is
	 * sending both unordered NDATA and unordered DATA...
	 */
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn == fsn) {
			/* Ok lets add it */
			sctp_alloc_a_readq(stcb, nc);
			if (nc == NULL) {
				break;
			}
			memset(nc, 0, sizeof(struct sctp_queued_to_read));
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok we have to move anything left
					 * on the control queue to a new
					 * control.
					 */
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						if (asoc->size_on_reasm_queue >= tchk->send_size) {
							asoc->size_on_reasm_queue -= tchk->send_size;
						} else {
#ifdef INVARIANTS
							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
							asoc->size_on_reasm_queue = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn;
						nc->data = tchk->data;
						nc->sinfo_ppid = tchk->rec.data.ppid;
						nc->sinfo_tsn = tchk->rec.data.tsn;
						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/*
					 * Now lets add it to the queue
					 * after removing control.
					 */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				if (control->pdapi_started) {
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
					    &stcb->sctp_socket->so_rcv, control->end_added,
					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
					/*
					 * Switch to the new guy and
					 * continue.
					 */
					control = nc;
					goto restart;
				} else {
					if (nc->on_strm_q == 0) {
						sctp_free_a_readq(stcb, nc);
					}
				}
				return (1);
			} else {
				sctp_free_a_readq(stcb, nc);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if (cnt_added && strm->pd_api_started) {
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		    &stcb->sctp_socket->so_rcv, control->end_added,
		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}
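/*
 * For pre-I-DATA unordered reassembly the FSN is just the TSN, so a
 * FIRST fragment may legitimately start a new message on the same
 * control.  A FIRST with a smaller TSN than the one currently held is
 * swapped with it (data, FSN, TSN and PPID are exchanged below);
 * anything else is inserted into the reasm list sorted by FSN.
 */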
static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted;

	/*
	 * Here we need to place the chunk into the control structure sorted
	 * in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* It's the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		at = TAILQ_FIRST(&control->reasm);
		if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
			/*
			 * The first chunk in the reassembly is a smaller
			 * TSN than this one; even though this has a first,
			 * it must be from a subsequent msg.
			 */
			goto place_chunk;
		}
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered we can reassemble multiple
			 * messages on one control, as long as the next
			 * FIRST is greater than the old first (TSN-wise,
			 * i.e. FSN-wise).
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
				/*
				 * Easy way: the start of a new guy beyond
				 * the lowest.
				 */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok this should not happen; if it does we
				 * started the pd-api on the higher TSN
				 * (since the equals part is a TSN failure
				 * it must be that).
				 *
				 * We are completely hosed in that case since
				 * I have no way to recover.  This really
				 * will only happen if we can get more TSNs
				 * higher before the pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);

				return;
			}
			/*
			 * Ok we have two firsts and the one we just got is
			 * smaller than the one we previously placed.. yuck!
			 * We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Save the lengths */
			chk->send_size = control->length;
			/* Recompute length of control and tail pointer */
			sctp_setup_tail_pointer(control);
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn;
			chk->rec.data.fsn = tmp;
			/* Fix the TSN included */
			tmp = control->sinfo_tsn;
			control->sinfo_tsn = chk->rec.data.tsn;
			chk->rec.data.tsn = tmp;
			/* Fix the PPID included */
			tmp = control->sinfo_ppid;
			control->sinfo_ppid = chk->rec.data.ppid;
			chk->rec.data.ppid = tmp;
			/* Fix tail pointer */
			goto place_chunk;
		}
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn;
		control->top_fsn = chk->rec.data.fsn;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	inserted = 0;
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
			/*
			 * This one in queue is bigger than the new one,
			 * insert the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
			/*
			 * They sent a duplicate fsn number.  This really
			 * should not happen since the FSN is a TSN and it
			 * should have been dropped earlier.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}
	}
	if (inserted == 0) {
		/* It's at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}

static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm, int inp_read_lock_held)
{
	/*
	 * Given a stream, strm, see if any of the SSNs on it that are
	 * fragmented are ready to deliver.  If so go ahead and place them
	 * on the read queue.  In so placing, if we have hit the end, then
	 * we need to remove them from the stream's queue.
	 */
	struct sctp_queued_to_read *control, *nctl = NULL;
	uint32_t next_to_del;
	uint32_t pd_point;
	int ret = 0;

	if (stcb->sctp_socket) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
		    stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = stcb->sctp_ep->partial_delivery_point;
	}
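	/*
	 * pd_point is the partial-delivery threshold: a still-incomplete
	 * message is handed to the socket early only once it has at least
	 * this many bytes queued.  It is the smaller of a fixed fraction
	 * of the receive buffer limit and the endpoint's configured
	 * partial_delivery_point, so a small socket buffer is never
	 * required to hold a huge message before any of it is readable.
	 */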
	control = TAILQ_FIRST(&strm->uno_inqueue);

	if ((control != NULL) &&
	    (asoc->idata_supported == 0)) {
		/* Special handling needed for "old" data format */
		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
			goto done_un;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	while (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (control->end_added) {
			/* We just put the last bit on */
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_UNORDERED) {
					panic("Huh control: %p on_q: %d -- not unordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Can we do a PD-API for this un-ordered guy? */
			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);

				break;
			}
		}
		control = nctl;
	}
done_un:
	control = TAILQ_FIRST(&strm->inqueue);
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	if (control == NULL) {
		return (ret);
	}
	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
		/*
		 * Ok the guy at the top was being partially delivered
		 * and has completed, so we remove it.  Note the pd_api
		 * flag was taken off when the chunk was merged on in
		 * sctp_queue_data_for_reasm below.
		 */
		nctl = TAILQ_NEXT(control, next_instrm);
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
		    control, control->end_added, control->mid,
		    control->top_fsn, control->fsn_included,
		    strm->last_mid_delivered);
		if (control->end_added) {
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_ORDERED) {
					panic("Huh control: %p on_q: %d -- not ordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (strm->pd_api_started && control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			control = nctl;
		}
	}
	if (strm->pd_api_started) {
		/*
		 * Can't add more; must have gotten an un-ordered one above
		 * being partially delivered.
		 */
		return (0);
	}
deliver_more:
	next_to_del = strm->last_mid_delivered + 1;
	if (control) {
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
		    next_to_del);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
		    (control->first_frag_seen)) {
			int done;

			/* Ok we can deliver it onto the stream. */
			if (control->end_added) {
				/* We are done with it afterwards */
				if (control->on_strm_q) {
#ifdef INVARIANTS
					if (control->on_strm_q != SCTP_ON_ORDERED) {
						panic("Huh control: %p on_q: %d -- not ordered?",
						    control, control->on_strm_q);
					}
#endif
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					control->on_strm_q = 0;
				}
				ret++;
			}
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/*
				 * A singleton now slipping through - mark
				 * it non-revokable too.
				 */
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			} else if (control->end_added == 0) {
				/*
				 * Check if we can defer adding until it is
				 * all there.
				 */
				if ((control->length < pd_point) || (strm->pd_api_started)) {
					/*
					 * Don't need it or cannot add more
					 * (one being delivered that way).
					 */
					goto out;
				}
			}
			done = (control->end_added) && (control->last_frag_seen);
			if (control->on_read_q == 0) {
				if (!done) {
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					strm->pd_api_started = 1;
					control->pdapi_started = 1;
				}
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			strm->last_mid_delivered = next_to_del;
			if (done) {
				control = nctl;
				goto deliver_more;
			}
		}
	}
out:
	return (ret);
}

uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock)
{
	/*
	 * Given a control and a chunk, merge the data from the chk onto
	 * the control and free up the chunk resources.
	 */
	uint32_t added = 0;
	int i_locked = 0;

	if (control->on_read_q && (hold_rlock == 0)) {
		/*
		 * It's being pd-api'd so we must do some locks.
		 */
1308 */ 1309 SCTP_INP_READ_LOCK(stcb->sctp_ep); 1310 i_locked = 1; 1311 } 1312 if (control->data == NULL) { 1313 control->data = chk->data; 1314 sctp_setup_tail_pointer(control); 1315 } else { 1316 sctp_add_to_tail_pointer(control, chk->data, &added); 1317 } 1318 control->fsn_included = chk->rec.data.fsn; 1319 asoc->size_on_reasm_queue -= chk->send_size; 1320 sctp_ucount_decr(asoc->cnt_on_reasm_queue); 1321 sctp_mark_non_revokable(asoc, chk->rec.data.tsn); 1322 chk->data = NULL; 1323 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 1324 control->first_frag_seen = 1; 1325 control->sinfo_tsn = chk->rec.data.tsn; 1326 control->sinfo_ppid = chk->rec.data.ppid; 1327 } 1328 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 1329 /* Its complete */ 1330 if ((control->on_strm_q) && (control->on_read_q)) { 1331 if (control->pdapi_started) { 1332 control->pdapi_started = 0; 1333 strm->pd_api_started = 0; 1334 } 1335 if (control->on_strm_q == SCTP_ON_UNORDERED) { 1336 /* Unordered */ 1337 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); 1338 control->on_strm_q = 0; 1339 } else if (control->on_strm_q == SCTP_ON_ORDERED) { 1340 /* Ordered */ 1341 TAILQ_REMOVE(&strm->inqueue, control, next_instrm); 1342 /* 1343 * Don't need to decrement 1344 * size_on_all_streams, since control is on 1345 * the read queue. 1346 */ 1347 sctp_ucount_decr(asoc->cnt_on_all_streams); 1348 control->on_strm_q = 0; 1349 #ifdef INVARIANTS 1350 } else if (control->on_strm_q) { 1351 panic("Unknown state on ctrl: %p on_strm_q: %d", control, 1352 control->on_strm_q); 1353 #endif 1354 } 1355 } 1356 control->end_added = 1; 1357 control->last_frag_seen = 1; 1358 } 1359 if (i_locked) { 1360 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 1361 } 1362 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 1363 return (added); 1364 } 1365 1366 /* 1367 * Dump onto the re-assembly queue, in its proper place. After dumping on the 1368 * queue, see if anthing can be delivered. If so pull it off (or as much as 1369 * we can. If we run out of space then we must dump what we can and set the 1370 * appropriate flag to say we queued what we could. 1371 */ 1372 static void 1373 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc, 1374 struct sctp_queued_to_read *control, 1375 struct sctp_tmit_chunk *chk, 1376 int created_control, 1377 int *abort_flag, uint32_t tsn) 1378 { 1379 uint32_t next_fsn; 1380 struct sctp_tmit_chunk *at, *nat; 1381 struct sctp_stream_in *strm; 1382 int do_wakeup, unordered; 1383 uint32_t lenadded; 1384 1385 strm = &asoc->strmin[control->sinfo_stream]; 1386 /* 1387 * For old un-ordered data chunks. 1388 */ 1389 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 1390 unordered = 1; 1391 } else { 1392 unordered = 0; 1393 } 1394 /* Must be added to the stream-in queue */ 1395 if (created_control) { 1396 if ((unordered == 0) || (asoc->idata_supported)) { 1397 sctp_ucount_incr(asoc->cnt_on_all_streams); 1398 } 1399 if (sctp_place_control_in_stream(strm, asoc, control)) { 1400 /* Duplicate SSN? */ 1401 sctp_abort_in_reasm(stcb, control, chk, 1402 abort_flag, 1403 SCTP_FROM_SCTP_INDATA + SCTP_LOC_6); 1404 sctp_clean_up_control(stcb, control); 1405 return; 1406 } 1407 if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) { 1408 /* 1409 * Ok we created this control and now lets validate 1410 * that its legal i.e. there is a B bit set, if not 1411 * and we have up to the cum-ack then its invalid. 
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
				return;
			}
		}
	}
	if ((asoc->idata_supported == 0) && (unordered == 1)) {
		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
		return;
	}
	/*
	 * Ok we must queue the chunk into the reassembly portion: o if it
	 * is the first it goes to the control mbuf; o if it is not first
	 * but the next in sequence it goes to the control, and each
	 * succeeding one in order also goes; o if it is not in order we
	 * place it on the list in its place.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* It's the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		if (control->first_frag_seen) {
			/*
			 * Error on sender's part: they either sent us two
			 * data chunks with FIRST, or they sent two
			 * un-ordered chunks that were fragmented at the
			 * same time in the same stream.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
			return;
		}
		control->first_frag_seen = 1;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->fsn_included = chk->rec.data.fsn;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		asoc->size_on_all_streams += control->length;
	} else {
		/* Place the chunk in our list */
		int inserted = 0;

		if (control->last_frag_seen == 0) {
			/* Still willing to raise highest FSN seen */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "We have a new top_fsn: %u\n",
				    chk->rec.data.fsn);
				control->top_fsn = chk->rec.data.fsn;
			}
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "The last fsn is now in place fsn: %u\n",
				    chk->rec.data.fsn);
				control->last_frag_seen = 1;
				if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
					SCTPDBG(SCTP_DEBUG_XXX,
					    "New fsn: %u is not at top_fsn: %u -- abort\n",
					    chk->rec.data.fsn,
					    control->top_fsn);
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
					return;
				}
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know
				 * that the first fragment is 0.  For old
				 * DATA we have to receive the first before
				 * we know the first FSN (which is the TSN).
				 */
				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/*
					 * We have already delivered up to
					 * this so it is a dup.
					 */
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
					return;
				}
			}
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				/* A second LAST fragment?  Huh? */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate last fsn: %u (top: %u) -- abort\n",
				    chk->rec.data.fsn, control->top_fsn);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
				return;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know
				 * that the first fragment is 0.  For old
				 * DATA we have to receive the first before
				 * we know the first FSN (which is the TSN).
				 */

				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/*
					 * We have already delivered up to
					 * this so it is a dup.
					 */
					SCTPDBG(SCTP_DEBUG_XXX,
					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
					    chk->rec.data.fsn, control->fsn_included);
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
					return;
				}
			}
			/*
			 * Validate not beyond top FSN if we have seen the
			 * last one.
			 */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
				    chk->rec.data.fsn,
				    control->top_fsn);
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
				return;
			}
		}
		/*
		 * If we reach here, we need to place the new chunk in the
		 * reassembly for this control.
		 */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a not first fsn: %u needs to be inserted\n",
		    chk->rec.data.fsn);
		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
				if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
					/* Last not at the end?  Huh? */
					SCTPDBG(SCTP_DEBUG_XXX,
					    "Last fragment not last in list: -- abort\n");
					sctp_abort_in_reasm(stcb, control,
					    chk, abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
					return;
				}
				/*
				 * This one in queue is bigger than the new
				 * one, insert the new one before at.
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Insert it before fsn: %u\n",
				    at->rec.data.fsn);
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
				inserted = 1;
				break;
			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
				/*
				 * Gak, he sent me a duplicate str seq
				 * number.
				 *
				 * foo bar, I guess I will just free this
				 * new guy, should we abort too?  FIX ME
				 * MAYBE?  Or it COULD be that the SSNs have
				 * wrapped.  Maybe I should compare to TSN
				 * somehow... sigh, for now just blow away
				 * the chunk!
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate to fsn: %u -- abort\n",
				    at->rec.data.fsn);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
				return;
			}
		}
		if (inserted == 0) {
			/* Goes on the end */
			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
			    chk->rec.data.fsn);
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
		}
	}
	/*
	 * Ok lets see if we can suck any up into the control structure
	 * that are in seq, if it makes sense.
	 */
	do_wakeup = 0;
	/*
	 * If the first fragment has not been seen there is no sense in
	 * looking.
	 */
	if (control->first_frag_seen) {
		next_fsn = control->fsn_included + 1;
		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
			if (at->rec.data.fsn == next_fsn) {
				/* We can add this one now to the control */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
				    control, at,
				    at->rec.data.fsn,
				    next_fsn, control->fsn_included);
				TAILQ_REMOVE(&control->reasm, at, sctp_next);
				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
				if (control->on_read_q) {
					do_wakeup = 1;
				} else {
					/*
					 * We only add to the
					 * size-on-all-streams if it is not
					 * on the read q.  The read q flag
					 * will cause a sballoc so it is
					 * accounted for there.
					 */
					asoc->size_on_all_streams += lenadded;
				}
				next_fsn++;
				if (control->end_added && control->pdapi_started) {
					if (strm->pd_api_started) {
						strm->pd_api_started = 0;
						control->pdapi_started = 0;
					}
					if (control->on_read_q == 0) {
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    control,
						    &stcb->sctp_socket->so_rcv, control->end_added,
						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
					}
					break;
				}
			} else {
				break;
			}
		}
	}
	if (do_wakeup) {
		/* Need to wakeup the reader */
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
}

static struct sctp_queued_to_read *
sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
{
	struct sctp_queued_to_read *control;

	if (ordered) {
		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
				break;
			}
		}
	} else {
		if (idata_supported) {
			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
					break;
				}
			}
		} else {
			control = TAILQ_FIRST(&strm->uno_inqueue);
		}
	}
	return (control);
}
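/*
 * Process a single DATA or I-DATA chunk: parse the header, run the
 * duplicate/gap/stream checks, and hand the payload to the stream
 * queues or the reassembly code above.  The two wire formats carry the
 * same role fields in different places: DATA has TSN/SID/SSN/PPID,
 * while I-DATA has TSN/SID/MID plus a union holding the PPID on FIRST
 * fragments and the FSN on all others (for DATA the FSN is simply the
 * TSN).  Returns 0 whenever the chunk is dropped or the association is
 * being aborted; *abort_flag and *break_flag tell the caller why
 * processing stopped.
 */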
static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, int chk_length,
    struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
    int *break_flag, int last_chunk, uint8_t chk_type)
{
	struct sctp_tmit_chunk *chk = NULL;	/* make gcc happy */
	struct sctp_stream_in *strm;
	uint32_t tsn, fsn, gap, mid;
	struct mbuf *dmbuf;
	int the_len;
	int need_reasm_check = 0;
	uint16_t sid;
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];
	struct sctp_queued_to_read *control, *ncontrol;
	uint32_t ppid;
	uint8_t chk_flags;
	struct sctp_stream_reset_list *liste;
	int ordered;
	size_t clen;
	int created_control = 0;

	if (chk_type == SCTP_IDATA) {
		struct sctp_idata_chunk *chunk, chunk_buf;

		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
		    sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
		chk_flags = chunk->ch.chunk_flags;
		clen = sizeof(struct sctp_idata_chunk);
		tsn = ntohl(chunk->dp.tsn);
		sid = ntohs(chunk->dp.sid);
		mid = ntohl(chunk->dp.mid);
		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
			fsn = 0;
			ppid = chunk->dp.ppid_fsn.ppid;
		} else {
			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
			ppid = 0xffffffff;	/* Use as an invalid value. */
		}
	} else {
		struct sctp_data_chunk *chunk, chunk_buf;

		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
		    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
		chk_flags = chunk->ch.chunk_flags;
		clen = sizeof(struct sctp_data_chunk);
		tsn = ntohl(chunk->dp.tsn);
		sid = ntohs(chunk->dp.sid);
		mid = (uint32_t)(ntohs(chunk->dp.ssn));
		fsn = tsn;
		ppid = chunk->dp.ppid;
	}
	if ((size_t)chk_length == clen) {
		/*
		 * Need to send an abort since we had an empty data chunk.
		 */
		op_err = sctp_generate_no_user_data_cause(tsn);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
		asoc->send_sack = 1;
	}
	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
	}
	if (stcb == NULL) {
		return (0);
	}
	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
		/* It is a duplicate */
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/* Calculate the number of TSNs between the base and this TSN */
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
		/* Can't hold the bit in the mapping at max array, toss it */
		return (0);
	}
	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_expand_mapping_array(asoc, gap)) {
			/* Can't expand, drop it */
			return (0);
		}
	}
	if (SCTP_TSN_GT(tsn, *high_tsn)) {
		*high_tsn = tsn;
	}
	/* See if we have received this one already */
	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/*
	 * Check to see about the GONE flag; duplicates would cause a sack
	 * to be sent up above.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
		/*
		 * Wait a minute, this guy is gone, there is no longer a
		 * receiver.  Send peer an ABORT!
		 */
		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
	/*
	 * Now before going further we see if there is room.  If NOT then
	 * we MAY let one through only IF this TSN is the one we are
	 * waiting for on a partial delivery API.
	 */

	/* Is the stream valid? */
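	/*
	 * Note that an invalid stream id is not fatal: the chunk is
	 * dropped and an "invalid stream" ERROR is queued to the peer,
	 * but the TSN is still marked as received (nr_mapping_array
	 * below) so the SACK machinery stays consistent and the peer
	 * does not keep retransmitting the chunk.
	 */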
*/ 1833 if (sid >= asoc->streamincnt) { 1834 struct sctp_error_invalid_stream *cause; 1835 1836 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream), 1837 0, M_NOWAIT, 1, MT_DATA); 1838 if (op_err != NULL) { 1839 /* add some space up front so prepend will work well */ 1840 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 1841 cause = mtod(op_err, struct sctp_error_invalid_stream *); 1842 /* 1843 * Error causes are just params and this one has 1844 * two back-to-back phdrs, one with the error type 1845 * and size, the other with the streamid and a rsvd 1846 */ 1847 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream); 1848 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM); 1849 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream)); 1850 cause->stream_id = htons(sid); 1851 cause->reserved = htons(0); 1852 sctp_queue_op_err(stcb, op_err); 1853 } 1854 SCTP_STAT_INCR(sctps_badsid); 1855 SCTP_TCB_LOCK_ASSERT(stcb); 1856 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 1857 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1858 asoc->highest_tsn_inside_nr_map = tsn; 1859 } 1860 if (tsn == (asoc->cumulative_tsn + 1)) { 1861 /* Update cum-ack */ 1862 asoc->cumulative_tsn = tsn; 1863 } 1864 return (0); 1865 } 1866 /* 1867 * If it's a fragmented message, let's see if we can find the control 1868 * on the reassembly queues. 1869 */ 1870 if ((chk_type == SCTP_IDATA) && 1871 ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) && 1872 (fsn == 0)) { 1873 /* 1874 * The first *must* be fsn 0, and other (middle/end) pieces 1875 * can *not* be fsn 0. XXX: This can happen in case of a 1876 * wrap around. Ignore it for now. 1877 */ 1878 SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags); 1879 goto err_out; 1880 } 1881 control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported); 1882 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n", 1883 chk_flags, control); 1884 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 1885 /* See if we can find the re-assembly entity */ 1886 if (control != NULL) { 1887 /* We found something, does it belong? */ 1888 if (ordered && (mid != control->mid)) { 1889 SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid); 1890 err_out: 1891 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 1892 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17; 1893 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 1894 *abort_flag = 1; 1895 return (0); 1896 } 1897 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) { 1898 /* 1899 * We can't have a switched order with an 1900 * unordered chunk 1901 */ 1902 SCTP_SNPRINTF(msg, sizeof(msg), 1903 "All fragments of a user message must be ordered or unordered (TSN=%8.8x)", 1904 tsn); 1905 goto err_out; 1906 } 1907 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) { 1908 /* 1909 * We can't have a switched unordered with an 1910 * ordered chunk 1911 */ 1912 SCTP_SNPRINTF(msg, sizeof(msg), 1913 "All fragments of a user message must be ordered or unordered (TSN=%8.8x)", 1914 tsn); 1915 goto err_out; 1916 } 1917 } 1918 } else { 1919 /* 1920 * It's a complete segment. Let's validate we don't have a 1921 * re-assembly going on with the same Stream/Seq (for 1922 * ordered) or in the same Stream for unordered.
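(With I-DATA, unordered messages are keyed by MID as well, so the duplicate check below applies to them too.)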
1923 */ 1924 if (control != NULL) { 1925 if (ordered || asoc->idata_supported) { 1926 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n", 1927 chk_flags, mid); 1928 SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid); 1929 goto err_out; 1930 } else { 1931 if ((tsn == control->fsn_included + 1) && 1932 (control->end_added == 0)) { 1933 SCTP_SNPRINTF(msg, sizeof(msg), 1934 "Illegal message sequence, missing end for MID: %8.8x", 1935 control->fsn_included); 1936 goto err_out; 1937 } else { 1938 control = NULL; 1939 } 1940 } 1941 } 1942 } 1943 /* now do the tests */ 1944 if (((asoc->cnt_on_all_streams + 1945 asoc->cnt_on_reasm_queue + 1946 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) || 1947 (((int)asoc->my_rwnd) <= 0)) { 1948 /* 1949 * When we have NO room in the rwnd we check to make sure 1950 * the reader is doing its job... 1951 */ 1952 if (stcb->sctp_socket->so_rcv.sb_cc) { 1953 /* some to read, wake-up */ 1954 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 1955 } 1956 /* now is it in the mapping array of what we have accepted? */ 1957 if (chk_type == SCTP_DATA) { 1958 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) && 1959 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1960 /* Nope not in the valid range dump it */ 1961 dump_packet: 1962 sctp_set_rwnd(stcb, asoc); 1963 if ((asoc->cnt_on_all_streams + 1964 asoc->cnt_on_reasm_queue + 1965 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) { 1966 SCTP_STAT_INCR(sctps_datadropchklmt); 1967 } else { 1968 SCTP_STAT_INCR(sctps_datadroprwnd); 1969 } 1970 *break_flag = 1; 1971 return (0); 1972 } 1973 } else { 1974 if (control == NULL) { 1975 goto dump_packet; 1976 } 1977 if (SCTP_TSN_GT(fsn, control->top_fsn)) { 1978 goto dump_packet; 1979 } 1980 } 1981 } 1982 #ifdef SCTP_ASOCLOG_OF_TSNS 1983 SCTP_TCB_LOCK_ASSERT(stcb); 1984 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) { 1985 asoc->tsn_in_at = 0; 1986 asoc->tsn_in_wrapped = 1; 1987 } 1988 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn; 1989 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid; 1990 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid; 1991 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length; 1992 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags; 1993 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb; 1994 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at; 1995 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1; 1996 asoc->tsn_in_at++; 1997 #endif 1998 /* 1999 * Before we continue lets validate that we are not being fooled by 2000 * an evil attacker. We can only have Nk chunks based on our TSN 2001 * spread allowed by the mapping array N * 8 bits, so there is no 2002 * way our stream sequence numbers could have wrapped. We of course 2003 * only validate the FIRST fragment so the bit must be set. 2004 */ 2005 if ((chk_flags & SCTP_DATA_FIRST_FRAG) && 2006 (TAILQ_EMPTY(&asoc->resetHead)) && 2007 (chk_flags & SCTP_DATA_UNORDERED) == 0 && 2008 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) { 2009 /* The incoming sseq is behind where we last delivered? 
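With no stream reset pending, an ordered FIRST fragment can never carry a MID at or below the last one delivered, so this is a broken or hostile peer; abort below.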
*/ 2010 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n", 2011 mid, asoc->strmin[sid].last_mid_delivered); 2012 2013 if (asoc->idata_supported) { 2014 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x", 2015 asoc->strmin[sid].last_mid_delivered, 2016 tsn, 2017 sid, 2018 mid); 2019 } else { 2020 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", 2021 (uint16_t)asoc->strmin[sid].last_mid_delivered, 2022 tsn, 2023 sid, 2024 (uint16_t)mid); 2025 } 2026 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2027 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18; 2028 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 2029 *abort_flag = 1; 2030 return (0); 2031 } 2032 if (chk_type == SCTP_IDATA) { 2033 the_len = (chk_length - sizeof(struct sctp_idata_chunk)); 2034 } else { 2035 the_len = (chk_length - sizeof(struct sctp_data_chunk)); 2036 } 2037 if (last_chunk == 0) { 2038 if (chk_type == SCTP_IDATA) { 2039 dmbuf = SCTP_M_COPYM(*m, 2040 (offset + sizeof(struct sctp_idata_chunk)), 2041 the_len, M_NOWAIT); 2042 } else { 2043 dmbuf = SCTP_M_COPYM(*m, 2044 (offset + sizeof(struct sctp_data_chunk)), 2045 the_len, M_NOWAIT); 2046 } 2047 #ifdef SCTP_MBUF_LOGGING 2048 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 2049 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY); 2050 } 2051 #endif 2052 } else { 2053 /* We can steal the last chunk */ 2054 int l_len; 2055 2056 dmbuf = *m; 2057 /* lop off the top part */ 2058 if (chk_type == SCTP_IDATA) { 2059 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk))); 2060 } else { 2061 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk))); 2062 } 2063 if (SCTP_BUF_NEXT(dmbuf) == NULL) { 2064 l_len = SCTP_BUF_LEN(dmbuf); 2065 } else { 2066 /* 2067 * need to count up the size; hopefully we do not hit 2068 * this too often :-0 2069 */ 2070 struct mbuf *lat; 2071 2072 l_len = 0; 2073 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) { 2074 l_len += SCTP_BUF_LEN(lat); 2075 } 2076 } 2077 if (l_len > the_len) { 2078 /* Trim the end round bytes off too */ 2079 m_adj(dmbuf, -(l_len - the_len)); 2080 } 2081 } 2082 if (dmbuf == NULL) { 2083 SCTP_STAT_INCR(sctps_nomem); 2084 return (0); 2085 } 2086 /* 2087 * Now no matter what, we need a control; get one if we don't have 2088 * one (we may have gotten it above when we found the message was 2089 * fragmented). 2090 */ 2091 if (control == NULL) { 2092 sctp_alloc_a_readq(stcb, control); 2093 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 2094 ppid, 2095 sid, 2096 chk_flags, 2097 NULL, fsn, mid); 2098 if (control == NULL) { 2099 SCTP_STAT_INCR(sctps_nomem); 2100 return (0); 2101 } 2102 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 2103 struct mbuf *mm; 2104 2105 control->data = dmbuf; 2106 control->tail_mbuf = NULL; 2107 for (mm = control->data; mm; mm = mm->m_next) { 2108 control->length += SCTP_BUF_LEN(mm); 2109 if (SCTP_BUF_NEXT(mm) == NULL) { 2110 control->tail_mbuf = mm; 2111 } 2112 } 2113 control->end_added = 1; 2114 control->last_frag_seen = 1; 2115 control->first_frag_seen = 1; 2116 control->fsn_included = fsn; 2117 control->top_fsn = fsn; 2118 } 2119 created_control = 1; 2120 } 2121 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n", 2122 chk_flags, ordered, mid, control); 2123 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG && 2124 TAILQ_EMPTY(&asoc->resetHead) && 2125
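/* unordered, or ordered and next to deliver with nothing queued ahead */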
((ordered == 0) || 2126 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) && 2127 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) { 2128 /* Candidate for express delivery */ 2129 /* 2130 * Its not fragmented, No PD-API is up, Nothing in the 2131 * delivery queue, Its un-ordered OR ordered and the next to 2132 * deliver AND nothing else is stuck on the stream queue, 2133 * And there is room for it in the socket buffer. Lets just 2134 * stuff it up the buffer.... 2135 */ 2136 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 2137 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 2138 asoc->highest_tsn_inside_nr_map = tsn; 2139 } 2140 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n", 2141 control, mid); 2142 2143 sctp_add_to_readq(stcb->sctp_ep, stcb, 2144 control, &stcb->sctp_socket->so_rcv, 2145 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 2146 2147 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) { 2148 /* for ordered, bump what we delivered */ 2149 asoc->strmin[sid].last_mid_delivered++; 2150 } 2151 SCTP_STAT_INCR(sctps_recvexpress); 2152 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 2153 sctp_log_strm_del_alt(stcb, tsn, mid, sid, 2154 SCTP_STR_LOG_FROM_EXPRS_DEL); 2155 } 2156 control = NULL; 2157 goto finish_express_del; 2158 } 2159 2160 /* Now will we need a chunk too? */ 2161 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 2162 sctp_alloc_a_chunk(stcb, chk); 2163 if (chk == NULL) { 2164 /* No memory so we drop the chunk */ 2165 SCTP_STAT_INCR(sctps_nomem); 2166 if (last_chunk == 0) { 2167 /* we copied it, free the copy */ 2168 sctp_m_freem(dmbuf); 2169 } 2170 return (0); 2171 } 2172 chk->rec.data.tsn = tsn; 2173 chk->no_fr_allowed = 0; 2174 chk->rec.data.fsn = fsn; 2175 chk->rec.data.mid = mid; 2176 chk->rec.data.sid = sid; 2177 chk->rec.data.ppid = ppid; 2178 chk->rec.data.context = stcb->asoc.context; 2179 chk->rec.data.doing_fast_retransmit = 0; 2180 chk->rec.data.rcv_flags = chk_flags; 2181 chk->asoc = asoc; 2182 chk->send_size = the_len; 2183 chk->whoTo = net; 2184 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n", 2185 chk, 2186 control, mid); 2187 atomic_add_int(&net->ref_count, 1); 2188 chk->data = dmbuf; 2189 } 2190 /* Set the appropriate TSN mark */ 2191 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { 2192 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 2193 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 2194 asoc->highest_tsn_inside_nr_map = tsn; 2195 } 2196 } else { 2197 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); 2198 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) { 2199 asoc->highest_tsn_inside_map = tsn; 2200 } 2201 } 2202 /* Now is it complete (i.e. not fragmented)? */ 2203 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 2204 /* 2205 * Special check for when streams are resetting. We could be 2206 * more smart about this and check the actual stream to see 2207 * if it is not being reset.. that way we would not create a 2208 * HOLB when amongst streams being reset and those not being 2209 * reset. 2210 * 2211 */ 2212 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2213 SCTP_TSN_GT(tsn, liste->tsn)) { 2214 /* 2215 * yep its past where we need to reset... go ahead 2216 * and queue it. 
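It waits on pending_reply_queue until the reset completes and is then handed to the stream queues.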
2217 */ 2218 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) { 2219 /* first one on */ 2220 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2221 } else { 2222 struct sctp_queued_to_read *lcontrol, *nlcontrol; 2223 unsigned char inserted = 0; 2224 2225 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) { 2226 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) { 2227 continue; 2228 } else { 2229 /* found it */ 2230 TAILQ_INSERT_BEFORE(lcontrol, control, next); 2231 inserted = 1; 2232 break; 2233 } 2234 } 2235 if (inserted == 0) { 2236 /* 2237 * must be put at end, use prevP 2238 * (all setup from loop) to setup 2239 * nextP. 2240 */ 2241 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2242 } 2243 } 2244 goto finish_express_del; 2245 } 2246 if (chk_flags & SCTP_DATA_UNORDERED) { 2247 /* queue directly into socket buffer */ 2248 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n", 2249 control, mid); 2250 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 2251 sctp_add_to_readq(stcb->sctp_ep, stcb, 2252 control, 2253 &stcb->sctp_socket->so_rcv, 1, 2254 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 2255 2256 } else { 2257 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control, 2258 mid); 2259 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2260 if (*abort_flag) { 2261 if (last_chunk) { 2262 *m = NULL; 2263 } 2264 return (0); 2265 } 2266 } 2267 goto finish_express_del; 2268 } 2269 /* If we reach here its a reassembly */ 2270 need_reasm_check = 1; 2271 SCTPDBG(SCTP_DEBUG_XXX, 2272 "Queue data to stream for reasm control: %p MID: %u\n", 2273 control, mid); 2274 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn); 2275 if (*abort_flag) { 2276 /* 2277 * the assoc is now gone and chk was put onto the reasm 2278 * queue, which has all been freed. 2279 */ 2280 if (last_chunk) { 2281 *m = NULL; 2282 } 2283 return (0); 2284 } 2285 finish_express_del: 2286 /* Here we tidy up things */ 2287 if (tsn == (asoc->cumulative_tsn + 1)) { 2288 /* Update cum-ack */ 2289 asoc->cumulative_tsn = tsn; 2290 } 2291 if (last_chunk) { 2292 *m = NULL; 2293 } 2294 if (ordered) { 2295 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); 2296 } else { 2297 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); 2298 } 2299 SCTP_STAT_INCR(sctps_recvdata); 2300 /* Set it present please */ 2301 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 2302 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN); 2303 } 2304 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2305 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2306 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); 2307 } 2308 if (need_reasm_check) { 2309 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD); 2310 need_reasm_check = 0; 2311 } 2312 /* check the special flag for stream resets */ 2313 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2314 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) { 2315 /* 2316 * we have finished working through the backlogged TSN's now 2317 * time to reset streams. 1: call reset function. 2: free 2318 * pending_reply space 3: distribute any chunks in 2319 * pending_reply_queue. 
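Only entries with TSNs at or below the reset point are distributed now; anything later waits for the next reset to complete.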
2320 */ 2321 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams); 2322 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); 2323 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED); 2324 SCTP_FREE(liste, SCTP_M_STRESET); 2325 /* sa_ignore FREED_MEMORY */ 2326 liste = TAILQ_FIRST(&asoc->resetHead); 2327 if (TAILQ_EMPTY(&asoc->resetHead)) { 2328 /* All can be removed */ 2329 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) { 2330 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next); 2331 strm = &asoc->strmin[control->sinfo_stream]; 2332 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2333 if (*abort_flag) { 2334 return (0); 2335 } 2336 if (need_reasm_check) { 2337 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD); 2338 need_reasm_check = 0; 2339 } 2340 } 2341 } else { 2342 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) { 2343 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) { 2344 break; 2345 } 2346 /* 2347 * if control->sinfo_tsn is <= liste->tsn we 2348 * can process it which is the NOT of 2349 * control->sinfo_tsn > liste->tsn 2350 */ 2351 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next); 2352 strm = &asoc->strmin[control->sinfo_stream]; 2353 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2354 if (*abort_flag) { 2355 return (0); 2356 } 2357 if (need_reasm_check) { 2358 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD); 2359 need_reasm_check = 0; 2360 } 2361 } 2362 } 2363 } 2364 return (1); 2365 } 2366 2367 static const int8_t sctp_map_lookup_tab[256] = { 2368 0, 1, 0, 2, 0, 1, 0, 3, 2369 0, 1, 0, 2, 0, 1, 0, 4, 2370 0, 1, 0, 2, 0, 1, 0, 3, 2371 0, 1, 0, 2, 0, 1, 0, 5, 2372 0, 1, 0, 2, 0, 1, 0, 3, 2373 0, 1, 0, 2, 0, 1, 0, 4, 2374 0, 1, 0, 2, 0, 1, 0, 3, 2375 0, 1, 0, 2, 0, 1, 0, 6, 2376 0, 1, 0, 2, 0, 1, 0, 3, 2377 0, 1, 0, 2, 0, 1, 0, 4, 2378 0, 1, 0, 2, 0, 1, 0, 3, 2379 0, 1, 0, 2, 0, 1, 0, 5, 2380 0, 1, 0, 2, 0, 1, 0, 3, 2381 0, 1, 0, 2, 0, 1, 0, 4, 2382 0, 1, 0, 2, 0, 1, 0, 3, 2383 0, 1, 0, 2, 0, 1, 0, 7, 2384 0, 1, 0, 2, 0, 1, 0, 3, 2385 0, 1, 0, 2, 0, 1, 0, 4, 2386 0, 1, 0, 2, 0, 1, 0, 3, 2387 0, 1, 0, 2, 0, 1, 0, 5, 2388 0, 1, 0, 2, 0, 1, 0, 3, 2389 0, 1, 0, 2, 0, 1, 0, 4, 2390 0, 1, 0, 2, 0, 1, 0, 3, 2391 0, 1, 0, 2, 0, 1, 0, 6, 2392 0, 1, 0, 2, 0, 1, 0, 3, 2393 0, 1, 0, 2, 0, 1, 0, 4, 2394 0, 1, 0, 2, 0, 1, 0, 3, 2395 0, 1, 0, 2, 0, 1, 0, 5, 2396 0, 1, 0, 2, 0, 1, 0, 3, 2397 0, 1, 0, 2, 0, 1, 0, 4, 2398 0, 1, 0, 2, 0, 1, 0, 3, 2399 0, 1, 0, 2, 0, 1, 0, 8 2400 }; 2401 2402 void 2403 sctp_slide_mapping_arrays(struct sctp_tcb *stcb) 2404 { 2405 /* 2406 * Now we also need to check the mapping array in a couple of ways. 2407 * 1) Did we move the cum-ack point? 2408 * 2409 * When you first glance at this you might think that all entries 2410 * that make up the position of the cum-ack would be in the 2411 * nr-mapping array only.. i.e. things up to the cum-ack are always 2412 * deliverable. Thats true with one exception, when its a fragmented 2413 * message we may not deliver the data until some threshold (or all 2414 * of it) is in place. So we must OR the nr_mapping_array and 2415 * mapping_array to get a true picture of the cum-ack. 
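2) Can we slide the arrays? Once whole leading bytes of the OR'ed map are 0xff, mapping_array_base_tsn is advanced and both arrays are shifted down.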
2416 */ 2417 struct sctp_association *asoc; 2418 int at; 2419 uint8_t val; 2420 int slide_from, slide_end, lgap, distance; 2421 uint32_t old_cumack, old_base, old_highest, highest_tsn; 2422 2423 asoc = &stcb->asoc; 2424 2425 old_cumack = asoc->cumulative_tsn; 2426 old_base = asoc->mapping_array_base_tsn; 2427 old_highest = asoc->highest_tsn_inside_map; 2428 /* 2429 * We could probably improve this a small bit by calculating the 2430 * offset of the current cum-ack as the starting point. 2431 */ 2432 at = 0; 2433 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) { 2434 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from]; 2435 if (val == 0xff) { 2436 at += 8; 2437 } else { 2438 /* there is a 0 bit */ 2439 at += sctp_map_lookup_tab[val]; 2440 break; 2441 } 2442 } 2443 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1); 2444 2445 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) && 2446 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) { 2447 #ifdef INVARIANTS 2448 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map", 2449 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2450 #else 2451 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n", 2452 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2453 sctp_print_mapping_array(asoc); 2454 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2455 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 2456 } 2457 asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2458 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn; 2459 #endif 2460 } 2461 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2462 highest_tsn = asoc->highest_tsn_inside_nr_map; 2463 } else { 2464 highest_tsn = asoc->highest_tsn_inside_map; 2465 } 2466 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) { 2467 /* The complete array was completed by a single FR */ 2468 /* highest becomes the cum-ack */ 2469 int clr; 2470 #ifdef INVARIANTS 2471 unsigned int i; 2472 #endif 2473 2474 /* clear the array */ 2475 clr = ((at + 7) >> 3); 2476 if (clr > asoc->mapping_array_size) { 2477 clr = asoc->mapping_array_size; 2478 } 2479 memset(asoc->mapping_array, 0, clr); 2480 memset(asoc->nr_mapping_array, 0, clr); 2481 #ifdef INVARIANTS 2482 for (i = 0; i < asoc->mapping_array_size; i++) { 2483 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) { 2484 SCTP_PRINTF("Error Mapping array's not clean at clear\n"); 2485 sctp_print_mapping_array(asoc); 2486 } 2487 } 2488 #endif 2489 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; 2490 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2491 } else if (at >= 8) { 2492 /* we can slide the mapping array down */ 2493 /* slide_from holds where we hit the first NON 0xff byte */ 2494 2495 /* 2496 * now calculate the ceiling of the move using our highest 2497 * TSN value 2498 */ 2499 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn); 2500 slide_end = (lgap >> 3); 2501 if (slide_end < slide_from) { 2502 sctp_print_mapping_array(asoc); 2503 #ifdef INVARIANTS 2504 panic("impossible slide"); 2505 #else 2506 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? 
at: %d\n", 2507 lgap, slide_end, slide_from, at); 2508 return; 2509 #endif 2510 } 2511 if (slide_end > asoc->mapping_array_size) { 2512 #ifdef INVARIANTS 2513 panic("would overrun buffer"); 2514 #else 2515 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n", 2516 asoc->mapping_array_size, slide_end); 2517 slide_end = asoc->mapping_array_size; 2518 #endif 2519 } 2520 distance = (slide_end - slide_from) + 1; 2521 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2522 sctp_log_map(old_base, old_cumack, old_highest, 2523 SCTP_MAP_PREPARE_SLIDE); 2524 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end, 2525 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM); 2526 } 2527 if (distance + slide_from > asoc->mapping_array_size || 2528 distance < 0) { 2529 /* 2530 * Here we do NOT slide forward the array so that 2531 * hopefully when more data comes in to fill it up 2532 * we will be able to slide it forward. Really I 2533 * don't think this should happen :-0 2534 */ 2535 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2536 sctp_log_map((uint32_t)distance, (uint32_t)slide_from, 2537 (uint32_t)asoc->mapping_array_size, 2538 SCTP_MAP_SLIDE_NONE); 2539 } 2540 } else { 2541 int ii; 2542 2543 for (ii = 0; ii < distance; ii++) { 2544 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii]; 2545 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii]; 2546 } 2547 for (ii = distance; ii < asoc->mapping_array_size; ii++) { 2548 asoc->mapping_array[ii] = 0; 2549 asoc->nr_mapping_array[ii] = 0; 2550 } 2551 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) { 2552 asoc->highest_tsn_inside_map += (slide_from << 3); 2553 } 2554 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) { 2555 asoc->highest_tsn_inside_nr_map += (slide_from << 3); 2556 } 2557 asoc->mapping_array_base_tsn += (slide_from << 3); 2558 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2559 sctp_log_map(asoc->mapping_array_base_tsn, 2560 asoc->cumulative_tsn, asoc->highest_tsn_inside_map, 2561 SCTP_MAP_SLIDE_RESULT); 2562 } 2563 } 2564 } 2565 } 2566 2567 void 2568 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap) 2569 { 2570 struct sctp_association *asoc; 2571 uint32_t highest_tsn; 2572 int is_a_gap; 2573 2574 sctp_slide_mapping_arrays(stcb); 2575 asoc = &stcb->asoc; 2576 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2577 highest_tsn = asoc->highest_tsn_inside_nr_map; 2578 } else { 2579 highest_tsn = asoc->highest_tsn_inside_map; 2580 } 2581 /* Is there a gap now? */ 2582 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 2583 2584 /* 2585 * Now we need to see if we need to queue a sack or just start the 2586 * timer (if allowed). 2587 */ 2588 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) { 2589 /* 2590 * Ok special case, in SHUTDOWN-SENT case. here we maker 2591 * sure SACK timer is off and instead send a SHUTDOWN and a 2592 * SACK 2593 */ 2594 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2595 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 2596 stcb->sctp_ep, stcb, NULL, 2597 SCTP_FROM_SCTP_INDATA + SCTP_LOC_19); 2598 } 2599 sctp_send_shutdown(stcb, 2600 ((stcb->asoc.alternate) ? 
stcb->asoc.alternate : stcb->asoc.primary_destination)); 2601 if (is_a_gap) { 2602 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 2603 } 2604 } else { 2605 /* 2606 * CMT DAC algorithm: increase number of packets received 2607 * since last ack 2608 */ 2609 stcb->asoc.cmt_dac_pkts_rcvd++; 2610 2611 if ((stcb->asoc.send_sack == 1) || /* We need to send a 2612 * SACK */ 2613 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no 2614 * longer is one */ 2615 (stcb->asoc.numduptsns) || /* we have dup's */ 2616 (is_a_gap) || /* is still a gap */ 2617 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */ 2618 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)) { /* hit limit of pkts */ 2619 if ((stcb->asoc.sctp_cmt_on_off > 0) && 2620 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) && 2621 (stcb->asoc.send_sack == 0) && 2622 (stcb->asoc.numduptsns == 0) && 2623 (stcb->asoc.delayed_ack) && 2624 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) { 2625 /* 2626 * CMT DAC algorithm: With CMT, delay acks 2627 * even in the face of reordering. 2628 * Therefore, if acks that do not have to be 2629 * sent because of the above reasons, will 2630 * be delayed. That is, acks that would have 2631 * been sent due to gap reports will be 2632 * delayed with DAC. Start the delayed ack 2633 * timer. 2634 */ 2635 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2636 stcb->sctp_ep, stcb, NULL); 2637 } else { 2638 /* 2639 * Ok we must build a SACK since the timer 2640 * is pending, we got our first packet OR 2641 * there are gaps or duplicates. 2642 */ 2643 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, 2644 SCTP_FROM_SCTP_INDATA + SCTP_LOC_20); 2645 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 2646 } 2647 } else { 2648 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2649 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2650 stcb->sctp_ep, stcb, NULL); 2651 } 2652 } 2653 } 2654 } 2655 2656 int 2657 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, 2658 struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2659 struct sctp_nets *net, uint32_t *high_tsn) 2660 { 2661 struct sctp_chunkhdr *ch, chunk_buf; 2662 struct sctp_association *asoc; 2663 int num_chunks = 0; /* number of control chunks processed */ 2664 int stop_proc = 0; 2665 int break_flag, last_chunk; 2666 int abort_flag = 0, was_a_gap; 2667 struct mbuf *m; 2668 uint32_t highest_tsn; 2669 uint16_t chk_length; 2670 2671 /* set the rwnd */ 2672 sctp_set_rwnd(stcb, &stcb->asoc); 2673 2674 m = *mm; 2675 SCTP_TCB_LOCK_ASSERT(stcb); 2676 asoc = &stcb->asoc; 2677 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2678 highest_tsn = asoc->highest_tsn_inside_nr_map; 2679 } else { 2680 highest_tsn = asoc->highest_tsn_inside_map; 2681 } 2682 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 2683 /* 2684 * setup where we got the last DATA packet from for any SACK that 2685 * may need to go out. Don't bump the net. This is done ONLY when a 2686 * chunk is assigned. 2687 */ 2688 asoc->last_data_chunk_from = net; 2689 2690 /*- 2691 * Now before we proceed we must figure out if this is a wasted 2692 * cluster... i.e. it is a small packet sent in and yet the driver 2693 * underneath allocated a full cluster for it. If so we must copy it 2694 * to a smaller mbuf and free up the cluster mbuf. This will help 2695 * with cluster starvation. 2696 */ 2697 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) { 2698 /* we only handle mbufs that are singletons.. 
not chains */ 2699 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA); 2700 if (m) { 2701 /* ok let's see if we can copy the data up */ 2702 caddr_t *from, *to; 2703 2704 /* get the pointers and copy */ 2705 to = mtod(m, caddr_t *); 2706 from = mtod((*mm), caddr_t *); 2707 memcpy(to, from, SCTP_BUF_LEN((*mm))); 2708 /* copy the length and free up the old */ 2709 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm)); 2710 sctp_m_freem(*mm); 2711 /* success, back copy */ 2712 *mm = m; 2713 } else { 2714 /* We are in trouble in the mbuf world .. yikes */ 2715 m = *mm; 2716 } 2717 } 2718 /* get pointer to the first chunk header */ 2719 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 2720 sizeof(struct sctp_chunkhdr), 2721 (uint8_t *)&chunk_buf); 2722 if (ch == NULL) { 2723 return (1); 2724 } 2725 /* 2726 * process all DATA chunks... 2727 */ 2728 *high_tsn = asoc->cumulative_tsn; 2729 break_flag = 0; 2730 asoc->data_pkts_seen++; 2731 while (stop_proc == 0) { 2732 /* validate chunk length */ 2733 chk_length = ntohs(ch->chunk_length); 2734 if (length - *offset < chk_length) { 2735 /* all done, mutilated chunk */ 2736 stop_proc = 1; 2737 continue; 2738 } 2739 if ((asoc->idata_supported == 1) && 2740 (ch->chunk_type == SCTP_DATA)) { 2741 struct mbuf *op_err; 2742 char msg[SCTP_DIAG_INFO_LEN]; 2743 2744 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated"); 2745 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2746 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21; 2747 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2748 return (2); 2749 } 2750 if ((asoc->idata_supported == 0) && 2751 (ch->chunk_type == SCTP_IDATA)) { 2752 struct mbuf *op_err; 2753 char msg[SCTP_DIAG_INFO_LEN]; 2754 2755 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated"); 2756 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2757 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22; 2758 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2759 return (2); 2760 } 2761 if ((ch->chunk_type == SCTP_DATA) || 2762 (ch->chunk_type == SCTP_IDATA)) { 2763 uint16_t clen; 2764 2765 if (ch->chunk_type == SCTP_DATA) { 2766 clen = sizeof(struct sctp_data_chunk); 2767 } else { 2768 clen = sizeof(struct sctp_idata_chunk); 2769 } 2770 if (chk_length < clen) { 2771 /* 2772 * Need to send an abort since we had an 2773 * invalid data chunk. 2774 */ 2775 struct mbuf *op_err; 2776 char msg[SCTP_DIAG_INFO_LEN]; 2777 2778 SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u", 2779 ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA", 2780 chk_length); 2781 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2782 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23; 2783 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2784 return (2); 2785 } 2786 #ifdef SCTP_AUDITING_ENABLED 2787 sctp_audit_log(0xB1, 0); 2788 #endif 2789 if (SCTP_SIZE32(chk_length) == (length - *offset)) { 2790 last_chunk = 1; 2791 } else { 2792 last_chunk = 0; 2793 } 2794 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, 2795 chk_length, net, high_tsn, &abort_flag, &break_flag, 2796 last_chunk, ch->chunk_type)) { 2797 num_chunks++; 2798 } 2799 if (abort_flag) 2800 return (2); 2801 2802 if (break_flag) { 2803 /* 2804 * Set because of out of rwnd space and no 2805 * drop rep space left.
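Stop walking this packet; the PACKET-DROPPED report sent after the loop tells the peer what we had to drop.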
2806 */ 2807 stop_proc = 1; 2808 continue; 2809 } 2810 } else { 2811 /* not a data chunk in the data region */ 2812 switch (ch->chunk_type) { 2813 case SCTP_INITIATION: 2814 case SCTP_INITIATION_ACK: 2815 case SCTP_SELECTIVE_ACK: 2816 case SCTP_NR_SELECTIVE_ACK: 2817 case SCTP_HEARTBEAT_REQUEST: 2818 case SCTP_HEARTBEAT_ACK: 2819 case SCTP_ABORT_ASSOCIATION: 2820 case SCTP_SHUTDOWN: 2821 case SCTP_SHUTDOWN_ACK: 2822 case SCTP_OPERATION_ERROR: 2823 case SCTP_COOKIE_ECHO: 2824 case SCTP_COOKIE_ACK: 2825 case SCTP_ECN_ECHO: 2826 case SCTP_ECN_CWR: 2827 case SCTP_SHUTDOWN_COMPLETE: 2828 case SCTP_AUTHENTICATION: 2829 case SCTP_ASCONF_ACK: 2830 case SCTP_PACKET_DROPPED: 2831 case SCTP_STREAM_RESET: 2832 case SCTP_FORWARD_CUM_TSN: 2833 case SCTP_ASCONF: 2834 { 2835 /* 2836 * Now, what do we do with KNOWN 2837 * chunks that are NOT in the right 2838 * place? 2839 * 2840 * For now, I do nothing but ignore 2841 * them. We may later want to add 2842 * sysctl stuff to switch out and do 2843 * either an ABORT() or possibly 2844 * process them. 2845 */ 2846 struct mbuf *op_err; 2847 char msg[SCTP_DIAG_INFO_LEN]; 2848 2849 SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x", 2850 ch->chunk_type); 2851 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2852 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2853 return (2); 2854 } 2855 default: 2856 /* 2857 * Unknown chunk type: use bit rules after 2858 * checking length 2859 */ 2860 if (chk_length < sizeof(struct sctp_chunkhdr)) { 2861 /* 2862 * Need to send an abort since we 2863 * had an invalid chunk. 2864 */ 2865 struct mbuf *op_err; 2866 char msg[SCTP_DIAG_INFO_LEN]; 2867 2868 SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length); 2869 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2870 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; 2871 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2872 return (2); 2873 } 2874 if (ch->chunk_type & 0x40) { 2875 /* Add an error report to the queue */ 2876 struct mbuf *op_err; 2877 struct sctp_gen_error_cause *cause; 2878 2879 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause), 2880 0, M_NOWAIT, 1, MT_DATA); 2881 if (op_err != NULL) { 2882 cause = mtod(op_err, struct sctp_gen_error_cause *); 2883 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK); 2884 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause))); 2885 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause); 2886 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT); 2887 if (SCTP_BUF_NEXT(op_err) != NULL) { 2888 sctp_queue_op_err(stcb, op_err); 2889 } else { 2890 sctp_m_freem(op_err); 2891 } 2892 } 2893 } 2894 if ((ch->chunk_type & 0x80) == 0) { 2895 /* discard the rest of this packet */ 2896 stop_proc = 1; 2897 } /* else skip this bad chunk and 2898 * continue... */ 2899 break; 2900 } /* switch of chunk type */ 2901 } 2902 *offset += SCTP_SIZE32(chk_length); 2903 if ((*offset >= length) || stop_proc) { 2904 /* no more data left in the mbuf chain */ 2905 stop_proc = 1; 2906 continue; 2907 } 2908 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 2909 sizeof(struct sctp_chunkhdr), 2910 (uint8_t *)&chunk_buf); 2911 if (ch == NULL) { 2912 *offset = length; 2913 stop_proc = 1; 2914 continue; 2915 } 2916 } 2917 if (break_flag) { 2918 /* 2919 * we need to report rwnd overrun drops.
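The report lets the peer retransmit the dropped chunks promptly instead of waiting for a retransmission timeout.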
2920 */ 2921 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0); 2922 } 2923 if (num_chunks) { 2924 /* 2925 * Did we get data, if so update the time for auto-close and 2926 * give peer credit for being alive. 2927 */ 2928 SCTP_STAT_INCR(sctps_recvpktwithdata); 2929 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 2930 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 2931 stcb->asoc.overall_error_count, 2932 0, 2933 SCTP_FROM_SCTP_INDATA, 2934 __LINE__); 2935 } 2936 stcb->asoc.overall_error_count = 0; 2937 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd); 2938 } 2939 /* now service all of the reassm queue if needed */ 2940 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) { 2941 /* Assure that we ack right away */ 2942 stcb->asoc.send_sack = 1; 2943 } 2944 /* Start a sack timer or QUEUE a SACK for sending */ 2945 sctp_sack_check(stcb, was_a_gap); 2946 return (0); 2947 } 2948 2949 static int 2950 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn, 2951 uint16_t frag_strt, uint16_t frag_end, int nr_sacking, 2952 int *num_frs, 2953 uint32_t *biggest_newly_acked_tsn, 2954 uint32_t *this_sack_lowest_newack, 2955 int *rto_ok) 2956 { 2957 struct sctp_tmit_chunk *tp1; 2958 unsigned int theTSN; 2959 int j, wake_him = 0, circled = 0; 2960 2961 /* Recover the tp1 we last saw */ 2962 tp1 = *p_tp1; 2963 if (tp1 == NULL) { 2964 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 2965 } 2966 for (j = frag_strt; j <= frag_end; j++) { 2967 theTSN = j + last_tsn; 2968 while (tp1) { 2969 if (tp1->rec.data.doing_fast_retransmit) 2970 (*num_frs) += 1; 2971 2972 /*- 2973 * CMT: CUCv2 algorithm. For each TSN being 2974 * processed from the sent queue, track the 2975 * next expected pseudo-cumack, or 2976 * rtx_pseudo_cumack, if required. Separate 2977 * cumack trackers for first transmissions, 2978 * and retransmissions. 2979 */ 2980 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 2981 (tp1->whoTo->find_pseudo_cumack == 1) && 2982 (tp1->snd_count == 1)) { 2983 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn; 2984 tp1->whoTo->find_pseudo_cumack = 0; 2985 } 2986 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 2987 (tp1->whoTo->find_rtx_pseudo_cumack == 1) && 2988 (tp1->snd_count > 1)) { 2989 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn; 2990 tp1->whoTo->find_rtx_pseudo_cumack = 0; 2991 } 2992 if (tp1->rec.data.tsn == theTSN) { 2993 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 2994 /*- 2995 * must be held until 2996 * cum-ack passes 2997 */ 2998 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 2999 /*- 3000 * If it is less than RESEND, it is 3001 * now no-longer in flight. 3002 * Higher values may already be set 3003 * via previous Gap Ack Blocks... 3004 * i.e. ACKED or RESEND. 3005 */ 3006 if (SCTP_TSN_GT(tp1->rec.data.tsn, 3007 *biggest_newly_acked_tsn)) { 3008 *biggest_newly_acked_tsn = tp1->rec.data.tsn; 3009 } 3010 /*- 3011 * CMT: SFR algo (and HTNA) - set 3012 * saw_newack to 1 for dest being 3013 * newly acked. update 3014 * this_sack_highest_newack if 3015 * appropriate. 
3016 */ 3017 if (tp1->rec.data.chunk_was_revoked == 0) 3018 tp1->whoTo->saw_newack = 1; 3019 3020 if (SCTP_TSN_GT(tp1->rec.data.tsn, 3021 tp1->whoTo->this_sack_highest_newack)) { 3022 tp1->whoTo->this_sack_highest_newack = 3023 tp1->rec.data.tsn; 3024 } 3025 /*- 3026 * CMT DAC algo: also update 3027 * this_sack_lowest_newack 3028 */ 3029 if (*this_sack_lowest_newack == 0) { 3030 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3031 sctp_log_sack(*this_sack_lowest_newack, 3032 last_tsn, 3033 tp1->rec.data.tsn, 3034 0, 3035 0, 3036 SCTP_LOG_TSN_ACKED); 3037 } 3038 *this_sack_lowest_newack = tp1->rec.data.tsn; 3039 } 3040 /*- 3041 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp 3042 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set 3043 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be 3044 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack. 3045 * Separate pseudo_cumack trackers for first transmissions and 3046 * retransmissions. 3047 */ 3048 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) { 3049 if (tp1->rec.data.chunk_was_revoked == 0) { 3050 tp1->whoTo->new_pseudo_cumack = 1; 3051 } 3052 tp1->whoTo->find_pseudo_cumack = 1; 3053 } 3054 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 3055 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); 3056 } 3057 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) { 3058 if (tp1->rec.data.chunk_was_revoked == 0) { 3059 tp1->whoTo->new_pseudo_cumack = 1; 3060 } 3061 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3062 } 3063 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3064 sctp_log_sack(*biggest_newly_acked_tsn, 3065 last_tsn, 3066 tp1->rec.data.tsn, 3067 frag_strt, 3068 frag_end, 3069 SCTP_LOG_TSN_ACKED); 3070 } 3071 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3072 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP, 3073 tp1->whoTo->flight_size, 3074 tp1->book_size, 3075 (uint32_t)(uintptr_t)tp1->whoTo, 3076 tp1->rec.data.tsn); 3077 } 3078 sctp_flight_size_decrease(tp1); 3079 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3080 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3081 tp1); 3082 } 3083 sctp_total_flight_decrease(stcb, tp1); 3084 3085 tp1->whoTo->net_ack += tp1->send_size; 3086 if (tp1->snd_count < 2) { 3087 /*- 3088 * True non-retransmitted chunk 3089 */ 3090 tp1->whoTo->net_ack2 += tp1->send_size; 3091 3092 /*- 3093 * update RTO too ? 
3094 */ 3095 if (tp1->do_rtt) { 3096 if (*rto_ok && 3097 sctp_calculate_rto(stcb, 3098 &stcb->asoc, 3099 tp1->whoTo, 3100 &tp1->sent_rcv_time, 3101 SCTP_RTT_FROM_DATA)) { 3102 *rto_ok = 0; 3103 } 3104 if (tp1->whoTo->rto_needed == 0) { 3105 tp1->whoTo->rto_needed = 1; 3106 } 3107 tp1->do_rtt = 0; 3108 } 3109 } 3110 } 3111 if (tp1->sent <= SCTP_DATAGRAM_RESEND) { 3112 if (SCTP_TSN_GT(tp1->rec.data.tsn, 3113 stcb->asoc.this_sack_highest_gap)) { 3114 stcb->asoc.this_sack_highest_gap = 3115 tp1->rec.data.tsn; 3116 } 3117 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3118 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt); 3119 #ifdef SCTP_AUDITING_ENABLED 3120 sctp_audit_log(0xB2, 3121 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff)); 3122 #endif 3123 } 3124 } 3125 /*- 3126 * All chunks NOT UNSENT fall through here and are marked 3127 * (leave PR-SCTP ones that are to skip alone though) 3128 */ 3129 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) && 3130 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { 3131 tp1->sent = SCTP_DATAGRAM_MARKED; 3132 } 3133 if (tp1->rec.data.chunk_was_revoked) { 3134 /* deflate the cwnd */ 3135 tp1->whoTo->cwnd -= tp1->book_size; 3136 tp1->rec.data.chunk_was_revoked = 0; 3137 } 3138 /* NR Sack code here */ 3139 if (nr_sacking && 3140 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { 3141 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) { 3142 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--; 3143 #ifdef INVARIANTS 3144 } else { 3145 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); 3146 #endif 3147 } 3148 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) && 3149 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && 3150 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) { 3151 stcb->asoc.trigger_reset = 1; 3152 } 3153 tp1->sent = SCTP_DATAGRAM_NR_ACKED; 3154 if (tp1->data) { 3155 /* 3156 * sa_ignore 3157 * NO_NULL_CHK 3158 */ 3159 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 3160 sctp_m_freem(tp1->data); 3161 tp1->data = NULL; 3162 } 3163 wake_him++; 3164 } 3165 } 3166 break; 3167 } /* if (tp1->tsn == theTSN) */ 3168 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) { 3169 break; 3170 } 3171 tp1 = TAILQ_NEXT(tp1, sctp_next); 3172 if ((tp1 == NULL) && (circled == 0)) { 3173 circled++; 3174 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 3175 } 3176 } /* end while (tp1) */ 3177 if (tp1 == NULL) { 3178 circled = 0; 3179 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 3180 } 3181 /* In case the fragments were not in order we must reset */ 3182 } /* end for (j = fragStart */ 3183 *p_tp1 = tp1; 3184 return (wake_him); /* Return value only used for nr-sack */ 3185 } 3186 3187 static int 3188 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc, 3189 uint32_t last_tsn, uint32_t *biggest_tsn_acked, 3190 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack, 3191 int num_seg, int num_nr_seg, int *rto_ok) 3192 { 3193 struct sctp_gap_ack_block *frag, block; 3194 struct sctp_tmit_chunk *tp1; 3195 int i; 3196 int num_frs = 0; 3197 int chunk_freed; 3198 int non_revocable; 3199 uint16_t frag_strt, frag_end, prev_frag_end; 3200 3201 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3202 prev_frag_end = 0; 3203 chunk_freed = 0; 3204 3205 for (i = 0; i < (num_seg + num_nr_seg); i++) { 3206 if (i == num_seg) { 3207 prev_frag_end = 0; 3208 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3209 } 3210 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset, 3211 sizeof(struct sctp_gap_ack_block), 
(uint8_t *)&block); 3212 *offset += sizeof(block); 3213 if (frag == NULL) { 3214 return (chunk_freed); 3215 } 3216 frag_strt = ntohs(frag->start); 3217 frag_end = ntohs(frag->end); 3218 3219 if (frag_strt > frag_end) { 3220 /* This gap report is malformed, skip it. */ 3221 continue; 3222 } 3223 if (frag_strt <= prev_frag_end) { 3224 /* This gap report is not in order, so restart. */ 3225 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3226 } 3227 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) { 3228 *biggest_tsn_acked = last_tsn + frag_end; 3229 } 3230 if (i < num_seg) { 3231 non_revocable = 0; 3232 } else { 3233 non_revocable = 1; 3234 } 3235 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end, 3236 non_revocable, &num_frs, biggest_newly_acked_tsn, 3237 this_sack_lowest_newack, rto_ok)) { 3238 chunk_freed = 1; 3239 } 3240 prev_frag_end = frag_end; 3241 } 3242 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3243 if (num_frs) 3244 sctp_log_fr(*biggest_tsn_acked, 3245 *biggest_newly_acked_tsn, 3246 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS); 3247 } 3248 return (chunk_freed); 3249 } 3250 3251 static void 3252 sctp_check_for_revoked(struct sctp_tcb *stcb, 3253 struct sctp_association *asoc, uint32_t cumack, 3254 uint32_t biggest_tsn_acked) 3255 { 3256 struct sctp_tmit_chunk *tp1; 3257 3258 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3259 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) { 3260 /* 3261 * ok this guy is either ACK or MARKED. If it is 3262 * ACKED it has been previously acked but not this 3263 * time i.e. revoked. If it is MARKED it was ACK'ed 3264 * again. 3265 */ 3266 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) { 3267 break; 3268 } 3269 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 3270 /* it has been revoked */ 3271 tp1->sent = SCTP_DATAGRAM_SENT; 3272 tp1->rec.data.chunk_was_revoked = 1; 3273 /* 3274 * We must add this stuff back in to assure 3275 * timers and such get started. 3276 */ 3277 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3278 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 3279 tp1->whoTo->flight_size, 3280 tp1->book_size, 3281 (uint32_t)(uintptr_t)tp1->whoTo, 3282 tp1->rec.data.tsn); 3283 } 3284 sctp_flight_size_increase(tp1); 3285 sctp_total_flight_increase(stcb, tp1); 3286 /* 3287 * We inflate the cwnd to compensate for our 3288 * artificial inflation of the flight_size. 3289 */ 3290 tp1->whoTo->cwnd += tp1->book_size; 3291 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3292 sctp_log_sack(asoc->last_acked_seq, 3293 cumack, 3294 tp1->rec.data.tsn, 3295 0, 3296 0, 3297 SCTP_LOG_TSN_REVOKED); 3298 } 3299 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) { 3300 /* it has been re-acked in this SACK */ 3301 tp1->sent = SCTP_DATAGRAM_ACKED; 3302 } 3303 } 3304 if (tp1->sent == SCTP_DATAGRAM_UNSENT) 3305 break; 3306 } 3307 } 3308 3309 static void 3310 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, 3311 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved) 3312 { 3313 struct sctp_tmit_chunk *tp1; 3314 int strike_flag = 0; 3315 struct timeval now; 3316 int tot_retrans = 0; 3317 uint32_t sending_seq; 3318 struct sctp_nets *net; 3319 int num_dests_sacked = 0; 3320 3321 /* 3322 * select the sending_seq, this is either the next thing ready to be 3323 * sent but not transmitted, OR, the next seq we assign. 
*/ 3325 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 3326 if (tp1 == NULL) { 3327 sending_seq = asoc->sending_seq; 3328 } else { 3329 sending_seq = tp1->rec.data.tsn; 3330 } 3331 3332 /* CMT DAC algo: finding out if SACK is a mixed SACK */ 3333 if ((asoc->sctp_cmt_on_off > 0) && 3334 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3335 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3336 if (net->saw_newack) 3337 num_dests_sacked++; 3338 } 3339 } 3340 if (stcb->asoc.prsctp_supported) { 3341 (void)SCTP_GETTIME_TIMEVAL(&now); 3342 } 3343 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3344 strike_flag = 0; 3345 if (tp1->no_fr_allowed) { 3346 /* this one had a timeout or something */ 3347 continue; 3348 } 3349 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3350 if (tp1->sent < SCTP_DATAGRAM_RESEND) 3351 sctp_log_fr(biggest_tsn_newly_acked, 3352 tp1->rec.data.tsn, 3353 tp1->sent, 3354 SCTP_FR_LOG_CHECK_STRIKE); 3355 } 3356 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) || 3357 tp1->sent == SCTP_DATAGRAM_UNSENT) { 3358 /* done */ 3359 break; 3360 } 3361 if (stcb->asoc.prsctp_supported) { 3362 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3363 /* Is it expired? */ 3364 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3365 /* Yes so drop it */ 3366 if (tp1->data != NULL) { 3367 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, 3368 SCTP_SO_NOT_LOCKED); 3369 } 3370 continue; 3371 } 3372 } 3373 } 3374 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) && 3375 !(accum_moved && asoc->fast_retran_loss_recovery)) { 3376 /* we are beyond the tsn in the sack */ 3377 break; 3378 } 3379 if (tp1->sent >= SCTP_DATAGRAM_RESEND) { 3380 /* either a RESEND, ACKED, or MARKED */ 3381 /* skip */ 3382 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) { 3383 /* Continue striking FWD-TSN chunks */ 3384 tp1->rec.data.fwd_tsn_cnt++; 3385 } 3386 continue; 3387 } 3388 /* 3389 * CMT: SFR algo (covers part of DAC and HTNA as well) 3390 */ 3391 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) { 3392 /* 3393 * No new acks were received for data sent to this 3394 * dest. Therefore, according to the SFR algo for 3395 * CMT, no data sent to this dest can be marked for 3396 * FR using this SACK. 3397 */ 3398 continue; 3399 } else if (tp1->whoTo && 3400 SCTP_TSN_GT(tp1->rec.data.tsn, 3401 tp1->whoTo->this_sack_highest_newack) && 3402 !(accum_moved && asoc->fast_retran_loss_recovery)) { 3403 /* 3404 * CMT: New acks were received for data sent to 3405 * this dest. But no new acks were seen for data 3406 * sent after tp1. Therefore, according to the SFR 3407 * algo for CMT, tp1 cannot be marked for FR using 3408 * this SACK. This step covers part of the DAC algo 3409 * and the HTNA algo as well. 3410 */ 3411 continue; 3412 } 3413 /* 3414 * Here we check to see if we have already done a FR 3415 * and if so we see if the biggest TSN we saw in the sack is 3416 * smaller than the recovery point. If so we don't strike 3417 * the tsn... otherwise we CAN strike the TSN. 3418 */ 3419 /* 3420 * @@@ JRI: Check for CMT if (accum_moved && 3421 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 3422 * 0)) { 3423 */ 3424 if (accum_moved && asoc->fast_retran_loss_recovery) { 3425 /* 3426 * Strike the TSN if in fast-recovery and cum-ack 3427 * moved.
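Each strike moves tp1->sent one step toward SCTP_DATAGRAM_RESEND; once it reaches that state the chunk is marked for fast retransmit below.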
3428 */ 3429 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3430 sctp_log_fr(biggest_tsn_newly_acked, 3431 tp1->rec.data.tsn, 3432 tp1->sent, 3433 SCTP_FR_LOG_STRIKE_CHUNK); 3434 } 3435 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3436 tp1->sent++; 3437 } 3438 if ((asoc->sctp_cmt_on_off > 0) && 3439 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3440 /* 3441 * CMT DAC algorithm: If SACK flag is set to 3442 * 0, then lowest_newack test will not pass 3443 * because it would have been set to the 3444 * cumack earlier. If not already to be 3445 * rtx'd, If not a mixed sack and if tp1 is 3446 * not between two sacked TSNs, then mark by 3447 * one more. NOTE that we are marking by one 3448 * additional time since the SACK DAC flag 3449 * indicates that two packets have been 3450 * received after this missing TSN. 3451 */ 3452 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3453 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) { 3454 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3455 sctp_log_fr(16 + num_dests_sacked, 3456 tp1->rec.data.tsn, 3457 tp1->sent, 3458 SCTP_FR_LOG_STRIKE_CHUNK); 3459 } 3460 tp1->sent++; 3461 } 3462 } 3463 } else if ((tp1->rec.data.doing_fast_retransmit) && 3464 (asoc->sctp_cmt_on_off == 0)) { 3465 /* 3466 * For those that have done a FR we must take 3467 * special consideration if we strike. I.e the 3468 * biggest_newly_acked must be higher than the 3469 * sending_seq at the time we did the FR. 3470 */ 3471 if ( 3472 #ifdef SCTP_FR_TO_ALTERNATE 3473 /* 3474 * If FR's go to new networks, then we must only do 3475 * this for singly homed asoc's. However if the FR's 3476 * go to the same network (Armando's work) then its 3477 * ok to FR multiple times. 3478 */ 3479 (asoc->numnets < 2) 3480 #else 3481 (1) 3482 #endif 3483 ) { 3484 if (SCTP_TSN_GE(biggest_tsn_newly_acked, 3485 tp1->rec.data.fast_retran_tsn)) { 3486 /* 3487 * Strike the TSN, since this ack is 3488 * beyond where things were when we 3489 * did a FR. 3490 */ 3491 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3492 sctp_log_fr(biggest_tsn_newly_acked, 3493 tp1->rec.data.tsn, 3494 tp1->sent, 3495 SCTP_FR_LOG_STRIKE_CHUNK); 3496 } 3497 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3498 tp1->sent++; 3499 } 3500 strike_flag = 1; 3501 if ((asoc->sctp_cmt_on_off > 0) && 3502 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3503 /* 3504 * CMT DAC algorithm: If 3505 * SACK flag is set to 0, 3506 * then lowest_newack test 3507 * will not pass because it 3508 * would have been set to 3509 * the cumack earlier. If 3510 * not already to be rtx'd, 3511 * If not a mixed sack and 3512 * if tp1 is not between two 3513 * sacked TSNs, then mark by 3514 * one more. NOTE that we 3515 * are marking by one 3516 * additional time since the 3517 * SACK DAC flag indicates 3518 * that two packets have 3519 * been received after this 3520 * missing TSN. 3521 */ 3522 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 3523 (num_dests_sacked == 1) && 3524 SCTP_TSN_GT(this_sack_lowest_newack, 3525 tp1->rec.data.tsn)) { 3526 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3527 sctp_log_fr(32 + num_dests_sacked, 3528 tp1->rec.data.tsn, 3529 tp1->sent, 3530 SCTP_FR_LOG_STRIKE_CHUNK); 3531 } 3532 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3533 tp1->sent++; 3534 } 3535 } 3536 } 3537 } 3538 } 3539 /* 3540 * JRI: TODO: remove code for HTNA algo. CMT's SFR 3541 * algo covers HTNA. 
3542 */ 3543 } else if (SCTP_TSN_GT(tp1->rec.data.tsn, 3544 biggest_tsn_newly_acked)) { 3545 /* 3546 * We don't strike these: This is the HTNA 3547 * algorithm i.e. we don't strike If our TSN is 3548 * larger than the Highest TSN Newly Acked. 3549 */ 3550 ; 3551 } else { 3552 /* Strike the TSN */ 3553 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3554 sctp_log_fr(biggest_tsn_newly_acked, 3555 tp1->rec.data.tsn, 3556 tp1->sent, 3557 SCTP_FR_LOG_STRIKE_CHUNK); 3558 } 3559 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3560 tp1->sent++; 3561 } 3562 if ((asoc->sctp_cmt_on_off > 0) && 3563 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3564 /* 3565 * CMT DAC algorithm: If SACK flag is set to 3566 * 0, then lowest_newack test will not pass 3567 * because it would have been set to the 3568 * cumack earlier. If not already to be 3569 * rtx'd, If not a mixed sack and if tp1 is 3570 * not between two sacked TSNs, then mark by 3571 * one more. NOTE that we are marking by one 3572 * additional time since the SACK DAC flag 3573 * indicates that two packets have been 3574 * received after this missing TSN. 3575 */ 3576 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3577 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) { 3578 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3579 sctp_log_fr(48 + num_dests_sacked, 3580 tp1->rec.data.tsn, 3581 tp1->sent, 3582 SCTP_FR_LOG_STRIKE_CHUNK); 3583 } 3584 tp1->sent++; 3585 } 3586 } 3587 } 3588 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3589 struct sctp_nets *alt; 3590 3591 /* fix counts and things */ 3592 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3593 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND, 3594 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0), 3595 tp1->book_size, 3596 (uint32_t)(uintptr_t)tp1->whoTo, 3597 tp1->rec.data.tsn); 3598 } 3599 if (tp1->whoTo) { 3600 tp1->whoTo->net_ack++; 3601 sctp_flight_size_decrease(tp1); 3602 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3603 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3604 tp1); 3605 } 3606 } 3607 3608 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 3609 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, 3610 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3611 } 3612 /* add back to the rwnd */ 3613 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3614 3615 /* remove from the total flight */ 3616 sctp_total_flight_decrease(stcb, tp1); 3617 3618 if ((stcb->asoc.prsctp_supported) && 3619 (PR_SCTP_RTX_ENABLED(tp1->flags))) { 3620 /* 3621 * Has it been retransmitted tv_sec times? - 3622 * we store the retran count there. 
3623 */ 3624 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) { 3625 /* Yes, so drop it */ 3626 if (tp1->data != NULL) { 3627 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, 3628 SCTP_SO_NOT_LOCKED); 3629 } 3630 /* Make sure to flag we had a FR */ 3631 if (tp1->whoTo != NULL) { 3632 tp1->whoTo->net_ack++; 3633 } 3634 continue; 3635 } 3636 } 3637 /* 3638 * SCTP_PRINTF("OK, we are now ready to FR this 3639 * guy\n"); 3640 */ 3641 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3642 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count, 3643 0, SCTP_FR_MARKED); 3644 } 3645 if (strike_flag) { 3646 /* This is a subsequent FR */ 3647 SCTP_STAT_INCR(sctps_sendmultfastretrans); 3648 } 3649 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3650 if (asoc->sctp_cmt_on_off > 0) { 3651 /* 3652 * CMT: Using RTX_SSTHRESH policy for CMT. 3653 * If CMT is being used, then pick dest with 3654 * largest ssthresh for any retransmission. 3655 */ 3656 tp1->no_fr_allowed = 1; 3657 alt = tp1->whoTo; 3658 /* sa_ignore NO_NULL_CHK */ 3659 if (asoc->sctp_cmt_pf > 0) { 3660 /* 3661 * JRS 5/18/07 - If CMT PF is on, 3662 * use the PF version of 3663 * find_alt_net() 3664 */ 3665 alt = sctp_find_alternate_net(stcb, alt, 2); 3666 } else { 3667 /* 3668 * JRS 5/18/07 - If only CMT is on, 3669 * use the CMT version of 3670 * find_alt_net() 3671 */ 3672 /* sa_ignore NO_NULL_CHK */ 3673 alt = sctp_find_alternate_net(stcb, alt, 1); 3674 } 3675 if (alt == NULL) { 3676 alt = tp1->whoTo; 3677 } 3678 /* 3679 * CUCv2: If a different dest is picked for 3680 * the retransmission, then new 3681 * (rtx-)pseudo_cumack needs to be tracked 3682 * for orig dest. Let CUCv2 track new (rtx-) 3683 * pseudo-cumack always. 3684 */ 3685 if (tp1->whoTo) { 3686 tp1->whoTo->find_pseudo_cumack = 1; 3687 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3688 } 3689 } else { /* CMT is OFF */ 3690 #ifdef SCTP_FR_TO_ALTERNATE 3691 /* Can we find an alternate? */ 3692 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); 3693 #else 3694 /* 3695 * default behavior is to NOT retransmit 3696 * FR's to an alternate. Armando Caro's 3697 * paper details why. 3698 */ 3699 alt = tp1->whoTo; 3700 #endif 3701 } 3702 3703 tp1->rec.data.doing_fast_retransmit = 1; 3704 tot_retrans++; 3705 /* mark the sending seq for possible subsequent FR's */ 3706 /* 3707 * SCTP_PRINTF("Marking TSN for FR new value %x\n", 3708 * (uint32_t)tpi->rec.data.tsn); 3709 */ 3710 if (TAILQ_EMPTY(&asoc->send_queue)) { 3711 /* 3712 * If the queue of send is empty then its 3713 * the next sequence number that will be 3714 * assigned so we subtract one from this to 3715 * get the one we last sent. 3716 */ 3717 tp1->rec.data.fast_retran_tsn = sending_seq; 3718 } else { 3719 /* 3720 * If there are chunks on the send queue 3721 * (unsent data that has made it from the 3722 * stream queues but not out the door, we 3723 * take the first one (which will have the 3724 * lowest TSN) and subtract one to get the 3725 * one we last sent. 3726 */ 3727 struct sctp_tmit_chunk *ttt; 3728 3729 ttt = TAILQ_FIRST(&asoc->send_queue); 3730 tp1->rec.data.fast_retran_tsn = 3731 ttt->rec.data.tsn; 3732 } 3733 3734 if (tp1->do_rtt) { 3735 /* 3736 * this guy had a RTO calculation pending on 3737 * it, cancel it 3738 */ 3739 if ((tp1->whoTo != NULL) && 3740 (tp1->whoTo->rto_needed == 0)) { 3741 tp1->whoTo->rto_needed = 1; 3742 } 3743 tp1->do_rtt = 0; 3744 } 3745 if (alt != tp1->whoTo) { 3746 /* yes, there is an alternate. 
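 *
 * Switching destinations is a reference hand-off: the reference held on
 * the old whoTo is released and a new one is taken on alt before the
 * chunk is considered homed there, exactly as the three statements
 * below do.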
*/ 3747 sctp_free_remote_addr(tp1->whoTo); 3748 /* sa_ignore FREED_MEMORY */ 3749 tp1->whoTo = alt; 3750 atomic_add_int(&alt->ref_count, 1); 3751 } 3752 } 3753 } 3754 } 3755 3756 struct sctp_tmit_chunk * 3757 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb, 3758 struct sctp_association *asoc) 3759 { 3760 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL; 3761 struct timeval now; 3762 int now_filled = 0; 3763 3764 if (asoc->prsctp_supported == 0) { 3765 return (NULL); 3766 } 3767 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 3768 if (tp1->sent != SCTP_FORWARD_TSN_SKIP && 3769 tp1->sent != SCTP_DATAGRAM_RESEND && 3770 tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 3771 /* no chance to advance, out of here */ 3772 break; 3773 } 3774 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 3775 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || 3776 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { 3777 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 3778 asoc->advanced_peer_ack_point, 3779 tp1->rec.data.tsn, 0, 0); 3780 } 3781 } 3782 if (!PR_SCTP_ENABLED(tp1->flags)) { 3783 /* 3784 * We can't fwd-tsn past any that are reliable, i.e. 3785 * that will be retransmitted until the asoc fails. 3786 */ 3787 break; 3788 } 3789 if (!now_filled) { 3790 (void)SCTP_GETTIME_TIMEVAL(&now); 3791 now_filled = 1; 3792 } 3793 /* 3794 * Now we have a chunk which is marked for another 3795 * retransmission to a PR-stream, but it has either run out 3796 * of its chances already or has been marked to skip. Can we 3797 * skip it if it is a resend? 3798 */ 3799 if (tp1->sent == SCTP_DATAGRAM_RESEND && 3800 (PR_SCTP_TTL_ENABLED(tp1->flags))) { 3801 /* 3802 * Now is this one marked for resend and its time is 3803 * now up? 3804 */ 3805 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3806 /* Yes, so drop it */ 3807 if (tp1->data) { 3808 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 3809 1, SCTP_SO_NOT_LOCKED); 3810 } 3811 } else { 3812 /* 3813 * No, we are done when we hit one marked 3814 * for resend whose time has not expired. 3815 */ 3816 break; 3817 } 3818 } 3819 /* 3820 * OK, now if this chunk is marked to be dropped, we can 3821 * clean it up, advance our peer ack point, and check 3822 * the next chunk.
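 *
 * The advancement below is monotone over the leading run of skippable
 * chunks; in effect:
 *
 *	advanced_peer_ack_point = max(advanced_peer_ack_point,
 *	    TSN of each SKIP/NR_ACKED chunk)
 *
 * with the last qualifying chunk remembered in a_adv so the caller can
 * build a FORWARD-TSN from it.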
3823 */ 3824 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || 3825 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { 3826 /* advance PeerAckPoint goes forward */ 3827 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) { 3828 asoc->advanced_peer_ack_point = tp1->rec.data.tsn; 3829 a_adv = tp1; 3830 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) { 3831 /* No update but we do save the chk */ 3832 a_adv = tp1; 3833 } 3834 } else { 3835 /* 3836 * If it is still in RESEND we can advance no 3837 * further 3838 */ 3839 break; 3840 } 3841 } 3842 return (a_adv); 3843 } 3844 3845 static int 3846 sctp_fs_audit(struct sctp_association *asoc) 3847 { 3848 struct sctp_tmit_chunk *chk; 3849 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0; 3850 int ret; 3851 #ifndef INVARIANTS 3852 int entry_flight, entry_cnt; 3853 #endif 3854 3855 ret = 0; 3856 #ifndef INVARIANTS 3857 entry_flight = asoc->total_flight; 3858 entry_cnt = asoc->total_flight_count; 3859 #endif 3860 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt) 3861 return (0); 3862 3863 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 3864 if (chk->sent < SCTP_DATAGRAM_RESEND) { 3865 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n", 3866 chk->rec.data.tsn, 3867 chk->send_size, 3868 chk->snd_count); 3869 inflight++; 3870 } else if (chk->sent == SCTP_DATAGRAM_RESEND) { 3871 resend++; 3872 } else if (chk->sent < SCTP_DATAGRAM_ACKED) { 3873 inbetween++; 3874 } else if (chk->sent > SCTP_DATAGRAM_ACKED) { 3875 above++; 3876 } else { 3877 acked++; 3878 } 3879 } 3880 3881 if ((inflight > 0) || (inbetween > 0)) { 3882 #ifdef INVARIANTS 3883 panic("Flight size-express incorrect? \n"); 3884 #else 3885 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n", 3886 entry_flight, entry_cnt); 3887 3888 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n", 3889 inflight, inbetween, resend, above, acked); 3890 ret = 1; 3891 #endif 3892 } 3893 return (ret); 3894 } 3895 3896 static void 3897 sctp_window_probe_recovery(struct sctp_tcb *stcb, 3898 struct sctp_association *asoc, 3899 struct sctp_tmit_chunk *tp1) 3900 { 3901 tp1->window_probe = 0; 3902 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) { 3903 /* TSN's skipped we do NOT move back. */ 3904 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD, 3905 tp1->whoTo ? 
tp1->whoTo->flight_size : 0, 3906 tp1->book_size, 3907 (uint32_t)(uintptr_t)tp1->whoTo, 3908 tp1->rec.data.tsn); 3909 return; 3910 } 3911 /* First setup this by shrinking flight */ 3912 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3913 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3914 tp1); 3915 } 3916 sctp_flight_size_decrease(tp1); 3917 sctp_total_flight_decrease(stcb, tp1); 3918 /* Now mark for resend */ 3919 tp1->sent = SCTP_DATAGRAM_RESEND; 3920 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 3921 3922 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3923 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP, 3924 tp1->whoTo->flight_size, 3925 tp1->book_size, 3926 (uint32_t)(uintptr_t)tp1->whoTo, 3927 tp1->rec.data.tsn); 3928 } 3929 } 3930 3931 void 3932 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, 3933 uint32_t rwnd, int *abort_now, int ecne_seen) 3934 { 3935 struct sctp_nets *net; 3936 struct sctp_association *asoc; 3937 struct sctp_tmit_chunk *tp1, *tp2; 3938 uint32_t old_rwnd; 3939 int win_probe_recovery = 0; 3940 int win_probe_recovered = 0; 3941 int j, done_once = 0; 3942 int rto_ok = 1; 3943 uint32_t send_s; 3944 3945 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 3946 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack, 3947 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 3948 } 3949 SCTP_TCB_LOCK_ASSERT(stcb); 3950 #ifdef SCTP_ASOCLOG_OF_TSNS 3951 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack; 3952 stcb->asoc.cumack_log_at++; 3953 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 3954 stcb->asoc.cumack_log_at = 0; 3955 } 3956 #endif 3957 asoc = &stcb->asoc; 3958 old_rwnd = asoc->peers_rwnd; 3959 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) { 3960 /* old ack */ 3961 return; 3962 } else if (asoc->last_acked_seq == cumack) { 3963 /* Window update sack */ 3964 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 3965 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 3966 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 3967 /* SWS sender side engages */ 3968 asoc->peers_rwnd = 0; 3969 } 3970 if (asoc->peers_rwnd > old_rwnd) { 3971 goto again; 3972 } 3973 return; 3974 } 3975 3976 /* First setup for CC stuff */ 3977 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3978 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) { 3979 /* Drag along the window_tsn for cwr's */ 3980 net->cwr_window_tsn = cumack; 3981 } 3982 net->prev_cwnd = net->cwnd; 3983 net->net_ack = 0; 3984 net->net_ack2 = 0; 3985 3986 /* 3987 * CMT: Reset CUC and Fast recovery algo variables before 3988 * SACK processing 3989 */ 3990 net->new_pseudo_cumack = 0; 3991 net->will_exit_fast_recovery = 0; 3992 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 3993 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 3994 } 3995 } 3996 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 3997 tp1 = TAILQ_LAST(&asoc->sent_queue, 3998 sctpchunk_listhead); 3999 send_s = tp1->rec.data.tsn + 1; 4000 } else { 4001 send_s = asoc->sending_seq; 4002 } 4003 if (SCTP_TSN_GE(cumack, send_s)) { 4004 struct mbuf *op_err; 4005 char msg[SCTP_DIAG_INFO_LEN]; 4006 4007 *abort_now = 1; 4008 /* XXX */ 4009 SCTP_SNPRINTF(msg, sizeof(msg), 4010 "Cum ack %8.8x greater or equal than TSN %8.8x", 4011 cumack, send_s); 4012 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 4013 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 4014 
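/*
 * A cum-ack at or beyond send_s acknowledges a TSN that was never
 * sent: send_s was computed above as one past the highest TSN on the
 * sent_queue, or as asoc->sending_seq when nothing is outstanding, so
 * this is a protocol violation and the association is aborted.
 */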
sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4015 return; 4016 } 4017 asoc->this_sack_highest_gap = cumack; 4018 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4019 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4020 stcb->asoc.overall_error_count, 4021 0, 4022 SCTP_FROM_SCTP_INDATA, 4023 __LINE__); 4024 } 4025 stcb->asoc.overall_error_count = 0; 4026 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) { 4027 /* process the new consecutive TSN first */ 4028 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 4029 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) { 4030 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 4031 SCTP_PRINTF("Warning, an unsent is now acked?\n"); 4032 } 4033 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4034 /* 4035 * If it is less than ACKED, it is 4036 * now no-longer in flight. Higher 4037 * values may occur during marking 4038 */ 4039 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4040 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4041 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4042 tp1->whoTo->flight_size, 4043 tp1->book_size, 4044 (uint32_t)(uintptr_t)tp1->whoTo, 4045 tp1->rec.data.tsn); 4046 } 4047 sctp_flight_size_decrease(tp1); 4048 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 4049 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 4050 tp1); 4051 } 4052 /* sa_ignore NO_NULL_CHK */ 4053 sctp_total_flight_decrease(stcb, tp1); 4054 } 4055 tp1->whoTo->net_ack += tp1->send_size; 4056 if (tp1->snd_count < 2) { 4057 /* 4058 * True non-retransmitted 4059 * chunk 4060 */ 4061 tp1->whoTo->net_ack2 += 4062 tp1->send_size; 4063 4064 /* update RTO too? */ 4065 if (tp1->do_rtt) { 4066 if (rto_ok && 4067 sctp_calculate_rto(stcb, 4068 &stcb->asoc, 4069 tp1->whoTo, 4070 &tp1->sent_rcv_time, 4071 SCTP_RTT_FROM_DATA)) { 4072 rto_ok = 0; 4073 } 4074 if (tp1->whoTo->rto_needed == 0) { 4075 tp1->whoTo->rto_needed = 1; 4076 } 4077 tp1->do_rtt = 0; 4078 } 4079 } 4080 /* 4081 * CMT: CUCv2 algorithm. From the 4082 * cumack'd TSNs, for each TSN being 4083 * acked for the first time, set the 4084 * following variables for the 4085 * corresp destination. 4086 * new_pseudo_cumack will trigger a 4087 * cwnd update. 4088 * find_(rtx_)pseudo_cumack will 4089 * trigger search for the next 4090 * expected (rtx-)pseudo-cumack. 
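 *
 * Concretely, each first-time cum-acked TSN re-arms three flags on its
 * destination, as the assignments below do:
 *
 *	new_pseudo_cumack      - a cwnd update may fire
 *	find_pseudo_cumack     - locate the next expected pseudo-cumack
 *	find_rtx_pseudo_cumack - likewise for the retransmission series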
4091 */ 4092 tp1->whoTo->new_pseudo_cumack = 1; 4093 tp1->whoTo->find_pseudo_cumack = 1; 4094 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4095 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 4096 /* sa_ignore NO_NULL_CHK */ 4097 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); 4098 } 4099 } 4100 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4101 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4102 } 4103 if (tp1->rec.data.chunk_was_revoked) { 4104 /* deflate the cwnd */ 4105 tp1->whoTo->cwnd -= tp1->book_size; 4106 tp1->rec.data.chunk_was_revoked = 0; 4107 } 4108 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4109 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) { 4110 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--; 4111 #ifdef INVARIANTS 4112 } else { 4113 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); 4114 #endif 4115 } 4116 } 4117 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) && 4118 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && 4119 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) { 4120 asoc->trigger_reset = 1; 4121 } 4122 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4123 if (tp1->data) { 4124 /* sa_ignore NO_NULL_CHK */ 4125 sctp_free_bufspace(stcb, asoc, tp1, 1); 4126 sctp_m_freem(tp1->data); 4127 tp1->data = NULL; 4128 } 4129 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4130 sctp_log_sack(asoc->last_acked_seq, 4131 cumack, 4132 tp1->rec.data.tsn, 4133 0, 4134 0, 4135 SCTP_LOG_FREE_SENT); 4136 } 4137 asoc->sent_queue_cnt--; 4138 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 4139 } else { 4140 break; 4141 } 4142 } 4143 } 4144 /* sa_ignore NO_NULL_CHK */ 4145 if (stcb->sctp_socket) { 4146 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4147 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4148 /* sa_ignore NO_NULL_CHK */ 4149 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK); 4150 } 4151 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4152 } else { 4153 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4154 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK); 4155 } 4156 } 4157 4158 /* JRS - Use the congestion control given in the CC module */ 4159 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) { 4160 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4161 if (net->net_ack2 > 0) { 4162 /* 4163 * Karn's rule applies to clearing error 4164 * count, this is optional. 
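 *
 * Only never-retransmitted chunks (snd_count < 2, accumulated into
 * net_ack2 above) give an unambiguous ack, so only those are allowed
 * to clear the error count here, just as only they feed the RTT
 * measurement.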
4165 */ 4166 net->error_count = 0; 4167 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { 4168 /* addr came good */ 4169 net->dest_state |= SCTP_ADDR_REACHABLE; 4170 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 4171 0, (void *)net, SCTP_SO_NOT_LOCKED); 4172 } 4173 if (net == stcb->asoc.primary_destination) { 4174 if (stcb->asoc.alternate) { 4175 /* 4176 * release the alternate, 4177 * primary is good 4178 */ 4179 sctp_free_remote_addr(stcb->asoc.alternate); 4180 stcb->asoc.alternate = NULL; 4181 } 4182 } 4183 if (net->dest_state & SCTP_ADDR_PF) { 4184 net->dest_state &= ~SCTP_ADDR_PF; 4185 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, 4186 stcb->sctp_ep, stcb, net, 4187 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); 4188 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 4189 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 4190 /* Done with this net */ 4191 net->net_ack = 0; 4192 } 4193 /* restore any doubled timers */ 4194 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 4195 if (net->RTO < stcb->asoc.minrto) { 4196 net->RTO = stcb->asoc.minrto; 4197 } 4198 if (net->RTO > stcb->asoc.maxrto) { 4199 net->RTO = stcb->asoc.maxrto; 4200 } 4201 } 4202 } 4203 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0); 4204 } 4205 asoc->last_acked_seq = cumack; 4206 4207 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4208 /* nothing left in-flight */ 4209 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4210 net->flight_size = 0; 4211 net->partial_bytes_acked = 0; 4212 } 4213 asoc->total_flight = 0; 4214 asoc->total_flight_count = 0; 4215 } 4216 4217 /* RWND update */ 4218 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 4219 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 4220 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4221 /* SWS sender side engages */ 4222 asoc->peers_rwnd = 0; 4223 } 4224 if (asoc->peers_rwnd > old_rwnd) { 4225 win_probe_recovery = 1; 4226 } 4227 /* Now assure a timer where data is queued at */ 4228 again: 4229 j = 0; 4230 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4231 if (win_probe_recovery && (net->window_probe)) { 4232 win_probe_recovered = 1; 4233 /* 4234 * Find first chunk that was used with window probe 4235 * and clear the sent 4236 */ 4237 /* sa_ignore FREED_MEMORY */ 4238 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4239 if (tp1->window_probe) { 4240 /* move back to data send queue */ 4241 sctp_window_probe_recovery(stcb, asoc, tp1); 4242 break; 4243 } 4244 } 4245 } 4246 if (net->flight_size) { 4247 j++; 4248 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net); 4249 if (net->window_probe) { 4250 net->window_probe = 0; 4251 } 4252 } else { 4253 if (net->window_probe) { 4254 /* 4255 * In window probes we must assure a timer 4256 * is still running there 4257 */ 4258 net->window_probe = 0; 4259 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4260 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net); 4261 } 4262 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4263 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4264 stcb, net, 4265 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27); 4266 } 4267 } 4268 } 4269 if ((j == 0) && 4270 (!TAILQ_EMPTY(&asoc->sent_queue)) && 4271 (asoc->sent_queue_retran_cnt == 0) && 4272 (win_probe_recovered == 0) && 4273 (done_once == 0)) { 4274 /* 4275 * huh, this should not happen unless all packets are 4276 * PR-SCTP and marked to skip of course. 
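 *
 * If the audit below fails, the bookkeeping is rebuilt from scratch:
 * every net's flight_size and the association totals are zeroed, then
 * the sent_queue is walked re-adding each chunk still below
 * SCTP_DATAGRAM_RESEND and recounting those marked for retransmission.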
4277 */ 4278 if (sctp_fs_audit(asoc)) { 4279 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4280 net->flight_size = 0; 4281 } 4282 asoc->total_flight = 0; 4283 asoc->total_flight_count = 0; 4284 asoc->sent_queue_retran_cnt = 0; 4285 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4286 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4287 sctp_flight_size_increase(tp1); 4288 sctp_total_flight_increase(stcb, tp1); 4289 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4290 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 4291 } 4292 } 4293 } 4294 done_once = 1; 4295 goto again; 4296 } 4297 /**********************************/ 4298 /* Now what about shutdown issues */ 4299 /**********************************/ 4300 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4301 /* nothing left on sendqueue.. consider done */ 4302 /* clean up */ 4303 if ((asoc->stream_queue_cnt == 1) && 4304 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4305 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 4306 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) { 4307 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT); 4308 } 4309 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4310 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 4311 (asoc->stream_queue_cnt == 1) && 4312 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 4313 struct mbuf *op_err; 4314 4315 *abort_now = 1; 4316 /* XXX */ 4317 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 4318 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28; 4319 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4320 return; 4321 } 4322 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4323 (asoc->stream_queue_cnt == 0)) { 4324 struct sctp_nets *netp; 4325 4326 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4327 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4328 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4329 } 4330 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT); 4331 sctp_stop_timers_for_shutdown(stcb); 4332 if (asoc->alternate) { 4333 netp = asoc->alternate; 4334 } else { 4335 netp = asoc->primary_destination; 4336 } 4337 sctp_send_shutdown(stcb, netp); 4338 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4339 stcb->sctp_ep, stcb, netp); 4340 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4341 stcb->sctp_ep, stcb, NULL); 4342 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4343 (asoc->stream_queue_cnt == 0)) { 4344 struct sctp_nets *netp; 4345 4346 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4347 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT); 4348 sctp_stop_timers_for_shutdown(stcb); 4349 if (asoc->alternate) { 4350 netp = asoc->alternate; 4351 } else { 4352 netp = asoc->primary_destination; 4353 } 4354 sctp_send_shutdown_ack(stcb, netp); 4355 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4356 stcb->sctp_ep, stcb, netp); 4357 } 4358 } 4359 /*********************************************/ 4360 /* Here we perform PR-SCTP procedures */ 4361 /* (section 4.2) */ 4362 /*********************************************/ 4363 /* C1. 
update advancedPeerAckPoint */ 4364 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) { 4365 asoc->advanced_peer_ack_point = cumack; 4366 } 4367 /* PR-SCTP issues need to be addressed too */ 4368 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { 4369 struct sctp_tmit_chunk *lchk; 4370 uint32_t old_adv_peer_ack_point; 4371 4372 old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 4373 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 4374 /* C3. See if we need to send a Fwd-TSN */ 4375 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) { 4376 /* 4377 * ISSUE with ECN, see FWD-TSN processing. 4378 */ 4379 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 4380 send_forward_tsn(stcb, asoc); 4381 } else if (lchk) { 4382 /* try to FR fwd-tsn's that get lost too */ 4383 if (lchk->rec.data.fwd_tsn_cnt >= 3) { 4384 send_forward_tsn(stcb, asoc); 4385 } 4386 } 4387 } 4388 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) { 4389 if (lchk->whoTo != NULL) { 4390 break; 4391 } 4392 } 4393 if (lchk != NULL) { 4394 /* Assure a timer is up */ 4395 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4396 stcb->sctp_ep, stcb, lchk->whoTo); 4397 } 4398 } 4399 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 4400 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 4401 rwnd, 4402 stcb->asoc.peers_rwnd, 4403 stcb->asoc.total_flight, 4404 stcb->asoc.total_output_queue_size); 4405 } 4406 } 4407 4408 void 4409 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, 4410 struct sctp_tcb *stcb, 4411 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup, 4412 int *abort_now, uint8_t flags, 4413 uint32_t cum_ack, uint32_t rwnd, int ecne_seen) 4414 { 4415 struct sctp_association *asoc; 4416 struct sctp_tmit_chunk *tp1, *tp2; 4417 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack; 4418 uint16_t wake_him = 0; 4419 uint32_t send_s = 0; 4420 long j; 4421 int accum_moved = 0; 4422 int will_exit_fast_recovery = 0; 4423 uint32_t a_rwnd, old_rwnd; 4424 int win_probe_recovery = 0; 4425 int win_probe_recovered = 0; 4426 struct sctp_nets *net = NULL; 4427 int done_once; 4428 int rto_ok = 1; 4429 uint8_t reneged_all = 0; 4430 uint8_t cmt_dac_flag; 4431 4432 /* 4433 * We take any chance we can to service our queues, since we cannot 4434 * get awoken when the socket is read from :< 4435 */ 4436 /* 4437 * Now perform the actual SACK handling: 4438 * 1) Verify that it is not an old SACK; if so, discard. 4439 * 2) If there is nothing left in the send queue (the cum-ack equals 4440 * the last acked TSN), then this is a duplicate too: update any 4441 * rwnd change, verify no timers are running, and return. 4442 * 3) Process any new consecutive data, i.e. the cum-ack moved: 4443 * process these first and note that it moved. 4444 * 4) Process any SACK gap blocks. 5) Drop any acked chunks from the 4445 * queue. 6) Check for any revoked blocks and mark them. 7) Update 4446 * the cwnd. 8) Nothing left: sync up flight sizes and the like, 4447 * stop all timers, and check for the shutdown_pending state; if so, 4448 * go ahead and send off the shutdown. If in shutdown-received, send 4449 * the shutdown-ack, start that timer, and return. 9) Strike any 4450 * non-acked chunks and do the FR procedure if needed, being sure to 4451 * set the FR flag. 10) Do the PR-SCTP procedures. 11) Apply any FR penalties. 12) Assure we will SACK if in the shutdown_recv state.
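 *
 * The body below follows these steps in order, prefixed by a reality
 * check that the cum-ack does not acknowledge a TSN we never sent
 * (see the send_s computation below).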
4452 */ 4453 SCTP_TCB_LOCK_ASSERT(stcb); 4454 /* CMT DAC algo */ 4455 this_sack_lowest_newack = 0; 4456 SCTP_STAT_INCR(sctps_slowpath_sack); 4457 last_tsn = cum_ack; 4458 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC; 4459 #ifdef SCTP_ASOCLOG_OF_TSNS 4460 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack; 4461 stcb->asoc.cumack_log_at++; 4462 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 4463 stcb->asoc.cumack_log_at = 0; 4464 } 4465 #endif 4466 a_rwnd = rwnd; 4467 4468 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 4469 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack, 4470 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 4471 } 4472 4473 old_rwnd = stcb->asoc.peers_rwnd; 4474 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4475 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4476 stcb->asoc.overall_error_count, 4477 0, 4478 SCTP_FROM_SCTP_INDATA, 4479 __LINE__); 4480 } 4481 stcb->asoc.overall_error_count = 0; 4482 asoc = &stcb->asoc; 4483 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4484 sctp_log_sack(asoc->last_acked_seq, 4485 cum_ack, 4486 0, 4487 num_seg, 4488 num_dup, 4489 SCTP_LOG_NEW_SACK); 4490 } 4491 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) { 4492 uint16_t i; 4493 uint32_t *dupdata, dblock; 4494 4495 for (i = 0; i < num_dup; i++) { 4496 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t), 4497 sizeof(uint32_t), (uint8_t *)&dblock); 4498 if (dupdata == NULL) { 4499 break; 4500 } 4501 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); 4502 } 4503 } 4504 /* reality check */ 4505 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 4506 tp1 = TAILQ_LAST(&asoc->sent_queue, 4507 sctpchunk_listhead); 4508 send_s = tp1->rec.data.tsn + 1; 4509 } else { 4510 tp1 = NULL; 4511 send_s = asoc->sending_seq; 4512 } 4513 if (SCTP_TSN_GE(cum_ack, send_s)) { 4514 struct mbuf *op_err; 4515 char msg[SCTP_DIAG_INFO_LEN]; 4516 4517 /* 4518 * no way, we have not even sent this TSN out yet. Peer is 4519 * hopelessly messed up with us. 
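 *
 * As in the express path, send_s is one past the highest TSN ever
 * handed to the wire (the last sent_queue entry's TSN + 1, or
 * asoc->sending_seq when the queue is empty), so SCTP_TSN_GE(cum_ack,
 * send_s) can only hold for data we never sent.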
4520 */ 4521 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n", 4522 cum_ack, send_s); 4523 if (tp1) { 4524 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n", 4525 tp1->rec.data.tsn, (void *)tp1); 4526 } 4527 hopeless_peer: 4528 *abort_now = 1; 4529 /* XXX */ 4530 SCTP_SNPRINTF(msg, sizeof(msg), 4531 "Cum ack %8.8x greater or equal than TSN %8.8x", 4532 cum_ack, send_s); 4533 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 4534 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_29; 4535 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4536 return; 4537 } 4538 /**********************/ 4539 /* 1) check the range */ 4540 /**********************/ 4541 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) { 4542 /* acking something behind */ 4543 return; 4544 } 4545 4546 /* update the Rwnd of the peer */ 4547 if (TAILQ_EMPTY(&asoc->sent_queue) && 4548 TAILQ_EMPTY(&asoc->send_queue) && 4549 (asoc->stream_queue_cnt == 0)) { 4550 /* nothing left on send/sent and strmq */ 4551 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4552 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4553 asoc->peers_rwnd, 0, 0, a_rwnd); 4554 } 4555 asoc->peers_rwnd = a_rwnd; 4556 if (asoc->sent_queue_retran_cnt) { 4557 asoc->sent_queue_retran_cnt = 0; 4558 } 4559 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4560 /* SWS sender side engages */ 4561 asoc->peers_rwnd = 0; 4562 } 4563 /* stop any timers */ 4564 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4565 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4566 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30); 4567 net->partial_bytes_acked = 0; 4568 net->flight_size = 0; 4569 } 4570 asoc->total_flight = 0; 4571 asoc->total_flight_count = 0; 4572 return; 4573 } 4574 /* 4575 * We init netAckSz and netAckSz2 to 0. These are used to track 2 4576 * things. The total byte count acked is tracked in netAckSz AND 4577 * netAck2 is used to track the total bytes acked that are un- 4578 * amibguious and were never retransmitted. We track these on a per 4579 * destination address basis. 4580 */ 4581 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4582 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) { 4583 /* Drag along the window_tsn for cwr's */ 4584 net->cwr_window_tsn = cum_ack; 4585 } 4586 net->prev_cwnd = net->cwnd; 4587 net->net_ack = 0; 4588 net->net_ack2 = 0; 4589 4590 /* 4591 * CMT: Reset CUC and Fast recovery algo variables before 4592 * SACK processing 4593 */ 4594 net->new_pseudo_cumack = 0; 4595 net->will_exit_fast_recovery = 0; 4596 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 4597 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 4598 } 4599 4600 /* 4601 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has 4602 * to be greater than the cumack. Also reset saw_newack to 0 4603 * for all dests. 4604 */ 4605 net->saw_newack = 0; 4606 net->this_sack_highest_newack = last_tsn; 4607 } 4608 /* process the new consecutive TSN first */ 4609 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4610 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) { 4611 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 4612 accum_moved = 1; 4613 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4614 /* 4615 * If it is less than ACKED, it is 4616 * now no-longer in flight. 
Higher 4617 * values may occur during marking 4618 */ 4619 if ((tp1->whoTo->dest_state & 4620 SCTP_ADDR_UNCONFIRMED) && 4621 (tp1->snd_count < 2)) { 4622 /* 4623 * If there was no retran 4624 * and the address is 4625 * un-confirmed and we sent 4626 * there and are now 4627 * sacked.. its confirmed, 4628 * mark it so. 4629 */ 4630 tp1->whoTo->dest_state &= 4631 ~SCTP_ADDR_UNCONFIRMED; 4632 } 4633 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4634 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4635 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4636 tp1->whoTo->flight_size, 4637 tp1->book_size, 4638 (uint32_t)(uintptr_t)tp1->whoTo, 4639 tp1->rec.data.tsn); 4640 } 4641 sctp_flight_size_decrease(tp1); 4642 sctp_total_flight_decrease(stcb, tp1); 4643 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 4644 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 4645 tp1); 4646 } 4647 } 4648 tp1->whoTo->net_ack += tp1->send_size; 4649 4650 /* CMT SFR and DAC algos */ 4651 this_sack_lowest_newack = tp1->rec.data.tsn; 4652 tp1->whoTo->saw_newack = 1; 4653 4654 if (tp1->snd_count < 2) { 4655 /* 4656 * True non-retransmitted 4657 * chunk 4658 */ 4659 tp1->whoTo->net_ack2 += 4660 tp1->send_size; 4661 4662 /* update RTO too? */ 4663 if (tp1->do_rtt) { 4664 if (rto_ok && 4665 sctp_calculate_rto(stcb, 4666 &stcb->asoc, 4667 tp1->whoTo, 4668 &tp1->sent_rcv_time, 4669 SCTP_RTT_FROM_DATA)) { 4670 rto_ok = 0; 4671 } 4672 if (tp1->whoTo->rto_needed == 0) { 4673 tp1->whoTo->rto_needed = 1; 4674 } 4675 tp1->do_rtt = 0; 4676 } 4677 } 4678 /* 4679 * CMT: CUCv2 algorithm. From the 4680 * cumack'd TSNs, for each TSN being 4681 * acked for the first time, set the 4682 * following variables for the 4683 * corresp destination. 4684 * new_pseudo_cumack will trigger a 4685 * cwnd update. 4686 * find_(rtx_)pseudo_cumack will 4687 * trigger search for the next 4688 * expected (rtx-)pseudo-cumack. 4689 */ 4690 tp1->whoTo->new_pseudo_cumack = 1; 4691 tp1->whoTo->find_pseudo_cumack = 1; 4692 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4693 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4694 sctp_log_sack(asoc->last_acked_seq, 4695 cum_ack, 4696 tp1->rec.data.tsn, 4697 0, 4698 0, 4699 SCTP_LOG_TSN_ACKED); 4700 } 4701 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 4702 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); 4703 } 4704 } 4705 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4706 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4707 #ifdef SCTP_AUDITING_ENABLED 4708 sctp_audit_log(0xB3, 4709 (asoc->sent_queue_retran_cnt & 0x000000ff)); 4710 #endif 4711 } 4712 if (tp1->rec.data.chunk_was_revoked) { 4713 /* deflate the cwnd */ 4714 tp1->whoTo->cwnd -= tp1->book_size; 4715 tp1->rec.data.chunk_was_revoked = 0; 4716 } 4717 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4718 tp1->sent = SCTP_DATAGRAM_ACKED; 4719 } 4720 } 4721 } else { 4722 break; 4723 } 4724 } 4725 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; 4726 /* always set this up to cum-ack */ 4727 asoc->this_sack_highest_gap = last_tsn; 4728 4729 if ((num_seg > 0) || (num_nr_seg > 0)) { 4730 /* 4731 * thisSackHighestGap will increase while handling NEW 4732 * segments this_sack_highest_newack will increase while 4733 * handling NEWLY ACKED chunks. this_sack_lowest_newack is 4734 * used for CMT DAC algo. saw_newack will also change. 
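 *
 * To keep the trackers straight: this_sack_highest_gap (per-assoc) and
 * this_sack_highest_newack (per-destination) grow upward from last_tsn
 * as gap blocks are processed; this_sack_lowest_newack records the
 * lowest newly acked TSN and matters only to the CMT DAC algorithm;
 * and saw_newack flags each destination that had a chunk newly acked.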
4735 */ 4736 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked, 4737 &biggest_tsn_newly_acked, &this_sack_lowest_newack, 4738 num_seg, num_nr_seg, &rto_ok)) { 4739 wake_him++; 4740 } 4741 /* 4742 * validate the biggest_tsn_acked in the gap acks if strict 4743 * adherence is wanted. 4744 */ 4745 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) { 4746 /* 4747 * peer is either confused or we are under attack. 4748 * We must abort. 4749 */ 4750 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n", 4751 biggest_tsn_acked, send_s); 4752 goto hopeless_peer; 4753 } 4754 } 4755 /*******************************************/ 4756 /* cancel ALL T3-send timer if accum moved */ 4757 /*******************************************/ 4758 if (asoc->sctp_cmt_on_off > 0) { 4759 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4760 if (net->new_pseudo_cumack) 4761 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4762 stcb, net, 4763 SCTP_FROM_SCTP_INDATA + SCTP_LOC_31); 4764 } 4765 } else { 4766 if (accum_moved) { 4767 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4768 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4769 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32); 4770 } 4771 } 4772 } 4773 /********************************************/ 4774 /* drop the acked chunks from the sentqueue */ 4775 /********************************************/ 4776 asoc->last_acked_seq = cum_ack; 4777 4778 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 4779 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) { 4780 break; 4781 } 4782 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4783 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) { 4784 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--; 4785 #ifdef INVARIANTS 4786 } else { 4787 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); 4788 #endif 4789 } 4790 } 4791 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) && 4792 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && 4793 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) { 4794 asoc->trigger_reset = 1; 4795 } 4796 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4797 if (PR_SCTP_ENABLED(tp1->flags)) { 4798 if (asoc->pr_sctp_cnt != 0) 4799 asoc->pr_sctp_cnt--; 4800 } 4801 asoc->sent_queue_cnt--; 4802 if (tp1->data) { 4803 /* sa_ignore NO_NULL_CHK */ 4804 sctp_free_bufspace(stcb, asoc, tp1, 1); 4805 sctp_m_freem(tp1->data); 4806 tp1->data = NULL; 4807 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) { 4808 asoc->sent_queue_cnt_removeable--; 4809 } 4810 } 4811 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4812 sctp_log_sack(asoc->last_acked_seq, 4813 cum_ack, 4814 tp1->rec.data.tsn, 4815 0, 4816 0, 4817 SCTP_LOG_FREE_SENT); 4818 } 4819 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 4820 wake_him++; 4821 } 4822 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) { 4823 #ifdef INVARIANTS 4824 panic("Warning flight size is positive and should be 0"); 4825 #else 4826 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n", 4827 asoc->total_flight); 4828 #endif 4829 asoc->total_flight = 0; 4830 } 4831 4832 /* sa_ignore NO_NULL_CHK */ 4833 if ((wake_him) && (stcb->sctp_socket)) { 4834 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4835 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4836 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK); 4837 } 4838 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4839 } else { 4840 if 
(SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4841 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK); 4842 } 4843 } 4844 4845 if (asoc->fast_retran_loss_recovery && accum_moved) { 4846 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) { 4847 /* Setup so we will exit RFC2582 fast recovery */ 4848 will_exit_fast_recovery = 1; 4849 } 4850 } 4851 /* 4852 * Check for revoked fragments: 4853 * 4854 * if Previous sack - Had no frags then we can't have any revoked if 4855 * Previous sack - Had frag's then - If we now have frags aka 4856 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked 4857 * some of them. else - The peer revoked all ACKED fragments, since 4858 * we had some before and now we have NONE. 4859 */ 4860 4861 if (num_seg) { 4862 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked); 4863 asoc->saw_sack_with_frags = 1; 4864 } else if (asoc->saw_sack_with_frags) { 4865 int cnt_revoked = 0; 4866 4867 /* Peer revoked all dg's marked or acked */ 4868 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4869 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 4870 tp1->sent = SCTP_DATAGRAM_SENT; 4871 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4872 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 4873 tp1->whoTo->flight_size, 4874 tp1->book_size, 4875 (uint32_t)(uintptr_t)tp1->whoTo, 4876 tp1->rec.data.tsn); 4877 } 4878 sctp_flight_size_increase(tp1); 4879 sctp_total_flight_increase(stcb, tp1); 4880 tp1->rec.data.chunk_was_revoked = 1; 4881 /* 4882 * To ensure that this increase in 4883 * flightsize, which is artificial, does not 4884 * throttle the sender, we also increase the 4885 * cwnd artificially. 4886 */ 4887 tp1->whoTo->cwnd += tp1->book_size; 4888 cnt_revoked++; 4889 } 4890 } 4891 if (cnt_revoked) { 4892 reneged_all = 1; 4893 } 4894 asoc->saw_sack_with_frags = 0; 4895 } 4896 if (num_nr_seg > 0) 4897 asoc->saw_sack_with_nr_frags = 1; 4898 else 4899 asoc->saw_sack_with_nr_frags = 0; 4900 4901 /* JRS - Use the congestion control given in the CC module */ 4902 if (ecne_seen == 0) { 4903 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4904 if (net->net_ack2 > 0) { 4905 /* 4906 * Karn's rule applies to clearing error 4907 * count, this is optional. 
4908 */ 4909 net->error_count = 0; 4910 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { 4911 /* addr came good */ 4912 net->dest_state |= SCTP_ADDR_REACHABLE; 4913 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 4914 0, (void *)net, SCTP_SO_NOT_LOCKED); 4915 } 4916 4917 if (net == stcb->asoc.primary_destination) { 4918 if (stcb->asoc.alternate) { 4919 /* 4920 * release the alternate, 4921 * primary is good 4922 */ 4923 sctp_free_remote_addr(stcb->asoc.alternate); 4924 stcb->asoc.alternate = NULL; 4925 } 4926 } 4927 4928 if (net->dest_state & SCTP_ADDR_PF) { 4929 net->dest_state &= ~SCTP_ADDR_PF; 4930 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, 4931 stcb->sctp_ep, stcb, net, 4932 SCTP_FROM_SCTP_INDATA + SCTP_LOC_33); 4933 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 4934 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 4935 /* Done with this net */ 4936 net->net_ack = 0; 4937 } 4938 /* restore any doubled timers */ 4939 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 4940 if (net->RTO < stcb->asoc.minrto) { 4941 net->RTO = stcb->asoc.minrto; 4942 } 4943 if (net->RTO > stcb->asoc.maxrto) { 4944 net->RTO = stcb->asoc.maxrto; 4945 } 4946 } 4947 } 4948 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery); 4949 } 4950 4951 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4952 /* nothing left in-flight */ 4953 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4954 /* stop all timers */ 4955 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4956 stcb, net, 4957 SCTP_FROM_SCTP_INDATA + SCTP_LOC_34); 4958 net->flight_size = 0; 4959 net->partial_bytes_acked = 0; 4960 } 4961 asoc->total_flight = 0; 4962 asoc->total_flight_count = 0; 4963 } 4964 4965 /**********************************/ 4966 /* Now what about shutdown issues */ 4967 /**********************************/ 4968 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4969 /* nothing left on sendqueue.. 
consider done */ 4970 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4971 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4972 asoc->peers_rwnd, 0, 0, a_rwnd); 4973 } 4974 asoc->peers_rwnd = a_rwnd; 4975 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4976 /* SWS sender side engages */ 4977 asoc->peers_rwnd = 0; 4978 } 4979 /* clean up */ 4980 if ((asoc->stream_queue_cnt == 1) && 4981 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4982 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 4983 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) { 4984 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT); 4985 } 4986 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4987 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 4988 (asoc->stream_queue_cnt == 1) && 4989 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 4990 struct mbuf *op_err; 4991 4992 *abort_now = 1; 4993 /* XXX */ 4994 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 4995 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35; 4996 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4997 return; 4998 } 4999 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 5000 (asoc->stream_queue_cnt == 0)) { 5001 struct sctp_nets *netp; 5002 5003 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 5004 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 5005 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 5006 } 5007 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT); 5008 sctp_stop_timers_for_shutdown(stcb); 5009 if (asoc->alternate) { 5010 netp = asoc->alternate; 5011 } else { 5012 netp = asoc->primary_destination; 5013 } 5014 sctp_send_shutdown(stcb, netp); 5015 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 5016 stcb->sctp_ep, stcb, netp); 5017 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 5018 stcb->sctp_ep, stcb, NULL); 5019 return; 5020 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) && 5021 (asoc->stream_queue_cnt == 0)) { 5022 struct sctp_nets *netp; 5023 5024 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 5025 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT); 5026 sctp_stop_timers_for_shutdown(stcb); 5027 if (asoc->alternate) { 5028 netp = asoc->alternate; 5029 } else { 5030 netp = asoc->primary_destination; 5031 } 5032 sctp_send_shutdown_ack(stcb, netp); 5033 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 5034 stcb->sctp_ep, stcb, netp); 5035 return; 5036 } 5037 } 5038 /* 5039 * Now here we are going to recycle net_ack for a different use... 5040 * HEADS UP. 5041 */ 5042 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5043 net->net_ack = 0; 5044 } 5045 5046 /* 5047 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking 5048 * to be done. Setting this_sack_lowest_newack to the cum_ack will 5049 * automatically ensure that. 5050 */ 5051 if ((asoc->sctp_cmt_on_off > 0) && 5052 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && 5053 (cmt_dac_flag == 0)) { 5054 this_sack_lowest_newack = cum_ack; 5055 } 5056 if ((num_seg > 0) || (num_nr_seg > 0)) { 5057 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, 5058 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved); 5059 } 5060 /* JRS - Use the congestion control given in the CC module */ 5061 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc); 5062 5063 /* Now are we exiting loss recovery ? 
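 *
 * The exit conditions checked below are, in sketch form:
 *
 *	RFC 2582 FR:   SCTP_TSN_GE(last_acked_seq, fast_recovery_tsn)
 *	satellite T3:  SCTP_TSN_GE(last_acked_seq, sat_t3_recovery_tsn)
 *	CMT FR:        the per-net will_exit_fast_recovery flag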
*/ 5064 if (will_exit_fast_recovery) { 5065 /* Ok, we must exit fast recovery */ 5066 asoc->fast_retran_loss_recovery = 0; 5067 } 5068 if ((asoc->sat_t3_loss_recovery) && 5069 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) { 5070 /* end satellite t3 loss recovery */ 5071 asoc->sat_t3_loss_recovery = 0; 5072 } 5073 /* 5074 * CMT Fast recovery 5075 */ 5076 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5077 if (net->will_exit_fast_recovery) { 5078 /* Ok, we must exit fast recovery */ 5079 net->fast_retran_loss_recovery = 0; 5080 } 5081 } 5082 5083 /* Adjust and set the new rwnd value */ 5084 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 5085 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 5086 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd); 5087 } 5088 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd, 5089 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 5090 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 5091 /* SWS sender side engages */ 5092 asoc->peers_rwnd = 0; 5093 } 5094 if (asoc->peers_rwnd > old_rwnd) { 5095 win_probe_recovery = 1; 5096 } 5097 5098 /* 5099 * Now we must setup so we have a timer up for anyone with 5100 * outstanding data. 5101 */ 5102 done_once = 0; 5103 again: 5104 j = 0; 5105 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5106 if (win_probe_recovery && (net->window_probe)) { 5107 win_probe_recovered = 1; 5108 /*- 5109 * Find first chunk that was used with 5110 * window probe and clear the event. Put 5111 * it back into the send queue as if has 5112 * not been sent. 5113 */ 5114 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5115 if (tp1->window_probe) { 5116 sctp_window_probe_recovery(stcb, asoc, tp1); 5117 break; 5118 } 5119 } 5120 } 5121 if (net->flight_size) { 5122 j++; 5123 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5124 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5125 stcb->sctp_ep, stcb, net); 5126 } 5127 if (net->window_probe) { 5128 net->window_probe = 0; 5129 } 5130 } else { 5131 if (net->window_probe) { 5132 /* 5133 * In window probes we must assure a timer 5134 * is still running there 5135 */ 5136 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5137 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5138 stcb->sctp_ep, stcb, net); 5139 } 5140 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5141 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 5142 stcb, net, 5143 SCTP_FROM_SCTP_INDATA + SCTP_LOC_36); 5144 } 5145 } 5146 } 5147 if ((j == 0) && 5148 (!TAILQ_EMPTY(&asoc->sent_queue)) && 5149 (asoc->sent_queue_retran_cnt == 0) && 5150 (win_probe_recovered == 0) && 5151 (done_once == 0)) { 5152 /* 5153 * huh, this should not happen unless all packets are 5154 * PR-SCTP and marked to skip of course. 
5155 */ 5156 if (sctp_fs_audit(asoc)) { 5157 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5158 net->flight_size = 0; 5159 } 5160 asoc->total_flight = 0; 5161 asoc->total_flight_count = 0; 5162 asoc->sent_queue_retran_cnt = 0; 5163 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5164 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5165 sctp_flight_size_increase(tp1); 5166 sctp_total_flight_increase(stcb, tp1); 5167 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 5168 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 5169 } 5170 } 5171 } 5172 done_once = 1; 5173 goto again; 5174 } 5175 /*********************************************/ 5176 /* Here we perform PR-SCTP procedures */ 5177 /* (section 4.2) */ 5178 /*********************************************/ 5179 /* C1. update advancedPeerAckPoint */ 5180 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) { 5181 asoc->advanced_peer_ack_point = cum_ack; 5182 } 5183 /* C2. try to further move advancedPeerAckPoint ahead */ 5184 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { 5185 struct sctp_tmit_chunk *lchk; 5186 uint32_t old_adv_peer_ack_point; 5187 5188 old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 5189 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 5190 /* C3. See if we need to send a Fwd-TSN */ 5191 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) { 5192 /* 5193 * ISSUE with ECN, see FWD-TSN processing. 5194 */ 5195 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 5196 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 5197 0xee, cum_ack, asoc->advanced_peer_ack_point, 5198 old_adv_peer_ack_point); 5199 } 5200 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 5201 send_forward_tsn(stcb, asoc); 5202 } else if (lchk) { 5203 /* try to FR fwd-tsn's that get lost too */ 5204 if (lchk->rec.data.fwd_tsn_cnt >= 3) { 5205 send_forward_tsn(stcb, asoc); 5206 } 5207 } 5208 } 5209 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) { 5210 if (lchk->whoTo != NULL) { 5211 break; 5212 } 5213 } 5214 if (lchk != NULL) { 5215 /* Assure a timer is up */ 5216 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5217 stcb->sctp_ep, stcb, lchk->whoTo); 5218 } 5219 } 5220 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 5221 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 5222 a_rwnd, 5223 stcb->asoc.peers_rwnd, 5224 stcb->asoc.total_flight, 5225 stcb->asoc.total_output_queue_size); 5226 } 5227 } 5228 5229 void 5230 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag) 5231 { 5232 /* Copy cum-ack */ 5233 uint32_t cum_ack, a_rwnd; 5234 5235 cum_ack = ntohl(cp->cumulative_tsn_ack); 5236 /* Arrange so a_rwnd does NOT change */ 5237 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight; 5238 5239 /* Now call the express sack handling */ 5240 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0); 5241 } 5242 5243 static void 5244 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, 5245 struct sctp_stream_in *strmin) 5246 { 5247 struct sctp_queued_to_read *control, *ncontrol; 5248 struct sctp_association *asoc; 5249 uint32_t mid; 5250 int need_reasm_check = 0; 5251 5252 asoc = &stcb->asoc; 5253 mid = strmin->last_mid_delivered; 5254 /* 5255 * First deliver anything prior to and including the stream no that 5256 * came in. 
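 *
 * "Prior to and including" is in message-ID terms (stream sequence
 * numbers in the non-I-DATA case): with mid primed from
 * last_mid_delivered, every queued control for which
 * SCTP_MID_GE(asoc->idata_supported, mid, control->mid) holds is now
 * deliverable, since the FORWARD-TSN tells us the sender has abandoned
 * everything at or before that point.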
5257 */ 5258 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) { 5259 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) { 5260 /* this is deliverable now */ 5261 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 5262 if (control->on_strm_q) { 5263 if (control->on_strm_q == SCTP_ON_ORDERED) { 5264 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm); 5265 } else if (control->on_strm_q == SCTP_ON_UNORDERED) { 5266 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm); 5267 #ifdef INVARIANTS 5268 } else { 5269 panic("strmin: %p ctl: %p unknown %d", 5270 strmin, control, control->on_strm_q); 5271 #endif 5272 } 5273 control->on_strm_q = 0; 5274 } 5275 /* subtract pending on streams */ 5276 if (asoc->size_on_all_streams >= control->length) { 5277 asoc->size_on_all_streams -= control->length; 5278 } else { 5279 #ifdef INVARIANTS 5280 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 5281 #else 5282 asoc->size_on_all_streams = 0; 5283 #endif 5284 } 5285 sctp_ucount_decr(asoc->cnt_on_all_streams); 5286 /* deliver it to at least the delivery-q */ 5287 if (stcb->sctp_socket) { 5288 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 5289 sctp_add_to_readq(stcb->sctp_ep, stcb, 5290 control, 5291 &stcb->sctp_socket->so_rcv, 5292 1, SCTP_READ_LOCK_HELD, 5293 SCTP_SO_NOT_LOCKED); 5294 } 5295 } else { 5296 /* Its a fragmented message */ 5297 if (control->first_frag_seen) { 5298 /* 5299 * Make it so this is next to 5300 * deliver, we restore later 5301 */ 5302 strmin->last_mid_delivered = control->mid - 1; 5303 need_reasm_check = 1; 5304 break; 5305 } 5306 } 5307 } else { 5308 /* no more delivery now. */ 5309 break; 5310 } 5311 } 5312 if (need_reasm_check) { 5313 int ret; 5314 5315 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD); 5316 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) { 5317 /* Restore the next to deliver unless we are ahead */ 5318 strmin->last_mid_delivered = mid; 5319 } 5320 if (ret == 0) { 5321 /* Left the front Partial one on */ 5322 return; 5323 } 5324 need_reasm_check = 0; 5325 } 5326 /* 5327 * now we must deliver things in queue the normal way if any are 5328 * now ready. 
5329 */ 5330 mid = strmin->last_mid_delivered + 1; 5331 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) { 5332 if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) { 5333 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 5334 /* this is deliverable now */ 5335 if (control->on_strm_q) { 5336 if (control->on_strm_q == SCTP_ON_ORDERED) { 5337 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm); 5338 } else if (control->on_strm_q == SCTP_ON_UNORDERED) { 5339 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm); 5340 #ifdef INVARIANTS 5341 } else { 5342 panic("strmin: %p ctl: %p unknown %d", 5343 strmin, control, control->on_strm_q); 5344 #endif 5345 } 5346 control->on_strm_q = 0; 5347 } 5348 /* subtract pending on streams */ 5349 if (asoc->size_on_all_streams >= control->length) { 5350 asoc->size_on_all_streams -= control->length; 5351 } else { 5352 #ifdef INVARIANTS 5353 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 5354 #else 5355 asoc->size_on_all_streams = 0; 5356 #endif 5357 } 5358 sctp_ucount_decr(asoc->cnt_on_all_streams); 5359 /* deliver it to at least the delivery-q */ 5360 strmin->last_mid_delivered = control->mid; 5361 if (stcb->sctp_socket) { 5362 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 5363 sctp_add_to_readq(stcb->sctp_ep, stcb, 5364 control, 5365 &stcb->sctp_socket->so_rcv, 1, 5366 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED); 5367 } 5368 mid = strmin->last_mid_delivered + 1; 5369 } else { 5370 /* Its a fragmented message */ 5371 if (control->first_frag_seen) { 5372 /* 5373 * Make it so this is next to 5374 * deliver 5375 */ 5376 strmin->last_mid_delivered = control->mid - 1; 5377 need_reasm_check = 1; 5378 break; 5379 } 5380 } 5381 } else { 5382 break; 5383 } 5384 } 5385 if (need_reasm_check) { 5386 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD); 5387 } 5388 } 5389 5390 static void 5391 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb, 5392 struct sctp_association *asoc, struct sctp_stream_in *strm, 5393 struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn) 5394 { 5395 struct sctp_tmit_chunk *chk, *nchk; 5396 5397 /* 5398 * For now large messages held on the stream reasm that are complete 5399 * will be tossed too. We could in theory do more work to spin 5400 * through and stop after dumping one msg aka seeing the start of a 5401 * new msg at the head, and call the delivery function... to see if 5402 * it can be delivered... But for now we just dump everything on the 5403 * queue. 
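 *
 * One distinction does survive the dump-everything policy: for old
 * (non-I-DATA) unordered data, only fragments whose TSN is at or below
 * the new cumulative TSN are purged; anything above it may still be
 * completed later and is left on the reasm queue.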
	if (!asoc->idata_supported && !ordered &&
	    control->first_frag_seen &&
	    SCTP_TSN_GT(control->fsn_included, cumtsn)) {
		return;
	}
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		/* Purge hanging chunks */
		if (!asoc->idata_supported && !ordered) {
			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
				break;
			}
		}
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (asoc->size_on_reasm_queue >= chk->send_size) {
			asoc->size_on_reasm_queue -= chk->send_size;
		} else {
#ifdef INVARIANTS
			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
#else
			asoc->size_on_reasm_queue = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	if (!TAILQ_EMPTY(&control->reasm)) {
		/* This has to be old data, unordered */
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
		chk = TAILQ_FIRST(&control->reasm);
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc,
			    chk, SCTP_READ_LOCK_HELD);
		}
		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
		return;
	}
	if (control->on_strm_q == SCTP_ON_ORDERED) {
		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		control->on_strm_q = 0;
	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
		control->on_strm_q = 0;
#ifdef INVARIANTS
	} else if (control->on_strm_q) {
		panic("strm: %p ctl: %p unknown %d",
		    strm, control, control->on_strm_q);
#endif
	}
	control->on_strm_q = 0;
	if (control->on_read_q == 0) {
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_free_a_readq(stcb, control);
	}
}

void
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
    struct sctp_forward_tsn_chunk *fwd,
    int *abort_flag, struct mbuf *m, int offset)
{
	/* The PR-SCTP fwd tsn */
	/*
	 * Here we will perform all the data receiver side steps for
	 * processing FwdTSN, as required by the PR-SCTP specification
	 * (RFC 3758):
	 *
	 * Assume we get FwdTSN(x):
	 *
	 * 1) update local cumTSN to x
	 * 2) try to further advance cumTSN to x + others we have
	 * 3) examine and update re-ordering queue on pr-in-streams
	 * 4) clean up re-assembly queue
	 * 5) send a SACK to report where we are
	 */
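	/*
	 * If the new cumulative TSN is implausibly far ahead of what the
	 * advertised window allows, the peer is misbehaving; the
	 * association is aborted below with *abort_flag set for the
	 * caller.
	 */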
	struct sctp_association *asoc;
	uint32_t new_cum_tsn, gap;
	unsigned int i, fwd_sz, m_size;
	uint32_t str_seq;
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *control, *sv;

	asoc = &stcb->asoc;
	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
		SCTPDBG(SCTP_DEBUG_INDATA1,
		    "Bad size too small/big fwd-tsn\n");
		return;
	}
	m_size = (stcb->asoc.mapping_array_size << 3);
	/*************************************************************/
	/* 1. Here we update local cumTSN and shift the bitmap array */
	/*************************************************************/
	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);

	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
		/* Already got there ... */
		return;
	}
	/*
	 * Now we know the new TSN is more advanced, let's find the actual
	 * gap.
	 */
	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
	asoc->cumulative_tsn = new_cum_tsn;
	if (gap >= m_size) {
		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
			struct mbuf *op_err;
			char msg[SCTP_DIAG_INFO_LEN];

			/*
			 * Out of range (of single byte chunks in the rwnd
			 * I give out). This must be an attacker.
			 */
			*abort_flag = 1;
			SCTP_SNPRINTF(msg, sizeof(msg),
			    "New cum ack %8.8x too high, highest TSN %8.8x",
			    new_cum_tsn, asoc->highest_tsn_inside_map);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_37;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			return;
		}
		SCTP_STAT_INCR(sctps_fwdtsn_map_over);

		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
		asoc->highest_tsn_inside_map = new_cum_tsn;

		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->highest_tsn_inside_nr_map = new_cum_tsn;

		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
	} else {
		SCTP_TCB_LOCK_ASSERT(stcb);
		for (i = 0; i <= gap; i++) {
			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
				}
			}
		}
	}
	/*************************************************************/
	/* 2. Clear up re-assembly queue                             */
	/*************************************************************/

	/* This is now done as part of clearing up the stream/seq */
	if (asoc->idata_supported == 0) {
		uint16_t sid;

		/* Flush all the un-ordered data based on cum-tsn */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		for (sid = 0; sid < asoc->streamincnt; sid++) {
			strm = &asoc->strmin[sid];
			if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
				sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn);
			}
		}
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	/*******************************************************/
	/* 3. Update the PR-stream re-ordering queues and fix  */
	/* delivery issues as needed.                           */
	/*******************************************************/
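	/*
	 * Each entry after the fixed FORWARD-TSN header names one stream
	 * and the highest message being skipped in it: a (sid, ssn) pair
	 * in the classic format, or a (sid, flags, mid) triple when I-DATA
	 * is in use.
	 */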
	fwd_sz -= sizeof(*fwd);
	if (m && fwd_sz) {
		/* New method. */
		unsigned int num_str;
		uint32_t mid;
		uint16_t sid;
		uint16_t ordered, flags;
		struct sctp_strseq *stseq, strseqbuf;
		struct sctp_strseq_mid *stseq_m, strseqbuf_m;

		offset += sizeof(*fwd);

		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		if (asoc->idata_supported) {
			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
		} else {
			num_str = fwd_sz / sizeof(struct sctp_strseq);
		}
		for (i = 0; i < num_str; i++) {
			if (asoc->idata_supported) {
				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
				    sizeof(struct sctp_strseq_mid),
				    (uint8_t *)&strseqbuf_m);
				offset += sizeof(struct sctp_strseq_mid);
				if (stseq_m == NULL) {
					break;
				}
				sid = ntohs(stseq_m->sid);
				mid = ntohl(stseq_m->mid);
				flags = ntohs(stseq_m->flags);
				if (flags & PR_SCTP_UNORDERED_FLAG) {
					ordered = 0;
				} else {
					ordered = 1;
				}
			} else {
				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
				    sizeof(struct sctp_strseq),
				    (uint8_t *)&strseqbuf);
				offset += sizeof(struct sctp_strseq);
				if (stseq == NULL) {
					break;
				}
				sid = ntohs(stseq->sid);
				mid = (uint32_t)ntohs(stseq->ssn);
				ordered = 1;
			}
			/* Convert */

			/* now process */

			/*
			 * Ok we now look for the stream/seq on the read
			 * queue where it's not all delivered. If we find it
			 * we transmute the read entry into a PDI_ABORTED.
			 */
			if (sid >= asoc->streamincnt) {
				/* screwed up streams, stop! */
				break;
			}
			if ((asoc->str_of_pdapi == sid) &&
			    (asoc->ssn_of_pdapi == mid)) {
				/*
				 * If this is the one we were partially
				 * delivering now, then we no longer are.
				 * Note this will change with the
				 * reassembly re-write.
				 */
				asoc->fragmented_delivery_inprogress = 0;
			}
			strm = &asoc->strmin[sid];
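			/*
			 * Flush any partly reassembled messages in this
			 * stream at or below the MID named in the entry;
			 * without I-DATA, unordered data is flushed from
			 * the head of the queue by cum-TSN instead.
			 */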
			if (ordered) {
				TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
					if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
						sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
					}
				}
			} else {
				if (asoc->idata_supported) {
					TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
						if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
							sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
						}
					}
				} else {
					if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
						sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn);
					}
				}
			}
			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
				if ((control->sinfo_stream == sid) &&
				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
					str_seq = (sid << 16) | (0x0000ffff & mid);
					control->pdapi_aborted = 1;
					sv = stcb->asoc.control_pdapi;
					control->end_added = 1;
					if (control->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
						if (asoc->size_on_all_streams >= control->length) {
							asoc->size_on_all_streams -= control->length;
						} else {
#ifdef INVARIANTS
							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
							asoc->size_on_all_streams = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_all_streams);
					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
					} else if (control->on_strm_q) {
						panic("strm: %p ctl: %p unknown %d",
						    strm, control, control->on_strm_q);
#endif
					}
					control->on_strm_q = 0;
					stcb->asoc.control_pdapi = control;
					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
					    stcb,
					    SCTP_PARTIAL_DELIVERY_ABORTED,
					    (void *)&str_seq,
					    SCTP_SO_NOT_LOCKED);
					stcb->asoc.control_pdapi = sv;
					break;
				} else if ((control->sinfo_stream == sid) &&
				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
					/* We are past our victim SSN */
					break;
				}
			}
			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
				/* Update the sequence number */
				strm->last_mid_delivered = mid;
			}
			/* now kick the stream the new way */
			/* sa_ignore NO_NULL_CHK */
			sctp_kick_prsctp_reorder_queue(stcb, strm);
		}
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	/*
	 * Now slide things forward.
	 */
	sctp_slide_mapping_arrays(stcb);
}