/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_lock_bsd.h>

/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is) and will be sending it, for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */
static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int lock_held);

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket,
	 * since the sb_cc is the count that everyone has put up. When we
	 * rewrite sctp_soreceive() we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL) {
		return (calc);
	}
	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
	    ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
	    ("size_on_all_streams is %u", asoc->size_on_all_streams));
	if (stcb->asoc.sb_cc == 0 &&
	    asoc->cnt_on_reasm_queue == 0 &&
	    asoc->cnt_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
	/*
	 * Take out what has NOT been put on the socket queue and we still
	 * hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));
	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0. SWS engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
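/*
 * Worked example for sctp_calc_rwnd() (illustrative numbers only,
 * assuming an MSIZE of 256): with 40000 bytes of socket-buffer space
 * reported by sctp_sbspace(), 3000 bytes in 10 chunks on the
 * reassembly queue and 1000 bytes in 4 chunks on the stream queues,
 * the two sctp_sbspace_sub() calls above leave
 * 40000 - (3000 + 10 * 256) - (1000 + 4 * 256) = 32416 bytes, from
 * which my_rwnd_control_len is then deducted. The final clamp to 1
 * (rather than 0) keeps the peer sending small probe data instead of
 * stalling; this is the silly-window-syndrome avoidance noted above.
 */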

/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t sid,
    uint32_t mid, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
	read_queue_e->sinfo_stream = sid;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->mid = mid;
	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
	TAILQ_INIT(&read_queue_e->reasm);
	read_queue_e->whoFrom = net;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
failed_build:
	return (read_queue_e);
}

struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}
	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	/*
	 * Make sure that there is no un-initialized padding between the
	 * cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->serinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
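
/*
 * A minimal userland sketch of consuming the ancillary data built above
 * (illustrative only; assumes the application enabled SCTP_RCVINFO
 * delivery via the SCTP_RECVRCVINFO socket option):
 *
 *	struct msghdr msg;	// set up with an iovec and a control buffer
 *	struct cmsghdr *scmsg;
 *	struct sctp_rcvinfo *ri;
 *
 *	recvmsg(fd, &msg, 0);
 *	for (scmsg = CMSG_FIRSTHDR(&msg); scmsg != NULL;
 *	    scmsg = CMSG_NXTHDR(&msg, scmsg)) {
 *		if (scmsg->cmsg_level == IPPROTO_SCTP &&
 *		    scmsg->cmsg_type == SCTP_RCVINFO) {
 *			ri = (struct sctp_rcvinfo *)CMSG_DATA(scmsg);
 *			// ri->rcv_sid, ri->rcv_ssn, ri->rcv_tsn, ...
 *		}
 *	}
 */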

static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;
	int in_r = 0, in_nr = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * This tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if ((in_r == 0) && (in_nr == 0)) {
#ifdef INVARIANTS
		panic("Things are really messed up now");
#else
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#endif
	}
	if (in_nr == 0)
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if (in_r)
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
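
/*
 * Example of the gap computation used above (illustrative values): with
 * mapping_array_base_tsn = 0x1000 and tsn = 0x1005,
 * SCTP_CALC_TSN_TO_GAP() yields gap = 5, i.e. the fifth bit past the
 * base in the mapping arrays; the serial-number arithmetic also covers
 * wrap-around, e.g. base 0xfffffffe and tsn 0x00000001 give gap = 3.
 * Clearing the bit in mapping_array while setting it in
 * nr_mapping_array is what makes the TSN non-renegable: it is
 * subsequently reported in NR Gap Ack Blocks and will not be revoked.
 */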

static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;
	struct sctp_readhead *q;
	uint8_t flags, unordered;

	flags = (control->sinfo_flags >> 8);
	unordered = flags & SCTP_DATA_UNORDERED;
	if (unordered) {
		q = &strm->uno_inqueue;
		if (asoc->idata_supported == 0) {
			if (!TAILQ_EMPTY(q)) {
				/*
				 * Only one stream can be here in old style
				 * -- abort
				 */
				return (-1);
			}
			TAILQ_INSERT_TAIL(q, control, next_instrm);
			control->on_strm_q = SCTP_ON_UNORDERED;
			return (0);
		}
	} else {
		q = &strm->inqueue;
	}
	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		control->end_added = 1;
		control->first_frag_seen = 1;
		control->last_frag_seen = 1;
	}
	if (TAILQ_EMPTY(q)) {
		/* Empty queue */
		TAILQ_INSERT_HEAD(q, control, next_instrm);
		if (unordered) {
			control->on_strm_q = SCTP_ON_UNORDERED;
		} else {
			control->on_strm_q = SCTP_ON_ORDERED;
		}
		return (0);
	} else {
		TAILQ_FOREACH(at, q, next_instrm) {
			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * One in queue is bigger than the new one;
				 * insert before this one.
				 */
				TAILQ_INSERT_BEFORE(at, control, next_instrm);
				if (unordered) {
					control->on_strm_q = SCTP_ON_UNORDERED;
				} else {
					control->on_strm_q = SCTP_ON_ORDERED;
				}
				break;
			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * The peer sent a duplicate message id
				 * number; return -1 to abort.
				 */
				return (-1);
			} else {
				if (TAILQ_NEXT(at, next_instrm) == NULL) {
					/*
					 * We are at the end; insert it
					 * after this one.
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
					}
					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
					if (unordered) {
						control->on_strm_q = SCTP_ON_UNORDERED;
					} else {
						control->on_strm_q = SCTP_ON_ORDERED;
					}
					break;
				}
			}
		}
	}
	return (0);
}

static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
	char msg[SCTP_DIAG_INFO_LEN];
	struct mbuf *oper;

	if (stcb->asoc.idata_supported) {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn, chk->rec.data.mid);
	} else {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn,
		    (uint16_t)chk->rec.data.mid);
	}
	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
	sctp_m_freem(chk->data);
	chk->data = NULL;
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
	*abort_flag = 1;
}

static void
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
	/*
	 * The control could not be placed and must be cleaned.
	 */
	struct sctp_tmit_chunk *chk, *nchk;

	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (chk->data)
			sctp_m_freem(chk->data);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	sctp_free_a_readq(stcb, control);
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order, as long as
 * the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4Billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	struct sctp_stream_in *strm;
	char msg[SCTP_DIAG_INFO_LEN];

	strm = &asoc->strmin[control->sinfo_stream];
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
		    strm->last_mid_delivered, control->mid);
		/*
		 * Throw it in the stream so it gets cleaned up in
		 * association destruction.
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		if (asoc->idata_supported) {
			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			    strm->last_mid_delivered, control->sinfo_tsn,
			    control->sinfo_stream, control->mid);
		} else {
			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			    (uint16_t)strm->last_mid_delivered,
			    control->sinfo_tsn,
			    control->sinfo_stream,
			    (uint16_t)control->mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;
	}
	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_mid_delivered + 1;
	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
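		/*
		 * Note on the lock dance above (Apple / lock-testing builds
		 * only): the association is pinned with a refcount before
		 * the TCB lock is dropped, so the socket lock can be
		 * acquired without holding the TCB lock (preserving the
		 * socket-before-TCB lock order); the SOCKET_GONE re-check
		 * afterwards catches a socket closed while we were
		 * unlocked.
		 */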
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it could be delivered directly */
		queue_needed = 0;
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_mid_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_mid_delivered + 1;
			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
				} else {
					panic("Huh control: %p is on_strm_q: %d",
					    control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_mid_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_LOCKED);
				continue;
			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
				*need_reasm = 1;
			}
			break;
		}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy; find the correct place
		 * to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			snprintf(msg, sizeof(msg),
			    "Queue to str MID: %u duplicate",
			    control->mid);
			sctp_clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}
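
/*
 * Delivery-order illustration for the function above (hypothetical
 * MIDs): with last_mid_delivered = 4, an arriving complete message
 * with MID 5 is pushed straight onto the read queue, and any queued
 * non-fragmented messages with MIDs 6, 7, ... follow in the same
 * pass. If the next in-order MID is present but still fragmented,
 * need_reasm is flagged instead and the message stays on the stream
 * queue until reassembly completes.
 */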

static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
	struct mbuf *m, *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	control->held_length = 0;
	control->length = 0;
	m = control->data;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * stuff; we assume the caller has taken any needed
			 * SB locks.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}
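
/*
 * Chain-walk example (illustrative): for an mbuf chain with lengths
 * [120, 0, 48], the zero-length mbuf is freed and unlinked, leaving
 * control->length = 168 and control->tail_mbuf pointing at the 48-byte
 * mbuf. If the control is already on the read queue, each surviving
 * mbuf is additionally charged to the socket buffer via
 * sctp_sballoc().
 */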

static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
{
	struct mbuf *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	if (stcb == NULL) {
#ifdef INVARIANTS
		panic("Control broken");
#else
		return;
#endif
	}
	if (control->tail_mbuf == NULL) {
		/* TSNH */
		control->data = m;
		sctp_setup_tail_pointer(control);
		return;
	}
	control->tail_mbuf->m_next = m;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->tail_mbuf->m_next = sctp_m_free(m);
				m = control->tail_mbuf->m_next;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * stuff; we assume the caller has taken any needed
			 * SB locks.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		*added += SCTP_BUF_LEN(m);
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
	memset(nc, 0, sizeof(struct sctp_queued_to_read));
	nc->sinfo_stream = control->sinfo_stream;
	nc->mid = control->mid;
	TAILQ_INIT(&nc->reasm);
	nc->top_fsn = control->top_fsn;
	nc->mid = control->mid;
	nc->sinfo_flags = control->sinfo_flags;
	nc->sinfo_ppid = control->sinfo_ppid;
	nc->sinfo_context = control->sinfo_context;
	nc->fsn_included = 0xffffffff;
	nc->sinfo_tsn = control->sinfo_tsn;
	nc->sinfo_cumtsn = control->sinfo_cumtsn;
	nc->sinfo_assoc_id = control->sinfo_assoc_id;
	nc->whoFrom = control->whoFrom;
	atomic_add_int(&nc->whoFrom->ref_count, 1);
	nc->stcb = control->stcb;
	nc->port_from = control->port_from;
}

static void
sctp_reset_a_control(struct sctp_queued_to_read *control,
    struct sctp_inpcb *inp, uint32_t tsn)
{
	control->fsn_included = tsn;
	if (control->on_read_q) {
		/*
		 * We have to purge it from there; hopefully this will work
		 * :-)
		 */
		TAILQ_REMOVE(&inp->read_queue, control, next);
		control->on_read_q = 0;
	}
}

static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
	/*
	 * Special handling for the old un-ordered data chunk. All the
	 * chunks/TSN's go to mid 0. So we have to do the old style watching
	 * to see if we have it all. If you return one, no other control
	 * entries on the un-ordered queue will be looked at. In theory
	 * there should be no other entries in reality, unless the guy is
	 * sending both unordered NDATA and unordered DATA...
	 */
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn == fsn) {
			/* Ok, let's add it */
			sctp_alloc_a_readq(stcb, nc);
			if (nc == NULL) {
				break;
			}
			memset(nc, 0, sizeof(struct sctp_queued_to_read));
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok, we have to move anything left
					 * on the control queue to a new
					 * control.
					 */
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						if (asoc->size_on_reasm_queue >= tchk->send_size) {
							asoc->size_on_reasm_queue -= tchk->send_size;
						} else {
#ifdef INVARIANTS
							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
							asoc->size_on_reasm_queue = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn;
						nc->data = tchk->data;
						nc->sinfo_ppid = tchk->rec.data.ppid;
						nc->sinfo_tsn = tchk->rec.data.tsn;
						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/*
					 * Now let's add it to the queue
					 * after removing control.
					 */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				if (control->pdapi_started) {
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
					    &stcb->sctp_socket->so_rcv, control->end_added,
					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
					/*
					 * Switch to the new guy and
					 * continue.
					 */
					control = nc;
					goto restart;
				} else {
					if (nc->on_strm_q == 0) {
						sctp_free_a_readq(stcb, nc);
					}
				}
				return (1);
			} else {
				sctp_free_a_readq(stcb, nc);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		    &stcb->sctp_socket->so_rcv, control->end_added,
		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}

static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted;

	/*
	 * Here we need to place the chunk into the control structure,
	 * sorted in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* It's the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered we can reassemble multiple
			 * messages on one control, as long as the next
			 * FIRST is greater than the old first (TSN, i.e.
			 * FSN, wise).
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
				/*
				 * Easy way: the start of a new guy beyond
				 * the lowest.
				 */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok, this should not happen; if it does,
				 * we started the pd-api on the higher TSN
				 * (since the equals part is a TSN failure
				 * it must be that).
				 *
				 * We are completely hosed in that case,
				 * since I have no way to recover. This
				 * really will only happen if we can get
				 * more TSN's higher before the
				 * pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
				return;
			}
			/*
			 * Ok, we have two firsts and the one we just got is
			 * smaller than the one we previously placed...
			 * yuck! We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Save the lengths */
			chk->send_size = control->length;
			/* Recompute length of control and tail pointer */
			sctp_setup_tail_pointer(control);
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn;
			chk->rec.data.fsn = tmp;
			/* Fix the TSN included */
			tmp = control->sinfo_tsn;
			control->sinfo_tsn = chk->rec.data.tsn;
			chk->rec.data.tsn = tmp;
			/* Fix the PPID included */
			tmp = control->sinfo_ppid;
			control->sinfo_ppid = chk->rec.data.ppid;
			chk->rec.data.ppid = tmp;
			/* Fix tail pointer */
			goto place_chunk;
		}
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn;
		control->top_fsn = chk->rec.data.fsn;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	inserted = 0;
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
			/*
			 * This one in queue is bigger than the new one;
			 * insert the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
			/*
			 * They sent a duplicate fsn number. This really
			 * should not happen since the FSN is a TSN and it
			 * should have been dropped earlier.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}
	}
	if (inserted == 0) {
		/* It's at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}
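
/*
 * Swap illustration for the two-FIRST case handled above (hypothetical
 * TSNs): if the control already holds a FIRST fragment with TSN 100
 * and another FIRST with TSN 90 arrives, the mbuf chains, FSN, TSN and
 * PPID of control and chunk are exchanged, so the control now
 * represents the message starting at TSN 90, and the old first (now
 * carried by the chunk) is filed into the reassembly list like any
 * other out-of-order fragment.
 */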

static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm, int inp_read_lock_held)
{
	/*
	 * Given a stream, strm, see if any of the SSN's on it that are
	 * fragmented are ready to deliver. If so, go ahead and place them
	 * on the read queue. In so placing, if we have hit the end, then
	 * we need to remove them from the stream's queue.
	 */
	struct sctp_queued_to_read *control, *nctl = NULL;
	uint32_t next_to_del;
	uint32_t pd_point;
	int ret = 0;

	if (stcb->sctp_socket) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
		    stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = stcb->sctp_ep->partial_delivery_point;
	}
	control = TAILQ_FIRST(&strm->uno_inqueue);

	if ((control != NULL) &&
	    (asoc->idata_supported == 0)) {
		/* Special handling needed for "old" data format */
		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
			goto done_un;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	while (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (control->end_added) {
			/* We just put the last bit on */
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_UNORDERED) {
					panic("Huh control: %p on_q: %d -- not unordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Can we do a PD-API for this un-ordered guy? */
			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
				break;
			}
		}
		control = nctl;
	}
done_un:
	control = TAILQ_FIRST(&strm->inqueue);
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	if (control == NULL) {
		return (ret);
	}
	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
		/*
		 * Ok, the guy at the top was being partially delivered and
		 * has completed, so we remove it. Note the pd_api flag was
		 * taken off when the chunk was merged in, in
		 * sctp_queue_data_for_reasm() below.
		 */
		nctl = TAILQ_NEXT(control, next_instrm);
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
		    control, control->end_added, control->mid,
		    control->top_fsn, control->fsn_included,
		    strm->last_mid_delivered);
		if (control->end_added) {
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_ORDERED) {
					panic("Huh control: %p on_q: %d -- not ordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (strm->pd_api_started && control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			control = nctl;
		}
	}
	if (strm->pd_api_started) {
		/*
		 * Can't add more; we must have gotten an un-ordered guy
		 * above that is being partially delivered.
		 */
		return (0);
	}
deliver_more:
	next_to_del = strm->last_mid_delivered + 1;
	if (control) {
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
		    next_to_del);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
		    (control->first_frag_seen)) {
			int done;

			/* Ok, we can deliver it onto the stream. */
			if (control->end_added) {
				/* We are done with it afterwards */
				if (control->on_strm_q) {
#ifdef INVARIANTS
					if (control->on_strm_q != SCTP_ON_ORDERED) {
						panic("Huh control: %p on_q: %d -- not ordered?",
						    control, control->on_strm_q);
					}
#endif
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					control->on_strm_q = 0;
				}
				ret++;
			}
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/*
				 * A singleton now slipping through - mark
				 * it non-revokable too.
				 */
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			} else if (control->end_added == 0) {
				/*
				 * Check if we can defer adding until it's
				 * all there.
				 */
				if ((control->length < pd_point) || (strm->pd_api_started)) {
					/*
					 * Don't need it, or cannot add more
					 * (one being delivered that way).
					 */
					goto out;
				}
			}
			done = (control->end_added) && (control->last_frag_seen);
			if (control->on_read_q == 0) {
				if (!done) {
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					strm->pd_api_started = 1;
					control->pdapi_started = 1;
				}
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			strm->last_mid_delivered = next_to_del;
			if (done) {
				control = nctl;
				goto deliver_more;
			}
		}
	}
out:
	return (ret);
}

uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock)
{
	/*
	 * Given a control and a chunk, merge the data from the chk onto the
	 * control and free up the chunk resources.
	 */
	uint32_t added = 0;
	int i_locked = 0;

	if (control->on_read_q && (hold_rlock == 0)) {
		/*
		 * It's being pd-api'd, so we must take some locks.
		 */
1298 */ 1299 SCTP_INP_READ_LOCK(stcb->sctp_ep); 1300 i_locked = 1; 1301 } 1302 if (control->data == NULL) { 1303 control->data = chk->data; 1304 sctp_setup_tail_pointer(control); 1305 } else { 1306 sctp_add_to_tail_pointer(control, chk->data, &added); 1307 } 1308 control->fsn_included = chk->rec.data.fsn; 1309 asoc->size_on_reasm_queue -= chk->send_size; 1310 sctp_ucount_decr(asoc->cnt_on_reasm_queue); 1311 sctp_mark_non_revokable(asoc, chk->rec.data.tsn); 1312 chk->data = NULL; 1313 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 1314 control->first_frag_seen = 1; 1315 control->sinfo_tsn = chk->rec.data.tsn; 1316 control->sinfo_ppid = chk->rec.data.ppid; 1317 } 1318 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 1319 /* Its complete */ 1320 if ((control->on_strm_q) && (control->on_read_q)) { 1321 if (control->pdapi_started) { 1322 control->pdapi_started = 0; 1323 strm->pd_api_started = 0; 1324 } 1325 if (control->on_strm_q == SCTP_ON_UNORDERED) { 1326 /* Unordered */ 1327 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); 1328 control->on_strm_q = 0; 1329 } else if (control->on_strm_q == SCTP_ON_ORDERED) { 1330 /* Ordered */ 1331 TAILQ_REMOVE(&strm->inqueue, control, next_instrm); 1332 /* 1333 * Don't need to decrement 1334 * size_on_all_streams, since control is on 1335 * the read queue. 1336 */ 1337 sctp_ucount_decr(asoc->cnt_on_all_streams); 1338 control->on_strm_q = 0; 1339 #ifdef INVARIANTS 1340 } else if (control->on_strm_q) { 1341 panic("Unknown state on ctrl: %p on_strm_q: %d", control, 1342 control->on_strm_q); 1343 #endif 1344 } 1345 } 1346 control->end_added = 1; 1347 control->last_frag_seen = 1; 1348 } 1349 if (i_locked) { 1350 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 1351 } 1352 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 1353 return (added); 1354 } 1355 1356 /* 1357 * Dump onto the re-assembly queue, in its proper place. After dumping on the 1358 * queue, see if anthing can be delivered. If so pull it off (or as much as 1359 * we can. If we run out of space then we must dump what we can and set the 1360 * appropriate flag to say we queued what we could. 1361 */ 1362 static void 1363 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc, 1364 struct sctp_queued_to_read *control, 1365 struct sctp_tmit_chunk *chk, 1366 int created_control, 1367 int *abort_flag, uint32_t tsn) 1368 { 1369 uint32_t next_fsn; 1370 struct sctp_tmit_chunk *at, *nat; 1371 struct sctp_stream_in *strm; 1372 int do_wakeup, unordered; 1373 uint32_t lenadded; 1374 1375 strm = &asoc->strmin[control->sinfo_stream]; 1376 /* 1377 * For old un-ordered data chunks. 1378 */ 1379 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 1380 unordered = 1; 1381 } else { 1382 unordered = 0; 1383 } 1384 /* Must be added to the stream-in queue */ 1385 if (created_control) { 1386 if (unordered == 0) { 1387 sctp_ucount_incr(asoc->cnt_on_all_streams); 1388 } 1389 if (sctp_place_control_in_stream(strm, asoc, control)) { 1390 /* Duplicate SSN? */ 1391 sctp_abort_in_reasm(stcb, control, chk, 1392 abort_flag, 1393 SCTP_FROM_SCTP_INDATA + SCTP_LOC_6); 1394 sctp_clean_up_control(stcb, control); 1395 return; 1396 } 1397 if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) { 1398 /* 1399 * Ok we created this control and now lets validate 1400 * that its legal i.e. there is a B bit set, if not 1401 * and we have up to the cum-ack then its invalid. 
1402 */ 1403 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) { 1404 sctp_abort_in_reasm(stcb, control, chk, 1405 abort_flag, 1406 SCTP_FROM_SCTP_INDATA + SCTP_LOC_7); 1407 return; 1408 } 1409 } 1410 } 1411 if ((asoc->idata_supported == 0) && (unordered == 1)) { 1412 sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag); 1413 return; 1414 } 1415 /* 1416 * Ok we must queue the chunk into the reasembly portion: o if its 1417 * the first it goes to the control mbuf. o if its not first but the 1418 * next in sequence it goes to the control, and each succeeding one 1419 * in order also goes. o if its not in order we place it on the list 1420 * in its place. 1421 */ 1422 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 1423 /* Its the very first one. */ 1424 SCTPDBG(SCTP_DEBUG_XXX, 1425 "chunk is a first fsn: %u becomes fsn_included\n", 1426 chk->rec.data.fsn); 1427 if (control->first_frag_seen) { 1428 /* 1429 * Error on senders part, they either sent us two 1430 * data chunks with FIRST, or they sent two 1431 * un-ordered chunks that were fragmented at the 1432 * same time in the same stream. 1433 */ 1434 sctp_abort_in_reasm(stcb, control, chk, 1435 abort_flag, 1436 SCTP_FROM_SCTP_INDATA + SCTP_LOC_8); 1437 return; 1438 } 1439 control->first_frag_seen = 1; 1440 control->sinfo_ppid = chk->rec.data.ppid; 1441 control->sinfo_tsn = chk->rec.data.tsn; 1442 control->fsn_included = chk->rec.data.fsn; 1443 control->data = chk->data; 1444 sctp_mark_non_revokable(asoc, chk->rec.data.tsn); 1445 chk->data = NULL; 1446 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 1447 sctp_setup_tail_pointer(control); 1448 asoc->size_on_all_streams += control->length; 1449 } else { 1450 /* Place the chunk in our list */ 1451 int inserted = 0; 1452 1453 if (control->last_frag_seen == 0) { 1454 /* Still willing to raise highest FSN seen */ 1455 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) { 1456 SCTPDBG(SCTP_DEBUG_XXX, 1457 "We have a new top_fsn: %u\n", 1458 chk->rec.data.fsn); 1459 control->top_fsn = chk->rec.data.fsn; 1460 } 1461 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 1462 SCTPDBG(SCTP_DEBUG_XXX, 1463 "The last fsn is now in place fsn: %u\n", 1464 chk->rec.data.fsn); 1465 control->last_frag_seen = 1; 1466 } 1467 if (asoc->idata_supported || control->first_frag_seen) { 1468 /* 1469 * For IDATA we always check since we know 1470 * that the first fragment is 0. For old 1471 * DATA we have to receive the first before 1472 * we know the first FSN (which is the TSN). 1473 */ 1474 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) { 1475 /* 1476 * We have already delivered up to 1477 * this so its a dup 1478 */ 1479 sctp_abort_in_reasm(stcb, control, chk, 1480 abort_flag, 1481 SCTP_FROM_SCTP_INDATA + SCTP_LOC_9); 1482 return; 1483 } 1484 } 1485 } else { 1486 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 1487 /* Second last? huh? */ 1488 SCTPDBG(SCTP_DEBUG_XXX, 1489 "Duplicate last fsn: %u (top: %u) -- abort\n", 1490 chk->rec.data.fsn, control->top_fsn); 1491 sctp_abort_in_reasm(stcb, control, 1492 chk, abort_flag, 1493 SCTP_FROM_SCTP_INDATA + SCTP_LOC_10); 1494 return; 1495 } 1496 if (asoc->idata_supported || control->first_frag_seen) { 1497 /* 1498 * For IDATA we always check since we know 1499 * that the first fragment is 0. For old 1500 * DATA we have to receive the first before 1501 * we know the first FSN (which is the TSN). 
1502 */ 1503 1504 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) { 1505 /* 1506 * We have already delivered up to 1507 * this so its a dup 1508 */ 1509 SCTPDBG(SCTP_DEBUG_XXX, 1510 "New fsn: %u is already seen in included_fsn: %u -- abort\n", 1511 chk->rec.data.fsn, control->fsn_included); 1512 sctp_abort_in_reasm(stcb, control, chk, 1513 abort_flag, 1514 SCTP_FROM_SCTP_INDATA + SCTP_LOC_11); 1515 return; 1516 } 1517 } 1518 /* 1519 * validate not beyond top FSN if we have seen last 1520 * one 1521 */ 1522 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) { 1523 SCTPDBG(SCTP_DEBUG_XXX, 1524 "New fsn: %u is beyond or at top_fsn: %u -- abort\n", 1525 chk->rec.data.fsn, 1526 control->top_fsn); 1527 sctp_abort_in_reasm(stcb, control, chk, 1528 abort_flag, 1529 SCTP_FROM_SCTP_INDATA + SCTP_LOC_12); 1530 return; 1531 } 1532 } 1533 /* 1534 * If we reach here, we need to place the new chunk in the 1535 * reassembly for this control. 1536 */ 1537 SCTPDBG(SCTP_DEBUG_XXX, 1538 "chunk is a not first fsn: %u needs to be inserted\n", 1539 chk->rec.data.fsn); 1540 TAILQ_FOREACH(at, &control->reasm, sctp_next) { 1541 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) { 1542 /* 1543 * This one in queue is bigger than the new 1544 * one, insert the new one before at. 1545 */ 1546 SCTPDBG(SCTP_DEBUG_XXX, 1547 "Insert it before fsn: %u\n", 1548 at->rec.data.fsn); 1549 asoc->size_on_reasm_queue += chk->send_size; 1550 sctp_ucount_incr(asoc->cnt_on_reasm_queue); 1551 TAILQ_INSERT_BEFORE(at, chk, sctp_next); 1552 inserted = 1; 1553 break; 1554 } else if (at->rec.data.fsn == chk->rec.data.fsn) { 1555 /* 1556 * Gak, He sent me a duplicate str seq 1557 * number 1558 */ 1559 /* 1560 * foo bar, I guess I will just free this 1561 * new guy, should we abort too? FIX ME 1562 * MAYBE? Or it COULD be that the SSN's have 1563 * wrapped. Maybe I should compare to TSN 1564 * somehow... sigh for now just blow away 1565 * the chunk! 1566 */ 1567 SCTPDBG(SCTP_DEBUG_XXX, 1568 "Duplicate to fsn: %u -- abort\n", 1569 at->rec.data.fsn); 1570 sctp_abort_in_reasm(stcb, control, 1571 chk, abort_flag, 1572 SCTP_FROM_SCTP_INDATA + SCTP_LOC_13); 1573 return; 1574 } 1575 } 1576 if (inserted == 0) { 1577 /* Goes on the end */ 1578 SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n", 1579 chk->rec.data.fsn); 1580 asoc->size_on_reasm_queue += chk->send_size; 1581 sctp_ucount_incr(asoc->cnt_on_reasm_queue); 1582 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next); 1583 } 1584 } 1585 /* 1586 * Ok lets see if we can suck any up into the control structure that 1587 * are in seq if it makes sense. 1588 */ 1589 do_wakeup = 0; 1590 /* 1591 * If the first fragment has not been seen there is no sense in 1592 * looking. 1593 */ 1594 if (control->first_frag_seen) { 1595 next_fsn = control->fsn_included + 1; 1596 TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) { 1597 if (at->rec.data.fsn == next_fsn) { 1598 /* We can add this one now to the control */ 1599 SCTPDBG(SCTP_DEBUG_XXX, 1600 "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n", 1601 control, at, 1602 at->rec.data.fsn, 1603 next_fsn, control->fsn_included); 1604 TAILQ_REMOVE(&control->reasm, at, sctp_next); 1605 lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD); 1606 if (control->on_read_q) { 1607 do_wakeup = 1; 1608 } else { 1609 /* 1610 * We only add to the 1611 * size-on-all-streams if its not on 1612 * the read q. The read q flag will 1613 * cause a sballoc so its accounted 1614 * for there. 
1615 */ 1616 asoc->size_on_all_streams += lenadded; 1617 } 1618 next_fsn++; 1619 if (control->end_added && control->pdapi_started) { 1620 if (strm->pd_api_started) { 1621 strm->pd_api_started = 0; 1622 control->pdapi_started = 0; 1623 } 1624 if (control->on_read_q == 0) { 1625 sctp_add_to_readq(stcb->sctp_ep, stcb, 1626 control, 1627 &stcb->sctp_socket->so_rcv, control->end_added, 1628 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 1629 } 1630 break; 1631 } 1632 } else { 1633 break; 1634 } 1635 } 1636 } 1637 if (do_wakeup) { 1638 /* Need to wakeup the reader */ 1639 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 1640 } 1641 } 1642 1643 static struct sctp_queued_to_read * 1644 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported) 1645 { 1646 struct sctp_queued_to_read *control; 1647 1648 if (ordered) { 1649 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) { 1650 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) { 1651 break; 1652 } 1653 } 1654 } else { 1655 if (idata_supported) { 1656 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) { 1657 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) { 1658 break; 1659 } 1660 } 1661 } else { 1662 control = TAILQ_FIRST(&strm->uno_inqueue); 1663 } 1664 } 1665 return (control); 1666 } 1667 1668 static int 1669 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc, 1670 struct mbuf **m, int offset, int chk_length, 1671 struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag, 1672 int *break_flag, int last_chunk, uint8_t chk_type) 1673 { 1674 struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */ 1675 uint32_t tsn, fsn, gap, mid; 1676 struct mbuf *dmbuf; 1677 int the_len; 1678 int need_reasm_check = 0; 1679 uint16_t sid; 1680 struct mbuf *op_err; 1681 char msg[SCTP_DIAG_INFO_LEN]; 1682 struct sctp_queued_to_read *control, *ncontrol; 1683 uint32_t ppid; 1684 uint8_t chk_flags; 1685 struct sctp_stream_reset_list *liste; 1686 int ordered; 1687 size_t clen; 1688 int created_control = 0; 1689 1690 if (chk_type == SCTP_IDATA) { 1691 struct sctp_idata_chunk *chunk, chunk_buf; 1692 1693 chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset, 1694 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf); 1695 chk_flags = chunk->ch.chunk_flags; 1696 clen = sizeof(struct sctp_idata_chunk); 1697 tsn = ntohl(chunk->dp.tsn); 1698 sid = ntohs(chunk->dp.sid); 1699 mid = ntohl(chunk->dp.mid); 1700 if (chk_flags & SCTP_DATA_FIRST_FRAG) { 1701 fsn = 0; 1702 ppid = chunk->dp.ppid_fsn.ppid; 1703 } else { 1704 fsn = ntohl(chunk->dp.ppid_fsn.fsn); 1705 ppid = 0xffffffff; /* Use as an invalid value. */ 1706 } 1707 } else { 1708 struct sctp_data_chunk *chunk, chunk_buf; 1709 1710 chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset, 1711 sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf); 1712 chk_flags = chunk->ch.chunk_flags; 1713 clen = sizeof(struct sctp_data_chunk); 1714 tsn = ntohl(chunk->dp.tsn); 1715 sid = ntohs(chunk->dp.sid); 1716 mid = (uint32_t)(ntohs(chunk->dp.ssn)); 1717 fsn = tsn; 1718 ppid = chunk->dp.ppid; 1719 } 1720 if ((size_t)chk_length == clen) { 1721 /* 1722 * Need to send an abort since we had a empty data chunk. 
1723 */ 1724 op_err = sctp_generate_no_user_data_cause(tsn); 1725 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14; 1726 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 1727 *abort_flag = 1; 1728 return (0); 1729 } 1730 if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) { 1731 asoc->send_sack = 1; 1732 } 1733 ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0); 1734 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 1735 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS); 1736 } 1737 if (stcb == NULL) { 1738 return (0); 1739 } 1740 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn); 1741 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) { 1742 /* It is a duplicate */ 1743 SCTP_STAT_INCR(sctps_recvdupdata); 1744 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { 1745 /* Record a dup for the next outbound sack */ 1746 asoc->dup_tsns[asoc->numduptsns] = tsn; 1747 asoc->numduptsns++; 1748 } 1749 asoc->send_sack = 1; 1750 return (0); 1751 } 1752 /* Calculate the number of TSN's between the base and this TSN */ 1753 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn); 1754 if (gap >= (SCTP_MAPPING_ARRAY << 3)) { 1755 /* Can't hold the bit in the mapping at max array, toss it */ 1756 return (0); 1757 } 1758 if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) { 1759 SCTP_TCB_LOCK_ASSERT(stcb); 1760 if (sctp_expand_mapping_array(asoc, gap)) { 1761 /* Can't expand, drop it */ 1762 return (0); 1763 } 1764 } 1765 if (SCTP_TSN_GT(tsn, *high_tsn)) { 1766 *high_tsn = tsn; 1767 } 1768 /* See if we have received this one already */ 1769 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) || 1770 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) { 1771 SCTP_STAT_INCR(sctps_recvdupdata); 1772 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { 1773 /* Record a dup for the next outbound sack */ 1774 asoc->dup_tsns[asoc->numduptsns] = tsn; 1775 asoc->numduptsns++; 1776 } 1777 asoc->send_sack = 1; 1778 return (0); 1779 } 1780 /* 1781 * Check to see about the GONE flag, duplicates would cause a sack 1782 * to be sent up above 1783 */ 1784 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 1785 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 1786 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) { 1787 /* 1788 * wait a minute, this guy is gone, there is no longer a 1789 * receiver. Send peer an ABORT! 1790 */ 1791 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 1792 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 1793 *abort_flag = 1; 1794 return (0); 1795 } 1796 /* 1797 * Now before going further we see if there is room. If NOT then we 1798 * MAY let one through only IF this TSN is the one we are waiting 1799 * for on a partial delivery API. 1800 */ 1801 1802 /* Is the stream valid? 
*/ 1803 if (sid >= asoc->streamincnt) { 1804 struct sctp_error_invalid_stream *cause; 1805 1806 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream), 1807 0, M_NOWAIT, 1, MT_DATA); 1808 if (op_err != NULL) { 1809 /* add some space up front so prepend will work well */ 1810 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 1811 cause = mtod(op_err, struct sctp_error_invalid_stream *); 1812 /* 1813 * Error causes are just params and this one has 1814 * two back-to-back phdrs, one with the error type 1815 * and size, the other with the stream id and a reserved field. 1816 */ 1817 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream); 1818 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM); 1819 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream)); 1820 cause->stream_id = htons(sid); 1821 cause->reserved = htons(0); 1822 sctp_queue_op_err(stcb, op_err); 1823 } 1824 SCTP_STAT_INCR(sctps_badsid); 1825 SCTP_TCB_LOCK_ASSERT(stcb); 1826 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 1827 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1828 asoc->highest_tsn_inside_nr_map = tsn; 1829 } 1830 if (tsn == (asoc->cumulative_tsn + 1)) { 1831 /* Update cum-ack */ 1832 asoc->cumulative_tsn = tsn; 1833 } 1834 return (0); 1835 } 1836 /* 1837 * If it's a fragmented message, let's see if we can find the control 1838 * on the reassembly queues. 1839 */ 1840 if ((chk_type == SCTP_IDATA) && 1841 ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) && 1842 (fsn == 0)) { 1843 /* 1844 * The first *must* be fsn 0, and other (middle/end) pieces 1845 * can *not* be fsn 0. XXX: This can happen in case of a 1846 * wrap around. Ignore it for now. 1847 */ 1848 snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", 1849 mid, chk_flags); 1850 goto err_out; 1851 } 1852 control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported); 1853 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n", 1854 chk_flags, control); 1855 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 1856 /* See if we can find the re-assembly entity */ 1857 if (control != NULL) { 1858 /* We found something, does it belong? */ 1859 if (ordered && (mid != control->mid)) { 1860 snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid); 1861 err_out: 1862 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 1863 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15; 1864 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 1865 *abort_flag = 1; 1866 return (0); 1867 } 1868 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) { 1869 /* 1870 * We can't have a switched order with an 1871 * unordered chunk 1872 */ 1873 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)", 1874 tsn); 1875 goto err_out; 1876 } 1877 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) { 1878 /* 1879 * We can't have a switched unordered with an 1880 * ordered chunk 1881 */ 1882 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)", 1883 tsn); 1884 goto err_out; 1885 } 1886 } 1887 } else { 1888 /* 1889 * It's a complete segment. Let's validate we don't have a 1890 * re-assembly going on with the same Stream/Seq (for 1891 * ordered) or in the same Stream for unordered.
1892 */ 1893 if (control != NULL) { 1894 if (ordered || asoc->idata_supported) { 1895 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n", 1896 chk_flags, mid); 1897 snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid); 1898 goto err_out; 1899 } else { 1900 if ((tsn == control->fsn_included + 1) && 1901 (control->end_added == 0)) { 1902 snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included); 1903 goto err_out; 1904 } else { 1905 control = NULL; 1906 } 1907 } 1908 } 1909 } 1910 /* now do the tests */ 1911 if (((asoc->cnt_on_all_streams + 1912 asoc->cnt_on_reasm_queue + 1913 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) || 1914 (((int)asoc->my_rwnd) <= 0)) { 1915 /* 1916 * When we have NO room in the rwnd we check to make sure 1917 * the reader is doing its job... 1918 */ 1919 if (stcb->sctp_socket->so_rcv.sb_cc) { 1920 /* some to read, wake-up */ 1921 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1922 struct socket *so; 1923 1924 so = SCTP_INP_SO(stcb->sctp_ep); 1925 atomic_add_int(&stcb->asoc.refcnt, 1); 1926 SCTP_TCB_UNLOCK(stcb); 1927 SCTP_SOCKET_LOCK(so, 1); 1928 SCTP_TCB_LOCK(stcb); 1929 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1930 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 1931 /* assoc was freed while we were unlocked */ 1932 SCTP_SOCKET_UNLOCK(so, 1); 1933 return (0); 1934 } 1935 #endif 1936 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 1937 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1938 SCTP_SOCKET_UNLOCK(so, 1); 1939 #endif 1940 } 1941 /* now is it in the mapping array of what we have accepted? */ 1942 if (chk_type == SCTP_DATA) { 1943 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) && 1944 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1945 /* Nope not in the valid range dump it */ 1946 dump_packet: 1947 sctp_set_rwnd(stcb, asoc); 1948 if ((asoc->cnt_on_all_streams + 1949 asoc->cnt_on_reasm_queue + 1950 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) { 1951 SCTP_STAT_INCR(sctps_datadropchklmt); 1952 } else { 1953 SCTP_STAT_INCR(sctps_datadroprwnd); 1954 } 1955 *break_flag = 1; 1956 return (0); 1957 } 1958 } else { 1959 if (control == NULL) { 1960 goto dump_packet; 1961 } 1962 if (SCTP_TSN_GT(fsn, control->top_fsn)) { 1963 goto dump_packet; 1964 } 1965 } 1966 } 1967 #ifdef SCTP_ASOCLOG_OF_TSNS 1968 SCTP_TCB_LOCK_ASSERT(stcb); 1969 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) { 1970 asoc->tsn_in_at = 0; 1971 asoc->tsn_in_wrapped = 1; 1972 } 1973 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn; 1974 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid; 1975 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid; 1976 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length; 1977 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags; 1978 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb; 1979 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at; 1980 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1; 1981 asoc->tsn_in_at++; 1982 #endif 1983 /* 1984 * Before we continue lets validate that we are not being fooled by 1985 * an evil attacker. We can only have Nk chunks based on our TSN 1986 * spread allowed by the mapping array N * 8 bits, so there is no 1987 * way our stream sequence numbers could have wrapped. We of course 1988 * only validate the FIRST fragment so the bit must be set. 
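* As a rough bound (assuming the default SCTP_MAPPING_ARRAY ceiling of 512 bytes): the map spans at most 512 * 8 = 4096 TSNs beyond the base, while a legitimate wrap of the 16-bit SSN space (or the 32-bit MID space) would need far more messages in flight than that. So a FIRST fragment whose MID is at or behind last_mid_delivered cannot be honest traffic.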
1989 */ 1990 if ((chk_flags & SCTP_DATA_FIRST_FRAG) && 1991 (TAILQ_EMPTY(&asoc->resetHead)) && 1992 (chk_flags & SCTP_DATA_UNORDERED) == 0 && 1993 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) { 1994 /* The incoming sseq is behind where we last delivered? */ 1995 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n", 1996 mid, asoc->strmin[sid].last_mid_delivered); 1997 1998 if (asoc->idata_supported) { 1999 snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x", 2000 asoc->strmin[sid].last_mid_delivered, 2001 tsn, 2002 sid, 2003 mid); 2004 } else { 2005 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", 2006 (uint16_t)asoc->strmin[sid].last_mid_delivered, 2007 tsn, 2008 sid, 2009 (uint16_t)mid); 2010 } 2011 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2012 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16; 2013 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 2014 *abort_flag = 1; 2015 return (0); 2016 } 2017 if (chk_type == SCTP_IDATA) { 2018 the_len = (chk_length - sizeof(struct sctp_idata_chunk)); 2019 } else { 2020 the_len = (chk_length - sizeof(struct sctp_data_chunk)); 2021 } 2022 if (last_chunk == 0) { 2023 if (chk_type == SCTP_IDATA) { 2024 dmbuf = SCTP_M_COPYM(*m, 2025 (offset + sizeof(struct sctp_idata_chunk)), 2026 the_len, M_NOWAIT); 2027 } else { 2028 dmbuf = SCTP_M_COPYM(*m, 2029 (offset + sizeof(struct sctp_data_chunk)), 2030 the_len, M_NOWAIT); 2031 } 2032 #ifdef SCTP_MBUF_LOGGING 2033 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 2034 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY); 2035 } 2036 #endif 2037 } else { 2038 /* We can steal the last chunk */ 2039 int l_len; 2040 2041 dmbuf = *m; 2042 /* lop off the top part */ 2043 if (chk_type == SCTP_IDATA) { 2044 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk))); 2045 } else { 2046 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk))); 2047 } 2048 if (SCTP_BUF_NEXT(dmbuf) == NULL) { 2049 l_len = SCTP_BUF_LEN(dmbuf); 2050 } else { 2051 /* 2052 * need to count up the size hopefully does not hit 2053 * this to often :-0 2054 */ 2055 struct mbuf *lat; 2056 2057 l_len = 0; 2058 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) { 2059 l_len += SCTP_BUF_LEN(lat); 2060 } 2061 } 2062 if (l_len > the_len) { 2063 /* Trim the end round bytes off too */ 2064 m_adj(dmbuf, -(l_len - the_len)); 2065 } 2066 } 2067 if (dmbuf == NULL) { 2068 SCTP_STAT_INCR(sctps_nomem); 2069 return (0); 2070 } 2071 /* 2072 * Now no matter what, we need a control, get one if we don't have 2073 * one (we may have gotten it above when we found the message was 2074 * fragmented 2075 */ 2076 if (control == NULL) { 2077 sctp_alloc_a_readq(stcb, control); 2078 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 2079 ppid, 2080 sid, 2081 chk_flags, 2082 NULL, fsn, mid); 2083 if (control == NULL) { 2084 SCTP_STAT_INCR(sctps_nomem); 2085 return (0); 2086 } 2087 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 2088 struct mbuf *mm; 2089 2090 control->data = dmbuf; 2091 for (mm = control->data; mm; mm = mm->m_next) { 2092 control->length += SCTP_BUF_LEN(mm); 2093 } 2094 control->tail_mbuf = NULL; 2095 control->end_added = 1; 2096 control->last_frag_seen = 1; 2097 control->first_frag_seen = 1; 2098 control->fsn_included = fsn; 2099 control->top_fsn = fsn; 2100 } 2101 created_control = 1; 2102 } 2103 SCTPDBG(SCTP_DEBUG_XXX, 
"chunk_flags: 0x%x ordered: %d MID: %u control: %p\n", 2104 chk_flags, ordered, mid, control); 2105 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG && 2106 TAILQ_EMPTY(&asoc->resetHead) && 2107 ((ordered == 0) || 2108 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) && 2109 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) { 2110 /* Candidate for express delivery */ 2111 /* 2112 * Its not fragmented, No PD-API is up, Nothing in the 2113 * delivery queue, Its un-ordered OR ordered and the next to 2114 * deliver AND nothing else is stuck on the stream queue, 2115 * And there is room for it in the socket buffer. Lets just 2116 * stuff it up the buffer.... 2117 */ 2118 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 2119 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 2120 asoc->highest_tsn_inside_nr_map = tsn; 2121 } 2122 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n", 2123 control, mid); 2124 2125 sctp_add_to_readq(stcb->sctp_ep, stcb, 2126 control, &stcb->sctp_socket->so_rcv, 2127 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 2128 2129 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) { 2130 /* for ordered, bump what we delivered */ 2131 asoc->strmin[sid].last_mid_delivered++; 2132 } 2133 SCTP_STAT_INCR(sctps_recvexpress); 2134 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 2135 sctp_log_strm_del_alt(stcb, tsn, mid, sid, 2136 SCTP_STR_LOG_FROM_EXPRS_DEL); 2137 } 2138 control = NULL; 2139 goto finish_express_del; 2140 } 2141 /* Now will we need a chunk too? */ 2142 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 2143 sctp_alloc_a_chunk(stcb, chk); 2144 if (chk == NULL) { 2145 /* No memory so we drop the chunk */ 2146 SCTP_STAT_INCR(sctps_nomem); 2147 if (last_chunk == 0) { 2148 /* we copied it, free the copy */ 2149 sctp_m_freem(dmbuf); 2150 } 2151 return (0); 2152 } 2153 chk->rec.data.tsn = tsn; 2154 chk->no_fr_allowed = 0; 2155 chk->rec.data.fsn = fsn; 2156 chk->rec.data.mid = mid; 2157 chk->rec.data.sid = sid; 2158 chk->rec.data.ppid = ppid; 2159 chk->rec.data.context = stcb->asoc.context; 2160 chk->rec.data.doing_fast_retransmit = 0; 2161 chk->rec.data.rcv_flags = chk_flags; 2162 chk->asoc = asoc; 2163 chk->send_size = the_len; 2164 chk->whoTo = net; 2165 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n", 2166 chk, 2167 control, mid); 2168 atomic_add_int(&net->ref_count, 1); 2169 chk->data = dmbuf; 2170 } 2171 /* Set the appropriate TSN mark */ 2172 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { 2173 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 2174 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 2175 asoc->highest_tsn_inside_nr_map = tsn; 2176 } 2177 } else { 2178 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); 2179 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) { 2180 asoc->highest_tsn_inside_map = tsn; 2181 } 2182 } 2183 /* Now is it complete (i.e. not fragmented)? */ 2184 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 2185 /* 2186 * Special check for when streams are resetting. We could be 2187 * more smart about this and check the actual stream to see 2188 * if it is not being reset.. that way we would not create a 2189 * HOLB when amongst streams being reset and those not being 2190 * reset. 2191 * 2192 */ 2193 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2194 SCTP_TSN_GT(tsn, liste->tsn)) { 2195 /* 2196 * yep its past where we need to reset... go ahead 2197 * and queue it. 
2198 */ 2199 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) { 2200 /* first one on */ 2201 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2202 } else { 2203 struct sctp_queued_to_read *lcontrol, *nlcontrol; 2204 unsigned char inserted = 0; 2205 2206 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) { 2207 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) { 2208 2209 continue; 2210 } else { 2211 /* found it */ 2212 TAILQ_INSERT_BEFORE(lcontrol, control, next); 2213 inserted = 1; 2214 break; 2215 } 2216 } 2217 if (inserted == 0) { 2218 /* 2219 * must be put at end, use prevP 2220 * (all setup from loop) to setup 2221 * nextP. 2222 */ 2223 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2224 } 2225 } 2226 goto finish_express_del; 2227 } 2228 if (chk_flags & SCTP_DATA_UNORDERED) { 2229 /* queue directly into socket buffer */ 2230 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n", 2231 control, mid); 2232 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 2233 sctp_add_to_readq(stcb->sctp_ep, stcb, 2234 control, 2235 &stcb->sctp_socket->so_rcv, 1, 2236 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 2237 2238 } else { 2239 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control, 2240 mid); 2241 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2242 if (*abort_flag) { 2243 if (last_chunk) { 2244 *m = NULL; 2245 } 2246 return (0); 2247 } 2248 } 2249 goto finish_express_del; 2250 } 2251 /* If we reach here its a reassembly */ 2252 need_reasm_check = 1; 2253 SCTPDBG(SCTP_DEBUG_XXX, 2254 "Queue data to stream for reasm control: %p MID: %u\n", 2255 control, mid); 2256 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn); 2257 if (*abort_flag) { 2258 /* 2259 * the assoc is now gone and chk was put onto the reasm 2260 * queue, which has all been freed. 2261 */ 2262 if (last_chunk) { 2263 *m = NULL; 2264 } 2265 return (0); 2266 } 2267 finish_express_del: 2268 /* Here we tidy up things */ 2269 if (tsn == (asoc->cumulative_tsn + 1)) { 2270 /* Update cum-ack */ 2271 asoc->cumulative_tsn = tsn; 2272 } 2273 if (last_chunk) { 2274 *m = NULL; 2275 } 2276 if (ordered) { 2277 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); 2278 } else { 2279 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); 2280 } 2281 SCTP_STAT_INCR(sctps_recvdata); 2282 /* Set it present please */ 2283 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 2284 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN); 2285 } 2286 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2287 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2288 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); 2289 } 2290 if (need_reasm_check) { 2291 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD); 2292 need_reasm_check = 0; 2293 } 2294 /* check the special flag for stream resets */ 2295 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2296 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) { 2297 /* 2298 * we have finished working through the backlogged TSN's now 2299 * time to reset streams. 1: call reset function. 2: free 2300 * pending_reply space 3: distribute any chunks in 2301 * pending_reply_queue. 
2302 */ 2303 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams); 2304 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); 2305 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED); 2306 SCTP_FREE(liste, SCTP_M_STRESET); 2307 /* sa_ignore FREED_MEMORY */ 2308 liste = TAILQ_FIRST(&asoc->resetHead); 2309 if (TAILQ_EMPTY(&asoc->resetHead)) { 2310 /* All can be removed */ 2311 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) { 2312 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next); 2313 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2314 if (*abort_flag) { 2315 return (0); 2316 } 2317 if (need_reasm_check) { 2318 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD); 2319 need_reasm_check = 0; 2320 } 2321 } 2322 } else { 2323 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) { 2324 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) { 2325 break; 2326 } 2327 /* 2328 * if control->sinfo_tsn is <= liste->tsn we 2329 * can process it which is the NOT of 2330 * control->sinfo_tsn > liste->tsn 2331 */ 2332 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next); 2333 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2334 if (*abort_flag) { 2335 return (0); 2336 } 2337 if (need_reasm_check) { 2338 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD); 2339 need_reasm_check = 0; 2340 } 2341 } 2342 } 2343 } 2344 return (1); 2345 } 2346 2347 static const int8_t sctp_map_lookup_tab[256] = { 2348 0, 1, 0, 2, 0, 1, 0, 3, 2349 0, 1, 0, 2, 0, 1, 0, 4, 2350 0, 1, 0, 2, 0, 1, 0, 3, 2351 0, 1, 0, 2, 0, 1, 0, 5, 2352 0, 1, 0, 2, 0, 1, 0, 3, 2353 0, 1, 0, 2, 0, 1, 0, 4, 2354 0, 1, 0, 2, 0, 1, 0, 3, 2355 0, 1, 0, 2, 0, 1, 0, 6, 2356 0, 1, 0, 2, 0, 1, 0, 3, 2357 0, 1, 0, 2, 0, 1, 0, 4, 2358 0, 1, 0, 2, 0, 1, 0, 3, 2359 0, 1, 0, 2, 0, 1, 0, 5, 2360 0, 1, 0, 2, 0, 1, 0, 3, 2361 0, 1, 0, 2, 0, 1, 0, 4, 2362 0, 1, 0, 2, 0, 1, 0, 3, 2363 0, 1, 0, 2, 0, 1, 0, 7, 2364 0, 1, 0, 2, 0, 1, 0, 3, 2365 0, 1, 0, 2, 0, 1, 0, 4, 2366 0, 1, 0, 2, 0, 1, 0, 3, 2367 0, 1, 0, 2, 0, 1, 0, 5, 2368 0, 1, 0, 2, 0, 1, 0, 3, 2369 0, 1, 0, 2, 0, 1, 0, 4, 2370 0, 1, 0, 2, 0, 1, 0, 3, 2371 0, 1, 0, 2, 0, 1, 0, 6, 2372 0, 1, 0, 2, 0, 1, 0, 3, 2373 0, 1, 0, 2, 0, 1, 0, 4, 2374 0, 1, 0, 2, 0, 1, 0, 3, 2375 0, 1, 0, 2, 0, 1, 0, 5, 2376 0, 1, 0, 2, 0, 1, 0, 3, 2377 0, 1, 0, 2, 0, 1, 0, 4, 2378 0, 1, 0, 2, 0, 1, 0, 3, 2379 0, 1, 0, 2, 0, 1, 0, 8 2380 }; 2381 2382 2383 void 2384 sctp_slide_mapping_arrays(struct sctp_tcb *stcb) 2385 { 2386 /* 2387 * Now we also need to check the mapping array in a couple of ways. 2388 * 1) Did we move the cum-ack point? 2389 * 2390 * When you first glance at this you might think that all entries 2391 * that make up the position of the cum-ack would be in the 2392 * nr-mapping array only.. i.e. things up to the cum-ack are always 2393 * deliverable. Thats true with one exception, when its a fragmented 2394 * message we may not deliver the data until some threshold (or all 2395 * of it) is in place. So we must OR the nr_mapping_array and 2396 * mapping_array to get a true picture of the cum-ack. 
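* A small worked example of the scan below: suppose nr_mapping_array[0] | mapping_array[0] == 0x10 | 0x2f == 0x3f. sctp_map_lookup_tab[0x3f] is 6 (six TSNs present in a row starting at the base), so the scan stops there and cumulative_tsn becomes mapping_array_base_tsn + 6 - 1.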
2397 */ 2398 struct sctp_association *asoc; 2399 int at; 2400 uint8_t val; 2401 int slide_from, slide_end, lgap, distance; 2402 uint32_t old_cumack, old_base, old_highest, highest_tsn; 2403 2404 asoc = &stcb->asoc; 2405 2406 old_cumack = asoc->cumulative_tsn; 2407 old_base = asoc->mapping_array_base_tsn; 2408 old_highest = asoc->highest_tsn_inside_map; 2409 /* 2410 * We could probably improve this a small bit by calculating the 2411 * offset of the current cum-ack as the starting point. 2412 */ 2413 at = 0; 2414 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) { 2415 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from]; 2416 if (val == 0xff) { 2417 at += 8; 2418 } else { 2419 /* there is a 0 bit */ 2420 at += sctp_map_lookup_tab[val]; 2421 break; 2422 } 2423 } 2424 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1); 2425 2426 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) && 2427 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) { 2428 #ifdef INVARIANTS 2429 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map", 2430 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2431 #else 2432 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n", 2433 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2434 sctp_print_mapping_array(asoc); 2435 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2436 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 2437 } 2438 asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2439 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn; 2440 #endif 2441 } 2442 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2443 highest_tsn = asoc->highest_tsn_inside_nr_map; 2444 } else { 2445 highest_tsn = asoc->highest_tsn_inside_map; 2446 } 2447 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) { 2448 /* The complete array was completed by a single FR */ 2449 /* highest becomes the cum-ack */ 2450 int clr; 2451 #ifdef INVARIANTS 2452 unsigned int i; 2453 #endif 2454 2455 /* clear the array */ 2456 clr = ((at + 7) >> 3); 2457 if (clr > asoc->mapping_array_size) { 2458 clr = asoc->mapping_array_size; 2459 } 2460 memset(asoc->mapping_array, 0, clr); 2461 memset(asoc->nr_mapping_array, 0, clr); 2462 #ifdef INVARIANTS 2463 for (i = 0; i < asoc->mapping_array_size; i++) { 2464 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) { 2465 SCTP_PRINTF("Error Mapping array's not clean at clear\n"); 2466 sctp_print_mapping_array(asoc); 2467 } 2468 } 2469 #endif 2470 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; 2471 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2472 } else if (at >= 8) { 2473 /* we can slide the mapping array down */ 2474 /* slide_from holds where we hit the first NON 0xff byte */ 2475 2476 /* 2477 * now calculate the ceiling of the move using our highest 2478 * TSN value 2479 */ 2480 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn); 2481 slide_end = (lgap >> 3); 2482 if (slide_end < slide_from) { 2483 sctp_print_mapping_array(asoc); 2484 #ifdef INVARIANTS 2485 panic("impossible slide"); 2486 #else 2487 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? 
at: %d\n", 2488 lgap, slide_end, slide_from, at); 2489 return; 2490 #endif 2491 } 2492 if (slide_end > asoc->mapping_array_size) { 2493 #ifdef INVARIANTS 2494 panic("would overrun buffer"); 2495 #else 2496 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n", 2497 asoc->mapping_array_size, slide_end); 2498 slide_end = asoc->mapping_array_size; 2499 #endif 2500 } 2501 distance = (slide_end - slide_from) + 1; 2502 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2503 sctp_log_map(old_base, old_cumack, old_highest, 2504 SCTP_MAP_PREPARE_SLIDE); 2505 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end, 2506 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM); 2507 } 2508 if (distance + slide_from > asoc->mapping_array_size || 2509 distance < 0) { 2510 /* 2511 * Here we do NOT slide forward the array so that 2512 * hopefully when more data comes in to fill it up 2513 * we will be able to slide it forward. Really I 2514 * don't think this should happen :-0 2515 */ 2516 2517 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2518 sctp_log_map((uint32_t)distance, (uint32_t)slide_from, 2519 (uint32_t)asoc->mapping_array_size, 2520 SCTP_MAP_SLIDE_NONE); 2521 } 2522 } else { 2523 int ii; 2524 2525 for (ii = 0; ii < distance; ii++) { 2526 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii]; 2527 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii]; 2528 2529 } 2530 for (ii = distance; ii < asoc->mapping_array_size; ii++) { 2531 asoc->mapping_array[ii] = 0; 2532 asoc->nr_mapping_array[ii] = 0; 2533 } 2534 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) { 2535 asoc->highest_tsn_inside_map += (slide_from << 3); 2536 } 2537 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) { 2538 asoc->highest_tsn_inside_nr_map += (slide_from << 3); 2539 } 2540 asoc->mapping_array_base_tsn += (slide_from << 3); 2541 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2542 sctp_log_map(asoc->mapping_array_base_tsn, 2543 asoc->cumulative_tsn, asoc->highest_tsn_inside_map, 2544 SCTP_MAP_SLIDE_RESULT); 2545 } 2546 } 2547 } 2548 } 2549 2550 void 2551 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap) 2552 { 2553 struct sctp_association *asoc; 2554 uint32_t highest_tsn; 2555 int is_a_gap; 2556 2557 sctp_slide_mapping_arrays(stcb); 2558 asoc = &stcb->asoc; 2559 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2560 highest_tsn = asoc->highest_tsn_inside_nr_map; 2561 } else { 2562 highest_tsn = asoc->highest_tsn_inside_map; 2563 } 2564 /* Is there a gap now? */ 2565 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 2566 2567 /* 2568 * Now we need to see if we need to queue a sack or just start the 2569 * timer (if allowed). 2570 */ 2571 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2572 /* 2573 * Ok special case, in SHUTDOWN-SENT case. here we maker 2574 * sure SACK timer is off and instead send a SHUTDOWN and a 2575 * SACK 2576 */ 2577 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2578 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 2579 stcb->sctp_ep, stcb, NULL, 2580 SCTP_FROM_SCTP_INDATA + SCTP_LOC_17); 2581 } 2582 sctp_send_shutdown(stcb, 2583 ((stcb->asoc.alternate) ? 
stcb->asoc.alternate : stcb->asoc.primary_destination)); 2584 if (is_a_gap) { 2585 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 2586 } 2587 } else { 2588 /* 2589 * CMT DAC algorithm: increase number of packets received 2590 * since last ack 2591 */ 2592 stcb->asoc.cmt_dac_pkts_rcvd++; 2593 2594 if ((stcb->asoc.send_sack == 1) || /* We need to send a 2595 * SACK */ 2596 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no 2597 * longer is one */ 2598 (stcb->asoc.numduptsns) || /* we have dup's */ 2599 (is_a_gap) || /* is still a gap */ 2600 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */ 2601 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */ 2602 ) { 2603 2604 if ((stcb->asoc.sctp_cmt_on_off > 0) && 2605 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) && 2606 (stcb->asoc.send_sack == 0) && 2607 (stcb->asoc.numduptsns == 0) && 2608 (stcb->asoc.delayed_ack) && 2609 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) { 2610 2611 /* 2612 * CMT DAC algorithm: With CMT, delay acks 2613 * even in the face of 2614 * 2615 * reordering. Therefore, if acks that do 2616 * not have to be sent because of the above 2617 * reasons, will be delayed. That is, acks 2618 * that would have been sent due to gap 2619 * reports will be delayed with DAC. Start 2620 * the delayed ack timer. 2621 */ 2622 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2623 stcb->sctp_ep, stcb, NULL); 2624 } else { 2625 /* 2626 * Ok we must build a SACK since the timer 2627 * is pending, we got our first packet OR 2628 * there are gaps or duplicates. 2629 */ 2630 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 2631 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 2632 } 2633 } else { 2634 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2635 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2636 stcb->sctp_ep, stcb, NULL); 2637 } 2638 } 2639 } 2640 } 2641 2642 int 2643 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, 2644 struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2645 struct sctp_nets *net, uint32_t *high_tsn) 2646 { 2647 struct sctp_chunkhdr *ch, chunk_buf; 2648 struct sctp_association *asoc; 2649 int num_chunks = 0; /* number of control chunks processed */ 2650 int stop_proc = 0; 2651 int break_flag, last_chunk; 2652 int abort_flag = 0, was_a_gap; 2653 struct mbuf *m; 2654 uint32_t highest_tsn; 2655 uint16_t chk_length; 2656 2657 /* set the rwnd */ 2658 sctp_set_rwnd(stcb, &stcb->asoc); 2659 2660 m = *mm; 2661 SCTP_TCB_LOCK_ASSERT(stcb); 2662 asoc = &stcb->asoc; 2663 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2664 highest_tsn = asoc->highest_tsn_inside_nr_map; 2665 } else { 2666 highest_tsn = asoc->highest_tsn_inside_map; 2667 } 2668 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 2669 /* 2670 * setup where we got the last DATA packet from for any SACK that 2671 * may need to go out. Don't bump the net. This is done ONLY when a 2672 * chunk is assigned. 2673 */ 2674 asoc->last_data_chunk_from = net; 2675 2676 /*- 2677 * Now before we proceed we must figure out if this is a wasted 2678 * cluster... i.e. it is a small packet sent in and yet the driver 2679 * underneath allocated a full cluster for it. If so we must copy it 2680 * to a smaller mbuf and free up the cluster mbuf. This will help 2681 * with cluster starvation. Note for __Panda__ we don't do this 2682 * since it has clusters all the way down to 64 bytes. 
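* (Illustration, with sizes being platform-tunable: a 90-byte DATA packet that the driver received into a 2 KB cluster pins the whole cluster; copying it into a plain MLEN-sized mbuf below returns the cluster to the pool.)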
*/ 2684 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) { 2685 /* we only handle mbufs that are singletons.. not chains */ 2686 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA); 2687 if (m) { 2688 /* ok let's see if we can copy the data up */ 2689 caddr_t *from, *to; 2690 2691 /* get the pointers and copy */ 2692 to = mtod(m, caddr_t *); 2693 from = mtod((*mm), caddr_t *); 2694 memcpy(to, from, SCTP_BUF_LEN((*mm))); 2695 /* copy the length and free up the old */ 2696 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm)); 2697 sctp_m_freem(*mm); 2698 /* success, back copy */ 2699 *mm = m; 2700 } else { 2701 /* We are in trouble in the mbuf world .. yikes */ 2702 m = *mm; 2703 } 2704 } 2705 /* get pointer to the first chunk header */ 2706 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 2707 sizeof(struct sctp_chunkhdr), 2708 (uint8_t *)&chunk_buf); 2709 if (ch == NULL) { 2710 return (1); 2711 } 2712 /* 2713 * process all DATA chunks... 2714 */ 2715 *high_tsn = asoc->cumulative_tsn; 2716 break_flag = 0; 2717 asoc->data_pkts_seen++; 2718 while (stop_proc == 0) { 2719 /* validate chunk length */ 2720 chk_length = ntohs(ch->chunk_length); 2721 if (length - *offset < chk_length) { 2722 /* all done, mutilated chunk */ 2723 stop_proc = 1; 2724 continue; 2725 } 2726 if ((asoc->idata_supported == 1) && 2727 (ch->chunk_type == SCTP_DATA)) { 2728 struct mbuf *op_err; 2729 char msg[SCTP_DIAG_INFO_LEN]; 2730 2731 snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated"); 2732 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2733 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18; 2734 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2735 return (2); 2736 } 2737 if ((asoc->idata_supported == 0) && 2738 (ch->chunk_type == SCTP_IDATA)) { 2739 struct mbuf *op_err; 2740 char msg[SCTP_DIAG_INFO_LEN]; 2741 2742 snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated"); 2743 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2744 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19; 2745 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2746 return (2); 2747 } 2748 if ((ch->chunk_type == SCTP_DATA) || 2749 (ch->chunk_type == SCTP_IDATA)) { 2750 uint16_t clen; 2751 2752 if (ch->chunk_type == SCTP_DATA) { 2753 clen = sizeof(struct sctp_data_chunk); 2754 } else { 2755 clen = sizeof(struct sctp_idata_chunk); 2756 } 2757 if (chk_length < clen) { 2758 /* 2759 * Need to send an abort since we had an 2760 * invalid data chunk. 2761 */ 2762 struct mbuf *op_err; 2763 char msg[SCTP_DIAG_INFO_LEN]; 2764 2765 snprintf(msg, sizeof(msg), "%s chunk of length %u", 2766 ch->chunk_type == SCTP_DATA ?
"DATA" : "I-DATA", 2767 chk_length); 2768 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2769 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20; 2770 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2771 return (2); 2772 } 2773 #ifdef SCTP_AUDITING_ENABLED 2774 sctp_audit_log(0xB1, 0); 2775 #endif 2776 if (SCTP_SIZE32(chk_length) == (length - *offset)) { 2777 last_chunk = 1; 2778 } else { 2779 last_chunk = 0; 2780 } 2781 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, 2782 chk_length, net, high_tsn, &abort_flag, &break_flag, 2783 last_chunk, ch->chunk_type)) { 2784 num_chunks++; 2785 } 2786 if (abort_flag) 2787 return (2); 2788 2789 if (break_flag) { 2790 /* 2791 * Set because of out of rwnd space and no 2792 * drop rep space left. 2793 */ 2794 stop_proc = 1; 2795 continue; 2796 } 2797 } else { 2798 /* not a data chunk in the data region */ 2799 switch (ch->chunk_type) { 2800 case SCTP_INITIATION: 2801 case SCTP_INITIATION_ACK: 2802 case SCTP_SELECTIVE_ACK: 2803 case SCTP_NR_SELECTIVE_ACK: 2804 case SCTP_HEARTBEAT_REQUEST: 2805 case SCTP_HEARTBEAT_ACK: 2806 case SCTP_ABORT_ASSOCIATION: 2807 case SCTP_SHUTDOWN: 2808 case SCTP_SHUTDOWN_ACK: 2809 case SCTP_OPERATION_ERROR: 2810 case SCTP_COOKIE_ECHO: 2811 case SCTP_COOKIE_ACK: 2812 case SCTP_ECN_ECHO: 2813 case SCTP_ECN_CWR: 2814 case SCTP_SHUTDOWN_COMPLETE: 2815 case SCTP_AUTHENTICATION: 2816 case SCTP_ASCONF_ACK: 2817 case SCTP_PACKET_DROPPED: 2818 case SCTP_STREAM_RESET: 2819 case SCTP_FORWARD_CUM_TSN: 2820 case SCTP_ASCONF: 2821 { 2822 /* 2823 * Now, what do we do with KNOWN 2824 * chunks that are NOT in the right 2825 * place? 2826 * 2827 * For now, I do nothing but ignore 2828 * them. We may later want to add 2829 * sysctl stuff to switch out and do 2830 * either an ABORT() or possibly 2831 * process them. 2832 */ 2833 struct mbuf *op_err; 2834 char msg[SCTP_DIAG_INFO_LEN]; 2835 2836 snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x", 2837 ch->chunk_type); 2838 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2839 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2840 return (2); 2841 } 2842 default: 2843 /* 2844 * Unknown chunk type: use bit rules after 2845 * checking length 2846 */ 2847 if (chk_length < sizeof(struct sctp_chunkhdr)) { 2848 /* 2849 * Need to send an abort since we 2850 * had a invalid chunk. 
2851 */ 2852 struct mbuf *op_err; 2853 char msg[SCTP_DIAG_INFO_LEN]; 2854 2855 snprintf(msg, sizeof(msg), "Chunk of length %u", 2856 chk_length); 2857 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2858 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20; 2859 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2860 return (2); 2861 } 2862 if (ch->chunk_type & 0x40) { 2863 /* Add a error report to the queue */ 2864 struct mbuf *op_err; 2865 struct sctp_gen_error_cause *cause; 2866 2867 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause), 2868 0, M_NOWAIT, 1, MT_DATA); 2869 if (op_err != NULL) { 2870 cause = mtod(op_err, struct sctp_gen_error_cause *); 2871 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK); 2872 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause))); 2873 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause); 2874 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT); 2875 if (SCTP_BUF_NEXT(op_err) != NULL) { 2876 sctp_queue_op_err(stcb, op_err); 2877 } else { 2878 sctp_m_freem(op_err); 2879 } 2880 } 2881 } 2882 if ((ch->chunk_type & 0x80) == 0) { 2883 /* discard the rest of this packet */ 2884 stop_proc = 1; 2885 } /* else skip this bad chunk and 2886 * continue... */ 2887 break; 2888 } /* switch of chunk type */ 2889 } 2890 *offset += SCTP_SIZE32(chk_length); 2891 if ((*offset >= length) || stop_proc) { 2892 /* no more data left in the mbuf chain */ 2893 stop_proc = 1; 2894 continue; 2895 } 2896 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 2897 sizeof(struct sctp_chunkhdr), 2898 (uint8_t *)&chunk_buf); 2899 if (ch == NULL) { 2900 *offset = length; 2901 stop_proc = 1; 2902 continue; 2903 } 2904 } 2905 if (break_flag) { 2906 /* 2907 * we need to report rwnd overrun drops. 2908 */ 2909 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0); 2910 } 2911 if (num_chunks) { 2912 /* 2913 * Did we get data, if so update the time for auto-close and 2914 * give peer credit for being alive. 2915 */ 2916 SCTP_STAT_INCR(sctps_recvpktwithdata); 2917 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 2918 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 2919 stcb->asoc.overall_error_count, 2920 0, 2921 SCTP_FROM_SCTP_INDATA, 2922 __LINE__); 2923 } 2924 stcb->asoc.overall_error_count = 0; 2925 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd); 2926 } 2927 /* now service all of the reassm queue if needed */ 2928 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2929 /* Assure that we ack right away */ 2930 stcb->asoc.send_sack = 1; 2931 } 2932 /* Start a sack timer or QUEUE a SACK for sending */ 2933 sctp_sack_check(stcb, was_a_gap); 2934 return (0); 2935 } 2936 2937 static int 2938 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn, 2939 uint16_t frag_strt, uint16_t frag_end, int nr_sacking, 2940 int *num_frs, 2941 uint32_t *biggest_newly_acked_tsn, 2942 uint32_t *this_sack_lowest_newack, 2943 int *rto_ok) 2944 { 2945 struct sctp_tmit_chunk *tp1; 2946 unsigned int theTSN; 2947 int j, wake_him = 0, circled = 0; 2948 2949 /* Recover the tp1 we last saw */ 2950 tp1 = *p_tp1; 2951 if (tp1 == NULL) { 2952 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 2953 } 2954 for (j = frag_strt; j <= frag_end; j++) { 2955 theTSN = j + last_tsn; 2956 while (tp1) { 2957 if (tp1->rec.data.doing_fast_retransmit) 2958 (*num_frs) += 1; 2959 2960 /*- 2961 * CMT: CUCv2 algorithm. 
For each TSN being 2962 * processed from the sent queue, track the 2963 * next expected pseudo-cumack, or 2964 * rtx_pseudo_cumack, if required. Separate 2965 * cumack trackers for first transmissions, 2966 * and retransmissions. 2967 */ 2968 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 2969 (tp1->whoTo->find_pseudo_cumack == 1) && 2970 (tp1->snd_count == 1)) { 2971 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn; 2972 tp1->whoTo->find_pseudo_cumack = 0; 2973 } 2974 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 2975 (tp1->whoTo->find_rtx_pseudo_cumack == 1) && 2976 (tp1->snd_count > 1)) { 2977 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn; 2978 tp1->whoTo->find_rtx_pseudo_cumack = 0; 2979 } 2980 if (tp1->rec.data.tsn == theTSN) { 2981 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 2982 /*- 2983 * must be held until 2984 * cum-ack passes 2985 */ 2986 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 2987 /*- 2988 * If it is less than RESEND, it is 2989 * now no-longer in flight. 2990 * Higher values may already be set 2991 * via previous Gap Ack Blocks... 2992 * i.e. ACKED or RESEND. 2993 */ 2994 if (SCTP_TSN_GT(tp1->rec.data.tsn, 2995 *biggest_newly_acked_tsn)) { 2996 *biggest_newly_acked_tsn = tp1->rec.data.tsn; 2997 } 2998 /*- 2999 * CMT: SFR algo (and HTNA) - set 3000 * saw_newack to 1 for dest being 3001 * newly acked. update 3002 * this_sack_highest_newack if 3003 * appropriate. 3004 */ 3005 if (tp1->rec.data.chunk_was_revoked == 0) 3006 tp1->whoTo->saw_newack = 1; 3007 3008 if (SCTP_TSN_GT(tp1->rec.data.tsn, 3009 tp1->whoTo->this_sack_highest_newack)) { 3010 tp1->whoTo->this_sack_highest_newack = 3011 tp1->rec.data.tsn; 3012 } 3013 /*- 3014 * CMT DAC algo: also update 3015 * this_sack_lowest_newack 3016 */ 3017 if (*this_sack_lowest_newack == 0) { 3018 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3019 sctp_log_sack(*this_sack_lowest_newack, 3020 last_tsn, 3021 tp1->rec.data.tsn, 3022 0, 3023 0, 3024 SCTP_LOG_TSN_ACKED); 3025 } 3026 *this_sack_lowest_newack = tp1->rec.data.tsn; 3027 } 3028 /*- 3029 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp 3030 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set 3031 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be 3032 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack. 3033 * Separate pseudo_cumack trackers for first transmissions and 3034 * retransmissions. 
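* One way to read the flag dance, for what it's worth: find_(rtx_)pseudo_cumack == 1 asks the scan to latch the next eligible TSN for that destination into (rtx_)pseudo_cumack; when a SACK later covers that TSN, new_pseudo_cumack is raised so the CC module may grow cwnd, and the find flag is set again to hunt for the next one.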
3035 */ 3036 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) { 3037 if (tp1->rec.data.chunk_was_revoked == 0) { 3038 tp1->whoTo->new_pseudo_cumack = 1; 3039 } 3040 tp1->whoTo->find_pseudo_cumack = 1; 3041 } 3042 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 3043 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); 3044 } 3045 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) { 3046 if (tp1->rec.data.chunk_was_revoked == 0) { 3047 tp1->whoTo->new_pseudo_cumack = 1; 3048 } 3049 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3050 } 3051 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3052 sctp_log_sack(*biggest_newly_acked_tsn, 3053 last_tsn, 3054 tp1->rec.data.tsn, 3055 frag_strt, 3056 frag_end, 3057 SCTP_LOG_TSN_ACKED); 3058 } 3059 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3060 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP, 3061 tp1->whoTo->flight_size, 3062 tp1->book_size, 3063 (uint32_t)(uintptr_t)tp1->whoTo, 3064 tp1->rec.data.tsn); 3065 } 3066 sctp_flight_size_decrease(tp1); 3067 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3068 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3069 tp1); 3070 } 3071 sctp_total_flight_decrease(stcb, tp1); 3072 3073 tp1->whoTo->net_ack += tp1->send_size; 3074 if (tp1->snd_count < 2) { 3075 /*- 3076 * True non-retransmited chunk 3077 */ 3078 tp1->whoTo->net_ack2 += tp1->send_size; 3079 3080 /*- 3081 * update RTO too ? 3082 */ 3083 if (tp1->do_rtt) { 3084 if (*rto_ok) { 3085 tp1->whoTo->RTO = 3086 sctp_calculate_rto(stcb, 3087 &stcb->asoc, 3088 tp1->whoTo, 3089 &tp1->sent_rcv_time, 3090 SCTP_RTT_FROM_DATA); 3091 *rto_ok = 0; 3092 } 3093 if (tp1->whoTo->rto_needed == 0) { 3094 tp1->whoTo->rto_needed = 1; 3095 } 3096 tp1->do_rtt = 0; 3097 } 3098 } 3099 } 3100 if (tp1->sent <= SCTP_DATAGRAM_RESEND) { 3101 if (SCTP_TSN_GT(tp1->rec.data.tsn, 3102 stcb->asoc.this_sack_highest_gap)) { 3103 stcb->asoc.this_sack_highest_gap = 3104 tp1->rec.data.tsn; 3105 } 3106 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3107 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt); 3108 #ifdef SCTP_AUDITING_ENABLED 3109 sctp_audit_log(0xB2, 3110 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff)); 3111 #endif 3112 } 3113 } 3114 /*- 3115 * All chunks NOT UNSENT fall through here and are marked 3116 * (leave PR-SCTP ones that are to skip alone though) 3117 */ 3118 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) && 3119 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { 3120 tp1->sent = SCTP_DATAGRAM_MARKED; 3121 } 3122 if (tp1->rec.data.chunk_was_revoked) { 3123 /* deflate the cwnd */ 3124 tp1->whoTo->cwnd -= tp1->book_size; 3125 tp1->rec.data.chunk_was_revoked = 0; 3126 } 3127 /* NR Sack code here */ 3128 if (nr_sacking && 3129 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { 3130 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) { 3131 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--; 3132 #ifdef INVARIANTS 3133 } else { 3134 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); 3135 #endif 3136 } 3137 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) && 3138 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && 3139 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) { 3140 stcb->asoc.trigger_reset = 1; 3141 } 3142 tp1->sent = SCTP_DATAGRAM_NR_ACKED; 3143 if (tp1->data) { 3144 /* 3145 * sa_ignore 3146 * NO_NULL_CHK 3147 */ 3148 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 3149 
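/* An NR-acked chunk can never be revoked or retransmitted, so the payload mbufs are released on the spot (the bufspace accounting was debited just above). */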
sctp_m_freem(tp1->data); 3150 tp1->data = NULL; 3151 } 3152 wake_him++; 3153 } 3154 } 3155 break; 3156 } /* if (tp1->tsn == theTSN) */ 3157 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) { 3158 break; 3159 } 3160 tp1 = TAILQ_NEXT(tp1, sctp_next); 3161 if ((tp1 == NULL) && (circled == 0)) { 3162 circled++; 3163 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 3164 } 3165 } /* end while (tp1) */ 3166 if (tp1 == NULL) { 3167 circled = 0; 3168 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 3169 } 3170 /* In case the fragments were not in order we must reset */ 3171 } /* end for (j = fragStart */ 3172 *p_tp1 = tp1; 3173 return (wake_him); /* Return value only used for nr-sack */ 3174 } 3175 3176 3177 static int 3178 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc, 3179 uint32_t last_tsn, uint32_t *biggest_tsn_acked, 3180 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack, 3181 int num_seg, int num_nr_seg, int *rto_ok) 3182 { 3183 struct sctp_gap_ack_block *frag, block; 3184 struct sctp_tmit_chunk *tp1; 3185 int i; 3186 int num_frs = 0; 3187 int chunk_freed; 3188 int non_revocable; 3189 uint16_t frag_strt, frag_end, prev_frag_end; 3190 3191 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3192 prev_frag_end = 0; 3193 chunk_freed = 0; 3194 3195 for (i = 0; i < (num_seg + num_nr_seg); i++) { 3196 if (i == num_seg) { 3197 prev_frag_end = 0; 3198 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3199 } 3200 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset, 3201 sizeof(struct sctp_gap_ack_block), (uint8_t *)&block); 3202 *offset += sizeof(block); 3203 if (frag == NULL) { 3204 return (chunk_freed); 3205 } 3206 frag_strt = ntohs(frag->start); 3207 frag_end = ntohs(frag->end); 3208 3209 if (frag_strt > frag_end) { 3210 /* This gap report is malformed, skip it. */ 3211 continue; 3212 } 3213 if (frag_strt <= prev_frag_end) { 3214 /* This gap report is not in order, so restart. */ 3215 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3216 } 3217 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) { 3218 *biggest_tsn_acked = last_tsn + frag_end; 3219 } 3220 if (i < num_seg) { 3221 non_revocable = 0; 3222 } else { 3223 non_revocable = 1; 3224 } 3225 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end, 3226 non_revocable, &num_frs, biggest_newly_acked_tsn, 3227 this_sack_lowest_newack, rto_ok)) { 3228 chunk_freed = 1; 3229 } 3230 prev_frag_end = frag_end; 3231 } 3232 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3233 if (num_frs) 3234 sctp_log_fr(*biggest_tsn_acked, 3235 *biggest_newly_acked_tsn, 3236 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS); 3237 } 3238 return (chunk_freed); 3239 } 3240 3241 static void 3242 sctp_check_for_revoked(struct sctp_tcb *stcb, 3243 struct sctp_association *asoc, uint32_t cumack, 3244 uint32_t biggest_tsn_acked) 3245 { 3246 struct sctp_tmit_chunk *tp1; 3247 3248 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3249 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) { 3250 /* 3251 * ok this guy is either ACK or MARKED. If it is 3252 * ACKED it has been previously acked but not this 3253 * time i.e. revoked. If it is MARKED it was ACK'ed 3254 * again. 3255 */ 3256 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) { 3257 break; 3258 } 3259 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 3260 /* it has been revoked */ 3261 tp1->sent = SCTP_DATAGRAM_SENT; 3262 tp1->rec.data.chunk_was_revoked = 1; 3263 /* 3264 * We must add this stuff back in to assure 3265 * timers and such get started. 
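* Example of a revocation: TSN 120 sat inside a gap report of the previous SACK (so it was marked ACKED and taken out of flight), but this SACK's reports no longer cover it. The receiver has reneged, so the chunk drops back to SENT here and flight_size and cwnd are re-inflated by book_size below to keep the accounting consistent.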
3266 */ 3267 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3268 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 3269 tp1->whoTo->flight_size, 3270 tp1->book_size, 3271 (uint32_t)(uintptr_t)tp1->whoTo, 3272 tp1->rec.data.tsn); 3273 } 3274 sctp_flight_size_increase(tp1); 3275 sctp_total_flight_increase(stcb, tp1); 3276 /* 3277 * We inflate the cwnd to compensate for our 3278 * artificial inflation of the flight_size. 3279 */ 3280 tp1->whoTo->cwnd += tp1->book_size; 3281 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3282 sctp_log_sack(asoc->last_acked_seq, 3283 cumack, 3284 tp1->rec.data.tsn, 3285 0, 3286 0, 3287 SCTP_LOG_TSN_REVOKED); 3288 } 3289 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) { 3290 /* it has been re-acked in this SACK */ 3291 tp1->sent = SCTP_DATAGRAM_ACKED; 3292 } 3293 } 3294 if (tp1->sent == SCTP_DATAGRAM_UNSENT) 3295 break; 3296 } 3297 } 3298 3299 3300 static void 3301 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, 3302 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved) 3303 { 3304 struct sctp_tmit_chunk *tp1; 3305 int strike_flag = 0; 3306 struct timeval now; 3307 int tot_retrans = 0; 3308 uint32_t sending_seq; 3309 struct sctp_nets *net; 3310 int num_dests_sacked = 0; 3311 3312 /* 3313 * select the sending_seq, this is either the next thing ready to be 3314 * sent but not transmitted, OR, the next seq we assign. 3315 */ 3316 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 3317 if (tp1 == NULL) { 3318 sending_seq = asoc->sending_seq; 3319 } else { 3320 sending_seq = tp1->rec.data.tsn; 3321 } 3322 3323 /* CMT DAC algo: finding out if SACK is a mixed SACK */ 3324 if ((asoc->sctp_cmt_on_off > 0) && 3325 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3326 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3327 if (net->saw_newack) 3328 num_dests_sacked++; 3329 } 3330 } 3331 if (stcb->asoc.prsctp_supported) { 3332 (void)SCTP_GETTIME_TIMEVAL(&now); 3333 } 3334 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3335 strike_flag = 0; 3336 if (tp1->no_fr_allowed) { 3337 /* this one had a timeout or something */ 3338 continue; 3339 } 3340 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3341 if (tp1->sent < SCTP_DATAGRAM_RESEND) 3342 sctp_log_fr(biggest_tsn_newly_acked, 3343 tp1->rec.data.tsn, 3344 tp1->sent, 3345 SCTP_FR_LOG_CHECK_STRIKE); 3346 } 3347 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) || 3348 tp1->sent == SCTP_DATAGRAM_UNSENT) { 3349 /* done */ 3350 break; 3351 } 3352 if (stcb->asoc.prsctp_supported) { 3353 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3354 /* Is it expired? 
*/ 3355 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3356 /* Yes so drop it */ 3357 if (tp1->data != NULL) { 3358 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, 3359 SCTP_SO_NOT_LOCKED); 3360 } 3361 continue; 3362 } 3363 } 3364 } 3365 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) && 3366 !(accum_moved && asoc->fast_retran_loss_recovery)) { 3367 /* we are beyond the tsn in the sack */ 3368 break; 3369 } 3370 if (tp1->sent >= SCTP_DATAGRAM_RESEND) { 3371 /* either a RESEND, ACKED, or MARKED */ 3372 /* skip */ 3373 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) { 3374 /* Continue striking FWD-TSN chunks */ 3375 tp1->rec.data.fwd_tsn_cnt++; 3376 } 3377 continue; 3378 } 3379 /* 3380 * CMT : SFR algo (covers part of DAC and HTNA as well) 3381 */ 3382 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) { 3383 /* 3384 * No new acks were received for data sent to this 3385 * dest. Therefore, according to the SFR algo for 3386 * CMT, no data sent to this dest can be marked for 3387 * FR using this SACK. 3388 */ 3389 continue; 3390 } else if (tp1->whoTo && 3391 SCTP_TSN_GT(tp1->rec.data.tsn, 3392 tp1->whoTo->this_sack_highest_newack) && 3393 !(accum_moved && asoc->fast_retran_loss_recovery)) { 3394 /* 3395 * CMT: New acks were received for data sent to 3396 * this dest. But no new acks were seen for data 3397 * sent after tp1. Therefore, according to the SFR 3398 * algo for CMT, tp1 cannot be marked for FR using 3399 * this SACK. This step covers part of the DAC algo 3400 * and the HTNA algo as well. 3401 */ 3402 continue; 3403 } 3404 /* 3405 * Here we check to see if we have already done an FR 3406 * and if so we see if the biggest TSN we saw in the sack is 3407 * smaller than the recovery point. If so we don't strike 3408 * the tsn... otherwise we CAN strike the TSN. 3409 */ 3410 /* 3411 * @@@ JRI: Check for CMT if (accum_moved && 3412 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 3413 * 0)) { 3414 */ 3415 if (accum_moved && asoc->fast_retran_loss_recovery) { 3416 /* 3417 * Strike the TSN if in fast-recovery and cum-ack 3418 * moved. 3419 */ 3420 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3421 sctp_log_fr(biggest_tsn_newly_acked, 3422 tp1->rec.data.tsn, 3423 tp1->sent, 3424 SCTP_FR_LOG_STRIKE_CHUNK); 3425 } 3426 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3427 tp1->sent++; 3428 } 3429 if ((asoc->sctp_cmt_on_off > 0) && 3430 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3431 /* 3432 * CMT DAC algorithm: If SACK flag is set to 3433 * 0, then lowest_newack test will not pass 3434 * because it would have been set to the 3435 * cumack earlier. If not already to be 3436 * rtx'd, If not a mixed sack and if tp1 is 3437 * not between two sacked TSNs, then mark by 3438 * one more. NOTE that we are marking by one 3439 * additional time since the SACK DAC flag 3440 * indicates that two packets have been 3441 * received after this missing TSN. 3442 */ 3443 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3444 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) { 3445 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3446 sctp_log_fr(16 + num_dests_sacked, 3447 tp1->rec.data.tsn, 3448 tp1->sent, 3449 SCTP_FR_LOG_STRIKE_CHUNK); 3450 } 3451 tp1->sent++; 3452 } 3453 } 3454 } else if ((tp1->rec.data.doing_fast_retransmit) && 3455 (asoc->sctp_cmt_on_off == 0)) { 3456 /* 3457 * For those that have done an FR we must take 3458 * special consideration if we strike.
I.e the 3459 * biggest_newly_acked must be higher than the 3460 * sending_seq at the time we did the FR. 3461 */ 3462 if ( 3463 #ifdef SCTP_FR_TO_ALTERNATE 3464 /* 3465 * If FR's go to new networks, then we must only do 3466 * this for singly homed asoc's. However if the FR's 3467 * go to the same network (Armando's work) then its 3468 * ok to FR multiple times. 3469 */ 3470 (asoc->numnets < 2) 3471 #else 3472 (1) 3473 #endif 3474 ) { 3475 3476 if (SCTP_TSN_GE(biggest_tsn_newly_acked, 3477 tp1->rec.data.fast_retran_tsn)) { 3478 /* 3479 * Strike the TSN, since this ack is 3480 * beyond where things were when we 3481 * did a FR. 3482 */ 3483 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3484 sctp_log_fr(biggest_tsn_newly_acked, 3485 tp1->rec.data.tsn, 3486 tp1->sent, 3487 SCTP_FR_LOG_STRIKE_CHUNK); 3488 } 3489 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3490 tp1->sent++; 3491 } 3492 strike_flag = 1; 3493 if ((asoc->sctp_cmt_on_off > 0) && 3494 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3495 /* 3496 * CMT DAC algorithm: If 3497 * SACK flag is set to 0, 3498 * then lowest_newack test 3499 * will not pass because it 3500 * would have been set to 3501 * the cumack earlier. If 3502 * not already to be rtx'd, 3503 * If not a mixed sack and 3504 * if tp1 is not between two 3505 * sacked TSNs, then mark by 3506 * one more. NOTE that we 3507 * are marking by one 3508 * additional time since the 3509 * SACK DAC flag indicates 3510 * that two packets have 3511 * been received after this 3512 * missing TSN. 3513 */ 3514 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 3515 (num_dests_sacked == 1) && 3516 SCTP_TSN_GT(this_sack_lowest_newack, 3517 tp1->rec.data.tsn)) { 3518 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3519 sctp_log_fr(32 + num_dests_sacked, 3520 tp1->rec.data.tsn, 3521 tp1->sent, 3522 SCTP_FR_LOG_STRIKE_CHUNK); 3523 } 3524 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3525 tp1->sent++; 3526 } 3527 } 3528 } 3529 } 3530 } 3531 /* 3532 * JRI: TODO: remove code for HTNA algo. CMT's SFR 3533 * algo covers HTNA. 3534 */ 3535 } else if (SCTP_TSN_GT(tp1->rec.data.tsn, 3536 biggest_tsn_newly_acked)) { 3537 /* 3538 * We don't strike these: This is the HTNA 3539 * algorithm i.e. we don't strike If our TSN is 3540 * larger than the Highest TSN Newly Acked. 3541 */ 3542 ; 3543 } else { 3544 /* Strike the TSN */ 3545 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3546 sctp_log_fr(biggest_tsn_newly_acked, 3547 tp1->rec.data.tsn, 3548 tp1->sent, 3549 SCTP_FR_LOG_STRIKE_CHUNK); 3550 } 3551 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3552 tp1->sent++; 3553 } 3554 if ((asoc->sctp_cmt_on_off > 0) && 3555 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3556 /* 3557 * CMT DAC algorithm: If SACK flag is set to 3558 * 0, then lowest_newack test will not pass 3559 * because it would have been set to the 3560 * cumack earlier. If not already to be 3561 * rtx'd, If not a mixed sack and if tp1 is 3562 * not between two sacked TSNs, then mark by 3563 * one more. NOTE that we are marking by one 3564 * additional time since the SACK DAC flag 3565 * indicates that two packets have been 3566 * received after this missing TSN. 
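* Concretely: if only one destination was sacked and tp1 (say TSN 105) still sits below this_sack_lowest_newack (say 107), the DAC rule adds a second strike within this one SACK, letting the chunk reach the fast-retransmit threshold in fewer SACKs.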
3567 */ 3568 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3569 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) { 3570 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3571 sctp_log_fr(48 + num_dests_sacked, 3572 tp1->rec.data.tsn, 3573 tp1->sent, 3574 SCTP_FR_LOG_STRIKE_CHUNK); 3575 } 3576 tp1->sent++; 3577 } 3578 } 3579 } 3580 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3581 struct sctp_nets *alt; 3582 3583 /* fix counts and things */ 3584 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3585 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND, 3586 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0), 3587 tp1->book_size, 3588 (uint32_t)(uintptr_t)tp1->whoTo, 3589 tp1->rec.data.tsn); 3590 } 3591 if (tp1->whoTo) { 3592 tp1->whoTo->net_ack++; 3593 sctp_flight_size_decrease(tp1); 3594 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3595 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3596 tp1); 3597 } 3598 } 3599 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 3600 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, 3601 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3602 } 3603 /* add back to the rwnd */ 3604 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3605 3606 /* remove from the total flight */ 3607 sctp_total_flight_decrease(stcb, tp1); 3608 3609 if ((stcb->asoc.prsctp_supported) && 3610 (PR_SCTP_RTX_ENABLED(tp1->flags))) { 3611 /* 3612 * Has it been retransmitted tv_sec times? - 3613 * we store the retran count there. 3614 */ 3615 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) { 3616 /* Yes, so drop it */ 3617 if (tp1->data != NULL) { 3618 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, 3619 SCTP_SO_NOT_LOCKED); 3620 } 3621 /* Make sure to flag we had a FR */ 3622 if (tp1->whoTo != NULL) { 3623 tp1->whoTo->net_ack++; 3624 } 3625 continue; 3626 } 3627 } 3628 /* 3629 * SCTP_PRINTF("OK, we are now ready to FR this 3630 * guy\n"); 3631 */ 3632 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3633 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count, 3634 0, SCTP_FR_MARKED); 3635 } 3636 if (strike_flag) { 3637 /* This is a subsequent FR */ 3638 SCTP_STAT_INCR(sctps_sendmultfastretrans); 3639 } 3640 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3641 if (asoc->sctp_cmt_on_off > 0) { 3642 /* 3643 * CMT: Using RTX_SSTHRESH policy for CMT. 3644 * If CMT is being used, then pick dest with 3645 * largest ssthresh for any retransmission. 3646 */ 3647 tp1->no_fr_allowed = 1; 3648 alt = tp1->whoTo; 3649 /* sa_ignore NO_NULL_CHK */ 3650 if (asoc->sctp_cmt_pf > 0) { 3651 /* 3652 * JRS 5/18/07 - If CMT PF is on, 3653 * use the PF version of 3654 * find_alt_net() 3655 */ 3656 alt = sctp_find_alternate_net(stcb, alt, 2); 3657 } else { 3658 /* 3659 * JRS 5/18/07 - If only CMT is on, 3660 * use the CMT version of 3661 * find_alt_net() 3662 */ 3663 /* sa_ignore NO_NULL_CHK */ 3664 alt = sctp_find_alternate_net(stcb, alt, 1); 3665 } 3666 if (alt == NULL) { 3667 alt = tp1->whoTo; 3668 } 3669 /* 3670 * CUCv2: If a different dest is picked for 3671 * the retransmission, then new 3672 * (rtx-)pseudo_cumack needs to be tracked 3673 * for orig dest. Let CUCv2 track new (rtx-) 3674 * pseudo-cumack always. 3675 */ 3676 if (tp1->whoTo) { 3677 tp1->whoTo->find_pseudo_cumack = 1; 3678 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3679 } 3680 } else { /* CMT is OFF */ 3681 3682 #ifdef SCTP_FR_TO_ALTERNATE 3683 /* Can we find an alternate? 
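(Mode note, summarizing the comments above: sctp_find_alternate_net()
is called with 2 for the CMT-PF variant, 1 for the plain CMT variant,
and, on this path, 0 for the default selection.)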
*/ 3684 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); 3685 #else 3686 /* 3687 * default behavior is to NOT retransmit 3688 * FR's to an alternate. Armando Caro's 3689 * paper details why. 3690 */ 3691 alt = tp1->whoTo; 3692 #endif 3693 } 3694 3695 tp1->rec.data.doing_fast_retransmit = 1; 3696 tot_retrans++; 3697 /* mark the sending seq for possible subsequent FR's */ 3698 /* 3699 * SCTP_PRINTF("Marking TSN for FR new value %x\n", 3700 * (uint32_t)tp1->rec.data.tsn); 3701 */ 3702 if (TAILQ_EMPTY(&asoc->send_queue)) { 3703 /* 3704 * If the send queue is empty then it's 3705 * the next sequence number that will be 3706 * assigned so we subtract one from this to 3707 * get the one we last sent. 3708 */ 3709 tp1->rec.data.fast_retran_tsn = sending_seq; 3710 } else { 3711 /* 3712 * If there are chunks on the send queue 3713 * (unsent data that has made it from the 3714 * stream queues but not out the door), we 3715 * take the first one (which will have the 3716 * lowest TSN) and subtract one to get the 3717 * one we last sent. 3718 */ 3719 struct sctp_tmit_chunk *ttt; 3720 3721 ttt = TAILQ_FIRST(&asoc->send_queue); 3722 tp1->rec.data.fast_retran_tsn = 3723 ttt->rec.data.tsn; 3724 } 3725 3726 if (tp1->do_rtt) { 3727 /* 3728 * this guy had a RTO calculation pending on 3729 * it, cancel it 3730 */ 3731 if ((tp1->whoTo != NULL) && 3732 (tp1->whoTo->rto_needed == 0)) { 3733 tp1->whoTo->rto_needed = 1; 3734 } 3735 tp1->do_rtt = 0; 3736 } 3737 if (alt != tp1->whoTo) { 3738 /* yes, there is an alternate. */ 3739 sctp_free_remote_addr(tp1->whoTo); 3740 /* sa_ignore FREED_MEMORY */ 3741 tp1->whoTo = alt; 3742 atomic_add_int(&alt->ref_count, 1); 3743 } 3744 } 3745 } 3746 } 3747 3748 struct sctp_tmit_chunk * 3749 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb, 3750 struct sctp_association *asoc) 3751 { 3752 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL; 3753 struct timeval now; 3754 int now_filled = 0; 3755 3756 if (asoc->prsctp_supported == 0) { 3757 return (NULL); 3758 } 3759 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 3760 if (tp1->sent != SCTP_FORWARD_TSN_SKIP && 3761 tp1->sent != SCTP_DATAGRAM_RESEND && 3762 tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 3763 /* no chance to advance, out of here */ 3764 break; 3765 } 3766 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 3767 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || 3768 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { 3769 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 3770 asoc->advanced_peer_ack_point, 3771 tp1->rec.data.tsn, 0, 0); 3772 } 3773 } 3774 if (!PR_SCTP_ENABLED(tp1->flags)) { 3775 /* 3776 * We can't fwd-tsn past any that are reliable aka 3777 * retransmitted until the asoc fails. 3778 */ 3779 break; 3780 } 3781 if (!now_filled) { 3782 (void)SCTP_GETTIME_TIMEVAL(&now); 3783 now_filled = 1; 3784 } 3785 /* 3786 * now we have a chunk which is marked for another 3787 * retransmission to a PR-stream but has maybe run out its 3788 * chances already OR has been marked to skip now. Can we 3789 * skip it if it's a resend? 3790 */ 3791 if (tp1->sent == SCTP_DATAGRAM_RESEND && 3792 (PR_SCTP_TTL_ENABLED(tp1->flags))) { 3793 /* 3794 * Now is this one marked for resend and its time is 3795 * now up? 3796 */ 3797 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3798 /* Yes so drop it */ 3799 if (tp1->data) { 3800 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 3801 1, SCTP_SO_NOT_LOCKED); 3802 } 3803 } else { 3804 /* 3805 * No, we are done when we hit one marked 3806 * for resend whose time has not expired.
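* (Assumed invariant: the sent_queue is kept in TSN order, so the first
* unexpired RESEND chunk bounds how far the ack point can advance.)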
3807 */ 3808 break; 3809 } 3810 } 3811 /* 3812 * Ok now if this chunk is marked to drop it we can clean up 3813 * the chunk, advance our peer ack point and we can check 3814 * the next chunk. 3815 */ 3816 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || 3817 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { 3818 /* advance PeerAckPoint goes forward */ 3819 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) { 3820 asoc->advanced_peer_ack_point = tp1->rec.data.tsn; 3821 a_adv = tp1; 3822 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) { 3823 /* No update but we do save the chk */ 3824 a_adv = tp1; 3825 } 3826 } else { 3827 /* 3828 * If it is still in RESEND we can advance no 3829 * further 3830 */ 3831 break; 3832 } 3833 } 3834 return (a_adv); 3835 } 3836 3837 static int 3838 sctp_fs_audit(struct sctp_association *asoc) 3839 { 3840 struct sctp_tmit_chunk *chk; 3841 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0; 3842 int ret; 3843 #ifndef INVARIANTS 3844 int entry_flight, entry_cnt; 3845 #endif 3846 3847 ret = 0; 3848 #ifndef INVARIANTS 3849 entry_flight = asoc->total_flight; 3850 entry_cnt = asoc->total_flight_count; 3851 #endif 3852 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt) 3853 return (0); 3854 3855 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 3856 if (chk->sent < SCTP_DATAGRAM_RESEND) { 3857 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n", 3858 chk->rec.data.tsn, 3859 chk->send_size, 3860 chk->snd_count); 3861 inflight++; 3862 } else if (chk->sent == SCTP_DATAGRAM_RESEND) { 3863 resend++; 3864 } else if (chk->sent < SCTP_DATAGRAM_ACKED) { 3865 inbetween++; 3866 } else if (chk->sent > SCTP_DATAGRAM_ACKED) { 3867 above++; 3868 } else { 3869 acked++; 3870 } 3871 } 3872 3873 if ((inflight > 0) || (inbetween > 0)) { 3874 #ifdef INVARIANTS 3875 panic("Flight size-express incorrect? \n"); 3876 #else 3877 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n", 3878 entry_flight, entry_cnt); 3879 3880 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n", 3881 inflight, inbetween, resend, above, acked); 3882 ret = 1; 3883 #endif 3884 } 3885 return (ret); 3886 } 3887 3888 3889 static void 3890 sctp_window_probe_recovery(struct sctp_tcb *stcb, 3891 struct sctp_association *asoc, 3892 struct sctp_tmit_chunk *tp1) 3893 { 3894 tp1->window_probe = 0; 3895 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) { 3896 /* TSN's skipped we do NOT move back. */ 3897 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD, 3898 tp1->whoTo ? 
tp1->whoTo->flight_size : 0, 3899 tp1->book_size, 3900 (uint32_t)(uintptr_t)tp1->whoTo, 3901 tp1->rec.data.tsn); 3902 return; 3903 } 3904 /* First setup this by shrinking flight */ 3905 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3906 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3907 tp1); 3908 } 3909 sctp_flight_size_decrease(tp1); 3910 sctp_total_flight_decrease(stcb, tp1); 3911 /* Now mark for resend */ 3912 tp1->sent = SCTP_DATAGRAM_RESEND; 3913 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 3914 3915 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3916 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP, 3917 tp1->whoTo->flight_size, 3918 tp1->book_size, 3919 (uint32_t)(uintptr_t)tp1->whoTo, 3920 tp1->rec.data.tsn); 3921 } 3922 } 3923 3924 void 3925 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, 3926 uint32_t rwnd, int *abort_now, int ecne_seen) 3927 { 3928 struct sctp_nets *net; 3929 struct sctp_association *asoc; 3930 struct sctp_tmit_chunk *tp1, *tp2; 3931 uint32_t old_rwnd; 3932 int win_probe_recovery = 0; 3933 int win_probe_recovered = 0; 3934 int j, done_once = 0; 3935 int rto_ok = 1; 3936 uint32_t send_s; 3937 3938 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 3939 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack, 3940 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 3941 } 3942 SCTP_TCB_LOCK_ASSERT(stcb); 3943 #ifdef SCTP_ASOCLOG_OF_TSNS 3944 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack; 3945 stcb->asoc.cumack_log_at++; 3946 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 3947 stcb->asoc.cumack_log_at = 0; 3948 } 3949 #endif 3950 asoc = &stcb->asoc; 3951 old_rwnd = asoc->peers_rwnd; 3952 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) { 3953 /* old ack */ 3954 return; 3955 } else if (asoc->last_acked_seq == cumack) { 3956 /* Window update sack */ 3957 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 3958 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 3959 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 3960 /* SWS sender side engages */ 3961 asoc->peers_rwnd = 0; 3962 } 3963 if (asoc->peers_rwnd > old_rwnd) { 3964 goto again; 3965 } 3966 return; 3967 } 3968 /* First setup for CC stuff */ 3969 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3970 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) { 3971 /* Drag along the window_tsn for cwr's */ 3972 net->cwr_window_tsn = cumack; 3973 } 3974 net->prev_cwnd = net->cwnd; 3975 net->net_ack = 0; 3976 net->net_ack2 = 0; 3977 3978 /* 3979 * CMT: Reset CUC and Fast recovery algo variables before 3980 * SACK processing 3981 */ 3982 net->new_pseudo_cumack = 0; 3983 net->will_exit_fast_recovery = 0; 3984 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 3985 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 3986 } 3987 } 3988 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 3989 tp1 = TAILQ_LAST(&asoc->sent_queue, 3990 sctpchunk_listhead); 3991 send_s = tp1->rec.data.tsn + 1; 3992 } else { 3993 send_s = asoc->sending_seq; 3994 } 3995 if (SCTP_TSN_GE(cumack, send_s)) { 3996 struct mbuf *op_err; 3997 char msg[SCTP_DIAG_INFO_LEN]; 3998 3999 *abort_now = 1; 4000 /* XXX */ 4001 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x", 4002 cumack, send_s); 4003 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 4004 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21; 4005 
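/* The cum-ack covers TSNs we never sent; treat it as a protocol violation and abort. */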
sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4006 return; 4007 } 4008 asoc->this_sack_highest_gap = cumack; 4009 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4010 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4011 stcb->asoc.overall_error_count, 4012 0, 4013 SCTP_FROM_SCTP_INDATA, 4014 __LINE__); 4015 } 4016 stcb->asoc.overall_error_count = 0; 4017 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) { 4018 /* process the new consecutive TSN first */ 4019 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 4020 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) { 4021 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 4022 SCTP_PRINTF("Warning, an unsent is now acked?\n"); 4023 } 4024 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4025 /* 4026 * If it is less than ACKED, it is 4027 * now no longer in flight. Higher 4028 * values may occur during marking 4029 */ 4030 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4031 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4032 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4033 tp1->whoTo->flight_size, 4034 tp1->book_size, 4035 (uint32_t)(uintptr_t)tp1->whoTo, 4036 tp1->rec.data.tsn); 4037 } 4038 sctp_flight_size_decrease(tp1); 4039 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 4040 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 4041 tp1); 4042 } 4043 /* sa_ignore NO_NULL_CHK */ 4044 sctp_total_flight_decrease(stcb, tp1); 4045 } 4046 tp1->whoTo->net_ack += tp1->send_size; 4047 if (tp1->snd_count < 2) { 4048 /* 4049 * True non-retransmitted 4050 * chunk 4051 */ 4052 tp1->whoTo->net_ack2 += 4053 tp1->send_size; 4054 4055 /* update RTO too? */ 4056 if (tp1->do_rtt) { 4057 if (rto_ok) { 4058 tp1->whoTo->RTO = 4059 /* 4060 * sa_ignore 4061 * NO_NULL_CHK 4062 */ 4063 sctp_calculate_rto(stcb, 4064 asoc, tp1->whoTo, 4065 &tp1->sent_rcv_time, 4066 SCTP_RTT_FROM_DATA); 4067 rto_ok = 0; 4068 } 4069 if (tp1->whoTo->rto_needed == 0) { 4070 tp1->whoTo->rto_needed = 1; 4071 } 4072 tp1->do_rtt = 0; 4073 } 4074 } 4075 /* 4076 * CMT: CUCv2 algorithm. From the 4077 * cumack'd TSNs, for each TSN being 4078 * acked for the first time, set the 4079 * following variables for the 4080 * corresp destination. 4081 * new_pseudo_cumack will trigger a 4082 * cwnd update. 4083 * find_(rtx_)pseudo_cumack will 4084 * trigger search for the next 4085 * expected (rtx-)pseudo-cumack.
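* (Illustrative consequence: per-path pseudo-cumacks let each path's
* cwnd grow on acks for its own data even while the association-wide
* cumack is held back by losses on another path.)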
4086 */ 4087 tp1->whoTo->new_pseudo_cumack = 1; 4088 tp1->whoTo->find_pseudo_cumack = 1; 4089 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4090 4091 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 4092 /* sa_ignore NO_NULL_CHK */ 4093 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); 4094 } 4095 } 4096 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4097 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4098 } 4099 if (tp1->rec.data.chunk_was_revoked) { 4100 /* deflate the cwnd */ 4101 tp1->whoTo->cwnd -= tp1->book_size; 4102 tp1->rec.data.chunk_was_revoked = 0; 4103 } 4104 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4105 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) { 4106 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--; 4107 #ifdef INVARIANTS 4108 } else { 4109 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); 4110 #endif 4111 } 4112 } 4113 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) && 4114 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && 4115 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) { 4116 asoc->trigger_reset = 1; 4117 } 4118 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4119 if (tp1->data) { 4120 /* sa_ignore NO_NULL_CHK */ 4121 sctp_free_bufspace(stcb, asoc, tp1, 1); 4122 sctp_m_freem(tp1->data); 4123 tp1->data = NULL; 4124 } 4125 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4126 sctp_log_sack(asoc->last_acked_seq, 4127 cumack, 4128 tp1->rec.data.tsn, 4129 0, 4130 0, 4131 SCTP_LOG_FREE_SENT); 4132 } 4133 asoc->sent_queue_cnt--; 4134 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 4135 } else { 4136 break; 4137 } 4138 } 4139 4140 } 4141 /* sa_ignore NO_NULL_CHK */ 4142 if (stcb->sctp_socket) { 4143 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4144 struct socket *so; 4145 4146 #endif 4147 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4148 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4149 /* sa_ignore NO_NULL_CHK */ 4150 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK); 4151 } 4152 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4153 so = SCTP_INP_SO(stcb->sctp_ep); 4154 atomic_add_int(&stcb->asoc.refcnt, 1); 4155 SCTP_TCB_UNLOCK(stcb); 4156 SCTP_SOCKET_LOCK(so, 1); 4157 SCTP_TCB_LOCK(stcb); 4158 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4159 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4160 /* assoc was freed while we were unlocked */ 4161 SCTP_SOCKET_UNLOCK(so, 1); 4162 return; 4163 } 4164 #endif 4165 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4166 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4167 SCTP_SOCKET_UNLOCK(so, 1); 4168 #endif 4169 } else { 4170 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4171 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK); 4172 } 4173 } 4174 4175 /* JRS - Use the congestion control given in the CC module */ 4176 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) { 4177 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4178 if (net->net_ack2 > 0) { 4179 /* 4180 * Karn's rule applies to clearing error 4181 * count, this is optional. 
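* (net_ack2 only counts bytes acked for never-retransmitted chunks, so
* a positive value is unambiguous evidence that the path delivered
* data; cf. Karn's algorithm for RTT sampling.)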
4182 */ 4183 net->error_count = 0; 4184 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { 4185 /* addr came good */ 4186 net->dest_state |= SCTP_ADDR_REACHABLE; 4187 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 4188 0, (void *)net, SCTP_SO_NOT_LOCKED); 4189 } 4190 if (net == stcb->asoc.primary_destination) { 4191 if (stcb->asoc.alternate) { 4192 /* 4193 * release the alternate, 4194 * primary is good 4195 */ 4196 sctp_free_remote_addr(stcb->asoc.alternate); 4197 stcb->asoc.alternate = NULL; 4198 } 4199 } 4200 if (net->dest_state & SCTP_ADDR_PF) { 4201 net->dest_state &= ~SCTP_ADDR_PF; 4202 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, 4203 stcb->sctp_ep, stcb, net, 4204 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22); 4205 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 4206 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 4207 /* Done with this net */ 4208 net->net_ack = 0; 4209 } 4210 /* restore any doubled timers */ 4211 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 4212 if (net->RTO < stcb->asoc.minrto) { 4213 net->RTO = stcb->asoc.minrto; 4214 } 4215 if (net->RTO > stcb->asoc.maxrto) { 4216 net->RTO = stcb->asoc.maxrto; 4217 } 4218 } 4219 } 4220 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0); 4221 } 4222 asoc->last_acked_seq = cumack; 4223 4224 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4225 /* nothing left in-flight */ 4226 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4227 net->flight_size = 0; 4228 net->partial_bytes_acked = 0; 4229 } 4230 asoc->total_flight = 0; 4231 asoc->total_flight_count = 0; 4232 } 4233 /* RWND update */ 4234 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 4235 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 4236 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4237 /* SWS sender side engages */ 4238 asoc->peers_rwnd = 0; 4239 } 4240 if (asoc->peers_rwnd > old_rwnd) { 4241 win_probe_recovery = 1; 4242 } 4243 /* Now assure a timer where data is queued at */ 4244 again: 4245 j = 0; 4246 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4247 if (win_probe_recovery && (net->window_probe)) { 4248 win_probe_recovered = 1; 4249 /* 4250 * Find first chunk that was used with window probe 4251 * and clear the sent 4252 */ 4253 /* sa_ignore FREED_MEMORY */ 4254 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4255 if (tp1->window_probe) { 4256 /* move back to data send queue */ 4257 sctp_window_probe_recovery(stcb, asoc, tp1); 4258 break; 4259 } 4260 } 4261 } 4262 if (net->flight_size) { 4263 j++; 4264 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net); 4265 if (net->window_probe) { 4266 net->window_probe = 0; 4267 } 4268 } else { 4269 if (net->window_probe) { 4270 /* 4271 * In window probes we must assure a timer 4272 * is still running there 4273 */ 4274 net->window_probe = 0; 4275 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4276 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net); 4277 } 4278 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4279 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4280 stcb, net, 4281 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23); 4282 } 4283 } 4284 } 4285 if ((j == 0) && 4286 (!TAILQ_EMPTY(&asoc->sent_queue)) && 4287 (asoc->sent_queue_retran_cnt == 0) && 4288 (win_probe_recovered == 0) && 4289 (done_once == 0)) { 4290 /* 4291 * huh, this should not happen unless all packets are 4292 * PR-SCTP and marked to skip of course. 
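* (If the audit below does trip, the flight-size bookkeeping is rebuilt
* from scratch by walking the sent_queue.)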
4293 */ 4294 if (sctp_fs_audit(asoc)) { 4295 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4296 net->flight_size = 0; 4297 } 4298 asoc->total_flight = 0; 4299 asoc->total_flight_count = 0; 4300 asoc->sent_queue_retran_cnt = 0; 4301 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4302 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4303 sctp_flight_size_increase(tp1); 4304 sctp_total_flight_increase(stcb, tp1); 4305 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4306 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 4307 } 4308 } 4309 } 4310 done_once = 1; 4311 goto again; 4312 } 4313 /**********************************/ 4314 /* Now what about shutdown issues */ 4315 /**********************************/ 4316 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4317 /* nothing left on sendqueue.. consider done */ 4318 /* clean up */ 4319 if ((asoc->stream_queue_cnt == 1) && 4320 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4321 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 4322 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) { 4323 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4324 } 4325 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4326 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 4327 (asoc->stream_queue_cnt == 1) && 4328 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 4329 struct mbuf *op_err; 4330 4331 *abort_now = 1; 4332 /* XXX */ 4333 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 4334 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; 4335 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4336 return; 4337 } 4338 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4339 (asoc->stream_queue_cnt == 0)) { 4340 struct sctp_nets *netp; 4341 4342 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 4343 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4344 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4345 } 4346 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 4347 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4348 sctp_stop_timers_for_shutdown(stcb); 4349 if (asoc->alternate) { 4350 netp = asoc->alternate; 4351 } else { 4352 netp = asoc->primary_destination; 4353 } 4354 sctp_send_shutdown(stcb, netp); 4355 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4356 stcb->sctp_ep, stcb, netp); 4357 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4358 stcb->sctp_ep, stcb, netp); 4359 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4360 (asoc->stream_queue_cnt == 0)) { 4361 struct sctp_nets *netp; 4362 4363 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4364 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); 4365 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4366 sctp_stop_timers_for_shutdown(stcb); 4367 if (asoc->alternate) { 4368 netp = asoc->alternate; 4369 } else { 4370 netp = asoc->primary_destination; 4371 } 4372 sctp_send_shutdown_ack(stcb, netp); 4373 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4374 stcb->sctp_ep, stcb, netp); 4375 } 4376 } 4377 /*********************************************/ 4378 /* Here we perform PR-SCTP procedures */ 4379 /* (section 4.2) */ 4380 /*********************************************/ 4381 /* C1. 
update advancedPeerAckPoint */ 4382 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) { 4383 asoc->advanced_peer_ack_point = cumack; 4384 } 4385 /* PR-SCTP issues need to be addressed too */ 4386 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { 4387 struct sctp_tmit_chunk *lchk; 4388 uint32_t old_adv_peer_ack_point; 4389 4390 old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 4391 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 4392 /* C3. See if we need to send a Fwd-TSN */ 4393 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) { 4394 /* 4395 * ISSUE with ECN, see FWD-TSN processing. 4396 */ 4397 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 4398 send_forward_tsn(stcb, asoc); 4399 } else if (lchk) { 4400 /* try to FR fwd-tsn's that get lost too */ 4401 if (lchk->rec.data.fwd_tsn_cnt >= 3) { 4402 send_forward_tsn(stcb, asoc); 4403 } 4404 } 4405 } 4406 if (lchk) { 4407 /* Assure a timer is up */ 4408 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4409 stcb->sctp_ep, stcb, lchk->whoTo); 4410 } 4411 } 4412 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 4413 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 4414 rwnd, 4415 stcb->asoc.peers_rwnd, 4416 stcb->asoc.total_flight, 4417 stcb->asoc.total_output_queue_size); 4418 } 4419 } 4420 4421 void 4422 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, 4423 struct sctp_tcb *stcb, 4424 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup, 4425 int *abort_now, uint8_t flags, 4426 uint32_t cum_ack, uint32_t rwnd, int ecne_seen) 4427 { 4428 struct sctp_association *asoc; 4429 struct sctp_tmit_chunk *tp1, *tp2; 4430 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack; 4431 uint16_t wake_him = 0; 4432 uint32_t send_s = 0; 4433 long j; 4434 int accum_moved = 0; 4435 int will_exit_fast_recovery = 0; 4436 uint32_t a_rwnd, old_rwnd; 4437 int win_probe_recovery = 0; 4438 int win_probe_recovered = 0; 4439 struct sctp_nets *net = NULL; 4440 int done_once; 4441 int rto_ok = 1; 4442 uint8_t reneged_all = 0; 4443 uint8_t cmt_dac_flag; 4444 4445 /* 4446 * we take any chance we can to service our queues since we cannot 4447 * get awoken when the socket is read from :< 4448 */ 4449 /* 4450 * Now perform the actual SACK handling: 1) Verify that it is not an 4451 * old sack, if so discard. 2) If there is nothing left in the send 4452 * queue (cum-ack is equal to last acked) then you have a duplicate 4453 * too; update any rwnd change, verify no timers are running, and 4454 * then return. 3) Process any new consecutive data, i.e. the cum-ack 4455 * moved; process these first and note that it moved. 4) Process any 4456 * sack blocks. 5) Drop any acked from the queue. 6) Check for any 4457 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left, 4458 * sync up flightsizes and things, stop all timers and also check 4459 * for shutdown_pending state. If so then go ahead and send off the 4460 * shutdown. If in shutdown recv, send off the shutdown-ack and 4461 * start that timer, then return. 9) Strike any non-acked things and 4462 * do the FR procedure if needed, being sure to set the FR flag. 10) 4463 * Do pr-sctp procedures. 11) Apply any FR penalties. 12) Assure we 4464 * will SACK if in shutdown_recv state.
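* (A hedged map of these steps onto the body below: 3) is the cumack
* walk over the sent_queue, 4) is sctp_handle_segments(), 9) is
* sctp_strike_gap_ack_chunks(), and 10) is the FWD-TSN block at the
* end.)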
4465 */ 4466 SCTP_TCB_LOCK_ASSERT(stcb); 4467 /* CMT DAC algo */ 4468 this_sack_lowest_newack = 0; 4469 SCTP_STAT_INCR(sctps_slowpath_sack); 4470 last_tsn = cum_ack; 4471 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC; 4472 #ifdef SCTP_ASOCLOG_OF_TSNS 4473 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack; 4474 stcb->asoc.cumack_log_at++; 4475 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 4476 stcb->asoc.cumack_log_at = 0; 4477 } 4478 #endif 4479 a_rwnd = rwnd; 4480 4481 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 4482 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack, 4483 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 4484 } 4485 old_rwnd = stcb->asoc.peers_rwnd; 4486 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4487 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4488 stcb->asoc.overall_error_count, 4489 0, 4490 SCTP_FROM_SCTP_INDATA, 4491 __LINE__); 4492 } 4493 stcb->asoc.overall_error_count = 0; 4494 asoc = &stcb->asoc; 4495 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4496 sctp_log_sack(asoc->last_acked_seq, 4497 cum_ack, 4498 0, 4499 num_seg, 4500 num_dup, 4501 SCTP_LOG_NEW_SACK); 4502 } 4503 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) { 4504 uint16_t i; 4505 uint32_t *dupdata, dblock; 4506 4507 for (i = 0; i < num_dup; i++) { 4508 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t), 4509 sizeof(uint32_t), (uint8_t *)&dblock); 4510 if (dupdata == NULL) { 4511 break; 4512 } 4513 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); 4514 } 4515 } 4516 /* reality check */ 4517 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 4518 tp1 = TAILQ_LAST(&asoc->sent_queue, 4519 sctpchunk_listhead); 4520 send_s = tp1->rec.data.tsn + 1; 4521 } else { 4522 tp1 = NULL; 4523 send_s = asoc->sending_seq; 4524 } 4525 if (SCTP_TSN_GE(cum_ack, send_s)) { 4526 struct mbuf *op_err; 4527 char msg[SCTP_DIAG_INFO_LEN]; 4528 4529 /* 4530 * no way, we have not even sent this TSN out yet. Peer is 4531 * hopelessly messed up with us. 
4532 */ 4533 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n", 4534 cum_ack, send_s); 4535 if (tp1) { 4536 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n", 4537 tp1->rec.data.tsn, (void *)tp1); 4538 } 4539 hopeless_peer: 4540 *abort_now = 1; 4541 /* XXX */ 4542 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x", 4543 cum_ack, send_s); 4544 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 4545 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 4546 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4547 return; 4548 } 4549 /**********************/ 4550 /* 1) check the range */ 4551 /**********************/ 4552 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) { 4553 /* acking something behind */ 4554 return; 4555 } 4556 /* update the Rwnd of the peer */ 4557 if (TAILQ_EMPTY(&asoc->sent_queue) && 4558 TAILQ_EMPTY(&asoc->send_queue) && 4559 (asoc->stream_queue_cnt == 0)) { 4560 /* nothing left on send/sent and strmq */ 4561 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4562 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4563 asoc->peers_rwnd, 0, 0, a_rwnd); 4564 } 4565 asoc->peers_rwnd = a_rwnd; 4566 if (asoc->sent_queue_retran_cnt) { 4567 asoc->sent_queue_retran_cnt = 0; 4568 } 4569 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4570 /* SWS sender side engages */ 4571 asoc->peers_rwnd = 0; 4572 } 4573 /* stop any timers */ 4574 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4575 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4576 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); 4577 net->partial_bytes_acked = 0; 4578 net->flight_size = 0; 4579 } 4580 asoc->total_flight = 0; 4581 asoc->total_flight_count = 0; 4582 return; 4583 } 4584 /* 4585 * We init net_ack and net_ack2 to 0. These are used to track 2 4586 * things. The total byte count acked is tracked in net_ack AND 4587 * net_ack2 is used to track the total bytes acked that are 4588 * unambiguous and were never retransmitted. We track these on a per 4589 * destination address basis. 4590 */ 4591 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4592 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) { 4593 /* Drag along the window_tsn for cwr's */ 4594 net->cwr_window_tsn = cum_ack; 4595 } 4596 net->prev_cwnd = net->cwnd; 4597 net->net_ack = 0; 4598 net->net_ack2 = 0; 4599 4600 /* 4601 * CMT: Reset CUC and Fast recovery algo variables before 4602 * SACK processing 4603 */ 4604 net->new_pseudo_cumack = 0; 4605 net->will_exit_fast_recovery = 0; 4606 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 4607 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 4608 } 4609 /* 4610 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has 4611 * to be greater than the cumack. Also reset saw_newack to 0 4612 * for all dests. 4613 */ 4614 net->saw_newack = 0; 4615 net->this_sack_highest_newack = last_tsn; 4616 } 4617 /* process the new consecutive TSN first */ 4618 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4619 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) { 4620 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 4621 accum_moved = 1; 4622 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4623 /* 4624 * If it is less than ACKED, it is 4625 * now no longer in flight.
Higher 4626 * values may occur during marking 4627 */ 4628 if ((tp1->whoTo->dest_state & 4629 SCTP_ADDR_UNCONFIRMED) && 4630 (tp1->snd_count < 2)) { 4631 /* 4632 * If there was no retran 4633 * and the address is 4634 * un-confirmed and we sent 4635 * there and are now 4636 * sacked... it's confirmed, 4637 * mark it so. 4638 */ 4639 tp1->whoTo->dest_state &= 4640 ~SCTP_ADDR_UNCONFIRMED; 4641 } 4642 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4643 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4644 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4645 tp1->whoTo->flight_size, 4646 tp1->book_size, 4647 (uint32_t)(uintptr_t)tp1->whoTo, 4648 tp1->rec.data.tsn); 4649 } 4650 sctp_flight_size_decrease(tp1); 4651 sctp_total_flight_decrease(stcb, tp1); 4652 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 4653 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 4654 tp1); 4655 } 4656 } 4657 tp1->whoTo->net_ack += tp1->send_size; 4658 4659 /* CMT SFR and DAC algos */ 4660 this_sack_lowest_newack = tp1->rec.data.tsn; 4661 tp1->whoTo->saw_newack = 1; 4662 4663 if (tp1->snd_count < 2) { 4664 /* 4665 * True non-retransmitted 4666 * chunk 4667 */ 4668 tp1->whoTo->net_ack2 += 4669 tp1->send_size; 4670 4671 /* update RTO too? */ 4672 if (tp1->do_rtt) { 4673 if (rto_ok) { 4674 tp1->whoTo->RTO = 4675 sctp_calculate_rto(stcb, 4676 asoc, tp1->whoTo, 4677 &tp1->sent_rcv_time, 4678 SCTP_RTT_FROM_DATA); 4679 rto_ok = 0; 4680 } 4681 if (tp1->whoTo->rto_needed == 0) { 4682 tp1->whoTo->rto_needed = 1; 4683 } 4684 tp1->do_rtt = 0; 4685 } 4686 } 4687 /* 4688 * CMT: CUCv2 algorithm. From the 4689 * cumack'd TSNs, for each TSN being 4690 * acked for the first time, set the 4691 * following variables for the 4692 * corresp destination. 4693 * new_pseudo_cumack will trigger a 4694 * cwnd update. 4695 * find_(rtx_)pseudo_cumack will 4696 * trigger search for the next 4697 * expected (rtx-)pseudo-cumack. 4698 */ 4699 tp1->whoTo->new_pseudo_cumack = 1; 4700 tp1->whoTo->find_pseudo_cumack = 1; 4701 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4702 4703 4704 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4705 sctp_log_sack(asoc->last_acked_seq, 4706 cum_ack, 4707 tp1->rec.data.tsn, 4708 0, 4709 0, 4710 SCTP_LOG_TSN_ACKED); 4711 } 4712 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 4713 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); 4714 } 4715 } 4716 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4717 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4718 #ifdef SCTP_AUDITING_ENABLED 4719 sctp_audit_log(0xB3, 4720 (asoc->sent_queue_retran_cnt & 0x000000ff)); 4721 #endif 4722 } 4723 if (tp1->rec.data.chunk_was_revoked) { 4724 /* deflate the cwnd */ 4725 tp1->whoTo->cwnd -= tp1->book_size; 4726 tp1->rec.data.chunk_was_revoked = 0; 4727 } 4728 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4729 tp1->sent = SCTP_DATAGRAM_ACKED; 4730 } 4731 } 4732 } else { 4733 break; 4734 } 4735 } 4736 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; 4737 /* always set this up to cum-ack */ 4738 asoc->this_sack_highest_gap = last_tsn; 4739 4740 if ((num_seg > 0) || (num_nr_seg > 0)) { 4741 4742 /* 4743 * this_sack_highest_gap will increase while handling NEW 4744 * segments; this_sack_highest_newack will increase while 4745 * handling NEWLY ACKED chunks. this_sack_lowest_newack is 4746 * used for the CMT DAC algo. saw_newack will also change.
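* (These per-net markers are consumed later by
* sctp_strike_gap_ack_chunks() when it decides which chunks to strike.)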
4747 */ 4748 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked, 4749 &biggest_tsn_newly_acked, &this_sack_lowest_newack, 4750 num_seg, num_nr_seg, &rto_ok)) { 4751 wake_him++; 4752 } 4753 /* 4754 * validate the biggest_tsn_acked in the gap acks if strict 4755 * adherence is wanted. 4756 */ 4757 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) { 4758 /* 4759 * peer is either confused or we are under attack. 4760 * We must abort. 4761 */ 4762 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n", 4763 biggest_tsn_acked, send_s); 4764 goto hopeless_peer; 4765 } 4766 } 4767 /*******************************************/ 4768 /* cancel ALL T3-send timer if accum moved */ 4769 /*******************************************/ 4770 if (asoc->sctp_cmt_on_off > 0) { 4771 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4772 if (net->new_pseudo_cumack) 4773 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4774 stcb, net, 4775 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27); 4776 4777 } 4778 } else { 4779 if (accum_moved) { 4780 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4781 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4782 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28); 4783 } 4784 } 4785 } 4786 /********************************************/ 4787 /* drop the acked chunks from the sentqueue */ 4788 /********************************************/ 4789 asoc->last_acked_seq = cum_ack; 4790 4791 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 4792 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) { 4793 break; 4794 } 4795 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4796 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) { 4797 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--; 4798 #ifdef INVARIANTS 4799 } else { 4800 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); 4801 #endif 4802 } 4803 } 4804 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) && 4805 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && 4806 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) { 4807 asoc->trigger_reset = 1; 4808 } 4809 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4810 if (PR_SCTP_ENABLED(tp1->flags)) { 4811 if (asoc->pr_sctp_cnt != 0) 4812 asoc->pr_sctp_cnt--; 4813 } 4814 asoc->sent_queue_cnt--; 4815 if (tp1->data) { 4816 /* sa_ignore NO_NULL_CHK */ 4817 sctp_free_bufspace(stcb, asoc, tp1, 1); 4818 sctp_m_freem(tp1->data); 4819 tp1->data = NULL; 4820 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) { 4821 asoc->sent_queue_cnt_removeable--; 4822 } 4823 } 4824 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4825 sctp_log_sack(asoc->last_acked_seq, 4826 cum_ack, 4827 tp1->rec.data.tsn, 4828 0, 4829 0, 4830 SCTP_LOG_FREE_SENT); 4831 } 4832 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 4833 wake_him++; 4834 } 4835 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) { 4836 #ifdef INVARIANTS 4837 panic("Warning flight size is positive and should be 0"); 4838 #else 4839 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n", 4840 asoc->total_flight); 4841 #endif 4842 asoc->total_flight = 0; 4843 } 4844 /* sa_ignore NO_NULL_CHK */ 4845 if ((wake_him) && (stcb->sctp_socket)) { 4846 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4847 struct socket *so; 4848 4849 #endif 4850 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4851 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4852 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK); 
4853 } 4854 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4855 so = SCTP_INP_SO(stcb->sctp_ep); 4856 atomic_add_int(&stcb->asoc.refcnt, 1); 4857 SCTP_TCB_UNLOCK(stcb); 4858 SCTP_SOCKET_LOCK(so, 1); 4859 SCTP_TCB_LOCK(stcb); 4860 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4861 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4862 /* assoc was freed while we were unlocked */ 4863 SCTP_SOCKET_UNLOCK(so, 1); 4864 return; 4865 } 4866 #endif 4867 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4868 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4869 SCTP_SOCKET_UNLOCK(so, 1); 4870 #endif 4871 } else { 4872 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4873 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK); 4874 } 4875 } 4876 4877 if (asoc->fast_retran_loss_recovery && accum_moved) { 4878 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) { 4879 /* Setup so we will exit RFC2582 fast recovery */ 4880 will_exit_fast_recovery = 1; 4881 } 4882 } 4883 /* 4884 * Check for revoked fragments: 4885 * 4886 * If the previous SACK had no frags, then nothing can have been 4887 * revoked. If the previous SACK had frags, then: if we now have 4888 * frags (num_seg > 0), call sctp_check_for_revoked() to tell whether 4889 * the peer revoked some of them; else the peer revoked all ACKED 4890 * fragments, since we had some before and now we have NONE. 4891 */ 4892 4893 if (num_seg) { 4894 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked); 4895 asoc->saw_sack_with_frags = 1; 4896 } else if (asoc->saw_sack_with_frags) { 4897 int cnt_revoked = 0; 4898 4899 /* Peer revoked all dg's marked or acked */ 4900 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4901 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 4902 tp1->sent = SCTP_DATAGRAM_SENT; 4903 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4904 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 4905 tp1->whoTo->flight_size, 4906 tp1->book_size, 4907 (uint32_t)(uintptr_t)tp1->whoTo, 4908 tp1->rec.data.tsn); 4909 } 4910 sctp_flight_size_increase(tp1); 4911 sctp_total_flight_increase(stcb, tp1); 4912 tp1->rec.data.chunk_was_revoked = 1; 4913 /* 4914 * To ensure that this increase in 4915 * flightsize, which is artificial, does not 4916 * throttle the sender, we also increase the 4917 * cwnd artificially. 4918 */ 4919 tp1->whoTo->cwnd += tp1->book_size; 4920 cnt_revoked++; 4921 } 4922 } 4923 if (cnt_revoked) { 4924 reneged_all = 1; 4925 } 4926 asoc->saw_sack_with_frags = 0; 4927 } 4928 if (num_nr_seg > 0) 4929 asoc->saw_sack_with_nr_frags = 1; 4930 else 4931 asoc->saw_sack_with_nr_frags = 0; 4932 4933 /* JRS - Use the congestion control given in the CC module */ 4934 if (ecne_seen == 0) { 4935 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4936 if (net->net_ack2 > 0) { 4937 /* 4938 * Karn's rule applies to clearing error 4939 * count, this is optional.
4940 */ 4941 net->error_count = 0; 4942 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { 4943 /* addr came good */ 4944 net->dest_state |= SCTP_ADDR_REACHABLE; 4945 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 4946 0, (void *)net, SCTP_SO_NOT_LOCKED); 4947 } 4948 if (net == stcb->asoc.primary_destination) { 4949 if (stcb->asoc.alternate) { 4950 /* 4951 * release the alternate, 4952 * primary is good 4953 */ 4954 sctp_free_remote_addr(stcb->asoc.alternate); 4955 stcb->asoc.alternate = NULL; 4956 } 4957 } 4958 if (net->dest_state & SCTP_ADDR_PF) { 4959 net->dest_state &= ~SCTP_ADDR_PF; 4960 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, 4961 stcb->sctp_ep, stcb, net, 4962 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29); 4963 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 4964 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 4965 /* Done with this net */ 4966 net->net_ack = 0; 4967 } 4968 /* restore any doubled timers */ 4969 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 4970 if (net->RTO < stcb->asoc.minrto) { 4971 net->RTO = stcb->asoc.minrto; 4972 } 4973 if (net->RTO > stcb->asoc.maxrto) { 4974 net->RTO = stcb->asoc.maxrto; 4975 } 4976 } 4977 } 4978 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery); 4979 } 4980 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4981 /* nothing left in-flight */ 4982 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4983 /* stop all timers */ 4984 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4985 stcb, net, 4986 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30); 4987 net->flight_size = 0; 4988 net->partial_bytes_acked = 0; 4989 } 4990 asoc->total_flight = 0; 4991 asoc->total_flight_count = 0; 4992 } 4993 /**********************************/ 4994 /* Now what about shutdown issues */ 4995 /**********************************/ 4996 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4997 /* nothing left on sendqueue.. 
consider done */ 4998 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4999 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 5000 asoc->peers_rwnd, 0, 0, a_rwnd); 5001 } 5002 asoc->peers_rwnd = a_rwnd; 5003 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 5004 /* SWS sender side engages */ 5005 asoc->peers_rwnd = 0; 5006 } 5007 /* clean up */ 5008 if ((asoc->stream_queue_cnt == 1) && 5009 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 5010 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 5011 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) { 5012 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 5013 } 5014 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 5015 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 5016 (asoc->stream_queue_cnt == 1) && 5017 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 5018 struct mbuf *op_err; 5019 5020 *abort_now = 1; 5021 /* XXX */ 5022 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 5023 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; 5024 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 5025 return; 5026 } 5027 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 5028 (asoc->stream_queue_cnt == 0)) { 5029 struct sctp_nets *netp; 5030 5031 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 5032 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 5033 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 5034 } 5035 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 5036 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 5037 sctp_stop_timers_for_shutdown(stcb); 5038 if (asoc->alternate) { 5039 netp = asoc->alternate; 5040 } else { 5041 netp = asoc->primary_destination; 5042 } 5043 sctp_send_shutdown(stcb, netp); 5044 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 5045 stcb->sctp_ep, stcb, netp); 5046 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 5047 stcb->sctp_ep, stcb, netp); 5048 return; 5049 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 5050 (asoc->stream_queue_cnt == 0)) { 5051 struct sctp_nets *netp; 5052 5053 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 5054 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); 5055 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 5056 sctp_stop_timers_for_shutdown(stcb); 5057 if (asoc->alternate) { 5058 netp = asoc->alternate; 5059 } else { 5060 netp = asoc->primary_destination; 5061 } 5062 sctp_send_shutdown_ack(stcb, netp); 5063 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 5064 stcb->sctp_ep, stcb, netp); 5065 return; 5066 } 5067 } 5068 /* 5069 * Now here we are going to recycle net_ack for a different use... 5070 * HEADS UP. 5071 */ 5072 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5073 net->net_ack = 0; 5074 } 5075 5076 /* 5077 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking 5078 * to be done. Setting this_sack_lowest_newack to the cum_ack will 5079 * automatically ensure that. 5080 */ 5081 if ((asoc->sctp_cmt_on_off > 0) && 5082 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && 5083 (cmt_dac_flag == 0)) { 5084 this_sack_lowest_newack = cum_ack; 5085 } 5086 if ((num_seg > 0) || (num_nr_seg > 0)) { 5087 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, 5088 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved); 5089 } 5090 /* JRS - Use the congestion control given in the CC module */ 5091 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc); 5092 5093 /* Now are we exiting loss recovery ? 
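(Both the RFC 2582-style association-level recovery and the per-net CMT recovery flags are checked.)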
*/ 5094 if (will_exit_fast_recovery) { 5095 /* Ok, we must exit fast recovery */ 5096 asoc->fast_retran_loss_recovery = 0; 5097 } 5098 if ((asoc->sat_t3_loss_recovery) && 5099 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) { 5100 /* end satellite t3 loss recovery */ 5101 asoc->sat_t3_loss_recovery = 0; 5102 } 5103 /* 5104 * CMT Fast recovery 5105 */ 5106 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5107 if (net->will_exit_fast_recovery) { 5108 /* Ok, we must exit fast recovery */ 5109 net->fast_retran_loss_recovery = 0; 5110 } 5111 } 5112 5113 /* Adjust and set the new rwnd value */ 5114 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 5115 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 5116 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd); 5117 } 5118 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd, 5119 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 5120 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 5121 /* SWS sender side engages */ 5122 asoc->peers_rwnd = 0; 5123 } 5124 if (asoc->peers_rwnd > old_rwnd) { 5125 win_probe_recovery = 1; 5126 } 5127 /* 5128 * Now we must setup so we have a timer up for anyone with 5129 * outstanding data. 5130 */ 5131 done_once = 0; 5132 again: 5133 j = 0; 5134 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5135 if (win_probe_recovery && (net->window_probe)) { 5136 win_probe_recovered = 1; 5137 /*- 5138 * Find first chunk that was used with 5139 * window probe and clear the event. Put 5140 * it back into the send queue as if it has 5141 * not been sent. 5142 */ 5143 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5144 if (tp1->window_probe) { 5145 sctp_window_probe_recovery(stcb, asoc, tp1); 5146 break; 5147 } 5148 } 5149 } 5150 if (net->flight_size) { 5151 j++; 5152 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5153 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5154 stcb->sctp_ep, stcb, net); 5155 } 5156 if (net->window_probe) { 5157 net->window_probe = 0; 5158 } 5159 } else { 5160 if (net->window_probe) { 5161 /* 5162 * In window probes we must assure a timer 5163 * is still running there 5164 */ 5165 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5166 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5167 stcb->sctp_ep, stcb, net); 5168 5169 } 5170 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5171 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 5172 stcb, net, 5173 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32); 5174 } 5175 } 5176 } 5177 if ((j == 0) && 5178 (!TAILQ_EMPTY(&asoc->sent_queue)) && 5179 (asoc->sent_queue_retran_cnt == 0) && 5180 (win_probe_recovered == 0) && 5181 (done_once == 0)) { 5182 /* 5183 * huh, this should not happen unless all packets are 5184 * PR-SCTP and marked to skip of course.
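* (As in the express path, a failed audit causes the flight counters to
* be recomputed from the sent_queue below.)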
5185 */ 5186 if (sctp_fs_audit(asoc)) { 5187 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5188 net->flight_size = 0; 5189 } 5190 asoc->total_flight = 0; 5191 asoc->total_flight_count = 0; 5192 asoc->sent_queue_retran_cnt = 0; 5193 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5194 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5195 sctp_flight_size_increase(tp1); 5196 sctp_total_flight_increase(stcb, tp1); 5197 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 5198 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 5199 } 5200 } 5201 } 5202 done_once = 1; 5203 goto again; 5204 } 5205 /*********************************************/ 5206 /* Here we perform PR-SCTP procedures */ 5207 /* (section 4.2) */ 5208 /*********************************************/ 5209 /* C1. update advancedPeerAckPoint */ 5210 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) { 5211 asoc->advanced_peer_ack_point = cum_ack; 5212 } 5213 /* C2. try to further move advancedPeerAckPoint ahead */ 5214 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { 5215 struct sctp_tmit_chunk *lchk; 5216 uint32_t old_adv_peer_ack_point; 5217 5218 old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 5219 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 5220 /* C3. See if we need to send a Fwd-TSN */ 5221 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) { 5222 /* 5223 * ISSUE with ECN, see FWD-TSN processing. 5224 */ 5225 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 5226 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 5227 0xee, cum_ack, asoc->advanced_peer_ack_point, 5228 old_adv_peer_ack_point); 5229 } 5230 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 5231 send_forward_tsn(stcb, asoc); 5232 } else if (lchk) { 5233 /* try to FR fwd-tsn's that get lost too */ 5234 if (lchk->rec.data.fwd_tsn_cnt >= 3) { 5235 send_forward_tsn(stcb, asoc); 5236 } 5237 } 5238 } 5239 if (lchk) { 5240 /* Assure a timer is up */ 5241 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5242 stcb->sctp_ep, stcb, lchk->whoTo); 5243 } 5244 } 5245 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 5246 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 5247 a_rwnd, 5248 stcb->asoc.peers_rwnd, 5249 stcb->asoc.total_flight, 5250 stcb->asoc.total_output_queue_size); 5251 } 5252 } 5253 5254 void 5255 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag) 5256 { 5257 /* Copy cum-ack */ 5258 uint32_t cum_ack, a_rwnd; 5259 5260 cum_ack = ntohl(cp->cumulative_tsn_ack); 5261 /* Arrange so a_rwnd does NOT change */ 5262 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight; 5263 5264 /* Now call the express sack handling */ 5265 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0); 5266 } 5267 5268 static void 5269 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, 5270 struct sctp_stream_in *strmin) 5271 { 5272 struct sctp_queued_to_read *control, *ncontrol; 5273 struct sctp_association *asoc; 5274 uint32_t mid; 5275 int need_reasm_check = 0; 5276 5277 asoc = &stcb->asoc; 5278 mid = strmin->last_mid_delivered; 5279 /* 5280 * First deliver anything prior to and including the stream no that 5281 * came in. 
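* (Here mid holds strmin->last_mid_delivered; an assumption drawn from
* the SCTP_MID_GE() check below is that everything at or before it
* became deliverable when the FWD-TSN advanced the cumulative TSN.)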
5282 */ 5283 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) { 5284 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) { 5285 /* this is deliverable now */ 5286 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 5287 if (control->on_strm_q) { 5288 if (control->on_strm_q == SCTP_ON_ORDERED) { 5289 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm); 5290 } else if (control->on_strm_q == SCTP_ON_UNORDERED) { 5291 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm); 5292 #ifdef INVARIANTS 5293 } else { 5294 panic("strmin: %p ctl: %p unknown %d", 5295 strmin, control, control->on_strm_q); 5296 #endif 5297 } 5298 control->on_strm_q = 0; 5299 } 5300 /* subtract pending on streams */ 5301 if (asoc->size_on_all_streams >= control->length) { 5302 asoc->size_on_all_streams -= control->length; 5303 } else { 5304 #ifdef INVARIANTS 5305 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 5306 #else 5307 asoc->size_on_all_streams = 0; 5308 #endif 5309 } 5310 sctp_ucount_decr(asoc->cnt_on_all_streams); 5311 /* deliver it to at least the delivery-q */ 5312 if (stcb->sctp_socket) { 5313 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 5314 sctp_add_to_readq(stcb->sctp_ep, stcb, 5315 control, 5316 &stcb->sctp_socket->so_rcv, 5317 1, SCTP_READ_LOCK_HELD, 5318 SCTP_SO_NOT_LOCKED); 5319 } 5320 } else { 5321 /* It's a fragmented message */ 5322 if (control->first_frag_seen) { 5323 /* 5324 * Make it so this is next to 5325 * deliver; we restore it later 5326 */ 5327 strmin->last_mid_delivered = control->mid - 1; 5328 need_reasm_check = 1; 5329 break; 5330 } 5331 } 5332 } else { 5333 /* no more delivery now. */ 5334 break; 5335 } 5336 } 5337 if (need_reasm_check) { 5338 int ret; 5339 5340 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD); 5341 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) { 5342 /* Restore the next to deliver unless we are ahead */ 5343 strmin->last_mid_delivered = mid; 5344 } 5345 if (ret == 0) { 5346 /* Left the front Partial one on */ 5347 return; 5348 } 5349 need_reasm_check = 0; 5350 } 5351 /* 5352 * now we must deliver things in queue the normal way if any are 5353 * now ready. 5354 */
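/*
 * In-order pass sketch (assumed invariant: the inqueue is kept sorted
 * by MID), roughly:
 *
 *   mid = last_mid_delivered + 1;
 *   while (head != NULL && head->mid == mid && head is complete)
 *       deliver(head); mid++;
 *
 * We stop at the first gap or at a message whose fragments have not
 * all arrived yet.
 */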
	/*
	 * Now we must deliver things in the queue the normal way, if any
	 * are now ready.
	 */
	mid = strmin->last_mid_delivered + 1;
	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/* this is deliverable now */
				if (control->on_strm_q) {
					if (control->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
					} else {
						panic("strmin: %p ctl: %p unknown %d",
						    strmin, control, control->on_strm_q);
#endif
					}
					control->on_strm_q = 0;
				}
				/* subtract pending on streams */
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				/* deliver it to at least the delivery-q */
				strmin->last_mid_delivered = control->mid;
				if (stcb->sctp_socket) {
					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    control,
					    &stcb->sctp_socket->so_rcv, 1,
					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
				}
				mid = strmin->last_mid_delivered + 1;
			} else {
				/* It's a fragmented message */
				if (control->first_frag_seen) {
					/*
					 * Make this the next to deliver.
					 */
					strmin->last_mid_delivered = control->mid - 1;
					need_reasm_check = 1;
					break;
				}
			}
		} else {
			break;
		}
	}
	if (need_reasm_check) {
		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
	}
}

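/*
 * Illustrative note (not part of the original code): the function above is
 * driven from sctp_handle_forward_tsn() below, once per stream listed in
 * the FORWARD-TSN chunk, roughly as:
 *
 *	strm->last_mid_delivered = mid;		(jump past abandoned MIDs)
 *	sctp_kick_prsctp_reorder_queue(stcb, strm);
 *
 * so its only job is to flush out whatever became deliverable after the
 * delivery point moved forward.
 */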

static void
sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
{
	struct sctp_queued_to_read *control;
	struct sctp_stream_in *strm;
	struct sctp_tmit_chunk *chk, *nchk;
	int cnt_removed = 0;

	/*
	 * For now, large messages held on the stream reasm queue that are
	 * complete will be tossed too. We could in theory do more work to
	 * spin through and stop after dumping one msg, aka seeing the
	 * start of a new msg at the head, and call the delivery function
	 * ... to see if it can be delivered ... But for now we just dump
	 * everything on the queue.
	 */
	strm = &asoc->strmin[stream];
	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
	if (control == NULL) {
		/* Not found */
		return;
	}
	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
		return;
	}
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		/* Purge hanging chunks */
		if (!asoc->idata_supported && (ordered == 0)) {
			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
				break;
			}
		}
		cnt_removed++;
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (asoc->size_on_reasm_queue >= chk->send_size) {
			asoc->size_on_reasm_queue -= chk->send_size;
		} else {
#ifdef INVARIANTS
			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
#else
			asoc->size_on_reasm_queue = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	if (!TAILQ_EMPTY(&control->reasm)) {
		/* This has to be old data, unordered */
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
		chk = TAILQ_FIRST(&control->reasm);
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc,
			    chk, SCTP_READ_LOCK_HELD);
		}
		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
		return;
	}
	if (control->on_strm_q == SCTP_ON_ORDERED) {
		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		control->on_strm_q = 0;
	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
		control->on_strm_q = 0;
#ifdef INVARIANTS
	} else if (control->on_strm_q) {
		panic("strm: %p ctl: %p unknown %d",
		    strm, control, control->on_strm_q);
#endif
	}
	control->on_strm_q = 0;
	if (control->on_read_q == 0) {
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_free_a_readq(stcb, control);
	}
}

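/*
 * Illustrative note (not part of the original code): the two on-wire
 * layouts parsed by sctp_handle_forward_tsn() below. Per RFC 3758 a
 * FORWARD-TSN chunk value carries a new cumulative TSN followed by
 * (stream, ssn) pairs; per RFC 8260 an I-FORWARD-TSN instead carries
 * (stream, flags, mid) triples. Offsets within the chunk value:
 *
 *	FORWARD-TSN:                     I-FORWARD-TSN:
 *	 0  New Cumulative TSN (32)       0  New Cumulative TSN (32)
 *	 4  Stream #1 (16)                4  Stream #1 (16)
 *	 6  SSN #1 (16)                   6  Flags (16, carries the U bit)
 *	 ...                              8  MID #1 (32)
 *
 * This is why num_str below divides fwd_sz by sizeof(struct sctp_strseq)
 * or sizeof(struct sctp_strseq_mid), depending on asoc->idata_supported.
 */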
void
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
    struct sctp_forward_tsn_chunk *fwd,
    int *abort_flag, struct mbuf *m, int offset)
{
	/* The pr-sctp fwd tsn */
	/*
	 * Here we will perform all the data receiver side steps for
	 * processing FwdTSN, as required by the PR-SCTP draft:
	 *
	 * Assume we get FwdTSN(x):
	 *
	 * 1) update local cumTSN to x
	 * 2) try to further advance cumTSN to x + others we have
	 * 3) examine and update re-ordering queue on pr-in-streams
	 * 4) clean up re-assembly queue
	 * 5) Send a sack to report where we are.
	 */
	struct sctp_association *asoc;
	uint32_t new_cum_tsn, gap;
	unsigned int i, fwd_sz, m_size;
	uint32_t str_seq;
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *control, *sv;

	asoc = &stcb->asoc;
	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
		SCTPDBG(SCTP_DEBUG_INDATA1,
		    "Bad size too small/big fwd-tsn\n");
		return;
	}
	m_size = (stcb->asoc.mapping_array_size << 3);
	/*************************************************************/
	/* 1. Here we update local cumTSN and shift the bitmap array */
	/*************************************************************/
	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);

	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
		/* Already got there ... */
		return;
	}
	/*
	 * Now we know the new TSN is more advanced, let's find the actual
	 * gap.
	 */
	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
	asoc->cumulative_tsn = new_cum_tsn;
	if (gap >= m_size) {
		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
			struct mbuf *op_err;
			char msg[SCTP_DIAG_INFO_LEN];

			/*
			 * Out of range (of single byte chunks in the rwnd
			 * I give out). This must be an attacker.
			 */
			*abort_flag = 1;
			snprintf(msg, sizeof(msg),
			    "New cum ack %8.8x too high, highest TSN %8.8x",
			    new_cum_tsn, asoc->highest_tsn_inside_map);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			return;
		}
		SCTP_STAT_INCR(sctps_fwdtsn_map_over);

		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
		asoc->highest_tsn_inside_map = new_cum_tsn;

		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->highest_tsn_inside_nr_map = new_cum_tsn;

		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
	} else {
		SCTP_TCB_LOCK_ASSERT(stcb);
		for (i = 0; i <= gap; i++) {
			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
				}
			}
		}
	}
	/*************************************************************/
	/* 2. Clear up re-assembly queue                             */
	/*************************************************************/

	/* This is now done as part of clearing up the stream/seq */
	if (asoc->idata_supported == 0) {
		uint16_t sid;

		/* Flush all the un-ordered data based on cum-tsn */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		for (sid = 0; sid < asoc->streamincnt; sid++) {
			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
		}
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
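	/*
	 * Illustrative note (not part of the original code): the gap
	 * computed in step 1 above is the bit index of new_cum_tsn within
	 * the receive mapping array, i.e. (new_cum_tsn -
	 * mapping_array_base_tsn) mod 2^32. With assumed example values:
	 *
	 *	mapping_array_base_tsn = 1000, new_cum_tsn = 1037 -> gap = 37
	 *	mapping_array_size = 16 bytes -> m_size = 128 bits
	 *
	 * Since 37 < 128, the else-branch just marks bits 0..37 as
	 * received; had new_cum_tsn been 1200 (gap = 200 >= 128), the
	 * whole map would lie behind the new cumulative TSN and both
	 * arrays are simply reset.
	 */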
	/*******************************************************/
	/* 3. Update the PR-stream re-ordering queues and fix  */
	/*    delivery issues as needed.                       */
	/*******************************************************/
	fwd_sz -= sizeof(*fwd);
	if (m && fwd_sz) {
		/* New method. */
		unsigned int num_str;
		uint32_t mid, cur_mid;
		uint16_t sid;
		uint16_t ordered, flags;
		struct sctp_strseq *stseq, strseqbuf;
		struct sctp_strseq_mid *stseq_m, strseqbuf_m;

		offset += sizeof(*fwd);

		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		if (asoc->idata_supported) {
			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
		} else {
			num_str = fwd_sz / sizeof(struct sctp_strseq);
		}
		for (i = 0; i < num_str; i++) {
			if (asoc->idata_supported) {
				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
				    sizeof(struct sctp_strseq_mid),
				    (uint8_t *)&strseqbuf_m);
				offset += sizeof(struct sctp_strseq_mid);
				if (stseq_m == NULL) {
					break;
				}
				sid = ntohs(stseq_m->sid);
				mid = ntohl(stseq_m->mid);
				flags = ntohs(stseq_m->flags);
				if (flags & PR_SCTP_UNORDERED_FLAG) {
					ordered = 0;
				} else {
					ordered = 1;
				}
			} else {
				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
				    sizeof(struct sctp_strseq),
				    (uint8_t *)&strseqbuf);
				offset += sizeof(struct sctp_strseq);
				if (stseq == NULL) {
					break;
				}
				sid = ntohs(stseq->sid);
				mid = (uint32_t)ntohs(stseq->ssn);
				ordered = 1;
			}
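			/*
			 * Illustrative note (not part of the original
			 * code): without I-DATA each entry is a 4-byte
			 * (sid, ssn) pair and the 16-bit SSN is widened
			 * into the 32-bit mid, so an on-wire ssn of
			 * 0x0102 becomes mid = 0x00000102. With I-DATA
			 * each entry is 8 bytes and the 32-bit mid is
			 * taken verbatim; only that form can carry the
			 * U bit marking the entry as unordered.
			 */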
5693 */ 5694 asoc->fragmented_delivery_inprogress = 0; 5695 } 5696 strm = &asoc->strmin[sid]; 5697 for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) { 5698 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn); 5699 } 5700 TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) { 5701 if ((control->sinfo_stream == sid) && 5702 (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) { 5703 str_seq = (sid << 16) | (0x0000ffff & mid); 5704 control->pdapi_aborted = 1; 5705 sv = stcb->asoc.control_pdapi; 5706 control->end_added = 1; 5707 if (control->on_strm_q == SCTP_ON_ORDERED) { 5708 TAILQ_REMOVE(&strm->inqueue, control, next_instrm); 5709 if (asoc->size_on_all_streams >= control->length) { 5710 asoc->size_on_all_streams -= control->length; 5711 } else { 5712 #ifdef INVARIANTS 5713 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 5714 #else 5715 asoc->size_on_all_streams = 0; 5716 #endif 5717 } 5718 sctp_ucount_decr(asoc->cnt_on_all_streams); 5719 } else if (control->on_strm_q == SCTP_ON_UNORDERED) { 5720 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); 5721 #ifdef INVARIANTS 5722 } else if (control->on_strm_q) { 5723 panic("strm: %p ctl: %p unknown %d", 5724 strm, control, control->on_strm_q); 5725 #endif 5726 } 5727 control->on_strm_q = 0; 5728 stcb->asoc.control_pdapi = control; 5729 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION, 5730 stcb, 5731 SCTP_PARTIAL_DELIVERY_ABORTED, 5732 (void *)&str_seq, 5733 SCTP_SO_NOT_LOCKED); 5734 stcb->asoc.control_pdapi = sv; 5735 break; 5736 } else if ((control->sinfo_stream == sid) && 5737 SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) { 5738 /* We are past our victim SSN */ 5739 break; 5740 } 5741 } 5742 if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) { 5743 /* Update the sequence number */ 5744 strm->last_mid_delivered = mid; 5745 } 5746 /* now kick the stream the new way */ 5747 /* sa_ignore NO_NULL_CHK */ 5748 sctp_kick_prsctp_reorder_queue(stcb, strm); 5749 } 5750 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5751 } 5752 /* 5753 * Now slide thing forward. 5754 */ 5755 sctp_slide_mapping_arrays(stcb); 5756 } 5757