/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_lock_bsd.h>

/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is) and will be sending it ... for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */
static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int lock_held);

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}
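/*
 * Illustrative arithmetic for sctp_calc_rwnd() below (not compiled; the
 * numbers are made up): with a 64000-byte receive buffer limit, 8000 bytes
 * plus 10 chunks held on the reassembly queue, and 2000 bytes plus 4
 * messages held on the stream queues, the advertised window is roughly
 *
 *   sbspace(so_rcv) - (8000 + 10 * MSIZE) - (2000 + 4 * MSIZE)
 *
 * minus my_rwnd_control_len for mbuf overhead, and it is clamped to 1 when
 * that control overhead would otherwise swallow it (SWS avoidance).
 */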
/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    uint32_t calc = 0;

    /*
     * This is really set wrong with respect to a 1-2-m socket, since
     * the sb_cc is the count that everyone has put up. When we re-write
     * sctp_soreceive then we will fix this so that ONLY this
     * association's data is taken into account.
     */
    if (stcb->sctp_socket == NULL) {
        return (calc);
    }

    KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
        ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
    KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
        ("size_on_all_streams is %u", asoc->size_on_all_streams));
    if (stcb->asoc.sb_cc == 0 &&
        asoc->cnt_on_reasm_queue == 0 &&
        asoc->cnt_on_all_streams == 0) {
        /* Full rwnd granted */
        calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
        return (calc);
    }
    /* get actual space */
    calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
    /*
     * take out what has NOT been put on the socket queue and we still
     * hold for putting up.
     */
    calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
        asoc->cnt_on_reasm_queue * MSIZE));
    calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
        asoc->cnt_on_all_streams * MSIZE));
    if (calc == 0) {
        /* out of space */
        return (calc);
    }

    /* what is the overhead of all these rwnd's */
    calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
    /*
     * If the window gets too small due to ctrl-stuff, reduce it to 1,
     * even if it is 0. SWS engaged
     */
    if (calc < stcb->asoc.my_rwnd_control_len) {
        calc = 1;
    }
    return (calc);
}

/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t sid,
    uint32_t mid, uint8_t flags,
    struct mbuf *dm)
{
    struct sctp_queued_to_read *read_queue_e = NULL;

    sctp_alloc_a_readq(stcb, read_queue_e);
    if (read_queue_e == NULL) {
        goto failed_build;
    }
    memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
    read_queue_e->sinfo_stream = sid;
    read_queue_e->sinfo_flags = (flags << 8);
    read_queue_e->sinfo_ppid = ppid;
    read_queue_e->sinfo_context = context;
    read_queue_e->sinfo_tsn = tsn;
    read_queue_e->sinfo_cumtsn = tsn;
    read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
    read_queue_e->mid = mid;
    read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
    TAILQ_INIT(&read_queue_e->reasm);
    read_queue_e->whoFrom = net;
    atomic_add_int(&net->ref_count, 1);
    read_queue_e->data = dm;
    read_queue_e->stcb = stcb;
    read_queue_e->port_from = stcb->rport;
failed_build:
    return (read_queue_e);
}
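/*
 * Receiver-side sketch (userspace, illustrative only) of walking the
 * ancillary data that sctp_build_ctl_nchunk() lays out below; SCTP_RCVINFO
 * is the RFC 6458 cmsg that replaces the older SCTP_SNDRCV one:
 *
 *   struct cmsghdr *cmsg;
 *   for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
 *       cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *       if (cmsg->cmsg_level == IPPROTO_SCTP &&
 *           cmsg->cmsg_type == SCTP_RCVINFO) {
 *           struct sctp_rcvinfo *r;
 *
 *           r = (struct sctp_rcvinfo *)CMSG_DATA(cmsg);
 *           ... use r->rcv_sid, r->rcv_ssn, r->rcv_tsn ...
 *       }
 *   }
 */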
struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
    struct sctp_extrcvinfo *seinfo;
    struct sctp_sndrcvinfo *outinfo;
    struct sctp_rcvinfo *rcvinfo;
    struct sctp_nxtinfo *nxtinfo;
    struct cmsghdr *cmh;
    struct mbuf *ret;
    int len;
    int use_extended;
    int provide_nxt;

    if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
        sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
        sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
        /* user does not want any ancillary data */
        return (NULL);
    }

    len = 0;
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
        len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
    }
    seinfo = (struct sctp_extrcvinfo *)sinfo;
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
        (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
        provide_nxt = 1;
        len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
    } else {
        provide_nxt = 0;
    }
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
        if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
            use_extended = 1;
            len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
        } else {
            use_extended = 0;
            len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
        }
    } else {
        use_extended = 0;
    }

    ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
    if (ret == NULL) {
        /* No space */
        return (ret);
    }
    SCTP_BUF_LEN(ret) = 0;

    /* We need a CMSG header followed by the struct */
    cmh = mtod(ret, struct cmsghdr *);
    /*
     * Make sure that there is no un-initialized padding between the
     * cmsg header and cmsg data and after the cmsg data.
     */
    memset(cmh, 0, len);
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
        cmh->cmsg_level = IPPROTO_SCTP;
        cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
        cmh->cmsg_type = SCTP_RCVINFO;
        rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
        rcvinfo->rcv_sid = sinfo->sinfo_stream;
        rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
        rcvinfo->rcv_flags = sinfo->sinfo_flags;
        rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
        rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
        rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
        rcvinfo->rcv_context = sinfo->sinfo_context;
        rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
        cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
        SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
    }
    if (provide_nxt) {
        cmh->cmsg_level = IPPROTO_SCTP;
        cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
        cmh->cmsg_type = SCTP_NXTINFO;
        nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
        nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
        nxtinfo->nxt_flags = 0;
        if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
            nxtinfo->nxt_flags |= SCTP_UNORDERED;
        }
        if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
            nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
        }
        if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
            nxtinfo->nxt_flags |= SCTP_COMPLETE;
        }
        nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
        nxtinfo->nxt_length = seinfo->serinfo_next_length;
        nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
        cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
        SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
    }
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
        cmh->cmsg_level = IPPROTO_SCTP;
        outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
        if (use_extended) {
            cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
            cmh->cmsg_type = SCTP_EXTRCV;
            memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
            SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
        } else {
            cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
            cmh->cmsg_type = SCTP_SNDRCV;
            *outinfo = *sinfo;
            SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
        }
    }
    return (ret);
}

static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
    uint32_t gap, i, cumackp1;
    int fnd = 0;
    int in_r = 0, in_nr = 0;
    if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
        return;
    }
    cumackp1 = asoc->cumulative_tsn + 1;
    if (SCTP_TSN_GT(cumackp1, tsn)) {
        /*
         * this tsn is behind the cum ack and thus we don't need to
         * worry about it being moved from one to the other.
         */
        return;
    }
    SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
    in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
    in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
    if ((in_r == 0) && (in_nr == 0)) {
#ifdef INVARIANTS
        panic("Things are really messed up now");
#else
        SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
        sctp_print_mapping_array(asoc);
#endif
    }
    if (in_nr == 0)
        SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
    if (in_r)
        SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
    if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
        asoc->highest_tsn_inside_nr_map = tsn;
    }
    if (tsn == asoc->highest_tsn_inside_map) {
        /* We must back down to see what the new highest is */
        for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
            SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
            if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
                asoc->highest_tsn_inside_map = i;
                fnd = 1;
                break;
            }
        }
        if (!fnd) {
            asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
        }
    }
}
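/*
 * Worked example for the gap math used above (illustrative, not compiled):
 * with mapping_array_base_tsn = 0x00001000 and tsn = 0x00001005,
 * SCTP_CALC_TSN_TO_GAP() yields gap = 5, i.e. bit 5 of the mapping arrays
 * tracks that TSN; serial (mod 2^32) arithmetic keeps this correct across
 * TSN wrap.
 */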
static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
    struct sctp_queued_to_read *at;
    struct sctp_readhead *q;
    uint8_t flags, unordered;

    flags = (control->sinfo_flags >> 8);
    unordered = flags & SCTP_DATA_UNORDERED;
    if (unordered) {
        q = &strm->uno_inqueue;
        if (asoc->idata_supported == 0) {
            if (!TAILQ_EMPTY(q)) {
                /*
                 * Only one stream can be here in old style
                 * -- abort
                 */
                return (-1);
            }
            TAILQ_INSERT_TAIL(q, control, next_instrm);
            control->on_strm_q = SCTP_ON_UNORDERED;
            return (0);
        }
    } else {
        q = &strm->inqueue;
    }
    if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
        control->end_added = 1;
        control->first_frag_seen = 1;
        control->last_frag_seen = 1;
    }
    if (TAILQ_EMPTY(q)) {
        /* Empty queue */
        TAILQ_INSERT_HEAD(q, control, next_instrm);
        if (unordered) {
            control->on_strm_q = SCTP_ON_UNORDERED;
        } else {
            control->on_strm_q = SCTP_ON_ORDERED;
        }
        return (0);
    } else {
        TAILQ_FOREACH(at, q, next_instrm) {
            if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
                /*
                 * one in queue is bigger than the new one,
                 * insert before this one
                 */
                TAILQ_INSERT_BEFORE(at, control, next_instrm);
                if (unordered) {
                    control->on_strm_q = SCTP_ON_UNORDERED;
                } else {
                    control->on_strm_q = SCTP_ON_ORDERED;
                }
                break;
            } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
                /*
                 * Gak, he sent me a duplicate msg id
                 * number?? return -1 to abort.
                 */
                return (-1);
            } else {
                if (TAILQ_NEXT(at, next_instrm) == NULL) {
                    /*
                     * We are at the end, insert it
                     * after this one
                     */
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
                        sctp_log_strm_del(control, at,
                            SCTP_STR_LOG_FROM_INSERT_TL);
                    }
                    TAILQ_INSERT_AFTER(q, at, control, next_instrm);
                    if (unordered) {
                        control->on_strm_q = SCTP_ON_UNORDERED;
                    } else {
                        control->on_strm_q = SCTP_ON_ORDERED;
                    }
                    break;
                }
            }
        }
    }
    return (0);
}

static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
    char msg[SCTP_DIAG_INFO_LEN];
    struct mbuf *oper;

    if (stcb->asoc.idata_supported) {
        snprintf(msg, sizeof(msg),
            "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
            opspot,
            control->fsn_included,
            chk->rec.data.tsn,
            chk->rec.data.sid,
            chk->rec.data.fsn, chk->rec.data.mid);
    } else {
        snprintf(msg, sizeof(msg),
            "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
            opspot,
            control->fsn_included,
            chk->rec.data.tsn,
            chk->rec.data.sid,
            chk->rec.data.fsn,
            (uint16_t)chk->rec.data.mid);
    }
    oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
    sctp_m_freem(chk->data);
    chk->data = NULL;
    sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
    sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
    *abort_flag = 1;
}

static void
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
    /*
     * The control could not be placed and must be cleaned.
     */
    struct sctp_tmit_chunk *chk, *nchk;

    TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
        TAILQ_REMOVE(&control->reasm, chk, sctp_next);
        if (chk->data)
            sctp_m_freem(chk->data);
        chk->data = NULL;
        sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
    }
    sctp_free_a_readq(stcb, control);
}
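/*
 * Background note on message numbering (not taken from the code above):
 * with I-DATA (RFC 8260) a 32-bit MID orders messages within a stream;
 * with old DATA chunks the 16-bit SSN is carried in the low bits of "mid",
 * and the SCTP_MID_GT/SCTP_MID_EQ macros are expected to compare in 16-bit
 * serial space in that case instead of 32-bit.
 */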
/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order, as long
 * as the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
    /*
     * FIX-ME maybe? What happens when the ssn wraps? If we are getting
     * all the data in one stream this could happen quite rapidly. One
     * could use the TSN to keep track of things, but this scheme breaks
     * down in the other type of stream usage that could occur. Send a
     * single msg to stream 0, send 4Billion messages to stream 1, now
     * send a message to stream 0. You have a situation where the TSN
     * has wrapped but not in the stream. Is this worth worrying about,
     * or should we just change our queue sort at the bottom to be by
     * TSN?
     *
     * Could it also be legal for a peer to send ssn 1 with TSN 2 and
     * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
     * assignment this could happen... and I don't see how this would be
     * a violation. So for now I am undecided and will leave the sort by
     * SSN alone. Maybe a hybrid approach is the answer.
     */
    struct sctp_queued_to_read *at;
    int queue_needed;
    uint32_t nxt_todel;
    struct mbuf *op_err;
    struct sctp_stream_in *strm;
    char msg[SCTP_DIAG_INFO_LEN];

    strm = &asoc->strmin[control->sinfo_stream];
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
        sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
    }
    if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
        /* The incoming sseq is behind where we last delivered? */
        SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
            strm->last_mid_delivered, control->mid);
        /*
         * throw it in the stream so it gets cleaned up in
         * association destruction
         */
        TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
        if (asoc->idata_supported) {
            snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
                strm->last_mid_delivered, control->sinfo_tsn,
                control->sinfo_stream, control->mid);
        } else {
            snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
                (uint16_t)strm->last_mid_delivered,
                control->sinfo_tsn,
                control->sinfo_stream,
                (uint16_t)control->mid);
        }
        op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
        sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
        *abort_flag = 1;
        return;
    }
    queue_needed = 1;
    asoc->size_on_all_streams += control->length;
    sctp_ucount_incr(asoc->cnt_on_all_streams);
    nxt_todel = strm->last_mid_delivered + 1;
    if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
        struct socket *so;

        so = SCTP_INP_SO(stcb->sctp_ep);
        atomic_add_int(&stcb->asoc.refcnt, 1);
        SCTP_TCB_UNLOCK(stcb);
        SCTP_SOCKET_LOCK(so, 1);
        SCTP_TCB_LOCK(stcb);
        atomic_subtract_int(&stcb->asoc.refcnt, 1);
        if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
            SCTP_SOCKET_UNLOCK(so, 1);
            return;
        }
#endif
        /* can be delivered right away? */
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
            sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
        }
        /* EY it won't be queued if it could be delivered directly */
        queue_needed = 0;
        if (asoc->size_on_all_streams >= control->length) {
            asoc->size_on_all_streams -= control->length;
        } else {
#ifdef INVARIANTS
            panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
            asoc->size_on_all_streams = 0;
#endif
        }
        sctp_ucount_decr(asoc->cnt_on_all_streams);
        strm->last_mid_delivered++;
        sctp_mark_non_revokable(asoc, control->sinfo_tsn);
        sctp_add_to_readq(stcb->sctp_ep, stcb,
            control,
            &stcb->sctp_socket->so_rcv, 1,
            SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
        TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
            /* all delivered */
            nxt_todel = strm->last_mid_delivered + 1;
            if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
                (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
                if (control->on_strm_q == SCTP_ON_ORDERED) {
                    TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
                    if (asoc->size_on_all_streams >= control->length) {
                        asoc->size_on_all_streams -= control->length;
                    } else {
#ifdef INVARIANTS
                        panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
                        asoc->size_on_all_streams = 0;
#endif
                    }
                    sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
                } else {
                    panic("Huh control: %p is on_strm_q: %d",
                        control, control->on_strm_q);
#endif
                }
                control->on_strm_q = 0;
                strm->last_mid_delivered++;
                /*
                 * We ignore the return of deliver_data here
                 * since we always can hold the chunk on the
                 * d-queue. And we have a finite number that
                 * can be delivered from the strq.
                 */
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
                    sctp_log_strm_del(control, NULL,
                        SCTP_STR_LOG_FROM_IMMED_DEL);
                }
                sctp_mark_non_revokable(asoc, control->sinfo_tsn);
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, 1,
                    SCTP_READ_LOCK_NOT_HELD,
                    SCTP_SO_LOCKED);
                continue;
            } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
                *need_reasm = 1;
            }
            break;
        }
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
        SCTP_SOCKET_UNLOCK(so, 1);
#endif
    }
    if (queue_needed) {
        /*
         * Ok, we did not deliver this guy, find the correct place
         * to put it on the queue.
         */
        if (sctp_place_control_in_stream(strm, asoc, control)) {
            snprintf(msg, sizeof(msg),
                "Queue to str MID: %u duplicate", control->mid);
            sctp_clean_up_control(stcb, control);
            op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
            stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
            sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
            *abort_flag = 1;
        }
    }
}

static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
    struct mbuf *m, *prev = NULL;
    struct sctp_tcb *stcb;

    stcb = control->stcb;
    control->held_length = 0;
    control->length = 0;
    m = control->data;
    while (m) {
        if (SCTP_BUF_LEN(m) == 0) {
            /* Skip mbufs with NO length */
            if (prev == NULL) {
                /* First one */
                control->data = sctp_m_free(m);
                m = control->data;
            } else {
                SCTP_BUF_NEXT(prev) = sctp_m_free(m);
                m = SCTP_BUF_NEXT(prev);
            }
            if (m == NULL) {
                control->tail_mbuf = prev;
            }
            continue;
        }
        prev = m;
        atomic_add_int(&control->length, SCTP_BUF_LEN(m));
        if (control->on_read_q) {
            /*
             * On read queue so we must increment the SB stuff,
             * we assume caller has done any locks of SB.
             */
            sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
        }
        m = SCTP_BUF_NEXT(m);
    }
    if (prev) {
        control->tail_mbuf = prev;
    }
}
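/*
 * The helper above and the one below keep control->tail_mbuf pointing at
 * the last non-empty mbuf of control->data so appends are O(1):
 * zero-length mbufs are freed as they are encountered, control->length
 * tracks the summed SCTP_BUF_LEN() of the chain, and sctp_sballoc() is
 * charged only when the control already sits on the socket's read queue.
 */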
740 */ 741 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m); 742 } 743 *added += SCTP_BUF_LEN(m); 744 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 745 m = SCTP_BUF_NEXT(m); 746 } 747 if (prev) { 748 control->tail_mbuf = prev; 749 } 750 } 751 752 static void 753 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control) 754 { 755 memset(nc, 0, sizeof(struct sctp_queued_to_read)); 756 nc->sinfo_stream = control->sinfo_stream; 757 nc->mid = control->mid; 758 TAILQ_INIT(&nc->reasm); 759 nc->top_fsn = control->top_fsn; 760 nc->mid = control->mid; 761 nc->sinfo_flags = control->sinfo_flags; 762 nc->sinfo_ppid = control->sinfo_ppid; 763 nc->sinfo_context = control->sinfo_context; 764 nc->fsn_included = 0xffffffff; 765 nc->sinfo_tsn = control->sinfo_tsn; 766 nc->sinfo_cumtsn = control->sinfo_cumtsn; 767 nc->sinfo_assoc_id = control->sinfo_assoc_id; 768 nc->whoFrom = control->whoFrom; 769 atomic_add_int(&nc->whoFrom->ref_count, 1); 770 nc->stcb = control->stcb; 771 nc->port_from = control->port_from; 772 } 773 774 static void 775 sctp_reset_a_control(struct sctp_queued_to_read *control, 776 struct sctp_inpcb *inp, uint32_t tsn) 777 { 778 control->fsn_included = tsn; 779 if (control->on_read_q) { 780 /* 781 * We have to purge it from there, hopefully this will work 782 * :-) 783 */ 784 TAILQ_REMOVE(&inp->read_queue, control, next); 785 control->on_read_q = 0; 786 } 787 } 788 789 static int 790 sctp_handle_old_unordered_data(struct sctp_tcb *stcb, 791 struct sctp_association *asoc, 792 struct sctp_stream_in *strm, 793 struct sctp_queued_to_read *control, 794 uint32_t pd_point, 795 int inp_read_lock_held) 796 { 797 /* 798 * Special handling for the old un-ordered data chunk. All the 799 * chunks/TSN's go to mid 0. So we have to do the old style watching 800 * to see if we have it all. If you return one, no other control 801 * entries on the un-ordered queue will be looked at. In theory 802 * there should be no others entries in reality, unless the guy is 803 * sending both unordered NDATA and unordered DATA... 804 */ 805 struct sctp_tmit_chunk *chk, *lchk, *tchk; 806 uint32_t fsn; 807 struct sctp_queued_to_read *nc; 808 int cnt_added; 809 810 if (control->first_frag_seen == 0) { 811 /* Nothing we can do, we have not seen the first piece yet */ 812 return (1); 813 } 814 /* Collapse any we can */ 815 cnt_added = 0; 816 restart: 817 fsn = control->fsn_included + 1; 818 /* Now what can we add? */ 819 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) { 820 if (chk->rec.data.fsn == fsn) { 821 /* Ok lets add it */ 822 sctp_alloc_a_readq(stcb, nc); 823 if (nc == NULL) { 824 break; 825 } 826 memset(nc, 0, sizeof(struct sctp_queued_to_read)); 827 TAILQ_REMOVE(&control->reasm, chk, sctp_next); 828 sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD); 829 fsn++; 830 cnt_added++; 831 chk = NULL; 832 if (control->end_added) { 833 /* We are done */ 834 if (!TAILQ_EMPTY(&control->reasm)) { 835 /* 836 * Ok we have to move anything left 837 * on the control queue to a new 838 * control. 
static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
    /*
     * Special handling for the old un-ordered data chunk. All the
     * chunks/TSN's go to mid 0. So we have to do the old style watching
     * to see if we have it all. If you return one, no other control
     * entries on the un-ordered queue will be looked at. In theory
     * there should be no other entries in reality, unless the guy is
     * sending both unordered NDATA and unordered DATA...
     */
    struct sctp_tmit_chunk *chk, *lchk, *tchk;
    uint32_t fsn;
    struct sctp_queued_to_read *nc;
    int cnt_added;

    if (control->first_frag_seen == 0) {
        /* Nothing we can do, we have not seen the first piece yet */
        return (1);
    }
    /* Collapse any we can */
    cnt_added = 0;
restart:
    fsn = control->fsn_included + 1;
    /* Now what can we add? */
    TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
        if (chk->rec.data.fsn == fsn) {
            /* Ok lets add it */
            sctp_alloc_a_readq(stcb, nc);
            if (nc == NULL) {
                break;
            }
            memset(nc, 0, sizeof(struct sctp_queued_to_read));
            TAILQ_REMOVE(&control->reasm, chk, sctp_next);
            sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
            fsn++;
            cnt_added++;
            chk = NULL;
            if (control->end_added) {
                /* We are done */
                if (!TAILQ_EMPTY(&control->reasm)) {
                    /*
                     * Ok we have to move anything left
                     * on the control queue to a new
                     * control.
                     */
                    sctp_build_readq_entry_from_ctl(nc, control);
                    tchk = TAILQ_FIRST(&control->reasm);
                    if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
                        TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
                        if (asoc->size_on_reasm_queue >= tchk->send_size) {
                            asoc->size_on_reasm_queue -= tchk->send_size;
                        } else {
#ifdef INVARIANTS
                            panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
                            asoc->size_on_reasm_queue = 0;
#endif
                        }
                        sctp_ucount_decr(asoc->cnt_on_reasm_queue);
                        nc->first_frag_seen = 1;
                        nc->fsn_included = tchk->rec.data.fsn;
                        nc->data = tchk->data;
                        nc->sinfo_ppid = tchk->rec.data.ppid;
                        nc->sinfo_tsn = tchk->rec.data.tsn;
                        sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
                        tchk->data = NULL;
                        sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
                        sctp_setup_tail_pointer(nc);
                        tchk = TAILQ_FIRST(&control->reasm);
                    }
                    /* Spin the rest onto the queue */
                    while (tchk) {
                        TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
                        TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
                        tchk = TAILQ_FIRST(&control->reasm);
                    }
                    /*
                     * Now lets add it to the queue
                     * after removing control
                     */
                    TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
                    nc->on_strm_q = SCTP_ON_UNORDERED;
                    if (control->on_strm_q) {
                        TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
                        control->on_strm_q = 0;
                    }
                }
                if (control->pdapi_started) {
                    strm->pd_api_started = 0;
                    control->pdapi_started = 0;
                }
                if (control->on_strm_q) {
                    TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
                    control->on_strm_q = 0;
                    SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
                }
                if (control->on_read_q == 0) {
                    sctp_add_to_readq(stcb->sctp_ep, stcb, control,
                        &stcb->sctp_socket->so_rcv, control->end_added,
                        inp_read_lock_held, SCTP_SO_NOT_LOCKED);
                }
                sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
                if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
                    /*
                     * Switch to the new guy and
                     * continue
                     */
                    control = nc;
                    goto restart;
                } else {
                    if (nc->on_strm_q == 0) {
                        sctp_free_a_readq(stcb, nc);
                    }
                }
                return (1);
            } else {
                sctp_free_a_readq(stcb, nc);
            }
        } else {
            /* Can't add more */
            break;
        }
    }
    if (cnt_added && strm->pd_api_started) {
        sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
    }
    if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
        strm->pd_api_started = 1;
        control->pdapi_started = 1;
        sctp_add_to_readq(stcb->sctp_ep, stcb, control,
            &stcb->sctp_socket->so_rcv, control->end_added,
            inp_read_lock_held, SCTP_SO_NOT_LOCKED);
        sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
        return (0);
    } else {
        return (1);
    }
}
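/*
 * Partial delivery (PD-API) primer for the pd_point checks used above and
 * in sctp_deliver_reasm_check(): once an incomplete message has accumulated
 * roughly pd_point bytes it is pushed to the read queue early, and only one
 * partial delivery may be in progress per stream (strm->pd_api_started).
 * pd_point itself is min(receive buffer limit >> SCTP_PARTIAL_DELIVERY_SHIFT,
 * sctp_ep->partial_delivery_point); see the top of sctp_deliver_reasm_check().
 */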
static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
    struct sctp_tmit_chunk *at;
    int inserted;

    /*
     * Here we need to place the chunk into the control structure sorted
     * in the correct order.
     */
    if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
        /* It's the very first one. */
        SCTPDBG(SCTP_DEBUG_XXX,
            "chunk is a first fsn: %u becomes fsn_included\n",
            chk->rec.data.fsn);
        at = TAILQ_FIRST(&control->reasm);
        if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
            /*
             * The first chunk in the reassembly is a smaller
             * TSN than this one, even though this has a first,
             * it must be from a subsequent msg.
             */
            goto place_chunk;
        }
        if (control->first_frag_seen) {
            /*
             * In old un-ordered we can reassemble multiple
             * messages on one control, as long as the next
             * FIRST is greater than the old first (TSN, i.e.
             * FSN, wise).
             */
            struct mbuf *tdata;
            uint32_t tmp;

            if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
                /*
                 * Easy way the start of a new guy beyond
                 * the lowest
                 */
                goto place_chunk;
            }
            if ((chk->rec.data.fsn == control->fsn_included) ||
                (control->pdapi_started)) {
                /*
                 * Ok this should not happen, if it does we
                 * started the pd-api on the higher TSN
                 * (since the equals part is a TSN failure
                 * it must be that).
                 *
                 * We are completely hosed in that case since
                 * I have no way to recover. This really
                 * will only happen if we can get more TSN's
                 * higher before the pd-api-point.
                 */
                sctp_abort_in_reasm(stcb, control, chk,
                    abort_flag,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
                return;
            }
            /*
             * Ok we have two firsts and the one we just got is
             * smaller than the one we previously placed.. yuck!
             * We must swap them out.
             */
            /* swap the mbufs */
            tdata = control->data;
            control->data = chk->data;
            chk->data = tdata;
            /* Save the lengths */
            chk->send_size = control->length;
            /* Recompute length of control and tail pointer */
            sctp_setup_tail_pointer(control);
            /* Fix the FSN included */
            tmp = control->fsn_included;
            control->fsn_included = chk->rec.data.fsn;
            chk->rec.data.fsn = tmp;
            /* Fix the TSN included */
            tmp = control->sinfo_tsn;
            control->sinfo_tsn = chk->rec.data.tsn;
            chk->rec.data.tsn = tmp;
            /* Fix the PPID included */
            tmp = control->sinfo_ppid;
            control->sinfo_ppid = chk->rec.data.ppid;
            chk->rec.data.ppid = tmp;
            /* Fix tail pointer */
            goto place_chunk;
        }
        control->first_frag_seen = 1;
        control->fsn_included = chk->rec.data.fsn;
        control->top_fsn = chk->rec.data.fsn;
        control->sinfo_tsn = chk->rec.data.tsn;
        control->sinfo_ppid = chk->rec.data.ppid;
        control->data = chk->data;
        sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
        chk->data = NULL;
        sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
        sctp_setup_tail_pointer(control);
        return;
    }
place_chunk:
    inserted = 0;
    TAILQ_FOREACH(at, &control->reasm, sctp_next) {
        if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
            /*
             * This one in queue is bigger than the new one,
             * insert the new one before at.
             */
            asoc->size_on_reasm_queue += chk->send_size;
            sctp_ucount_incr(asoc->cnt_on_reasm_queue);
            inserted = 1;
            TAILQ_INSERT_BEFORE(at, chk, sctp_next);
            break;
        } else if (at->rec.data.fsn == chk->rec.data.fsn) {
            /*
             * They sent a duplicate fsn number. This really
             * should not happen since the FSN is a TSN and it
             * should have been dropped earlier.
             */
            sctp_abort_in_reasm(stcb, control, chk,
                abort_flag,
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
            return;
        }
    }
    if (inserted == 0) {
        /* Its at the end */
        asoc->size_on_reasm_queue += chk->send_size;
        sctp_ucount_incr(asoc->cnt_on_reasm_queue);
        control->top_fsn = chk->rec.data.fsn;
        TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
    }
}
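/*
 * Overview of sctp_deliver_reasm_check() below: it walks a stream's queues
 * in three passes -- (1) the unordered queue, using the special pre-I-DATA
 * handler above when the peer does not support I-DATA, (2) a completed
 * message at the head of the ordered queue that was being partially
 * delivered, and (3) an in-order delivery loop keyed off
 * last_mid_delivered + 1. It stops whenever a partial delivery is in
 * progress, since nothing may be queued to the reader behind one.
 */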
1160 */ 1161 nctl = TAILQ_NEXT(control, next_instrm); 1162 SCTPDBG(SCTP_DEBUG_XXX, 1163 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n", 1164 control, control->end_added, control->mid, 1165 control->top_fsn, control->fsn_included, 1166 strm->last_mid_delivered); 1167 if (control->end_added) { 1168 if (control->on_strm_q) { 1169 #ifdef INVARIANTS 1170 if (control->on_strm_q != SCTP_ON_ORDERED) { 1171 panic("Huh control: %p on_q: %d -- not ordered?", 1172 control, control->on_strm_q); 1173 } 1174 #endif 1175 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); 1176 TAILQ_REMOVE(&strm->inqueue, control, next_instrm); 1177 if (asoc->size_on_all_streams >= control->length) { 1178 asoc->size_on_all_streams -= control->length; 1179 } else { 1180 #ifdef INVARIANTS 1181 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 1182 #else 1183 asoc->size_on_all_streams = 0; 1184 #endif 1185 } 1186 sctp_ucount_decr(asoc->cnt_on_all_streams); 1187 control->on_strm_q = 0; 1188 } 1189 if (strm->pd_api_started && control->pdapi_started) { 1190 control->pdapi_started = 0; 1191 strm->pd_api_started = 0; 1192 } 1193 if (control->on_read_q == 0) { 1194 sctp_add_to_readq(stcb->sctp_ep, stcb, 1195 control, 1196 &stcb->sctp_socket->so_rcv, control->end_added, 1197 inp_read_lock_held, SCTP_SO_NOT_LOCKED); 1198 } 1199 control = nctl; 1200 } 1201 } 1202 if (strm->pd_api_started) { 1203 /* 1204 * Can't add more must have gotten an un-ordered above being 1205 * partially delivered. 1206 */ 1207 return (0); 1208 } 1209 deliver_more: 1210 next_to_del = strm->last_mid_delivered + 1; 1211 if (control) { 1212 SCTPDBG(SCTP_DEBUG_XXX, 1213 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n", 1214 control, control->end_added, control->mid, control->top_fsn, control->fsn_included, 1215 next_to_del); 1216 nctl = TAILQ_NEXT(control, next_instrm); 1217 if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) && 1218 (control->first_frag_seen)) { 1219 int done; 1220 1221 /* Ok we can deliver it onto the stream. 
            if (control->end_added) {
                /* We are done with it afterwards */
                if (control->on_strm_q) {
#ifdef INVARIANTS
                    if (control->on_strm_q != SCTP_ON_ORDERED) {
                        panic("Huh control: %p on_q: %d -- not ordered?",
                            control, control->on_strm_q);
                    }
#endif
                    SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
                    TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
                    if (asoc->size_on_all_streams >= control->length) {
                        asoc->size_on_all_streams -= control->length;
                    } else {
#ifdef INVARIANTS
                        panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
                        asoc->size_on_all_streams = 0;
#endif
                    }
                    sctp_ucount_decr(asoc->cnt_on_all_streams);
                    control->on_strm_q = 0;
                }
                ret++;
            }
            if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
                /*
                 * A singleton now slipping through - mark
                 * it non-revokable too
                 */
                sctp_mark_non_revokable(asoc, control->sinfo_tsn);
            } else if (control->end_added == 0) {
                /*
                 * Check if we can defer adding until its
                 * all there
                 */
                if ((control->length < pd_point) || (strm->pd_api_started)) {
                    /*
                     * Don't need it or cannot add more
                     * (one being delivered that way)
                     */
                    goto out;
                }
            }
            done = (control->end_added) && (control->last_frag_seen);
            if (control->on_read_q == 0) {
                if (!done) {
                    if (asoc->size_on_all_streams >= control->length) {
                        asoc->size_on_all_streams -= control->length;
                    } else {
#ifdef INVARIANTS
                        panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
                        asoc->size_on_all_streams = 0;
#endif
                    }
                    strm->pd_api_started = 1;
                    control->pdapi_started = 1;
                }
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, control->end_added,
                    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
            }
            strm->last_mid_delivered = next_to_del;
            if (done) {
                control = nctl;
                goto deliver_more;
            }
        }
    }
out:
    return (ret);
}
1314 */ 1315 SCTP_INP_READ_LOCK(stcb->sctp_ep); 1316 i_locked = 1; 1317 } 1318 if (control->data == NULL) { 1319 control->data = chk->data; 1320 sctp_setup_tail_pointer(control); 1321 } else { 1322 sctp_add_to_tail_pointer(control, chk->data, &added); 1323 } 1324 control->fsn_included = chk->rec.data.fsn; 1325 asoc->size_on_reasm_queue -= chk->send_size; 1326 sctp_ucount_decr(asoc->cnt_on_reasm_queue); 1327 sctp_mark_non_revokable(asoc, chk->rec.data.tsn); 1328 chk->data = NULL; 1329 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 1330 control->first_frag_seen = 1; 1331 control->sinfo_tsn = chk->rec.data.tsn; 1332 control->sinfo_ppid = chk->rec.data.ppid; 1333 } 1334 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 1335 /* Its complete */ 1336 if ((control->on_strm_q) && (control->on_read_q)) { 1337 if (control->pdapi_started) { 1338 control->pdapi_started = 0; 1339 strm->pd_api_started = 0; 1340 } 1341 if (control->on_strm_q == SCTP_ON_UNORDERED) { 1342 /* Unordered */ 1343 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); 1344 control->on_strm_q = 0; 1345 } else if (control->on_strm_q == SCTP_ON_ORDERED) { 1346 /* Ordered */ 1347 TAILQ_REMOVE(&strm->inqueue, control, next_instrm); 1348 /* 1349 * Don't need to decrement 1350 * size_on_all_streams, since control is on 1351 * the read queue. 1352 */ 1353 sctp_ucount_decr(asoc->cnt_on_all_streams); 1354 control->on_strm_q = 0; 1355 #ifdef INVARIANTS 1356 } else if (control->on_strm_q) { 1357 panic("Unknown state on ctrl: %p on_strm_q: %d", control, 1358 control->on_strm_q); 1359 #endif 1360 } 1361 } 1362 control->end_added = 1; 1363 control->last_frag_seen = 1; 1364 } 1365 if (i_locked) { 1366 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 1367 } 1368 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 1369 return (added); 1370 } 1371 1372 /* 1373 * Dump onto the re-assembly queue, in its proper place. After dumping on the 1374 * queue, see if anthing can be delivered. If so pull it off (or as much as 1375 * we can. If we run out of space then we must dump what we can and set the 1376 * appropriate flag to say we queued what we could. 1377 */ 1378 static void 1379 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc, 1380 struct sctp_queued_to_read *control, 1381 struct sctp_tmit_chunk *chk, 1382 int created_control, 1383 int *abort_flag, uint32_t tsn) 1384 { 1385 uint32_t next_fsn; 1386 struct sctp_tmit_chunk *at, *nat; 1387 struct sctp_stream_in *strm; 1388 int do_wakeup, unordered; 1389 uint32_t lenadded; 1390 1391 strm = &asoc->strmin[control->sinfo_stream]; 1392 /* 1393 * For old un-ordered data chunks. 1394 */ 1395 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 1396 unordered = 1; 1397 } else { 1398 unordered = 0; 1399 } 1400 /* Must be added to the stream-in queue */ 1401 if (created_control) { 1402 if (unordered == 0) { 1403 sctp_ucount_incr(asoc->cnt_on_all_streams); 1404 } 1405 if (sctp_place_control_in_stream(strm, asoc, control)) { 1406 /* Duplicate SSN? */ 1407 sctp_abort_in_reasm(stcb, control, chk, 1408 abort_flag, 1409 SCTP_FROM_SCTP_INDATA + SCTP_LOC_6); 1410 sctp_clean_up_control(stcb, control); 1411 return; 1412 } 1413 if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) { 1414 /* 1415 * Ok we created this control and now lets validate 1416 * that its legal i.e. there is a B bit set, if not 1417 * and we have up to the cum-ack then its invalid. 
1418 */ 1419 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) { 1420 sctp_abort_in_reasm(stcb, control, chk, 1421 abort_flag, 1422 SCTP_FROM_SCTP_INDATA + SCTP_LOC_7); 1423 return; 1424 } 1425 } 1426 } 1427 if ((asoc->idata_supported == 0) && (unordered == 1)) { 1428 sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag); 1429 return; 1430 } 1431 /* 1432 * Ok we must queue the chunk into the reasembly portion: o if its 1433 * the first it goes to the control mbuf. o if its not first but the 1434 * next in sequence it goes to the control, and each succeeding one 1435 * in order also goes. o if its not in order we place it on the list 1436 * in its place. 1437 */ 1438 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 1439 /* Its the very first one. */ 1440 SCTPDBG(SCTP_DEBUG_XXX, 1441 "chunk is a first fsn: %u becomes fsn_included\n", 1442 chk->rec.data.fsn); 1443 if (control->first_frag_seen) { 1444 /* 1445 * Error on senders part, they either sent us two 1446 * data chunks with FIRST, or they sent two 1447 * un-ordered chunks that were fragmented at the 1448 * same time in the same stream. 1449 */ 1450 sctp_abort_in_reasm(stcb, control, chk, 1451 abort_flag, 1452 SCTP_FROM_SCTP_INDATA + SCTP_LOC_8); 1453 return; 1454 } 1455 control->first_frag_seen = 1; 1456 control->sinfo_ppid = chk->rec.data.ppid; 1457 control->sinfo_tsn = chk->rec.data.tsn; 1458 control->fsn_included = chk->rec.data.fsn; 1459 control->data = chk->data; 1460 sctp_mark_non_revokable(asoc, chk->rec.data.tsn); 1461 chk->data = NULL; 1462 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 1463 sctp_setup_tail_pointer(control); 1464 asoc->size_on_all_streams += control->length; 1465 } else { 1466 /* Place the chunk in our list */ 1467 int inserted = 0; 1468 1469 if (control->last_frag_seen == 0) { 1470 /* Still willing to raise highest FSN seen */ 1471 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) { 1472 SCTPDBG(SCTP_DEBUG_XXX, 1473 "We have a new top_fsn: %u\n", 1474 chk->rec.data.fsn); 1475 control->top_fsn = chk->rec.data.fsn; 1476 } 1477 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 1478 SCTPDBG(SCTP_DEBUG_XXX, 1479 "The last fsn is now in place fsn: %u\n", 1480 chk->rec.data.fsn); 1481 control->last_frag_seen = 1; 1482 if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) { 1483 SCTPDBG(SCTP_DEBUG_XXX, 1484 "New fsn: %u is not at top_fsn: %u -- abort\n", 1485 chk->rec.data.fsn, 1486 control->top_fsn); 1487 sctp_abort_in_reasm(stcb, control, chk, 1488 abort_flag, 1489 SCTP_FROM_SCTP_INDATA + SCTP_LOC_9); 1490 return; 1491 } 1492 } 1493 if (asoc->idata_supported || control->first_frag_seen) { 1494 /* 1495 * For IDATA we always check since we know 1496 * that the first fragment is 0. For old 1497 * DATA we have to receive the first before 1498 * we know the first FSN (which is the TSN). 1499 */ 1500 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) { 1501 /* 1502 * We have already delivered up to 1503 * this so its a dup 1504 */ 1505 sctp_abort_in_reasm(stcb, control, chk, 1506 abort_flag, 1507 SCTP_FROM_SCTP_INDATA + SCTP_LOC_10); 1508 return; 1509 } 1510 } 1511 } else { 1512 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 1513 /* Second last? huh? 
                SCTPDBG(SCTP_DEBUG_XXX,
                    "Duplicate last fsn: %u (top: %u) -- abort\n",
                    chk->rec.data.fsn, control->top_fsn);
                sctp_abort_in_reasm(stcb, control,
                    chk, abort_flag,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
                return;
            }
            if (asoc->idata_supported || control->first_frag_seen) {
                /*
                 * For IDATA we always check since we know
                 * that the first fragment is 0. For old
                 * DATA we have to receive the first before
                 * we know the first FSN (which is the TSN).
                 */
                if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
                    /*
                     * We have already delivered up to
                     * this so its a dup
                     */
                    SCTPDBG(SCTP_DEBUG_XXX,
                        "New fsn: %u is already seen in included_fsn: %u -- abort\n",
                        chk->rec.data.fsn, control->fsn_included);
                    sctp_abort_in_reasm(stcb, control, chk,
                        abort_flag,
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
                    return;
                }
            }
            /*
             * validate not beyond top FSN if we have seen last
             * one
             */
            if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
                SCTPDBG(SCTP_DEBUG_XXX,
                    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
                    chk->rec.data.fsn,
                    control->top_fsn);
                sctp_abort_in_reasm(stcb, control, chk,
                    abort_flag,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
                return;
            }
        }
        /*
         * If we reach here, we need to place the new chunk in the
         * reassembly for this control.
         */
        SCTPDBG(SCTP_DEBUG_XXX,
            "chunk is a not first fsn: %u needs to be inserted\n",
            chk->rec.data.fsn);
        TAILQ_FOREACH(at, &control->reasm, sctp_next) {
            if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
                /*
                 * This one in queue is bigger than the new
                 * one, insert the new one before at.
                 */
                SCTPDBG(SCTP_DEBUG_XXX,
                    "Insert it before fsn: %u\n",
                    at->rec.data.fsn);
                asoc->size_on_reasm_queue += chk->send_size;
                sctp_ucount_incr(asoc->cnt_on_reasm_queue);
                TAILQ_INSERT_BEFORE(at, chk, sctp_next);
                inserted = 1;
                break;
            } else if (at->rec.data.fsn == chk->rec.data.fsn) {
                /*
                 * Gak, he sent me a duplicate str seq
                 * number
                 */
                /*
                 * foo bar, I guess I will just free this
                 * new guy, should we abort too? FIX ME
                 * MAYBE? Or it COULD be that the SSN's have
                 * wrapped. Maybe I should compare to TSN
                 * somehow... sigh for now just blow away
                 * the chunk!
                 */
                SCTPDBG(SCTP_DEBUG_XXX,
                    "Duplicate to fsn: %u -- abort\n",
                    at->rec.data.fsn);
                sctp_abort_in_reasm(stcb, control,
                    chk, abort_flag,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
                return;
            }
        }
        if (inserted == 0) {
            /* Goes on the end */
            SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
                chk->rec.data.fsn);
            asoc->size_on_reasm_queue += chk->send_size;
            sctp_ucount_incr(asoc->cnt_on_reasm_queue);
            TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
        }
    }
    /*
     * Ok lets see if we can suck any up into the control structure that
     * are in seq if it makes sense.
     */
    do_wakeup = 0;
    /*
     * If the first fragment has not been seen there is no sense in
     * looking.
     */
1619 */ 1620 if (control->first_frag_seen) { 1621 next_fsn = control->fsn_included + 1; 1622 TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) { 1623 if (at->rec.data.fsn == next_fsn) { 1624 /* We can add this one now to the control */ 1625 SCTPDBG(SCTP_DEBUG_XXX, 1626 "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n", 1627 control, at, 1628 at->rec.data.fsn, 1629 next_fsn, control->fsn_included); 1630 TAILQ_REMOVE(&control->reasm, at, sctp_next); 1631 lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD); 1632 if (control->on_read_q) { 1633 do_wakeup = 1; 1634 } else { 1635 /* 1636 * We only add to the 1637 * size-on-all-streams if its not on 1638 * the read q. The read q flag will 1639 * cause a sballoc so its accounted 1640 * for there. 1641 */ 1642 asoc->size_on_all_streams += lenadded; 1643 } 1644 next_fsn++; 1645 if (control->end_added && control->pdapi_started) { 1646 if (strm->pd_api_started) { 1647 strm->pd_api_started = 0; 1648 control->pdapi_started = 0; 1649 } 1650 if (control->on_read_q == 0) { 1651 sctp_add_to_readq(stcb->sctp_ep, stcb, 1652 control, 1653 &stcb->sctp_socket->so_rcv, control->end_added, 1654 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 1655 } 1656 break; 1657 } 1658 } else { 1659 break; 1660 } 1661 } 1662 } 1663 if (do_wakeup) { 1664 /* Need to wakeup the reader */ 1665 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 1666 } 1667 } 1668 1669 static struct sctp_queued_to_read * 1670 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported) 1671 { 1672 struct sctp_queued_to_read *control; 1673 1674 if (ordered) { 1675 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) { 1676 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) { 1677 break; 1678 } 1679 } 1680 } else { 1681 if (idata_supported) { 1682 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) { 1683 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) { 1684 break; 1685 } 1686 } 1687 } else { 1688 control = TAILQ_FIRST(&strm->uno_inqueue); 1689 } 1690 } 1691 return (control); 1692 } 1693 1694 static int 1695 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc, 1696 struct mbuf **m, int offset, int chk_length, 1697 struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag, 1698 int *break_flag, int last_chunk, uint8_t chk_type) 1699 { 1700 struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */ 1701 uint32_t tsn, fsn, gap, mid; 1702 struct mbuf *dmbuf; 1703 int the_len; 1704 int need_reasm_check = 0; 1705 uint16_t sid; 1706 struct mbuf *op_err; 1707 char msg[SCTP_DIAG_INFO_LEN]; 1708 struct sctp_queued_to_read *control, *ncontrol; 1709 uint32_t ppid; 1710 uint8_t chk_flags; 1711 struct sctp_stream_reset_list *liste; 1712 int ordered; 1713 size_t clen; 1714 int created_control = 0; 1715 1716 if (chk_type == SCTP_IDATA) { 1717 struct sctp_idata_chunk *chunk, chunk_buf; 1718 1719 chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset, 1720 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf); 1721 chk_flags = chunk->ch.chunk_flags; 1722 clen = sizeof(struct sctp_idata_chunk); 1723 tsn = ntohl(chunk->dp.tsn); 1724 sid = ntohs(chunk->dp.sid); 1725 mid = ntohl(chunk->dp.mid); 1726 if (chk_flags & SCTP_DATA_FIRST_FRAG) { 1727 fsn = 0; 1728 ppid = chunk->dp.ppid_fsn.ppid; 1729 } else { 1730 fsn = ntohl(chunk->dp.ppid_fsn.fsn); 1731 ppid = 0xffffffff; /* Use as an invalid value. 
static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, int chk_length,
    struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
    int *break_flag, int last_chunk, uint8_t chk_type)
{
    struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */
    uint32_t tsn, fsn, gap, mid;
    struct mbuf *dmbuf;
    int the_len;
    int need_reasm_check = 0;
    uint16_t sid;
    struct mbuf *op_err;
    char msg[SCTP_DIAG_INFO_LEN];
    struct sctp_queued_to_read *control, *ncontrol;
    uint32_t ppid;
    uint8_t chk_flags;
    struct sctp_stream_reset_list *liste;
    int ordered;
    size_t clen;
    int created_control = 0;

    if (chk_type == SCTP_IDATA) {
        struct sctp_idata_chunk *chunk, chunk_buf;

        chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
            sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
        chk_flags = chunk->ch.chunk_flags;
        clen = sizeof(struct sctp_idata_chunk);
        tsn = ntohl(chunk->dp.tsn);
        sid = ntohs(chunk->dp.sid);
        mid = ntohl(chunk->dp.mid);
        if (chk_flags & SCTP_DATA_FIRST_FRAG) {
            fsn = 0;
            ppid = chunk->dp.ppid_fsn.ppid;
        } else {
            fsn = ntohl(chunk->dp.ppid_fsn.fsn);
            ppid = 0xffffffff; /* Use as an invalid value. */
        }
    } else {
        struct sctp_data_chunk *chunk, chunk_buf;

        chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
            sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
        chk_flags = chunk->ch.chunk_flags;
        clen = sizeof(struct sctp_data_chunk);
        tsn = ntohl(chunk->dp.tsn);
        sid = ntohs(chunk->dp.sid);
        mid = (uint32_t)(ntohs(chunk->dp.ssn));
        fsn = tsn;
        ppid = chunk->dp.ppid;
    }
    if ((size_t)chk_length == clen) {
        /*
         * Need to send an abort since we had an empty data chunk.
         */
        op_err = sctp_generate_no_user_data_cause(tsn);
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
        sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
        *abort_flag = 1;
        return (0);
    }
    if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
        asoc->send_sack = 1;
    }
    ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
        sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
    }
    if (stcb == NULL) {
        return (0);
    }
    SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
    if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
        /* It is a duplicate */
        SCTP_STAT_INCR(sctps_recvdupdata);
        if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
            /* Record a dup for the next outbound sack */
            asoc->dup_tsns[asoc->numduptsns] = tsn;
            asoc->numduptsns++;
        }
        asoc->send_sack = 1;
        return (0);
    }
    /* Calculate the number of TSN's between the base and this TSN */
    SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
    if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
        /* Can't hold the bit in the mapping at max array, toss it */
        return (0);
    }
    if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
        SCTP_TCB_LOCK_ASSERT(stcb);
        if (sctp_expand_mapping_array(asoc, gap)) {
            /* Can't expand, drop it */
            return (0);
        }
    }
    if (SCTP_TSN_GT(tsn, *high_tsn)) {
        *high_tsn = tsn;
    }
    /* See if we have received this one already */
    if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
        SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
        SCTP_STAT_INCR(sctps_recvdupdata);
        if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
            /* Record a dup for the next outbound sack */
            asoc->dup_tsns[asoc->numduptsns] = tsn;
            asoc->numduptsns++;
        }
        asoc->send_sack = 1;
        return (0);
    }
    /*
     * Check to see about the GONE flag, duplicates would cause a sack
     * to be sent up above
     */
    if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
        (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
        (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
        /*
         * wait a minute, this guy is gone, there is no longer a
         * receiver. Send peer an ABORT!
         */
        op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
        sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
        *abort_flag = 1;
        return (0);
    }
    /*
     * Now before going further we see if there is room. If NOT then we
     * MAY let one through only IF this TSN is the one we are waiting
     * for on a partial delivery API.
     */
*/
1829 if (sid >= asoc->streamincnt) {
1830 struct sctp_error_invalid_stream *cause;
1831
1832 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1833 0, M_NOWAIT, 1, MT_DATA);
1834 if (op_err != NULL) {
1835 /* add some space up front so prepend will work well */
1836 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1837 cause = mtod(op_err, struct sctp_error_invalid_stream *);
1838 /*
1839 * Error causes are just params and this one has
1840 * two back to back phdr, one with the error type
1841 * and size, the other with the stream id and a rsvd
1842 */
1843 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1844 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1845 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1846 cause->stream_id = htons(sid);
1847 cause->reserved = htons(0);
1848 sctp_queue_op_err(stcb, op_err);
1849 }
1850 SCTP_STAT_INCR(sctps_badsid);
1851 SCTP_TCB_LOCK_ASSERT(stcb);
1852 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1853 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1854 asoc->highest_tsn_inside_nr_map = tsn;
1855 }
1856 if (tsn == (asoc->cumulative_tsn + 1)) {
1857 /* Update cum-ack */
1858 asoc->cumulative_tsn = tsn;
1859 }
1860 return (0);
1861 }
1862 /*
1863 * If it's a fragmented message, let's see if we can find the control
1864 * on the reassembly queues.
1865 */
1866 if ((chk_type == SCTP_IDATA) &&
1867 ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1868 (fsn == 0)) {
1869 /*
1870 * The first *must* be fsn 0, and other (middle/end) pieces
1871 * can *not* be fsn 0. XXX: This can happen in case of a
1872 * wrap around. Ignore it for now.
1873 */
1874 snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
1875 mid, chk_flags);
1876 goto err_out;
1877 }
1878 control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1879 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1880 chk_flags, control);
1881 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1882 /* See if we can find the re-assembly entity */
1883 if (control != NULL) {
1884 /* We found something, does it belong? */
1885 if (ordered && (mid != control->mid)) {
1886 snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1887 err_out:
1888 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1889 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1890 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1891 *abort_flag = 1;
1892 return (0);
1893 }
1894 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1895 /*
1896 * We can't have a switched order with an
1897 * unordered chunk
1898 */
1899 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1900 tsn);
1901 goto err_out;
1902 }
1903 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1904 /*
1905 * We can't have a switched unordered with an
1906 * ordered chunk
1907 */
1908 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1909 tsn);
1910 goto err_out;
1911 }
1912 }
1913 } else {
1914 /*
1915 * It's a complete segment. Let's validate we don't have a
1916 * re-assembly going on with the same Stream/Seq (for
1917 * ordered) or in the same Stream for unordered.
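* With I-DATA, any existing reassembly entry for this MID is a
* duplicate. With old DATA, an unordered complete segment may simply
* be unrelated to an unordered reassembly in progress, so only the
* adjacent-TSN-with-no-end case below is treated as a violation.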
1918 */ 1919 if (control != NULL) { 1920 if (ordered || asoc->idata_supported) { 1921 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n", 1922 chk_flags, mid); 1923 snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid); 1924 goto err_out; 1925 } else { 1926 if ((tsn == control->fsn_included + 1) && 1927 (control->end_added == 0)) { 1928 snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included); 1929 goto err_out; 1930 } else { 1931 control = NULL; 1932 } 1933 } 1934 } 1935 } 1936 /* now do the tests */ 1937 if (((asoc->cnt_on_all_streams + 1938 asoc->cnt_on_reasm_queue + 1939 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) || 1940 (((int)asoc->my_rwnd) <= 0)) { 1941 /* 1942 * When we have NO room in the rwnd we check to make sure 1943 * the reader is doing its job... 1944 */ 1945 if (stcb->sctp_socket->so_rcv.sb_cc) { 1946 /* some to read, wake-up */ 1947 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1948 struct socket *so; 1949 1950 so = SCTP_INP_SO(stcb->sctp_ep); 1951 atomic_add_int(&stcb->asoc.refcnt, 1); 1952 SCTP_TCB_UNLOCK(stcb); 1953 SCTP_SOCKET_LOCK(so, 1); 1954 SCTP_TCB_LOCK(stcb); 1955 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1956 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 1957 /* assoc was freed while we were unlocked */ 1958 SCTP_SOCKET_UNLOCK(so, 1); 1959 return (0); 1960 } 1961 #endif 1962 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 1963 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1964 SCTP_SOCKET_UNLOCK(so, 1); 1965 #endif 1966 } 1967 /* now is it in the mapping array of what we have accepted? */ 1968 if (chk_type == SCTP_DATA) { 1969 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) && 1970 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1971 /* Nope not in the valid range dump it */ 1972 dump_packet: 1973 sctp_set_rwnd(stcb, asoc); 1974 if ((asoc->cnt_on_all_streams + 1975 asoc->cnt_on_reasm_queue + 1976 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) { 1977 SCTP_STAT_INCR(sctps_datadropchklmt); 1978 } else { 1979 SCTP_STAT_INCR(sctps_datadroprwnd); 1980 } 1981 *break_flag = 1; 1982 return (0); 1983 } 1984 } else { 1985 if (control == NULL) { 1986 goto dump_packet; 1987 } 1988 if (SCTP_TSN_GT(fsn, control->top_fsn)) { 1989 goto dump_packet; 1990 } 1991 } 1992 } 1993 #ifdef SCTP_ASOCLOG_OF_TSNS 1994 SCTP_TCB_LOCK_ASSERT(stcb); 1995 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) { 1996 asoc->tsn_in_at = 0; 1997 asoc->tsn_in_wrapped = 1; 1998 } 1999 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn; 2000 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid; 2001 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid; 2002 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length; 2003 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags; 2004 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb; 2005 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at; 2006 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1; 2007 asoc->tsn_in_at++; 2008 #endif 2009 /* 2010 * Before we continue lets validate that we are not being fooled by 2011 * an evil attacker. We can only have Nk chunks based on our TSN 2012 * spread allowed by the mapping array N * 8 bits, so there is no 2013 * way our stream sequence numbers could have wrapped. We of course 2014 * only validate the FIRST fragment so the bit must be set. 
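* E.g. if the mapping array is N bytes, at most N * 8 TSNs can sit
* between the base and any acceptable TSN, so a FIRST fragment whose
* MID is at or below last_mid_delivered cannot be legitimate new
* data; the peer is treated as evil/broken and the association is
* aborted.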
2015 */ 2016 if ((chk_flags & SCTP_DATA_FIRST_FRAG) && 2017 (TAILQ_EMPTY(&asoc->resetHead)) && 2018 (chk_flags & SCTP_DATA_UNORDERED) == 0 && 2019 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) { 2020 /* The incoming sseq is behind where we last delivered? */ 2021 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n", 2022 mid, asoc->strmin[sid].last_mid_delivered); 2023 2024 if (asoc->idata_supported) { 2025 snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x", 2026 asoc->strmin[sid].last_mid_delivered, 2027 tsn, 2028 sid, 2029 mid); 2030 } else { 2031 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", 2032 (uint16_t)asoc->strmin[sid].last_mid_delivered, 2033 tsn, 2034 sid, 2035 (uint16_t)mid); 2036 } 2037 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2038 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16; 2039 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 2040 *abort_flag = 1; 2041 return (0); 2042 } 2043 if (chk_type == SCTP_IDATA) { 2044 the_len = (chk_length - sizeof(struct sctp_idata_chunk)); 2045 } else { 2046 the_len = (chk_length - sizeof(struct sctp_data_chunk)); 2047 } 2048 if (last_chunk == 0) { 2049 if (chk_type == SCTP_IDATA) { 2050 dmbuf = SCTP_M_COPYM(*m, 2051 (offset + sizeof(struct sctp_idata_chunk)), 2052 the_len, M_NOWAIT); 2053 } else { 2054 dmbuf = SCTP_M_COPYM(*m, 2055 (offset + sizeof(struct sctp_data_chunk)), 2056 the_len, M_NOWAIT); 2057 } 2058 #ifdef SCTP_MBUF_LOGGING 2059 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 2060 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY); 2061 } 2062 #endif 2063 } else { 2064 /* We can steal the last chunk */ 2065 int l_len; 2066 2067 dmbuf = *m; 2068 /* lop off the top part */ 2069 if (chk_type == SCTP_IDATA) { 2070 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk))); 2071 } else { 2072 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk))); 2073 } 2074 if (SCTP_BUF_NEXT(dmbuf) == NULL) { 2075 l_len = SCTP_BUF_LEN(dmbuf); 2076 } else { 2077 /* 2078 * need to count up the size hopefully does not hit 2079 * this to often :-0 2080 */ 2081 struct mbuf *lat; 2082 2083 l_len = 0; 2084 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) { 2085 l_len += SCTP_BUF_LEN(lat); 2086 } 2087 } 2088 if (l_len > the_len) { 2089 /* Trim the end round bytes off too */ 2090 m_adj(dmbuf, -(l_len - the_len)); 2091 } 2092 } 2093 if (dmbuf == NULL) { 2094 SCTP_STAT_INCR(sctps_nomem); 2095 return (0); 2096 } 2097 /* 2098 * Now no matter what, we need a control, get one if we don't have 2099 * one (we may have gotten it above when we found the message was 2100 * fragmented 2101 */ 2102 if (control == NULL) { 2103 sctp_alloc_a_readq(stcb, control); 2104 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 2105 ppid, 2106 sid, 2107 chk_flags, 2108 NULL, fsn, mid); 2109 if (control == NULL) { 2110 SCTP_STAT_INCR(sctps_nomem); 2111 return (0); 2112 } 2113 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 2114 struct mbuf *mm; 2115 2116 control->data = dmbuf; 2117 for (mm = control->data; mm; mm = mm->m_next) { 2118 control->length += SCTP_BUF_LEN(mm); 2119 } 2120 control->tail_mbuf = NULL; 2121 control->end_added = 1; 2122 control->last_frag_seen = 1; 2123 control->first_frag_seen = 1; 2124 control->fsn_included = fsn; 2125 control->top_fsn = fsn; 2126 } 2127 created_control = 1; 2128 } 2129 SCTPDBG(SCTP_DEBUG_XXX, 
"chunk_flags: 0x%x ordered: %d MID: %u control: %p\n", 2130 chk_flags, ordered, mid, control); 2131 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG && 2132 TAILQ_EMPTY(&asoc->resetHead) && 2133 ((ordered == 0) || 2134 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) && 2135 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) { 2136 /* Candidate for express delivery */ 2137 /* 2138 * Its not fragmented, No PD-API is up, Nothing in the 2139 * delivery queue, Its un-ordered OR ordered and the next to 2140 * deliver AND nothing else is stuck on the stream queue, 2141 * And there is room for it in the socket buffer. Lets just 2142 * stuff it up the buffer.... 2143 */ 2144 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 2145 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 2146 asoc->highest_tsn_inside_nr_map = tsn; 2147 } 2148 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n", 2149 control, mid); 2150 2151 sctp_add_to_readq(stcb->sctp_ep, stcb, 2152 control, &stcb->sctp_socket->so_rcv, 2153 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 2154 2155 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) { 2156 /* for ordered, bump what we delivered */ 2157 asoc->strmin[sid].last_mid_delivered++; 2158 } 2159 SCTP_STAT_INCR(sctps_recvexpress); 2160 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 2161 sctp_log_strm_del_alt(stcb, tsn, mid, sid, 2162 SCTP_STR_LOG_FROM_EXPRS_DEL); 2163 } 2164 control = NULL; 2165 goto finish_express_del; 2166 } 2167 2168 /* Now will we need a chunk too? */ 2169 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 2170 sctp_alloc_a_chunk(stcb, chk); 2171 if (chk == NULL) { 2172 /* No memory so we drop the chunk */ 2173 SCTP_STAT_INCR(sctps_nomem); 2174 if (last_chunk == 0) { 2175 /* we copied it, free the copy */ 2176 sctp_m_freem(dmbuf); 2177 } 2178 return (0); 2179 } 2180 chk->rec.data.tsn = tsn; 2181 chk->no_fr_allowed = 0; 2182 chk->rec.data.fsn = fsn; 2183 chk->rec.data.mid = mid; 2184 chk->rec.data.sid = sid; 2185 chk->rec.data.ppid = ppid; 2186 chk->rec.data.context = stcb->asoc.context; 2187 chk->rec.data.doing_fast_retransmit = 0; 2188 chk->rec.data.rcv_flags = chk_flags; 2189 chk->asoc = asoc; 2190 chk->send_size = the_len; 2191 chk->whoTo = net; 2192 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n", 2193 chk, 2194 control, mid); 2195 atomic_add_int(&net->ref_count, 1); 2196 chk->data = dmbuf; 2197 } 2198 /* Set the appropriate TSN mark */ 2199 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { 2200 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 2201 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 2202 asoc->highest_tsn_inside_nr_map = tsn; 2203 } 2204 } else { 2205 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); 2206 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) { 2207 asoc->highest_tsn_inside_map = tsn; 2208 } 2209 } 2210 /* Now is it complete (i.e. not fragmented)? */ 2211 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 2212 /* 2213 * Special check for when streams are resetting. We could be 2214 * more smart about this and check the actual stream to see 2215 * if it is not being reset.. that way we would not create a 2216 * HOLB when amongst streams being reset and those not being 2217 * reset. 2218 * 2219 */ 2220 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2221 SCTP_TSN_GT(tsn, liste->tsn)) { 2222 /* 2223 * yep its past where we need to reset... go ahead 2224 * and queue it. 
2225 */ 2226 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) { 2227 /* first one on */ 2228 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2229 } else { 2230 struct sctp_queued_to_read *lcontrol, *nlcontrol; 2231 unsigned char inserted = 0; 2232 2233 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) { 2234 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) { 2235 2236 continue; 2237 } else { 2238 /* found it */ 2239 TAILQ_INSERT_BEFORE(lcontrol, control, next); 2240 inserted = 1; 2241 break; 2242 } 2243 } 2244 if (inserted == 0) { 2245 /* 2246 * must be put at end, use prevP 2247 * (all setup from loop) to setup 2248 * nextP. 2249 */ 2250 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2251 } 2252 } 2253 goto finish_express_del; 2254 } 2255 if (chk_flags & SCTP_DATA_UNORDERED) { 2256 /* queue directly into socket buffer */ 2257 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n", 2258 control, mid); 2259 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 2260 sctp_add_to_readq(stcb->sctp_ep, stcb, 2261 control, 2262 &stcb->sctp_socket->so_rcv, 1, 2263 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 2264 2265 } else { 2266 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control, 2267 mid); 2268 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2269 if (*abort_flag) { 2270 if (last_chunk) { 2271 *m = NULL; 2272 } 2273 return (0); 2274 } 2275 } 2276 goto finish_express_del; 2277 } 2278 /* If we reach here its a reassembly */ 2279 need_reasm_check = 1; 2280 SCTPDBG(SCTP_DEBUG_XXX, 2281 "Queue data to stream for reasm control: %p MID: %u\n", 2282 control, mid); 2283 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn); 2284 if (*abort_flag) { 2285 /* 2286 * the assoc is now gone and chk was put onto the reasm 2287 * queue, which has all been freed. 2288 */ 2289 if (last_chunk) { 2290 *m = NULL; 2291 } 2292 return (0); 2293 } 2294 finish_express_del: 2295 /* Here we tidy up things */ 2296 if (tsn == (asoc->cumulative_tsn + 1)) { 2297 /* Update cum-ack */ 2298 asoc->cumulative_tsn = tsn; 2299 } 2300 if (last_chunk) { 2301 *m = NULL; 2302 } 2303 if (ordered) { 2304 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); 2305 } else { 2306 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); 2307 } 2308 SCTP_STAT_INCR(sctps_recvdata); 2309 /* Set it present please */ 2310 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 2311 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN); 2312 } 2313 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2314 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2315 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); 2316 } 2317 if (need_reasm_check) { 2318 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD); 2319 need_reasm_check = 0; 2320 } 2321 /* check the special flag for stream resets */ 2322 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2323 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) { 2324 /* 2325 * we have finished working through the backlogged TSN's now 2326 * time to reset streams. 1: call reset function. 2: free 2327 * pending_reply space 3: distribute any chunks in 2328 * pending_reply_queue. 
2329 */ 2330 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams); 2331 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); 2332 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED); 2333 SCTP_FREE(liste, SCTP_M_STRESET); 2334 /* sa_ignore FREED_MEMORY */ 2335 liste = TAILQ_FIRST(&asoc->resetHead); 2336 if (TAILQ_EMPTY(&asoc->resetHead)) { 2337 /* All can be removed */ 2338 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) { 2339 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next); 2340 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2341 if (*abort_flag) { 2342 return (0); 2343 } 2344 if (need_reasm_check) { 2345 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD); 2346 need_reasm_check = 0; 2347 } 2348 } 2349 } else { 2350 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) { 2351 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) { 2352 break; 2353 } 2354 /* 2355 * if control->sinfo_tsn is <= liste->tsn we 2356 * can process it which is the NOT of 2357 * control->sinfo_tsn > liste->tsn 2358 */ 2359 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next); 2360 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2361 if (*abort_flag) { 2362 return (0); 2363 } 2364 if (need_reasm_check) { 2365 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD); 2366 need_reasm_check = 0; 2367 } 2368 } 2369 } 2370 } 2371 return (1); 2372 } 2373 2374 static const int8_t sctp_map_lookup_tab[256] = { 2375 0, 1, 0, 2, 0, 1, 0, 3, 2376 0, 1, 0, 2, 0, 1, 0, 4, 2377 0, 1, 0, 2, 0, 1, 0, 3, 2378 0, 1, 0, 2, 0, 1, 0, 5, 2379 0, 1, 0, 2, 0, 1, 0, 3, 2380 0, 1, 0, 2, 0, 1, 0, 4, 2381 0, 1, 0, 2, 0, 1, 0, 3, 2382 0, 1, 0, 2, 0, 1, 0, 6, 2383 0, 1, 0, 2, 0, 1, 0, 3, 2384 0, 1, 0, 2, 0, 1, 0, 4, 2385 0, 1, 0, 2, 0, 1, 0, 3, 2386 0, 1, 0, 2, 0, 1, 0, 5, 2387 0, 1, 0, 2, 0, 1, 0, 3, 2388 0, 1, 0, 2, 0, 1, 0, 4, 2389 0, 1, 0, 2, 0, 1, 0, 3, 2390 0, 1, 0, 2, 0, 1, 0, 7, 2391 0, 1, 0, 2, 0, 1, 0, 3, 2392 0, 1, 0, 2, 0, 1, 0, 4, 2393 0, 1, 0, 2, 0, 1, 0, 3, 2394 0, 1, 0, 2, 0, 1, 0, 5, 2395 0, 1, 0, 2, 0, 1, 0, 3, 2396 0, 1, 0, 2, 0, 1, 0, 4, 2397 0, 1, 0, 2, 0, 1, 0, 3, 2398 0, 1, 0, 2, 0, 1, 0, 6, 2399 0, 1, 0, 2, 0, 1, 0, 3, 2400 0, 1, 0, 2, 0, 1, 0, 4, 2401 0, 1, 0, 2, 0, 1, 0, 3, 2402 0, 1, 0, 2, 0, 1, 0, 5, 2403 0, 1, 0, 2, 0, 1, 0, 3, 2404 0, 1, 0, 2, 0, 1, 0, 4, 2405 0, 1, 0, 2, 0, 1, 0, 3, 2406 0, 1, 0, 2, 0, 1, 0, 8 2407 }; 2408 2409 2410 void 2411 sctp_slide_mapping_arrays(struct sctp_tcb *stcb) 2412 { 2413 /* 2414 * Now we also need to check the mapping array in a couple of ways. 2415 * 1) Did we move the cum-ack point? 2416 * 2417 * When you first glance at this you might think that all entries 2418 * that make up the position of the cum-ack would be in the 2419 * nr-mapping array only.. i.e. things up to the cum-ack are always 2420 * deliverable. Thats true with one exception, when its a fragmented 2421 * message we may not deliver the data until some threshold (or all 2422 * of it) is in place. So we must OR the nr_mapping_array and 2423 * mapping_array to get a true picture of the cum-ack. 
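* Schematically: if nr_mapping_array[0] is 0x2f and mapping_array[0]
* is 0x10, the OR below is 0x3f and sctp_map_lookup_tab[0x3f] is 6,
* i.e. six TSNs starting at the base are in sequence. A byte of 0xff
* means all eight TSNs are present and the scan moves on.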
2424 */ 2425 struct sctp_association *asoc; 2426 int at; 2427 uint8_t val; 2428 int slide_from, slide_end, lgap, distance; 2429 uint32_t old_cumack, old_base, old_highest, highest_tsn; 2430 2431 asoc = &stcb->asoc; 2432 2433 old_cumack = asoc->cumulative_tsn; 2434 old_base = asoc->mapping_array_base_tsn; 2435 old_highest = asoc->highest_tsn_inside_map; 2436 /* 2437 * We could probably improve this a small bit by calculating the 2438 * offset of the current cum-ack as the starting point. 2439 */ 2440 at = 0; 2441 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) { 2442 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from]; 2443 if (val == 0xff) { 2444 at += 8; 2445 } else { 2446 /* there is a 0 bit */ 2447 at += sctp_map_lookup_tab[val]; 2448 break; 2449 } 2450 } 2451 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1); 2452 2453 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) && 2454 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) { 2455 #ifdef INVARIANTS 2456 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map", 2457 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2458 #else 2459 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n", 2460 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2461 sctp_print_mapping_array(asoc); 2462 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2463 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 2464 } 2465 asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2466 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn; 2467 #endif 2468 } 2469 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2470 highest_tsn = asoc->highest_tsn_inside_nr_map; 2471 } else { 2472 highest_tsn = asoc->highest_tsn_inside_map; 2473 } 2474 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) { 2475 /* The complete array was completed by a single FR */ 2476 /* highest becomes the cum-ack */ 2477 int clr; 2478 #ifdef INVARIANTS 2479 unsigned int i; 2480 #endif 2481 2482 /* clear the array */ 2483 clr = ((at + 7) >> 3); 2484 if (clr > asoc->mapping_array_size) { 2485 clr = asoc->mapping_array_size; 2486 } 2487 memset(asoc->mapping_array, 0, clr); 2488 memset(asoc->nr_mapping_array, 0, clr); 2489 #ifdef INVARIANTS 2490 for (i = 0; i < asoc->mapping_array_size; i++) { 2491 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) { 2492 SCTP_PRINTF("Error Mapping array's not clean at clear\n"); 2493 sctp_print_mapping_array(asoc); 2494 } 2495 } 2496 #endif 2497 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; 2498 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2499 } else if (at >= 8) { 2500 /* we can slide the mapping array down */ 2501 /* slide_from holds where we hit the first NON 0xff byte */ 2502 2503 /* 2504 * now calculate the ceiling of the move using our highest 2505 * TSN value 2506 */ 2507 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn); 2508 slide_end = (lgap >> 3); 2509 if (slide_end < slide_from) { 2510 sctp_print_mapping_array(asoc); 2511 #ifdef INVARIANTS 2512 panic("impossible slide"); 2513 #else 2514 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? 
at: %d\n",
2515 lgap, slide_end, slide_from, at);
2516 return;
2517 #endif
2518 }
2519 if (slide_end > asoc->mapping_array_size) {
2520 #ifdef INVARIANTS
2521 panic("would overrun buffer");
2522 #else
2523 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2524 asoc->mapping_array_size, slide_end);
2525 slide_end = asoc->mapping_array_size;
2526 #endif
2527 }
2528 distance = (slide_end - slide_from) + 1;
2529 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2530 sctp_log_map(old_base, old_cumack, old_highest,
2531 SCTP_MAP_PREPARE_SLIDE);
2532 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2533 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2534 }
2535 if (distance + slide_from > asoc->mapping_array_size ||
2536 distance < 0) {
2537 /*
2538 * Here we do NOT slide forward the array so that
2539 * hopefully when more data comes in to fill it up
2540 * we will be able to slide it forward. Really I
2541 * don't think this should happen :-0
2542 */
2543
2544 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2545 sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2546 (uint32_t)asoc->mapping_array_size,
2547 SCTP_MAP_SLIDE_NONE);
2548 }
2549 } else {
2550 int ii;
2551
2552 for (ii = 0; ii < distance; ii++) {
2553 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2554 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2555
2556 }
2557 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2558 asoc->mapping_array[ii] = 0;
2559 asoc->nr_mapping_array[ii] = 0;
2560 }
2561 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2562 asoc->highest_tsn_inside_map += (slide_from << 3);
2563 }
2564 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2565 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2566 }
2567 asoc->mapping_array_base_tsn += (slide_from << 3);
2568 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2569 sctp_log_map(asoc->mapping_array_base_tsn,
2570 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2571 SCTP_MAP_SLIDE_RESULT);
2572 }
2573 }
2574 }
2575 }
2576
2577 void
2578 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2579 {
2580 struct sctp_association *asoc;
2581 uint32_t highest_tsn;
2582 int is_a_gap;
2583
2584 sctp_slide_mapping_arrays(stcb);
2585 asoc = &stcb->asoc;
2586 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2587 highest_tsn = asoc->highest_tsn_inside_nr_map;
2588 } else {
2589 highest_tsn = asoc->highest_tsn_inside_map;
2590 }
2591 /* Is there a gap now? */
2592 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2593
2594 /*
2595 * Now we need to see if we need to queue a sack or just start the
2596 * timer (if allowed).
2597 */
2598 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2599 /*
2600 * Ok, special case: in the SHUTDOWN-SENT state we make
2601 * sure the SACK timer is off and instead send a SHUTDOWN
2602 * and a SACK.
2603 */
2604 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2605 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2606 stcb->sctp_ep, stcb, NULL,
2607 SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2608 }
2609 sctp_send_shutdown(stcb,
2610 ((stcb->asoc.alternate) ?
stcb->asoc.alternate : stcb->asoc.primary_destination));
2611 if (is_a_gap) {
2612 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2613 }
2614 } else {
2615 /*
2616 * CMT DAC algorithm: increase number of packets received
2617 * since last ack
2618 */
2619 stcb->asoc.cmt_dac_pkts_rcvd++;
2620
2621 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2622 * SACK */
2623 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2624 * longer is one */
2625 (stcb->asoc.numduptsns) || /* we have dup's */
2626 (is_a_gap) || /* is still a gap */
2627 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2628 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
2629 ) {
2630
2631 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2632 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2633 (stcb->asoc.send_sack == 0) &&
2634 (stcb->asoc.numduptsns == 0) &&
2635 (stcb->asoc.delayed_ack) &&
2636 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2637
2638 /*
2639 * CMT DAC algorithm: With CMT, delay acks
2640 * even in the face of reordering.
2641 * Therefore, acks that do not have to be
2642 * sent because of the above reasons will
2643 * be delayed. That is, acks that would
2644 * have been sent due to gap reports will
2645 * be delayed with DAC. Start
2646 * the delayed ack timer.
2647 */
2648
2649 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2650 stcb->sctp_ep, stcb, NULL);
2651 } else {
2652 /*
2653 * Ok, we must build a SACK since the timer
2654 * is pending, we got our first packet, OR
2655 * there are gaps or duplicates.
2656 */
2657 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2658 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2659 }
2660 } else {
2661 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2662 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2663 stcb->sctp_ep, stcb, NULL);
2664 }
2665 }
2666 }
2667 }
2668
2669 int
2670 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2671 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2672 struct sctp_nets *net, uint32_t *high_tsn)
2673 {
2674 struct sctp_chunkhdr *ch, chunk_buf;
2675 struct sctp_association *asoc;
2676 int num_chunks = 0; /* number of control chunks processed */
2677 int stop_proc = 0;
2678 int break_flag, last_chunk;
2679 int abort_flag = 0, was_a_gap;
2680 struct mbuf *m;
2681 uint32_t highest_tsn;
2682 uint16_t chk_length;
2683
2684 /* set the rwnd */
2685 sctp_set_rwnd(stcb, &stcb->asoc);
2686
2687 m = *mm;
2688 SCTP_TCB_LOCK_ASSERT(stcb);
2689 asoc = &stcb->asoc;
2690 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2691 highest_tsn = asoc->highest_tsn_inside_nr_map;
2692 } else {
2693 highest_tsn = asoc->highest_tsn_inside_map;
2694 }
2695 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2696 /*
2697 * setup where we got the last DATA packet from for any SACK that
2698 * may need to go out. Don't bump the net. This is done ONLY when a
2699 * chunk is assigned.
2700 */
2701 asoc->last_data_chunk_from = net;
2702
2703 /*-
2704 * Now before we proceed we must figure out if this is a wasted
2705 * cluster... i.e. it is a small packet sent in and yet the driver
2706 * underneath allocated a full cluster for it. If so we must copy it
2707 * to a smaller mbuf and free up the cluster mbuf. This will help
2708 * with cluster starvation. Note for __Panda__ we don't do this
2709 * since it has clusters all the way down to 64 bytes.
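* Roughly: a small DATA packet sitting in a 2k cluster pins the
* whole cluster while it waits in the receive queue; copying it
* into a plain MLEN-sized mbuf returns the cluster to the pool.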
*/
2711 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2712 /* we only handle mbufs that are singletons.. not chains */
2713 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2714 if (m) {
2715 /* ok, let's see if we can copy the data up */
2716 caddr_t *from, *to;
2717
2718 /* get the pointers and copy */
2719 to = mtod(m, caddr_t *);
2720 from = mtod((*mm), caddr_t *);
2721 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2722 /* copy the length and free up the old */
2723 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2724 sctp_m_freem(*mm);
2725 /* success, back copy */
2726 *mm = m;
2727 } else {
2728 /* We are in trouble in the mbuf world .. yikes */
2729 m = *mm;
2730 }
2731 }
2732 /* get pointer to the first chunk header */
2733 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2734 sizeof(struct sctp_chunkhdr),
2735 (uint8_t *)&chunk_buf);
2736 if (ch == NULL) {
2737 return (1);
2738 }
2739 /*
2740 * process all DATA chunks...
2741 */
2742 *high_tsn = asoc->cumulative_tsn;
2743 break_flag = 0;
2744 asoc->data_pkts_seen++;
2745 while (stop_proc == 0) {
2746 /* validate chunk length */
2747 chk_length = ntohs(ch->chunk_length);
2748 if (length - *offset < chk_length) {
2749 /* all done, mutilated chunk */
2750 stop_proc = 1;
2751 continue;
2752 }
2753 if ((asoc->idata_supported == 1) &&
2754 (ch->chunk_type == SCTP_DATA)) {
2755 struct mbuf *op_err;
2756 char msg[SCTP_DIAG_INFO_LEN];
2757
2758 snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2759 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2760 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2761 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2762 return (2);
2763 }
2764 if ((asoc->idata_supported == 0) &&
2765 (ch->chunk_type == SCTP_IDATA)) {
2766 struct mbuf *op_err;
2767 char msg[SCTP_DIAG_INFO_LEN];
2768
2769 snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2770 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2771 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2772 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2773 return (2);
2774 }
2775 if ((ch->chunk_type == SCTP_DATA) ||
2776 (ch->chunk_type == SCTP_IDATA)) {
2777 uint16_t clen;
2778
2779 if (ch->chunk_type == SCTP_DATA) {
2780 clen = sizeof(struct sctp_data_chunk);
2781 } else {
2782 clen = sizeof(struct sctp_idata_chunk);
2783 }
2784 if (chk_length < clen) {
2785 /*
2786 * Need to send an abort since we had an
2787 * invalid data chunk.
2788 */
2789 struct mbuf *op_err;
2790 char msg[SCTP_DIAG_INFO_LEN];
2791
2792 snprintf(msg, sizeof(msg), "%s chunk of length %u",
2793 ch->chunk_type == SCTP_DATA ?
"DATA" : "I-DATA", 2794 chk_length); 2795 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2796 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20; 2797 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2798 return (2); 2799 } 2800 #ifdef SCTP_AUDITING_ENABLED 2801 sctp_audit_log(0xB1, 0); 2802 #endif 2803 if (SCTP_SIZE32(chk_length) == (length - *offset)) { 2804 last_chunk = 1; 2805 } else { 2806 last_chunk = 0; 2807 } 2808 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, 2809 chk_length, net, high_tsn, &abort_flag, &break_flag, 2810 last_chunk, ch->chunk_type)) { 2811 num_chunks++; 2812 } 2813 if (abort_flag) 2814 return (2); 2815 2816 if (break_flag) { 2817 /* 2818 * Set because of out of rwnd space and no 2819 * drop rep space left. 2820 */ 2821 stop_proc = 1; 2822 continue; 2823 } 2824 } else { 2825 /* not a data chunk in the data region */ 2826 switch (ch->chunk_type) { 2827 case SCTP_INITIATION: 2828 case SCTP_INITIATION_ACK: 2829 case SCTP_SELECTIVE_ACK: 2830 case SCTP_NR_SELECTIVE_ACK: 2831 case SCTP_HEARTBEAT_REQUEST: 2832 case SCTP_HEARTBEAT_ACK: 2833 case SCTP_ABORT_ASSOCIATION: 2834 case SCTP_SHUTDOWN: 2835 case SCTP_SHUTDOWN_ACK: 2836 case SCTP_OPERATION_ERROR: 2837 case SCTP_COOKIE_ECHO: 2838 case SCTP_COOKIE_ACK: 2839 case SCTP_ECN_ECHO: 2840 case SCTP_ECN_CWR: 2841 case SCTP_SHUTDOWN_COMPLETE: 2842 case SCTP_AUTHENTICATION: 2843 case SCTP_ASCONF_ACK: 2844 case SCTP_PACKET_DROPPED: 2845 case SCTP_STREAM_RESET: 2846 case SCTP_FORWARD_CUM_TSN: 2847 case SCTP_ASCONF: 2848 { 2849 /* 2850 * Now, what do we do with KNOWN 2851 * chunks that are NOT in the right 2852 * place? 2853 * 2854 * For now, I do nothing but ignore 2855 * them. We may later want to add 2856 * sysctl stuff to switch out and do 2857 * either an ABORT() or possibly 2858 * process them. 2859 */ 2860 struct mbuf *op_err; 2861 char msg[SCTP_DIAG_INFO_LEN]; 2862 2863 snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x", 2864 ch->chunk_type); 2865 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2866 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2867 return (2); 2868 } 2869 default: 2870 /* 2871 * Unknown chunk type: use bit rules after 2872 * checking length 2873 */ 2874 if (chk_length < sizeof(struct sctp_chunkhdr)) { 2875 /* 2876 * Need to send an abort since we 2877 * had a invalid chunk. 
2878 */ 2879 struct mbuf *op_err; 2880 char msg[SCTP_DIAG_INFO_LEN]; 2881 2882 snprintf(msg, sizeof(msg), "Chunk of length %u", 2883 chk_length); 2884 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2885 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20; 2886 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2887 return (2); 2888 } 2889 if (ch->chunk_type & 0x40) { 2890 /* Add a error report to the queue */ 2891 struct mbuf *op_err; 2892 struct sctp_gen_error_cause *cause; 2893 2894 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause), 2895 0, M_NOWAIT, 1, MT_DATA); 2896 if (op_err != NULL) { 2897 cause = mtod(op_err, struct sctp_gen_error_cause *); 2898 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK); 2899 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause))); 2900 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause); 2901 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT); 2902 if (SCTP_BUF_NEXT(op_err) != NULL) { 2903 sctp_queue_op_err(stcb, op_err); 2904 } else { 2905 sctp_m_freem(op_err); 2906 } 2907 } 2908 } 2909 if ((ch->chunk_type & 0x80) == 0) { 2910 /* discard the rest of this packet */ 2911 stop_proc = 1; 2912 } /* else skip this bad chunk and 2913 * continue... */ 2914 break; 2915 } /* switch of chunk type */ 2916 } 2917 *offset += SCTP_SIZE32(chk_length); 2918 if ((*offset >= length) || stop_proc) { 2919 /* no more data left in the mbuf chain */ 2920 stop_proc = 1; 2921 continue; 2922 } 2923 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 2924 sizeof(struct sctp_chunkhdr), 2925 (uint8_t *)&chunk_buf); 2926 if (ch == NULL) { 2927 *offset = length; 2928 stop_proc = 1; 2929 continue; 2930 } 2931 } 2932 if (break_flag) { 2933 /* 2934 * we need to report rwnd overrun drops. 2935 */ 2936 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0); 2937 } 2938 if (num_chunks) { 2939 /* 2940 * Did we get data, if so update the time for auto-close and 2941 * give peer credit for being alive. 2942 */ 2943 SCTP_STAT_INCR(sctps_recvpktwithdata); 2944 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 2945 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 2946 stcb->asoc.overall_error_count, 2947 0, 2948 SCTP_FROM_SCTP_INDATA, 2949 __LINE__); 2950 } 2951 stcb->asoc.overall_error_count = 0; 2952 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd); 2953 } 2954 /* now service all of the reassm queue if needed */ 2955 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) { 2956 /* Assure that we ack right away */ 2957 stcb->asoc.send_sack = 1; 2958 } 2959 /* Start a sack timer or QUEUE a SACK for sending */ 2960 sctp_sack_check(stcb, was_a_gap); 2961 return (0); 2962 } 2963 2964 static int 2965 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn, 2966 uint16_t frag_strt, uint16_t frag_end, int nr_sacking, 2967 int *num_frs, 2968 uint32_t *biggest_newly_acked_tsn, 2969 uint32_t *this_sack_lowest_newack, 2970 int *rto_ok) 2971 { 2972 struct sctp_tmit_chunk *tp1; 2973 unsigned int theTSN; 2974 int j, wake_him = 0, circled = 0; 2975 2976 /* Recover the tp1 we last saw */ 2977 tp1 = *p_tp1; 2978 if (tp1 == NULL) { 2979 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 2980 } 2981 for (j = frag_strt; j <= frag_end; j++) { 2982 theTSN = j + last_tsn; 2983 while (tp1) { 2984 if (tp1->rec.data.doing_fast_retransmit) 2985 (*num_frs) += 1; 2986 2987 /*- 2988 * CMT: CUCv2 algorithm. 
For each TSN being 2989 * processed from the sent queue, track the 2990 * next expected pseudo-cumack, or 2991 * rtx_pseudo_cumack, if required. Separate 2992 * cumack trackers for first transmissions, 2993 * and retransmissions. 2994 */ 2995 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 2996 (tp1->whoTo->find_pseudo_cumack == 1) && 2997 (tp1->snd_count == 1)) { 2998 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn; 2999 tp1->whoTo->find_pseudo_cumack = 0; 3000 } 3001 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 3002 (tp1->whoTo->find_rtx_pseudo_cumack == 1) && 3003 (tp1->snd_count > 1)) { 3004 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn; 3005 tp1->whoTo->find_rtx_pseudo_cumack = 0; 3006 } 3007 if (tp1->rec.data.tsn == theTSN) { 3008 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 3009 /*- 3010 * must be held until 3011 * cum-ack passes 3012 */ 3013 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3014 /*- 3015 * If it is less than RESEND, it is 3016 * now no-longer in flight. 3017 * Higher values may already be set 3018 * via previous Gap Ack Blocks... 3019 * i.e. ACKED or RESEND. 3020 */ 3021 if (SCTP_TSN_GT(tp1->rec.data.tsn, 3022 *biggest_newly_acked_tsn)) { 3023 *biggest_newly_acked_tsn = tp1->rec.data.tsn; 3024 } 3025 /*- 3026 * CMT: SFR algo (and HTNA) - set 3027 * saw_newack to 1 for dest being 3028 * newly acked. update 3029 * this_sack_highest_newack if 3030 * appropriate. 3031 */ 3032 if (tp1->rec.data.chunk_was_revoked == 0) 3033 tp1->whoTo->saw_newack = 1; 3034 3035 if (SCTP_TSN_GT(tp1->rec.data.tsn, 3036 tp1->whoTo->this_sack_highest_newack)) { 3037 tp1->whoTo->this_sack_highest_newack = 3038 tp1->rec.data.tsn; 3039 } 3040 /*- 3041 * CMT DAC algo: also update 3042 * this_sack_lowest_newack 3043 */ 3044 if (*this_sack_lowest_newack == 0) { 3045 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3046 sctp_log_sack(*this_sack_lowest_newack, 3047 last_tsn, 3048 tp1->rec.data.tsn, 3049 0, 3050 0, 3051 SCTP_LOG_TSN_ACKED); 3052 } 3053 *this_sack_lowest_newack = tp1->rec.data.tsn; 3054 } 3055 /*- 3056 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp 3057 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set 3058 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be 3059 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack. 3060 * Separate pseudo_cumack trackers for first transmissions and 3061 * retransmissions. 
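* E.g. if TSNs 10 and 12 were first transmissions to this dest and
* the pseudo-cumack is 10, a gap ack covering 10 sets
* new_pseudo_cumack and restarts the search, which then settles on
* 12.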
3062 */ 3063 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) { 3064 if (tp1->rec.data.chunk_was_revoked == 0) { 3065 tp1->whoTo->new_pseudo_cumack = 1; 3066 } 3067 tp1->whoTo->find_pseudo_cumack = 1; 3068 } 3069 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 3070 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); 3071 } 3072 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) { 3073 if (tp1->rec.data.chunk_was_revoked == 0) { 3074 tp1->whoTo->new_pseudo_cumack = 1; 3075 } 3076 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3077 } 3078 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3079 sctp_log_sack(*biggest_newly_acked_tsn, 3080 last_tsn, 3081 tp1->rec.data.tsn, 3082 frag_strt, 3083 frag_end, 3084 SCTP_LOG_TSN_ACKED); 3085 } 3086 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3087 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP, 3088 tp1->whoTo->flight_size, 3089 tp1->book_size, 3090 (uint32_t)(uintptr_t)tp1->whoTo, 3091 tp1->rec.data.tsn); 3092 } 3093 sctp_flight_size_decrease(tp1); 3094 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3095 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3096 tp1); 3097 } 3098 sctp_total_flight_decrease(stcb, tp1); 3099 3100 tp1->whoTo->net_ack += tp1->send_size; 3101 if (tp1->snd_count < 2) { 3102 /*- 3103 * True non-retransmitted chunk 3104 */ 3105 tp1->whoTo->net_ack2 += tp1->send_size; 3106 3107 /*- 3108 * update RTO too ? 3109 */ 3110 if (tp1->do_rtt) { 3111 if (*rto_ok) { 3112 tp1->whoTo->RTO = 3113 sctp_calculate_rto(stcb, 3114 &stcb->asoc, 3115 tp1->whoTo, 3116 &tp1->sent_rcv_time, 3117 SCTP_RTT_FROM_DATA); 3118 *rto_ok = 0; 3119 } 3120 if (tp1->whoTo->rto_needed == 0) { 3121 tp1->whoTo->rto_needed = 1; 3122 } 3123 tp1->do_rtt = 0; 3124 } 3125 } 3126 3127 } 3128 if (tp1->sent <= SCTP_DATAGRAM_RESEND) { 3129 if (SCTP_TSN_GT(tp1->rec.data.tsn, 3130 stcb->asoc.this_sack_highest_gap)) { 3131 stcb->asoc.this_sack_highest_gap = 3132 tp1->rec.data.tsn; 3133 } 3134 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3135 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt); 3136 #ifdef SCTP_AUDITING_ENABLED 3137 sctp_audit_log(0xB2, 3138 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff)); 3139 #endif 3140 } 3141 } 3142 /*- 3143 * All chunks NOT UNSENT fall through here and are marked 3144 * (leave PR-SCTP ones that are to skip alone though) 3145 */ 3146 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) && 3147 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { 3148 tp1->sent = SCTP_DATAGRAM_MARKED; 3149 } 3150 if (tp1->rec.data.chunk_was_revoked) { 3151 /* deflate the cwnd */ 3152 tp1->whoTo->cwnd -= tp1->book_size; 3153 tp1->rec.data.chunk_was_revoked = 0; 3154 } 3155 /* NR Sack code here */ 3156 if (nr_sacking && 3157 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { 3158 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) { 3159 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--; 3160 #ifdef INVARIANTS 3161 } else { 3162 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); 3163 #endif 3164 } 3165 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) && 3166 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && 3167 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) { 3168 stcb->asoc.trigger_reset = 1; 3169 } 3170 tp1->sent = SCTP_DATAGRAM_NR_ACKED; 3171 if (tp1->data) { 3172 /* 3173 * sa_ignore 3174 * NO_NULL_CHK 3175 */ 3176 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 3177 
sctp_m_freem(tp1->data); 3178 tp1->data = NULL; 3179 } 3180 wake_him++; 3181 } 3182 } 3183 break; 3184 } /* if (tp1->tsn == theTSN) */ 3185 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) { 3186 break; 3187 } 3188 tp1 = TAILQ_NEXT(tp1, sctp_next); 3189 if ((tp1 == NULL) && (circled == 0)) { 3190 circled++; 3191 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 3192 } 3193 } /* end while (tp1) */ 3194 if (tp1 == NULL) { 3195 circled = 0; 3196 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 3197 } 3198 /* In case the fragments were not in order we must reset */ 3199 } /* end for (j = fragStart */ 3200 *p_tp1 = tp1; 3201 return (wake_him); /* Return value only used for nr-sack */ 3202 } 3203 3204 3205 static int 3206 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc, 3207 uint32_t last_tsn, uint32_t *biggest_tsn_acked, 3208 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack, 3209 int num_seg, int num_nr_seg, int *rto_ok) 3210 { 3211 struct sctp_gap_ack_block *frag, block; 3212 struct sctp_tmit_chunk *tp1; 3213 int i; 3214 int num_frs = 0; 3215 int chunk_freed; 3216 int non_revocable; 3217 uint16_t frag_strt, frag_end, prev_frag_end; 3218 3219 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3220 prev_frag_end = 0; 3221 chunk_freed = 0; 3222 3223 for (i = 0; i < (num_seg + num_nr_seg); i++) { 3224 if (i == num_seg) { 3225 prev_frag_end = 0; 3226 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3227 } 3228 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset, 3229 sizeof(struct sctp_gap_ack_block), (uint8_t *)&block); 3230 *offset += sizeof(block); 3231 if (frag == NULL) { 3232 return (chunk_freed); 3233 } 3234 frag_strt = ntohs(frag->start); 3235 frag_end = ntohs(frag->end); 3236 3237 if (frag_strt > frag_end) { 3238 /* This gap report is malformed, skip it. */ 3239 continue; 3240 } 3241 if (frag_strt <= prev_frag_end) { 3242 /* This gap report is not in order, so restart. */ 3243 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3244 } 3245 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) { 3246 *biggest_tsn_acked = last_tsn + frag_end; 3247 } 3248 if (i < num_seg) { 3249 non_revocable = 0; 3250 } else { 3251 non_revocable = 1; 3252 } 3253 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end, 3254 non_revocable, &num_frs, biggest_newly_acked_tsn, 3255 this_sack_lowest_newack, rto_ok)) { 3256 chunk_freed = 1; 3257 } 3258 prev_frag_end = frag_end; 3259 } 3260 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3261 if (num_frs) 3262 sctp_log_fr(*biggest_tsn_acked, 3263 *biggest_newly_acked_tsn, 3264 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS); 3265 } 3266 return (chunk_freed); 3267 } 3268 3269 static void 3270 sctp_check_for_revoked(struct sctp_tcb *stcb, 3271 struct sctp_association *asoc, uint32_t cumack, 3272 uint32_t biggest_tsn_acked) 3273 { 3274 struct sctp_tmit_chunk *tp1; 3275 3276 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3277 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) { 3278 /* 3279 * ok this guy is either ACK or MARKED. If it is 3280 * ACKED it has been previously acked but not this 3281 * time i.e. revoked. If it is MARKED it was ACK'ed 3282 * again. 3283 */ 3284 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) { 3285 break; 3286 } 3287 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 3288 /* it has been revoked */ 3289 tp1->sent = SCTP_DATAGRAM_SENT; 3290 tp1->rec.data.chunk_was_revoked = 1; 3291 /* 3292 * We must add this stuff back in to assure 3293 * timers and such get started. 
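* The cwnd is inflated by book_size at the same time so the
* artificial flight_size increase does not choke the sender; it is
* deflated again when the revoked chunk is re-acked.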
3294 */ 3295 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3296 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 3297 tp1->whoTo->flight_size, 3298 tp1->book_size, 3299 (uint32_t)(uintptr_t)tp1->whoTo, 3300 tp1->rec.data.tsn); 3301 } 3302 sctp_flight_size_increase(tp1); 3303 sctp_total_flight_increase(stcb, tp1); 3304 /* 3305 * We inflate the cwnd to compensate for our 3306 * artificial inflation of the flight_size. 3307 */ 3308 tp1->whoTo->cwnd += tp1->book_size; 3309 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3310 sctp_log_sack(asoc->last_acked_seq, 3311 cumack, 3312 tp1->rec.data.tsn, 3313 0, 3314 0, 3315 SCTP_LOG_TSN_REVOKED); 3316 } 3317 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) { 3318 /* it has been re-acked in this SACK */ 3319 tp1->sent = SCTP_DATAGRAM_ACKED; 3320 } 3321 } 3322 if (tp1->sent == SCTP_DATAGRAM_UNSENT) 3323 break; 3324 } 3325 } 3326 3327 3328 static void 3329 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, 3330 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved) 3331 { 3332 struct sctp_tmit_chunk *tp1; 3333 int strike_flag = 0; 3334 struct timeval now; 3335 int tot_retrans = 0; 3336 uint32_t sending_seq; 3337 struct sctp_nets *net; 3338 int num_dests_sacked = 0; 3339 3340 /* 3341 * select the sending_seq, this is either the next thing ready to be 3342 * sent but not transmitted, OR, the next seq we assign. 3343 */ 3344 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 3345 if (tp1 == NULL) { 3346 sending_seq = asoc->sending_seq; 3347 } else { 3348 sending_seq = tp1->rec.data.tsn; 3349 } 3350 3351 /* CMT DAC algo: finding out if SACK is a mixed SACK */ 3352 if ((asoc->sctp_cmt_on_off > 0) && 3353 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3354 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3355 if (net->saw_newack) 3356 num_dests_sacked++; 3357 } 3358 } 3359 if (stcb->asoc.prsctp_supported) { 3360 (void)SCTP_GETTIME_TIMEVAL(&now); 3361 } 3362 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3363 strike_flag = 0; 3364 if (tp1->no_fr_allowed) { 3365 /* this one had a timeout or something */ 3366 continue; 3367 } 3368 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3369 if (tp1->sent < SCTP_DATAGRAM_RESEND) 3370 sctp_log_fr(biggest_tsn_newly_acked, 3371 tp1->rec.data.tsn, 3372 tp1->sent, 3373 SCTP_FR_LOG_CHECK_STRIKE); 3374 } 3375 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) || 3376 tp1->sent == SCTP_DATAGRAM_UNSENT) { 3377 /* done */ 3378 break; 3379 } 3380 if (stcb->asoc.prsctp_supported) { 3381 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3382 /* Is it expired? 
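* (rec.data.timetodrop holds the deadline derived from the PR-SCTP
* TTL; once "now" is past it, the chunk is abandoned instead of
* retransmitted)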
*/
3383 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3384 /* Yes so drop it */
3385 if (tp1->data != NULL) {
3386 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3387 SCTP_SO_NOT_LOCKED);
3388 }
3389 continue;
3390 }
3391 }
3392
3393 }
3394 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3395 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3396 /* we are beyond the tsn in the sack */
3397 break;
3398 }
3399 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3400 /* either a RESEND, ACKED, or MARKED */
3401 /* skip */
3402 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3403 /* Continue striking FWD-TSN chunks */
3404 tp1->rec.data.fwd_tsn_cnt++;
3405 }
3406 continue;
3407 }
3408 /*
3409 * CMT : SFR algo (covers part of DAC and HTNA as well)
3410 */
3411 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3412 /*
3413 * No new acks were received for data sent to this
3414 * dest. Therefore, according to the SFR algo for
3415 * CMT, no data sent to this dest can be marked for
3416 * FR using this SACK.
3417 */
3418 continue;
3419 } else if (tp1->whoTo &&
3420 SCTP_TSN_GT(tp1->rec.data.tsn,
3421 tp1->whoTo->this_sack_highest_newack) &&
3422 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3423 /*
3424 * CMT: New acks were received for data sent to
3425 * this dest. But no new acks were seen for data
3426 * sent after tp1. Therefore, according to the SFR
3427 * algo for CMT, tp1 cannot be marked for FR using
3428 * this SACK. This step covers part of the DAC algo
3429 * and the HTNA algo as well.
3430 */
3431 continue;
3432 }
3433 /*
3434 * Here we check to see if we have already done a FR
3435 * and if so we see if the biggest TSN we saw in the sack is
3436 * smaller than the recovery point. If so we don't strike
3437 * the tsn... otherwise we CAN strike the TSN.
3438 */
3439 /*
3440 * @@@ JRI: Check for CMT if (accum_moved &&
3441 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3442 * 0)) {
3443 */
3444 if (accum_moved && asoc->fast_retran_loss_recovery) {
3445 /*
3446 * Strike the TSN if in fast-recovery and cum-ack
3447 * moved.
3448 */
3449 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3450 sctp_log_fr(biggest_tsn_newly_acked,
3451 tp1->rec.data.tsn,
3452 tp1->sent,
3453 SCTP_FR_LOG_STRIKE_CHUNK);
3454 }
3455 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3456 tp1->sent++;
3457 }
3458 if ((asoc->sctp_cmt_on_off > 0) &&
3459 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3460 /*
3461 * CMT DAC algorithm: If SACK flag is set to
3462 * 0, then lowest_newack test will not pass
3463 * because it would have been set to the
3464 * cumack earlier. If not already to be
3465 * rtx'd, If not a mixed sack and if tp1 is
3466 * not between two sacked TSNs, then mark by
3467 * one more. NOTE that we are marking by one
3468 * additional time since the SACK DAC flag
3469 * indicates that two packets have been
3470 * received after this missing TSN.
3471 */
3472 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3473 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3474 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3475 sctp_log_fr(16 + num_dests_sacked,
3476 tp1->rec.data.tsn,
3477 tp1->sent,
3478 SCTP_FR_LOG_STRIKE_CHUNK);
3479 }
3480 tp1->sent++;
3481 }
3482 }
3483 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3484 (asoc->sctp_cmt_on_off == 0)) {
3485 /*
3486 * For those that have done a FR we must take
3487 * special consideration if we strike.
I.e the 3488 * biggest_newly_acked must be higher than the 3489 * sending_seq at the time we did the FR. 3490 */ 3491 if ( 3492 #ifdef SCTP_FR_TO_ALTERNATE 3493 /* 3494 * If FR's go to new networks, then we must only do 3495 * this for singly homed asoc's. However if the FR's 3496 * go to the same network (Armando's work) then its 3497 * ok to FR multiple times. 3498 */ 3499 (asoc->numnets < 2) 3500 #else 3501 (1) 3502 #endif 3503 ) { 3504 3505 if (SCTP_TSN_GE(biggest_tsn_newly_acked, 3506 tp1->rec.data.fast_retran_tsn)) { 3507 /* 3508 * Strike the TSN, since this ack is 3509 * beyond where things were when we 3510 * did a FR. 3511 */ 3512 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3513 sctp_log_fr(biggest_tsn_newly_acked, 3514 tp1->rec.data.tsn, 3515 tp1->sent, 3516 SCTP_FR_LOG_STRIKE_CHUNK); 3517 } 3518 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3519 tp1->sent++; 3520 } 3521 strike_flag = 1; 3522 if ((asoc->sctp_cmt_on_off > 0) && 3523 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3524 /* 3525 * CMT DAC algorithm: If 3526 * SACK flag is set to 0, 3527 * then lowest_newack test 3528 * will not pass because it 3529 * would have been set to 3530 * the cumack earlier. If 3531 * not already to be rtx'd, 3532 * If not a mixed sack and 3533 * if tp1 is not between two 3534 * sacked TSNs, then mark by 3535 * one more. NOTE that we 3536 * are marking by one 3537 * additional time since the 3538 * SACK DAC flag indicates 3539 * that two packets have 3540 * been received after this 3541 * missing TSN. 3542 */ 3543 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 3544 (num_dests_sacked == 1) && 3545 SCTP_TSN_GT(this_sack_lowest_newack, 3546 tp1->rec.data.tsn)) { 3547 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3548 sctp_log_fr(32 + num_dests_sacked, 3549 tp1->rec.data.tsn, 3550 tp1->sent, 3551 SCTP_FR_LOG_STRIKE_CHUNK); 3552 } 3553 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3554 tp1->sent++; 3555 } 3556 } 3557 } 3558 } 3559 } 3560 /* 3561 * JRI: TODO: remove code for HTNA algo. CMT's SFR 3562 * algo covers HTNA. 3563 */ 3564 } else if (SCTP_TSN_GT(tp1->rec.data.tsn, 3565 biggest_tsn_newly_acked)) { 3566 /* 3567 * We don't strike these: This is the HTNA 3568 * algorithm i.e. we don't strike If our TSN is 3569 * larger than the Highest TSN Newly Acked. 3570 */ 3571 ; 3572 } else { 3573 /* Strike the TSN */ 3574 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3575 sctp_log_fr(biggest_tsn_newly_acked, 3576 tp1->rec.data.tsn, 3577 tp1->sent, 3578 SCTP_FR_LOG_STRIKE_CHUNK); 3579 } 3580 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3581 tp1->sent++; 3582 } 3583 if ((asoc->sctp_cmt_on_off > 0) && 3584 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3585 /* 3586 * CMT DAC algorithm: If SACK flag is set to 3587 * 0, then lowest_newack test will not pass 3588 * because it would have been set to the 3589 * cumack earlier. If not already to be 3590 * rtx'd, If not a mixed sack and if tp1 is 3591 * not between two sacked TSNs, then mark by 3592 * one more. NOTE that we are marking by one 3593 * additional time since the SACK DAC flag 3594 * indicates that two packets have been 3595 * received after this missing TSN. 
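* Schematically: each strike moves tp1->sent one step toward
* SCTP_DATAGRAM_RESEND; with DAC evidence of two deliveries beyond
* the hole, the counter is advanced once more.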
3596 */ 3597 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3598 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) { 3599 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3600 sctp_log_fr(48 + num_dests_sacked, 3601 tp1->rec.data.tsn, 3602 tp1->sent, 3603 SCTP_FR_LOG_STRIKE_CHUNK); 3604 } 3605 tp1->sent++; 3606 } 3607 } 3608 } 3609 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3610 struct sctp_nets *alt; 3611 3612 /* fix counts and things */ 3613 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3614 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND, 3615 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0), 3616 tp1->book_size, 3617 (uint32_t)(uintptr_t)tp1->whoTo, 3618 tp1->rec.data.tsn); 3619 } 3620 if (tp1->whoTo) { 3621 tp1->whoTo->net_ack++; 3622 sctp_flight_size_decrease(tp1); 3623 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3624 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3625 tp1); 3626 } 3627 } 3628 3629 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 3630 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, 3631 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3632 } 3633 /* add back to the rwnd */ 3634 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3635 3636 /* remove from the total flight */ 3637 sctp_total_flight_decrease(stcb, tp1); 3638 3639 if ((stcb->asoc.prsctp_supported) && 3640 (PR_SCTP_RTX_ENABLED(tp1->flags))) { 3641 /* 3642 * Has it been retransmitted tv_sec times? - 3643 * we store the retran count there. 3644 */ 3645 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) { 3646 /* Yes, so drop it */ 3647 if (tp1->data != NULL) { 3648 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, 3649 SCTP_SO_NOT_LOCKED); 3650 } 3651 /* Make sure to flag we had a FR */ 3652 if (tp1->whoTo != NULL) { 3653 tp1->whoTo->net_ack++; 3654 } 3655 continue; 3656 } 3657 } 3658 /* 3659 * SCTP_PRINTF("OK, we are now ready to FR this 3660 * guy\n"); 3661 */ 3662 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3663 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count, 3664 0, SCTP_FR_MARKED); 3665 } 3666 if (strike_flag) { 3667 /* This is a subsequent FR */ 3668 SCTP_STAT_INCR(sctps_sendmultfastretrans); 3669 } 3670 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3671 if (asoc->sctp_cmt_on_off > 0) { 3672 /* 3673 * CMT: Using RTX_SSTHRESH policy for CMT. 3674 * If CMT is being used, then pick dest with 3675 * largest ssthresh for any retransmission. 3676 */ 3677 tp1->no_fr_allowed = 1; 3678 alt = tp1->whoTo; 3679 /* sa_ignore NO_NULL_CHK */ 3680 if (asoc->sctp_cmt_pf > 0) { 3681 /* 3682 * JRS 5/18/07 - If CMT PF is on, 3683 * use the PF version of 3684 * find_alt_net() 3685 */ 3686 alt = sctp_find_alternate_net(stcb, alt, 2); 3687 } else { 3688 /* 3689 * JRS 5/18/07 - If only CMT is on, 3690 * use the CMT version of 3691 * find_alt_net() 3692 */ 3693 /* sa_ignore NO_NULL_CHK */ 3694 alt = sctp_find_alternate_net(stcb, alt, 1); 3695 } 3696 if (alt == NULL) { 3697 alt = tp1->whoTo; 3698 } 3699 /* 3700 * CUCv2: If a different dest is picked for 3701 * the retransmission, then new 3702 * (rtx-)pseudo_cumack needs to be tracked 3703 * for orig dest. Let CUCv2 track new (rtx-) 3704 * pseudo-cumack always. 
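* In effect, once the retransmission may leave on a different path, the original destination keeps its own pseudo-cumack so that its cwnd accounting is not stalled by the chunk that moved.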
3705 */ 3706 if (tp1->whoTo) { 3707 tp1->whoTo->find_pseudo_cumack = 1; 3708 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3709 } 3710 3711 } else { /* CMT is OFF */ 3712 3713 #ifdef SCTP_FR_TO_ALTERNATE 3714 /* Can we find an alternate? */ 3715 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); 3716 #else 3717 /* 3718 * default behavior is to NOT retransmit 3719 * FR's to an alternate. Armando Caro's 3720 * paper details why. 3721 */ 3722 alt = tp1->whoTo; 3723 #endif 3724 } 3725 3726 tp1->rec.data.doing_fast_retransmit = 1; 3727 tot_retrans++; 3728 /* mark the sending seq for possible subsequent FR's */ 3729 /* 3730 * SCTP_PRINTF("Marking TSN for FR new value %x\n", 3731 * (uint32_t)tpi->rec.data.tsn); 3732 */ 3733 if (TAILQ_EMPTY(&asoc->send_queue)) { 3734 /* 3735 * If the send queue is empty then it is 3736 * the next sequence number that will be 3737 * assigned, so we subtract one from this to 3738 * get the one we last sent. 3739 */ 3740 tp1->rec.data.fast_retran_tsn = sending_seq; 3741 } else { 3742 /* 3743 * If there are chunks on the send queue 3744 * (unsent data that has made it from the 3745 * stream queues but not out the door), we 3746 * take the first one (which will have the 3747 * lowest TSN) and subtract one to get the 3748 * one we last sent. 3749 */ 3750 struct sctp_tmit_chunk *ttt; 3751 3752 ttt = TAILQ_FIRST(&asoc->send_queue); 3753 tp1->rec.data.fast_retran_tsn = 3754 ttt->rec.data.tsn; 3755 } 3756 3757 if (tp1->do_rtt) { 3758 /* 3759 * this guy had a RTO calculation pending on 3760 * it, cancel it 3761 */ 3762 if ((tp1->whoTo != NULL) && 3763 (tp1->whoTo->rto_needed == 0)) { 3764 tp1->whoTo->rto_needed = 1; 3765 } 3766 tp1->do_rtt = 0; 3767 } 3768 if (alt != tp1->whoTo) { 3769 /* yes, there is an alternate. */ 3770 sctp_free_remote_addr(tp1->whoTo); 3771 /* sa_ignore FREED_MEMORY */ 3772 tp1->whoTo = alt; 3773 atomic_add_int(&alt->ref_count, 1); 3774 } 3775 } 3776 } 3777 } 3778 3779 struct sctp_tmit_chunk * 3780 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb, 3781 struct sctp_association *asoc) 3782 { 3783 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL; 3784 struct timeval now; 3785 int now_filled = 0; 3786 3787 if (asoc->prsctp_supported == 0) { 3788 return (NULL); 3789 } 3790 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 3791 if (tp1->sent != SCTP_FORWARD_TSN_SKIP && 3792 tp1->sent != SCTP_DATAGRAM_RESEND && 3793 tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 3794 /* no chance to advance, out of here */ 3795 break; 3796 } 3797 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 3798 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || 3799 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { 3800 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 3801 asoc->advanced_peer_ack_point, 3802 tp1->rec.data.tsn, 0, 0); 3803 } 3804 } 3805 if (!PR_SCTP_ENABLED(tp1->flags)) { 3806 /* 3807 * We can't fwd-tsn past any that are reliable, aka 3808 * retransmitted until the asoc fails. 3809 */ 3810 break; 3811 } 3812 if (!now_filled) { 3813 (void)SCTP_GETTIME_TIMEVAL(&now); 3814 now_filled = 1; 3815 } 3816 /* 3817 * now we have a chunk which is marked for another 3818 * retransmission to a PR-stream but maybe has run out its 3819 * chances already OR has been marked to skip now. Can we 3820 * skip it if it is a resend? 3821 */ 3822 if (tp1->sent == SCTP_DATAGRAM_RESEND && 3823 (PR_SCTP_TTL_ENABLED(tp1->flags))) { 3824 /* 3825 * Now is this one marked for resend and its time is 3826 * now up?
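* (For PR-SCTP timed reliability, rec.data.timetodrop holds the absolute time after which the chunk may be abandoned.)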
3827 */ 3828 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3829 /* Yes so drop it */ 3830 if (tp1->data) { 3831 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 3832 1, SCTP_SO_NOT_LOCKED); 3833 } 3834 } else { 3835 /* 3836 * No, we are done when we hit one marked for resend 3837 * whose time has not expired. 3838 */ 3839 break; 3840 } 3841 } 3842 /* 3843 * Ok now if this chunk is marked to drop it we can clean up 3844 * the chunk, advance our peer ack point and we can check 3845 * the next chunk. 3846 */ 3847 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || 3848 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { 3849 /* the advancedPeerAckPoint goes forward */ 3850 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) { 3851 asoc->advanced_peer_ack_point = tp1->rec.data.tsn; 3852 a_adv = tp1; 3853 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) { 3854 /* No update but we do save the chk */ 3855 a_adv = tp1; 3856 } 3857 } else { 3858 /* 3859 * If it is still in RESEND we can advance no 3860 * further 3861 */ 3862 break; 3863 } 3864 } 3865 return (a_adv); 3866 } 3867 3868 static int 3869 sctp_fs_audit(struct sctp_association *asoc) 3870 { 3871 struct sctp_tmit_chunk *chk; 3872 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0; 3873 int ret; 3874 #ifndef INVARIANTS 3875 int entry_flight, entry_cnt; 3876 #endif 3877 3878 ret = 0; 3879 #ifndef INVARIANTS 3880 entry_flight = asoc->total_flight; 3881 entry_cnt = asoc->total_flight_count; 3882 #endif 3883 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt) 3884 return (0); 3885 3886 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 3887 if (chk->sent < SCTP_DATAGRAM_RESEND) { 3888 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n", 3889 chk->rec.data.tsn, 3890 chk->send_size, 3891 chk->snd_count); 3892 inflight++; 3893 } else if (chk->sent == SCTP_DATAGRAM_RESEND) { 3894 resend++; 3895 } else if (chk->sent < SCTP_DATAGRAM_ACKED) { 3896 inbetween++; 3897 } else if (chk->sent > SCTP_DATAGRAM_ACKED) { 3898 above++; 3899 } else { 3900 acked++; 3901 } 3902 } 3903 3904 if ((inflight > 0) || (inbetween > 0)) { 3905 #ifdef INVARIANTS 3906 panic("Flight size-express incorrect? \n"); 3907 #else 3908 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n", 3909 entry_flight, entry_cnt); 3910 3911 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n", 3912 inflight, inbetween, resend, above, acked); 3913 ret = 1; 3914 #endif 3915 } 3916 return (ret); 3917 } 3918 3919 3920 static void 3921 sctp_window_probe_recovery(struct sctp_tcb *stcb, 3922 struct sctp_association *asoc, 3923 struct sctp_tmit_chunk *tp1) 3924 { 3925 tp1->window_probe = 0; 3926 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) { 3927 /* TSN's skipped, we do NOT move back. */ 3928 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD, 3929 tp1->whoTo ?
tp1->whoTo->flight_size : 0, 3930 tp1->book_size, 3931 (uint32_t)(uintptr_t)tp1->whoTo, 3932 tp1->rec.data.tsn); 3933 return; 3934 } 3935 /* First setup this by shrinking flight */ 3936 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3937 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3938 tp1); 3939 } 3940 sctp_flight_size_decrease(tp1); 3941 sctp_total_flight_decrease(stcb, tp1); 3942 /* Now mark for resend */ 3943 tp1->sent = SCTP_DATAGRAM_RESEND; 3944 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 3945 3946 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3947 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP, 3948 tp1->whoTo->flight_size, 3949 tp1->book_size, 3950 (uint32_t)(uintptr_t)tp1->whoTo, 3951 tp1->rec.data.tsn); 3952 } 3953 } 3954 3955 void 3956 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, 3957 uint32_t rwnd, int *abort_now, int ecne_seen) 3958 { 3959 struct sctp_nets *net; 3960 struct sctp_association *asoc; 3961 struct sctp_tmit_chunk *tp1, *tp2; 3962 uint32_t old_rwnd; 3963 int win_probe_recovery = 0; 3964 int win_probe_recovered = 0; 3965 int j, done_once = 0; 3966 int rto_ok = 1; 3967 uint32_t send_s; 3968 3969 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 3970 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack, 3971 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 3972 } 3973 SCTP_TCB_LOCK_ASSERT(stcb); 3974 #ifdef SCTP_ASOCLOG_OF_TSNS 3975 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack; 3976 stcb->asoc.cumack_log_at++; 3977 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 3978 stcb->asoc.cumack_log_at = 0; 3979 } 3980 #endif 3981 asoc = &stcb->asoc; 3982 old_rwnd = asoc->peers_rwnd; 3983 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) { 3984 /* old ack */ 3985 return; 3986 } else if (asoc->last_acked_seq == cumack) { 3987 /* Window update sack */ 3988 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 3989 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 3990 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 3991 /* SWS sender side engages */ 3992 asoc->peers_rwnd = 0; 3993 } 3994 if (asoc->peers_rwnd > old_rwnd) { 3995 goto again; 3996 } 3997 return; 3998 } 3999 4000 /* First setup for CC stuff */ 4001 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4002 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) { 4003 /* Drag along the window_tsn for cwr's */ 4004 net->cwr_window_tsn = cumack; 4005 } 4006 net->prev_cwnd = net->cwnd; 4007 net->net_ack = 0; 4008 net->net_ack2 = 0; 4009 4010 /* 4011 * CMT: Reset CUC and Fast recovery algo variables before 4012 * SACK processing 4013 */ 4014 net->new_pseudo_cumack = 0; 4015 net->will_exit_fast_recovery = 0; 4016 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 4017 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 4018 } 4019 } 4020 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 4021 tp1 = TAILQ_LAST(&asoc->sent_queue, 4022 sctpchunk_listhead); 4023 send_s = tp1->rec.data.tsn + 1; 4024 } else { 4025 send_s = asoc->sending_seq; 4026 } 4027 if (SCTP_TSN_GE(cumack, send_s)) { 4028 struct mbuf *op_err; 4029 char msg[SCTP_DIAG_INFO_LEN]; 4030 4031 *abort_now = 1; 4032 /* XXX */ 4033 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x", 4034 cumack, send_s); 4035 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 4036 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21; 4037 
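/* A cumulative ack at or beyond our own sending_seq means the peer is acking TSNs that were never sent; rather than continue with inconsistent state, the association is torn down with a protocol-violation cause. */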
sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4038 return; 4039 } 4040 asoc->this_sack_highest_gap = cumack; 4041 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4042 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4043 stcb->asoc.overall_error_count, 4044 0, 4045 SCTP_FROM_SCTP_INDATA, 4046 __LINE__); 4047 } 4048 stcb->asoc.overall_error_count = 0; 4049 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) { 4050 /* process the new consecutive TSN first */ 4051 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 4052 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) { 4053 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 4054 SCTP_PRINTF("Warning, an unsent is now acked?\n"); 4055 } 4056 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4057 /* 4058 * If it is less than ACKED, it is 4059 * now no-longer in flight. Higher 4060 * values may occur during marking 4061 */ 4062 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4063 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4064 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4065 tp1->whoTo->flight_size, 4066 tp1->book_size, 4067 (uint32_t)(uintptr_t)tp1->whoTo, 4068 tp1->rec.data.tsn); 4069 } 4070 sctp_flight_size_decrease(tp1); 4071 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 4072 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 4073 tp1); 4074 } 4075 /* sa_ignore NO_NULL_CHK */ 4076 sctp_total_flight_decrease(stcb, tp1); 4077 } 4078 tp1->whoTo->net_ack += tp1->send_size; 4079 if (tp1->snd_count < 2) { 4080 /* 4081 * True non-retransmitted 4082 * chunk 4083 */ 4084 tp1->whoTo->net_ack2 += 4085 tp1->send_size; 4086 4087 /* update RTO too? */ 4088 if (tp1->do_rtt) { 4089 if (rto_ok) { 4090 tp1->whoTo->RTO = 4091 /* 4092 * sa_ignore 4093 * NO_NULL_CHK 4094 */ 4095 sctp_calculate_rto(stcb, 4096 asoc, tp1->whoTo, 4097 &tp1->sent_rcv_time, 4098 SCTP_RTT_FROM_DATA); 4099 rto_ok = 0; 4100 } 4101 if (tp1->whoTo->rto_needed == 0) { 4102 tp1->whoTo->rto_needed = 1; 4103 } 4104 tp1->do_rtt = 0; 4105 } 4106 } 4107 /* 4108 * CMT: CUCv2 algorithm. From the 4109 * cumack'd TSNs, for each TSN being 4110 * acked for the first time, set the 4111 * following variables for the 4112 * corresp destination. 4113 * new_pseudo_cumack will trigger a 4114 * cwnd update. 4115 * find_(rtx_)pseudo_cumack will 4116 * trigger search for the next 4117 * expected (rtx-)pseudo-cumack. 
4118 */ 4119 tp1->whoTo->new_pseudo_cumack = 1; 4120 tp1->whoTo->find_pseudo_cumack = 1; 4121 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4122 4123 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 4124 /* sa_ignore NO_NULL_CHK */ 4125 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); 4126 } 4127 } 4128 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4129 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4130 } 4131 if (tp1->rec.data.chunk_was_revoked) { 4132 /* deflate the cwnd */ 4133 tp1->whoTo->cwnd -= tp1->book_size; 4134 tp1->rec.data.chunk_was_revoked = 0; 4135 } 4136 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4137 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) { 4138 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--; 4139 #ifdef INVARIANTS 4140 } else { 4141 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); 4142 #endif 4143 } 4144 } 4145 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) && 4146 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && 4147 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) { 4148 asoc->trigger_reset = 1; 4149 } 4150 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4151 if (tp1->data) { 4152 /* sa_ignore NO_NULL_CHK */ 4153 sctp_free_bufspace(stcb, asoc, tp1, 1); 4154 sctp_m_freem(tp1->data); 4155 tp1->data = NULL; 4156 } 4157 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4158 sctp_log_sack(asoc->last_acked_seq, 4159 cumack, 4160 tp1->rec.data.tsn, 4161 0, 4162 0, 4163 SCTP_LOG_FREE_SENT); 4164 } 4165 asoc->sent_queue_cnt--; 4166 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 4167 } else { 4168 break; 4169 } 4170 } 4171 4172 } 4173 /* sa_ignore NO_NULL_CHK */ 4174 if (stcb->sctp_socket) { 4175 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4176 struct socket *so; 4177 4178 #endif 4179 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4180 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4181 /* sa_ignore NO_NULL_CHK */ 4182 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK); 4183 } 4184 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4185 so = SCTP_INP_SO(stcb->sctp_ep); 4186 atomic_add_int(&stcb->asoc.refcnt, 1); 4187 SCTP_TCB_UNLOCK(stcb); 4188 SCTP_SOCKET_LOCK(so, 1); 4189 SCTP_TCB_LOCK(stcb); 4190 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4191 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4192 /* assoc was freed while we were unlocked */ 4193 SCTP_SOCKET_UNLOCK(so, 1); 4194 return; 4195 } 4196 #endif 4197 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4198 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4199 SCTP_SOCKET_UNLOCK(so, 1); 4200 #endif 4201 } else { 4202 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4203 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK); 4204 } 4205 } 4206 4207 /* JRS - Use the congestion control given in the CC module */ 4208 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) { 4209 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4210 if (net->net_ack2 > 0) { 4211 /* 4212 * Karn's rule applies to clearing error 4213 * count, this is optional. 
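* net_ack2 only counts bytes that were never retransmitted, so a positive value means a round trip unambiguously completed on this path.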
4214 */ 4215 net->error_count = 0; 4216 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { 4217 /* addr came good */ 4218 net->dest_state |= SCTP_ADDR_REACHABLE; 4219 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 4220 0, (void *)net, SCTP_SO_NOT_LOCKED); 4221 } 4222 if (net == stcb->asoc.primary_destination) { 4223 if (stcb->asoc.alternate) { 4224 /* 4225 * release the alternate, 4226 * primary is good 4227 */ 4228 sctp_free_remote_addr(stcb->asoc.alternate); 4229 stcb->asoc.alternate = NULL; 4230 } 4231 } 4232 if (net->dest_state & SCTP_ADDR_PF) { 4233 net->dest_state &= ~SCTP_ADDR_PF; 4234 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, 4235 stcb->sctp_ep, stcb, net, 4236 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22); 4237 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 4238 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 4239 /* Done with this net */ 4240 net->net_ack = 0; 4241 } 4242 /* restore any doubled timers */ 4243 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 4244 if (net->RTO < stcb->asoc.minrto) { 4245 net->RTO = stcb->asoc.minrto; 4246 } 4247 if (net->RTO > stcb->asoc.maxrto) { 4248 net->RTO = stcb->asoc.maxrto; 4249 } 4250 } 4251 } 4252 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0); 4253 } 4254 asoc->last_acked_seq = cumack; 4255 4256 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4257 /* nothing left in-flight */ 4258 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4259 net->flight_size = 0; 4260 net->partial_bytes_acked = 0; 4261 } 4262 asoc->total_flight = 0; 4263 asoc->total_flight_count = 0; 4264 } 4265 4266 /* RWND update */ 4267 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 4268 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 4269 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4270 /* SWS sender side engages */ 4271 asoc->peers_rwnd = 0; 4272 } 4273 if (asoc->peers_rwnd > old_rwnd) { 4274 win_probe_recovery = 1; 4275 } 4276 /* Now assure a timer where data is queued at */ 4277 again: 4278 j = 0; 4279 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4280 if (win_probe_recovery && (net->window_probe)) { 4281 win_probe_recovered = 1; 4282 /* 4283 * Find first chunk that was used with window probe 4284 * and clear the sent 4285 */ 4286 /* sa_ignore FREED_MEMORY */ 4287 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4288 if (tp1->window_probe) { 4289 /* move back to data send queue */ 4290 sctp_window_probe_recovery(stcb, asoc, tp1); 4291 break; 4292 } 4293 } 4294 } 4295 if (net->flight_size) { 4296 j++; 4297 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net); 4298 if (net->window_probe) { 4299 net->window_probe = 0; 4300 } 4301 } else { 4302 if (net->window_probe) { 4303 /* 4304 * In window probes we must assure a timer 4305 * is still running there 4306 */ 4307 net->window_probe = 0; 4308 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4309 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net); 4310 } 4311 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4312 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4313 stcb, net, 4314 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23); 4315 } 4316 } 4317 } 4318 if ((j == 0) && 4319 (!TAILQ_EMPTY(&asoc->sent_queue)) && 4320 (asoc->sent_queue_retran_cnt == 0) && 4321 (win_probe_recovered == 0) && 4322 (done_once == 0)) { 4323 /* 4324 * huh, this should not happen unless all packets are 4325 * PR-SCTP and marked to skip of course. 
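* If the audit below finds the cached flight-size totals out of sync with the sent queue, they are rebuilt from the queue and the timer scan is retried exactly once.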
4326 */ 4327 if (sctp_fs_audit(asoc)) { 4328 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4329 net->flight_size = 0; 4330 } 4331 asoc->total_flight = 0; 4332 asoc->total_flight_count = 0; 4333 asoc->sent_queue_retran_cnt = 0; 4334 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4335 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4336 sctp_flight_size_increase(tp1); 4337 sctp_total_flight_increase(stcb, tp1); 4338 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4339 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 4340 } 4341 } 4342 } 4343 done_once = 1; 4344 goto again; 4345 } 4346 /**********************************/ 4347 /* Now what about shutdown issues */ 4348 /**********************************/ 4349 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4350 /* nothing left on sendqueue.. consider done */ 4351 /* clean up */ 4352 if ((asoc->stream_queue_cnt == 1) && 4353 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4354 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 4355 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) { 4356 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT); 4357 } 4358 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4359 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 4360 (asoc->stream_queue_cnt == 1) && 4361 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 4362 struct mbuf *op_err; 4363 4364 *abort_now = 1; 4365 /* XXX */ 4366 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 4367 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; 4368 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4369 return; 4370 } 4371 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4372 (asoc->stream_queue_cnt == 0)) { 4373 struct sctp_nets *netp; 4374 4375 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4376 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4377 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4378 } 4379 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT); 4380 sctp_stop_timers_for_shutdown(stcb); 4381 if (asoc->alternate) { 4382 netp = asoc->alternate; 4383 } else { 4384 netp = asoc->primary_destination; 4385 } 4386 sctp_send_shutdown(stcb, netp); 4387 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4388 stcb->sctp_ep, stcb, netp); 4389 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4390 stcb->sctp_ep, stcb, netp); 4391 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4392 (asoc->stream_queue_cnt == 0)) { 4393 struct sctp_nets *netp; 4394 4395 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4396 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT); 4397 sctp_stop_timers_for_shutdown(stcb); 4398 if (asoc->alternate) { 4399 netp = asoc->alternate; 4400 } else { 4401 netp = asoc->primary_destination; 4402 } 4403 sctp_send_shutdown_ack(stcb, netp); 4404 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4405 stcb->sctp_ep, stcb, netp); 4406 } 4407 } 4408 /*********************************************/ 4409 /* Here we perform PR-SCTP procedures */ 4410 /* (section 4.2) */ 4411 /*********************************************/ 4412 /* C1. 
update advancedPeerAckPoint */ 4413 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) { 4414 asoc->advanced_peer_ack_point = cumack; 4415 } 4416 /* PR-SCTP issues need to be addressed too */ 4417 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { 4418 struct sctp_tmit_chunk *lchk; 4419 uint32_t old_adv_peer_ack_point; 4420 4421 old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 4422 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 4423 /* C3. See if we need to send a Fwd-TSN */ 4424 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) { 4425 /* 4426 * ISSUE with ECN, see FWD-TSN processing. 4427 */ 4428 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 4429 send_forward_tsn(stcb, asoc); 4430 } else if (lchk) { 4431 /* try to FR fwd-tsn's that get lost too */ 4432 if (lchk->rec.data.fwd_tsn_cnt >= 3) { 4433 send_forward_tsn(stcb, asoc); 4434 } 4435 } 4436 } 4437 if (lchk) { 4438 /* Assure a timer is up */ 4439 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4440 stcb->sctp_ep, stcb, lchk->whoTo); 4441 } 4442 } 4443 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 4444 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 4445 rwnd, 4446 stcb->asoc.peers_rwnd, 4447 stcb->asoc.total_flight, 4448 stcb->asoc.total_output_queue_size); 4449 } 4450 } 4451 4452 void 4453 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, 4454 struct sctp_tcb *stcb, 4455 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup, 4456 int *abort_now, uint8_t flags, 4457 uint32_t cum_ack, uint32_t rwnd, int ecne_seen) 4458 { 4459 struct sctp_association *asoc; 4460 struct sctp_tmit_chunk *tp1, *tp2; 4461 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack; 4462 uint16_t wake_him = 0; 4463 uint32_t send_s = 0; 4464 long j; 4465 int accum_moved = 0; 4466 int will_exit_fast_recovery = 0; 4467 uint32_t a_rwnd, old_rwnd; 4468 int win_probe_recovery = 0; 4469 int win_probe_recovered = 0; 4470 struct sctp_nets *net = NULL; 4471 int done_once; 4472 int rto_ok = 1; 4473 uint8_t reneged_all = 0; 4474 uint8_t cmt_dac_flag; 4475 4476 /* 4477 * we take any chance we can to service our queues since we cannot 4478 * get awoken when the socket is read from :< 4479 */ 4480 /* 4481 * Now perform the actual SACK handling: 1) Verify that it is not an 4482 * old sack, if so discard. 2) If there is nothing left in the send 4483 * queue (cum-ack is equal to last acked) then you have a duplicate 4484 * too, update any rwnd change and verify no timers are running, 4485 * then return. 3) Process any new consecutive data i.e. cum-ack 4486 * moved; process these first and note that it moved. 4) Process any 4487 * sack blocks. 5) Drop any acked from the queue. 6) Check for any 4488 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left, 4489 * sync up flightsizes and things, stop all timers and also check 4490 * for shutdown_pending state. If so then go ahead and send off the 4491 * shutdown. If in shutdown recv, send off the shutdown-ack and 4492 * start that timer, Ret. 9) Strike any non-acked things and do FR 4493 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp 4494 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK 4495 * if in shutdown_recv state.
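* Steps 1-3 roughly mirror the express path above; the slow path exists mainly to also walk the gap-ack and duplicate-TSN blocks carried in the SACK.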
4496 */ 4497 SCTP_TCB_LOCK_ASSERT(stcb); 4498 /* CMT DAC algo */ 4499 this_sack_lowest_newack = 0; 4500 SCTP_STAT_INCR(sctps_slowpath_sack); 4501 last_tsn = cum_ack; 4502 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC; 4503 #ifdef SCTP_ASOCLOG_OF_TSNS 4504 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack; 4505 stcb->asoc.cumack_log_at++; 4506 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 4507 stcb->asoc.cumack_log_at = 0; 4508 } 4509 #endif 4510 a_rwnd = rwnd; 4511 4512 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 4513 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack, 4514 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 4515 } 4516 4517 old_rwnd = stcb->asoc.peers_rwnd; 4518 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4519 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4520 stcb->asoc.overall_error_count, 4521 0, 4522 SCTP_FROM_SCTP_INDATA, 4523 __LINE__); 4524 } 4525 stcb->asoc.overall_error_count = 0; 4526 asoc = &stcb->asoc; 4527 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4528 sctp_log_sack(asoc->last_acked_seq, 4529 cum_ack, 4530 0, 4531 num_seg, 4532 num_dup, 4533 SCTP_LOG_NEW_SACK); 4534 } 4535 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) { 4536 uint16_t i; 4537 uint32_t *dupdata, dblock; 4538 4539 for (i = 0; i < num_dup; i++) { 4540 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t), 4541 sizeof(uint32_t), (uint8_t *)&dblock); 4542 if (dupdata == NULL) { 4543 break; 4544 } 4545 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); 4546 } 4547 } 4548 /* reality check */ 4549 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 4550 tp1 = TAILQ_LAST(&asoc->sent_queue, 4551 sctpchunk_listhead); 4552 send_s = tp1->rec.data.tsn + 1; 4553 } else { 4554 tp1 = NULL; 4555 send_s = asoc->sending_seq; 4556 } 4557 if (SCTP_TSN_GE(cum_ack, send_s)) { 4558 struct mbuf *op_err; 4559 char msg[SCTP_DIAG_INFO_LEN]; 4560 4561 /* 4562 * no way, we have not even sent this TSN out yet. Peer is 4563 * hopelessly messed up with us. 
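* Rather than silently dropping such a SACK, this implementation treats it as a protocol violation and aborts the association.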
4564 */ 4565 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n", 4566 cum_ack, send_s); 4567 if (tp1) { 4568 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n", 4569 tp1->rec.data.tsn, (void *)tp1); 4570 } 4571 hopeless_peer: 4572 *abort_now = 1; 4573 /* XXX */ 4574 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x", 4575 cum_ack, send_s); 4576 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 4577 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 4578 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4579 return; 4580 } 4581 /**********************/ 4582 /* 1) check the range */ 4583 /**********************/ 4584 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) { 4585 /* acking something behind */ 4586 return; 4587 } 4588 4589 /* update the Rwnd of the peer */ 4590 if (TAILQ_EMPTY(&asoc->sent_queue) && 4591 TAILQ_EMPTY(&asoc->send_queue) && 4592 (asoc->stream_queue_cnt == 0)) { 4593 /* nothing left on send/sent and strmq */ 4594 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4595 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4596 asoc->peers_rwnd, 0, 0, a_rwnd); 4597 } 4598 asoc->peers_rwnd = a_rwnd; 4599 if (asoc->sent_queue_retran_cnt) { 4600 asoc->sent_queue_retran_cnt = 0; 4601 } 4602 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4603 /* SWS sender side engages */ 4604 asoc->peers_rwnd = 0; 4605 } 4606 /* stop any timers */ 4607 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4608 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4609 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); 4610 net->partial_bytes_acked = 0; 4611 net->flight_size = 0; 4612 } 4613 asoc->total_flight = 0; 4614 asoc->total_flight_count = 0; 4615 return; 4616 } 4617 /* 4618 * We init net_ack and net_ack2 to 0. These are used to track 2 4619 * things. The total byte count acked is tracked in net_ack AND 4620 * net_ack2 is used to track the total bytes acked that are 4621 * unambiguous and were never retransmitted. We track these on a per 4622 * destination address basis. 4623 */ 4624 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4625 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) { 4626 /* Drag along the window_tsn for cwr's */ 4627 net->cwr_window_tsn = cum_ack; 4628 } 4629 net->prev_cwnd = net->cwnd; 4630 net->net_ack = 0; 4631 net->net_ack2 = 0; 4632 4633 /* 4634 * CMT: Reset CUC and Fast recovery algo variables before 4635 * SACK processing 4636 */ 4637 net->new_pseudo_cumack = 0; 4638 net->will_exit_fast_recovery = 0; 4639 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 4640 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 4641 } 4642 4643 /* 4644 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has 4645 * to be greater than the cumack. Also reset saw_newack to 0 4646 * for all dests. 4647 */ 4648 net->saw_newack = 0; 4649 net->this_sack_highest_newack = last_tsn; 4650 } 4651 /* process the new consecutive TSN first */ 4652 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4653 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) { 4654 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 4655 accum_moved = 1; 4656 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4657 /* 4658 * If it is less than ACKED, it is 4659 * now no-longer in flight.
Higher 4660 * values may occur during marking 4661 */ 4662 if ((tp1->whoTo->dest_state & 4663 SCTP_ADDR_UNCONFIRMED) && 4664 (tp1->snd_count < 2)) { 4665 /* 4666 * If there was no retran 4667 * and the address is 4668 * un-confirmed and we sent 4669 * there and are now 4670 * sacked.. its confirmed, 4671 * mark it so. 4672 */ 4673 tp1->whoTo->dest_state &= 4674 ~SCTP_ADDR_UNCONFIRMED; 4675 } 4676 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4677 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4678 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4679 tp1->whoTo->flight_size, 4680 tp1->book_size, 4681 (uint32_t)(uintptr_t)tp1->whoTo, 4682 tp1->rec.data.tsn); 4683 } 4684 sctp_flight_size_decrease(tp1); 4685 sctp_total_flight_decrease(stcb, tp1); 4686 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 4687 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 4688 tp1); 4689 } 4690 } 4691 tp1->whoTo->net_ack += tp1->send_size; 4692 4693 /* CMT SFR and DAC algos */ 4694 this_sack_lowest_newack = tp1->rec.data.tsn; 4695 tp1->whoTo->saw_newack = 1; 4696 4697 if (tp1->snd_count < 2) { 4698 /* 4699 * True non-retransmitted 4700 * chunk 4701 */ 4702 tp1->whoTo->net_ack2 += 4703 tp1->send_size; 4704 4705 /* update RTO too? */ 4706 if (tp1->do_rtt) { 4707 if (rto_ok) { 4708 tp1->whoTo->RTO = 4709 sctp_calculate_rto(stcb, 4710 asoc, tp1->whoTo, 4711 &tp1->sent_rcv_time, 4712 SCTP_RTT_FROM_DATA); 4713 rto_ok = 0; 4714 } 4715 if (tp1->whoTo->rto_needed == 0) { 4716 tp1->whoTo->rto_needed = 1; 4717 } 4718 tp1->do_rtt = 0; 4719 } 4720 } 4721 /* 4722 * CMT: CUCv2 algorithm. From the 4723 * cumack'd TSNs, for each TSN being 4724 * acked for the first time, set the 4725 * following variables for the 4726 * corresp destination. 4727 * new_pseudo_cumack will trigger a 4728 * cwnd update. 4729 * find_(rtx_)pseudo_cumack will 4730 * trigger search for the next 4731 * expected (rtx-)pseudo-cumack. 4732 */ 4733 tp1->whoTo->new_pseudo_cumack = 1; 4734 tp1->whoTo->find_pseudo_cumack = 1; 4735 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4736 4737 4738 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4739 sctp_log_sack(asoc->last_acked_seq, 4740 cum_ack, 4741 tp1->rec.data.tsn, 4742 0, 4743 0, 4744 SCTP_LOG_TSN_ACKED); 4745 } 4746 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 4747 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); 4748 } 4749 } 4750 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4751 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4752 #ifdef SCTP_AUDITING_ENABLED 4753 sctp_audit_log(0xB3, 4754 (asoc->sent_queue_retran_cnt & 0x000000ff)); 4755 #endif 4756 } 4757 if (tp1->rec.data.chunk_was_revoked) { 4758 /* deflate the cwnd */ 4759 tp1->whoTo->cwnd -= tp1->book_size; 4760 tp1->rec.data.chunk_was_revoked = 0; 4761 } 4762 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4763 tp1->sent = SCTP_DATAGRAM_ACKED; 4764 } 4765 } 4766 } else { 4767 break; 4768 } 4769 } 4770 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; 4771 /* always set this up to cum-ack */ 4772 asoc->this_sack_highest_gap = last_tsn; 4773 4774 if ((num_seg > 0) || (num_nr_seg > 0)) { 4775 4776 /* 4777 * thisSackHighestGap will increase while handling NEW 4778 * segments this_sack_highest_newack will increase while 4779 * handling NEWLY ACKED chunks. this_sack_lowest_newack is 4780 * used for CMT DAC algo. saw_newack will also change. 
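* All of these per-destination markers feed the strike logic in sctp_strike_gap_ack_chunks() further below.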
4781 */ 4782 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked, 4783 &biggest_tsn_newly_acked, &this_sack_lowest_newack, 4784 num_seg, num_nr_seg, &rto_ok)) { 4785 wake_him++; 4786 } 4787 /* 4788 * validate the biggest_tsn_acked in the gap acks if strict 4789 * adherence is wanted. 4790 */ 4791 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) { 4792 /* 4793 * peer is either confused or we are under attack. 4794 * We must abort. 4795 */ 4796 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n", 4797 biggest_tsn_acked, send_s); 4798 goto hopeless_peer; 4799 } 4800 } 4801 /*******************************************/ 4802 /* cancel ALL T3-send timer if accum moved */ 4803 /*******************************************/ 4804 if (asoc->sctp_cmt_on_off > 0) { 4805 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4806 if (net->new_pseudo_cumack) 4807 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4808 stcb, net, 4809 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27); 4810 4811 } 4812 } else { 4813 if (accum_moved) { 4814 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4815 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4816 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28); 4817 } 4818 } 4819 } 4820 /********************************************/ 4821 /* drop the acked chunks from the sentqueue */ 4822 /********************************************/ 4823 asoc->last_acked_seq = cum_ack; 4824 4825 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 4826 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) { 4827 break; 4828 } 4829 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4830 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) { 4831 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--; 4832 #ifdef INVARIANTS 4833 } else { 4834 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); 4835 #endif 4836 } 4837 } 4838 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) && 4839 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && 4840 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) { 4841 asoc->trigger_reset = 1; 4842 } 4843 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4844 if (PR_SCTP_ENABLED(tp1->flags)) { 4845 if (asoc->pr_sctp_cnt != 0) 4846 asoc->pr_sctp_cnt--; 4847 } 4848 asoc->sent_queue_cnt--; 4849 if (tp1->data) { 4850 /* sa_ignore NO_NULL_CHK */ 4851 sctp_free_bufspace(stcb, asoc, tp1, 1); 4852 sctp_m_freem(tp1->data); 4853 tp1->data = NULL; 4854 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) { 4855 asoc->sent_queue_cnt_removeable--; 4856 } 4857 } 4858 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4859 sctp_log_sack(asoc->last_acked_seq, 4860 cum_ack, 4861 tp1->rec.data.tsn, 4862 0, 4863 0, 4864 SCTP_LOG_FREE_SENT); 4865 } 4866 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 4867 wake_him++; 4868 } 4869 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) { 4870 #ifdef INVARIANTS 4871 panic("Warning flight size is positive and should be 0"); 4872 #else 4873 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n", 4874 asoc->total_flight); 4875 #endif 4876 asoc->total_flight = 0; 4877 } 4878 4879 /* sa_ignore NO_NULL_CHK */ 4880 if ((wake_him) && (stcb->sctp_socket)) { 4881 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4882 struct socket *so; 4883 4884 #endif 4885 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4886 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4887 sctp_wakeup_log(stcb, wake_him, 
SCTP_WAKESND_FROM_SACK); 4888 } 4889 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4890 so = SCTP_INP_SO(stcb->sctp_ep); 4891 atomic_add_int(&stcb->asoc.refcnt, 1); 4892 SCTP_TCB_UNLOCK(stcb); 4893 SCTP_SOCKET_LOCK(so, 1); 4894 SCTP_TCB_LOCK(stcb); 4895 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4896 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4897 /* assoc was freed while we were unlocked */ 4898 SCTP_SOCKET_UNLOCK(so, 1); 4899 return; 4900 } 4901 #endif 4902 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4903 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4904 SCTP_SOCKET_UNLOCK(so, 1); 4905 #endif 4906 } else { 4907 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4908 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK); 4909 } 4910 } 4911 4912 if (asoc->fast_retran_loss_recovery && accum_moved) { 4913 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) { 4914 /* Setup so we will exit RFC2582 fast recovery */ 4915 will_exit_fast_recovery = 1; 4916 } 4917 } 4918 /* 4919 * Check for revoked fragments: 4920 * 4921 * if Previous sack - Had no frags then we can't have any revoked if 4922 * Previous sack - Had frag's then - If we now have frags aka 4923 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked 4924 * some of them. else - The peer revoked all ACKED fragments, since 4925 * we had some before and now we have NONE. 4926 */ 4927 4928 if (num_seg) { 4929 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked); 4930 asoc->saw_sack_with_frags = 1; 4931 } else if (asoc->saw_sack_with_frags) { 4932 int cnt_revoked = 0; 4933 4934 /* Peer revoked all dg's marked or acked */ 4935 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4936 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 4937 tp1->sent = SCTP_DATAGRAM_SENT; 4938 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4939 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 4940 tp1->whoTo->flight_size, 4941 tp1->book_size, 4942 (uint32_t)(uintptr_t)tp1->whoTo, 4943 tp1->rec.data.tsn); 4944 } 4945 sctp_flight_size_increase(tp1); 4946 sctp_total_flight_increase(stcb, tp1); 4947 tp1->rec.data.chunk_was_revoked = 1; 4948 /* 4949 * To ensure that this increase in 4950 * flightsize, which is artificial, does not 4951 * throttle the sender, we also increase the 4952 * cwnd artificially. 4953 */ 4954 tp1->whoTo->cwnd += tp1->book_size; 4955 cnt_revoked++; 4956 } 4957 } 4958 if (cnt_revoked) { 4959 reneged_all = 1; 4960 } 4961 asoc->saw_sack_with_frags = 0; 4962 } 4963 if (num_nr_seg > 0) 4964 asoc->saw_sack_with_nr_frags = 1; 4965 else 4966 asoc->saw_sack_with_nr_frags = 0; 4967 4968 /* JRS - Use the congestion control given in the CC module */ 4969 if (ecne_seen == 0) { 4970 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4971 if (net->net_ack2 > 0) { 4972 /* 4973 * Karn's rule applies to clearing error 4974 * count, this is optional. 
4975 */ 4976 net->error_count = 0; 4977 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { 4978 /* addr came good */ 4979 net->dest_state |= SCTP_ADDR_REACHABLE; 4980 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 4981 0, (void *)net, SCTP_SO_NOT_LOCKED); 4982 } 4983 4984 if (net == stcb->asoc.primary_destination) { 4985 if (stcb->asoc.alternate) { 4986 /* 4987 * release the alternate, 4988 * primary is good 4989 */ 4990 sctp_free_remote_addr(stcb->asoc.alternate); 4991 stcb->asoc.alternate = NULL; 4992 } 4993 } 4994 4995 if (net->dest_state & SCTP_ADDR_PF) { 4996 net->dest_state &= ~SCTP_ADDR_PF; 4997 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, 4998 stcb->sctp_ep, stcb, net, 4999 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29); 5000 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 5001 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 5002 /* Done with this net */ 5003 net->net_ack = 0; 5004 } 5005 /* restore any doubled timers */ 5006 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 5007 if (net->RTO < stcb->asoc.minrto) { 5008 net->RTO = stcb->asoc.minrto; 5009 } 5010 if (net->RTO > stcb->asoc.maxrto) { 5011 net->RTO = stcb->asoc.maxrto; 5012 } 5013 } 5014 } 5015 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery); 5016 } 5017 5018 if (TAILQ_EMPTY(&asoc->sent_queue)) { 5019 /* nothing left in-flight */ 5020 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5021 /* stop all timers */ 5022 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 5023 stcb, net, 5024 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30); 5025 net->flight_size = 0; 5026 net->partial_bytes_acked = 0; 5027 } 5028 asoc->total_flight = 0; 5029 asoc->total_flight_count = 0; 5030 } 5031 5032 /**********************************/ 5033 /* Now what about shutdown issues */ 5034 /**********************************/ 5035 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 5036 /* nothing left on sendqueue.. 
consider done */ 5037 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 5038 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 5039 asoc->peers_rwnd, 0, 0, a_rwnd); 5040 } 5041 asoc->peers_rwnd = a_rwnd; 5042 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 5043 /* SWS sender side engages */ 5044 asoc->peers_rwnd = 0; 5045 } 5046 /* clean up */ 5047 if ((asoc->stream_queue_cnt == 1) && 5048 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 5049 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 5050 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) { 5051 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT); 5052 } 5053 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 5054 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 5055 (asoc->stream_queue_cnt == 1) && 5056 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 5057 struct mbuf *op_err; 5058 5059 *abort_now = 1; 5060 /* XXX */ 5061 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 5062 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; 5063 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 5064 return; 5065 } 5066 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 5067 (asoc->stream_queue_cnt == 0)) { 5068 struct sctp_nets *netp; 5069 5070 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 5071 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 5072 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 5073 } 5074 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT); 5075 sctp_stop_timers_for_shutdown(stcb); 5076 if (asoc->alternate) { 5077 netp = asoc->alternate; 5078 } else { 5079 netp = asoc->primary_destination; 5080 } 5081 sctp_send_shutdown(stcb, netp); 5082 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 5083 stcb->sctp_ep, stcb, netp); 5084 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 5085 stcb->sctp_ep, stcb, netp); 5086 return; 5087 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) && 5088 (asoc->stream_queue_cnt == 0)) { 5089 struct sctp_nets *netp; 5090 5091 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 5092 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT); 5093 sctp_stop_timers_for_shutdown(stcb); 5094 if (asoc->alternate) { 5095 netp = asoc->alternate; 5096 } else { 5097 netp = asoc->primary_destination; 5098 } 5099 sctp_send_shutdown_ack(stcb, netp); 5100 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 5101 stcb->sctp_ep, stcb, netp); 5102 return; 5103 } 5104 } 5105 /* 5106 * Now here we are going to recycle net_ack for a different use... 5107 * HEADS UP. 5108 */ 5109 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5110 net->net_ack = 0; 5111 } 5112 5113 /* 5114 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking 5115 * to be done. Setting this_sack_lowest_newack to the cum_ack will 5116 * automatically ensure that. 5117 */ 5118 if ((asoc->sctp_cmt_on_off > 0) && 5119 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && 5120 (cmt_dac_flag == 0)) { 5121 this_sack_lowest_newack = cum_ack; 5122 } 5123 if ((num_seg > 0) || (num_nr_seg > 0)) { 5124 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, 5125 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved); 5126 } 5127 /* JRS - Use the congestion control given in the CC module */ 5128 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc); 5129 5130 /* Now are we exiting loss recovery ? 
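Both the association-level (RFC 2582 style) recovery window and the per-destination CMT recovery windows are checked below.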
*/ 5131 if (will_exit_fast_recovery) { 5132 /* Ok, we must exit fast recovery */ 5133 asoc->fast_retran_loss_recovery = 0; 5134 } 5135 if ((asoc->sat_t3_loss_recovery) && 5136 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) { 5137 /* end satellite t3 loss recovery */ 5138 asoc->sat_t3_loss_recovery = 0; 5139 } 5140 /* 5141 * CMT Fast recovery 5142 */ 5143 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5144 if (net->will_exit_fast_recovery) { 5145 /* Ok, we must exit fast recovery */ 5146 net->fast_retran_loss_recovery = 0; 5147 } 5148 } 5149 5150 /* Adjust and set the new rwnd value */ 5151 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 5152 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 5153 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd); 5154 } 5155 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd, 5156 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 5157 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 5158 /* SWS sender side engages */ 5159 asoc->peers_rwnd = 0; 5160 } 5161 if (asoc->peers_rwnd > old_rwnd) { 5162 win_probe_recovery = 1; 5163 } 5164 5165 /* 5166 * Now we must ensure a timer is up for anyone with 5167 * outstanding data. 5168 */ 5169 done_once = 0; 5170 again: 5171 j = 0; 5172 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5173 if (win_probe_recovery && (net->window_probe)) { 5174 win_probe_recovered = 1; 5175 /*- 5176 * Find first chunk that was used with 5177 * window probe and clear the event. Put 5178 * it back into the send queue as if it has 5179 * not been sent. 5180 */ 5181 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5182 if (tp1->window_probe) { 5183 sctp_window_probe_recovery(stcb, asoc, tp1); 5184 break; 5185 } 5186 } 5187 } 5188 if (net->flight_size) { 5189 j++; 5190 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5191 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5192 stcb->sctp_ep, stcb, net); 5193 } 5194 if (net->window_probe) { 5195 net->window_probe = 0; 5196 } 5197 } else { 5198 if (net->window_probe) { 5199 /* 5200 * In window probes we must assure a timer 5201 * is still running there 5202 */ 5203 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5204 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5205 stcb->sctp_ep, stcb, net); 5206 5207 } 5208 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5209 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 5210 stcb, net, 5211 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32); 5212 } 5213 } 5214 } 5215 if ((j == 0) && 5216 (!TAILQ_EMPTY(&asoc->sent_queue)) && 5217 (asoc->sent_queue_retran_cnt == 0) && 5218 (win_probe_recovered == 0) && 5219 (done_once == 0)) { 5220 /* 5221 * huh, this should not happen unless all packets are 5222 * PR-SCTP and marked to skip of course.
5223 */ 5224 if (sctp_fs_audit(asoc)) { 5225 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5226 net->flight_size = 0; 5227 } 5228 asoc->total_flight = 0; 5229 asoc->total_flight_count = 0; 5230 asoc->sent_queue_retran_cnt = 0; 5231 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5232 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5233 sctp_flight_size_increase(tp1); 5234 sctp_total_flight_increase(stcb, tp1); 5235 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 5236 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 5237 } 5238 } 5239 } 5240 done_once = 1; 5241 goto again; 5242 } 5243 /*********************************************/ 5244 /* Here we perform PR-SCTP procedures */ 5245 /* (section 4.2) */ 5246 /*********************************************/ 5247 /* C1. update advancedPeerAckPoint */ 5248 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) { 5249 asoc->advanced_peer_ack_point = cum_ack; 5250 } 5251 /* C2. try to further move advancedPeerAckPoint ahead */ 5252 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { 5253 struct sctp_tmit_chunk *lchk; 5254 uint32_t old_adv_peer_ack_point; 5255 5256 old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 5257 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 5258 /* C3. See if we need to send a Fwd-TSN */ 5259 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) { 5260 /* 5261 * ISSUE with ECN, see FWD-TSN processing. 5262 */ 5263 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 5264 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 5265 0xee, cum_ack, asoc->advanced_peer_ack_point, 5266 old_adv_peer_ack_point); 5267 } 5268 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 5269 send_forward_tsn(stcb, asoc); 5270 } else if (lchk) { 5271 /* try to FR fwd-tsn's that get lost too */ 5272 if (lchk->rec.data.fwd_tsn_cnt >= 3) { 5273 send_forward_tsn(stcb, asoc); 5274 } 5275 } 5276 } 5277 if (lchk) { 5278 /* Assure a timer is up */ 5279 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5280 stcb->sctp_ep, stcb, lchk->whoTo); 5281 } 5282 } 5283 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 5284 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 5285 a_rwnd, 5286 stcb->asoc.peers_rwnd, 5287 stcb->asoc.total_flight, 5288 stcb->asoc.total_output_queue_size); 5289 } 5290 } 5291 5292 void 5293 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag) 5294 { 5295 /* Copy cum-ack */ 5296 uint32_t cum_ack, a_rwnd; 5297 5298 cum_ack = ntohl(cp->cumulative_tsn_ack); 5299 /* Arrange so a_rwnd does NOT change */ 5300 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight; 5301 5302 /* Now call the express sack handling */ 5303 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0); 5304 } 5305 5306 static void 5307 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, 5308 struct sctp_stream_in *strmin) 5309 { 5310 struct sctp_queued_to_read *control, *ncontrol; 5311 struct sctp_association *asoc; 5312 uint32_t mid; 5313 int need_reasm_check = 0; 5314 5315 asoc = &stcb->asoc; 5316 mid = strmin->last_mid_delivered; 5317 /* 5318 * First deliver anything prior to and including the stream no that 5319 * came in. 
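* ("stream no" here is the message identifier: every queued message whose MID is at or before the new cumulative point becomes deliverable, unless it is still only partially reassembled.)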
5320 */ 5321 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) { 5322 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) { 5323 /* this is deliverable now */ 5324 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 5325 if (control->on_strm_q) { 5326 if (control->on_strm_q == SCTP_ON_ORDERED) { 5327 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm); 5328 } else if (control->on_strm_q == SCTP_ON_UNORDERED) { 5329 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm); 5330 #ifdef INVARIANTS 5331 } else { 5332 panic("strmin: %p ctl: %p unknown %d", 5333 strmin, control, control->on_strm_q); 5334 #endif 5335 } 5336 control->on_strm_q = 0; 5337 } 5338 /* subtract pending on streams */ 5339 if (asoc->size_on_all_streams >= control->length) { 5340 asoc->size_on_all_streams -= control->length; 5341 } else { 5342 #ifdef INVARIANTS 5343 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 5344 #else 5345 asoc->size_on_all_streams = 0; 5346 #endif 5347 } 5348 sctp_ucount_decr(asoc->cnt_on_all_streams); 5349 /* deliver it to at least the delivery-q */ 5350 if (stcb->sctp_socket) { 5351 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 5352 sctp_add_to_readq(stcb->sctp_ep, stcb, 5353 control, 5354 &stcb->sctp_socket->so_rcv, 5355 1, SCTP_READ_LOCK_HELD, 5356 SCTP_SO_NOT_LOCKED); 5357 } 5358 } else { 5359 /* Its a fragmented message */ 5360 if (control->first_frag_seen) { 5361 /* 5362 * Make it so this is next to 5363 * deliver, we restore later 5364 */ 5365 strmin->last_mid_delivered = control->mid - 1; 5366 need_reasm_check = 1; 5367 break; 5368 } 5369 } 5370 } else { 5371 /* no more delivery now. */ 5372 break; 5373 } 5374 } 5375 if (need_reasm_check) { 5376 int ret; 5377 5378 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD); 5379 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) { 5380 /* Restore the next to deliver unless we are ahead */ 5381 strmin->last_mid_delivered = mid; 5382 } 5383 if (ret == 0) { 5384 /* Left the front Partial one on */ 5385 return; 5386 } 5387 need_reasm_check = 0; 5388 } 5389 /* 5390 * now we must deliver things in queue the normal way if any are 5391 * now ready. 
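* That is, strict in-order delivery resumes at last_mid_delivered + 1.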
	mid = strmin->last_mid_delivered + 1;
	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/* this is deliverable now */
				if (control->on_strm_q) {
					if (control->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
					} else {
						panic("strmin: %p ctl: %p unknown %d",
						    strmin, control, control->on_strm_q);
#endif
					}
					control->on_strm_q = 0;
				}
				/* subtract pending on streams */
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				/* deliver it to at least the delivery-q */
				strmin->last_mid_delivered = control->mid;
				if (stcb->sctp_socket) {
					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    control,
					    &stcb->sctp_socket->so_rcv, 1,
					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
				}
				mid = strmin->last_mid_delivered + 1;
			} else {
				/* It's a fragmented message */
				if (control->first_frag_seen) {
					/*
					 * Make it so this is next to
					 * deliver.
					 */
					strmin->last_mid_delivered = control->mid - 1;
					need_reasm_check = 1;
					break;
				}
			}
		} else {
			break;
		}
	}
	if (need_reasm_check) {
		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
	}
}

static void
sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
{
	struct sctp_queued_to_read *control;
	struct sctp_stream_in *strm;
	struct sctp_tmit_chunk *chk, *nchk;
	int cnt_removed = 0;

	/*
	 * For now, large messages held on the stream reassembly queue that
	 * are complete will be tossed too.  We could in theory do more
	 * work to spin through and stop after dumping one message, aka
	 * seeing the start of a new message at the head, and call the
	 * delivery function... to see if it can be delivered... But for
	 * now we just dump everything on the queue.
	 */
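	/*
	 * Both callers in sctp_handle_forward_tsn() below invoke this
	 * function while holding the INP read lock, which is why the
	 * delivery helpers here are passed SCTP_READ_LOCK_HELD.
	 */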
	strm = &asoc->strmin[stream];
	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
	if (control == NULL) {
		/* Not found */
		return;
	}
	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
		return;
	}
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		/* Purge hanging chunks */
		if (!asoc->idata_supported && (ordered == 0)) {
			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
				break;
			}
		}
		cnt_removed++;
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (asoc->size_on_reasm_queue >= chk->send_size) {
			asoc->size_on_reasm_queue -= chk->send_size;
		} else {
#ifdef INVARIANTS
			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
#else
			asoc->size_on_reasm_queue = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	if (!TAILQ_EMPTY(&control->reasm)) {
		/* This has to be old data, unordered */
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
		chk = TAILQ_FIRST(&control->reasm);
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc,
			    chk, SCTP_READ_LOCK_HELD);
		}
		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
		return;
	}
	if (control->on_strm_q == SCTP_ON_ORDERED) {
		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		control->on_strm_q = 0;
	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
		control->on_strm_q = 0;
#ifdef INVARIANTS
	} else if (control->on_strm_q) {
		panic("strm: %p ctl: %p unknown %d",
		    strm, control, control->on_strm_q);
#endif
	}
	control->on_strm_q = 0;
	if (control->on_read_q == 0) {
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_free_a_readq(stcb, control);
	}
}

void
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
    struct sctp_forward_tsn_chunk *fwd,
    int *abort_flag, struct mbuf *m, int offset)
{
	/* The pr-sctp fwd tsn */
	/*
	 * Here we will perform all the data receiver side steps for
	 * processing FwdTSN, as required by the pr-sctp draft:
	 *
	 * Assume we get FwdTSN(x):
	 *
	 * 1) update local cumTSN to x
	 * 2) try to further advance cumTSN to x + others we have
	 * 3) examine and update re-ordering queue on pr-in-streams
	 * 4) clean up re-assembly queue
	 * 5) Send a sack to report where we are.
	 */
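	/*
	 * For reference, the layouts the parsing below assumes (the
	 * structs are declared in sctp_header.h):
	 *
	 *   FORWARD-TSN:   chunk header | new_cumulative_tsn | {sid, ssn}*
	 *   I-FORWARD-TSN: chunk header | new_cumulative_tsn | {sid, flags, mid}*
	 *
	 * With I-DATA (RFC 8260) each per-stream entry carries a 32-bit
	 * MID plus an unordered flag; classic PR-SCTP (RFC 3758) entries
	 * carry a 16-bit SSN and are always ordered.
	 */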
	struct sctp_association *asoc;
	uint32_t new_cum_tsn, gap;
	unsigned int i, fwd_sz, m_size;
	uint32_t str_seq;
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *control, *sv;

	asoc = &stcb->asoc;
	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
		SCTPDBG(SCTP_DEBUG_INDATA1,
		    "Bad size too small/big fwd-tsn\n");
		return;
	}
	m_size = (stcb->asoc.mapping_array_size << 3);
	/*************************************************************/
	/* 1. Here we update local cumTSN and shift the bitmap array */
	/*************************************************************/
	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);

	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
		/* Already got there ... */
		return;
	}
	/*
	 * now we know the new TSN is more advanced, let's find the actual
	 * gap
	 */
	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
	asoc->cumulative_tsn = new_cum_tsn;
	if (gap >= m_size) {
		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
			struct mbuf *op_err;
			char msg[SCTP_DIAG_INFO_LEN];

			/*
			 * out of range (of single byte chunks in the rwnd I
			 * give out).  This must be an attacker.
			 */
			*abort_flag = 1;
			snprintf(msg, sizeof(msg),
			    "New cum ack %8.8x too high, highest TSN %8.8x",
			    new_cum_tsn, asoc->highest_tsn_inside_map);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			return;
		}
		SCTP_STAT_INCR(sctps_fwdtsn_map_over);

		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
		asoc->highest_tsn_inside_map = new_cum_tsn;

		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->highest_tsn_inside_nr_map = new_cum_tsn;

		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
	} else {
		SCTP_TCB_LOCK_ASSERT(stcb);
		for (i = 0; i <= gap; i++) {
			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
				}
			}
		}
	}
	/*************************************************************/
	/* 2. Clear up re-assembly queue                             */
	/*************************************************************/

	/* This is now done as part of clearing up the stream/seq */
	if (asoc->idata_supported == 0) {
		uint16_t sid;

		/* Flush all the un-ordered data based on cum-tsn */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		for (sid = 0; sid < asoc->streamincnt; sid++) {
			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
		}
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	/*******************************************************/
	/* 3. Update the PR-stream re-ordering queues and fix  */
	/* delivery issues as needed.                          */
	/*******************************************************/
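	/*
	 * Each {sid, mid} entry in the chunk names the highest message
	 * being skipped over in that stream.  For every entry we flush any
	 * partial reassemblies up to it, abort an in-progress partial
	 * delivery if it is the victim, and then kick the re-ordering
	 * queue so anything now deliverable goes up.
	 */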
	fwd_sz -= sizeof(*fwd);
	if (m && fwd_sz) {
		/* New method. */
		unsigned int num_str;
		uint32_t mid, cur_mid;
		uint16_t sid;
		uint16_t ordered, flags;
		struct sctp_strseq *stseq, strseqbuf;
		struct sctp_strseq_mid *stseq_m, strseqbuf_m;

		offset += sizeof(*fwd);

		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		if (asoc->idata_supported) {
			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
		} else {
			num_str = fwd_sz / sizeof(struct sctp_strseq);
		}
		for (i = 0; i < num_str; i++) {
			if (asoc->idata_supported) {
				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
				    sizeof(struct sctp_strseq_mid),
				    (uint8_t *)&strseqbuf_m);
				offset += sizeof(struct sctp_strseq_mid);
				if (stseq_m == NULL) {
					break;
				}
				sid = ntohs(stseq_m->sid);
				mid = ntohl(stseq_m->mid);
				flags = ntohs(stseq_m->flags);
				if (flags & PR_SCTP_UNORDERED_FLAG) {
					ordered = 0;
				} else {
					ordered = 1;
				}
			} else {
				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
				    sizeof(struct sctp_strseq),
				    (uint8_t *)&strseqbuf);
				offset += sizeof(struct sctp_strseq);
				if (stseq == NULL) {
					break;
				}
				sid = ntohs(stseq->sid);
				mid = (uint32_t)ntohs(stseq->ssn);
				ordered = 1;
			}
			/* Convert */

			/* now process */

			/*
			 * Ok, we now look for the stream/seq on the read
			 * queue where it is not all delivered.  If we find
			 * it, we transmute the read entry into a
			 * PDI_ABORTED.
			 */
			if (sid >= asoc->streamincnt) {
				/* screwed up streams, stop! */
				break;
			}
			if ((asoc->str_of_pdapi == sid) &&
			    (asoc->ssn_of_pdapi == mid)) {
				/*
				 * If this is the one we were partially
				 * delivering now, then we no longer are.
				 * Note this will change with the
				 * reassembly re-write.
				 */
				asoc->fragmented_delivery_inprogress = 0;
			}
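			/*
			 * Toss every pending reassembly at or below the
			 * skipped MID: the walk starts at the last
			 * delivered MID and steps forward (in serial
			 * arithmetic) until it passes "mid".
			 */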
			strm = &asoc->strmin[sid];
			for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
				sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
			}
			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
				if ((control->sinfo_stream == sid) &&
				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
					str_seq = (sid << 16) | (0x0000ffff & mid);
					control->pdapi_aborted = 1;
					sv = stcb->asoc.control_pdapi;
					control->end_added = 1;
					if (control->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
						if (asoc->size_on_all_streams >= control->length) {
							asoc->size_on_all_streams -= control->length;
						} else {
#ifdef INVARIANTS
							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
							asoc->size_on_all_streams = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_all_streams);
					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
					} else if (control->on_strm_q) {
						panic("strm: %p ctl: %p unknown %d",
						    strm, control, control->on_strm_q);
#endif
					}
					control->on_strm_q = 0;
					stcb->asoc.control_pdapi = control;
					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
					    stcb,
					    SCTP_PARTIAL_DELIVERY_ABORTED,
					    (void *)&str_seq,
					    SCTP_SO_NOT_LOCKED);
					stcb->asoc.control_pdapi = sv;
					break;
				} else if ((control->sinfo_stream == sid) &&
				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
					/* We are past our victim SSN */
					break;
				}
			}
			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
				/* Update the sequence number */
				strm->last_mid_delivered = mid;
			}
			/* now kick the stream the new way */
			/* sa_ignore NO_NULL_CHK */
			sctp_kick_prsctp_reorder_queue(stcb, strm);
		}
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	/*
	 * Now slide things forward.
	 */
	sctp_slide_mapping_arrays(stcb);
}