1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. 5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. 6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * a) Redistributions of source code must retain the above copyright notice, 12 * this list of conditions and the following disclaimer. 13 * 14 * b) Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the distribution. 17 * 18 * c) Neither the name of Cisco Systems, Inc. nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 #include <sys/cdefs.h> 36 __FBSDID("$FreeBSD$"); 37 38 #include <netinet/sctp_os.h> 39 #include <sys/proc.h> 40 #include <netinet/sctp_var.h> 41 #include <netinet/sctp_sysctl.h> 42 #include <netinet/sctp_header.h> 43 #include <netinet/sctp_pcb.h> 44 #include <netinet/sctputil.h> 45 #include <netinet/sctp_output.h> 46 #include <netinet/sctp_uio.h> 47 #include <netinet/sctp_auth.h> 48 #include <netinet/sctp_timer.h> 49 #include <netinet/sctp_asconf.h> 50 #include <netinet/sctp_indata.h> 51 #include <netinet/sctp_bsd_addr.h> 52 #include <netinet/sctp_input.h> 53 #include <netinet/sctp_crc32.h> 54 #include <netinet/sctp_lock_bsd.h> 55 /* 56 * NOTES: On the outbound side of things I need to check the sack timer to 57 * see if I should generate a sack into the chunk queue (if I have data to 58 * send that is and will be sending it .. for bundling. 59 * 60 * The callback in sctp_usrreq.c will get called when the socket is read from. 61 * This will cause sctp_service_queues() to get called on the top entry in 62 * the list. 63 */ 64 static uint32_t 65 sctp_add_chk_to_control(struct sctp_queued_to_read *control, 66 struct sctp_stream_in *strm, 67 struct sctp_tcb *stcb, 68 struct sctp_association *asoc, 69 struct sctp_tmit_chunk *chk, int lock_held); 70 71 72 void 73 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc) 74 { 75 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc); 76 } 77 78 /* Calculate what the rwnd would be */ 79 uint32_t 80 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc) 81 { 82 uint32_t calc = 0; 83 84 /* 85 * This is really set wrong with respect to a 1-2-m socket. Since 86 * the sb_cc is the count that everyone as put up. When we re-write 87 * sctp_soreceive then we will fix this so that ONLY this 88 * associations data is taken into account. 89 */ 90 if (stcb->sctp_socket == NULL) { 91 return (calc); 92 } 93 94 KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0, 95 ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue)); 96 KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0, 97 ("size_on_all_streams is %u", asoc->size_on_all_streams)); 98 if (stcb->asoc.sb_cc == 0 && 99 asoc->cnt_on_reasm_queue == 0 && 100 asoc->cnt_on_all_streams == 0) { 101 /* Full rwnd granted */ 102 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND); 103 return (calc); 104 } 105 /* get actual space */ 106 calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv); 107 /* 108 * take out what has NOT been put on socket queue and we yet hold 109 * for putting up. 110 */ 111 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue + 112 asoc->cnt_on_reasm_queue * MSIZE)); 113 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams + 114 asoc->cnt_on_all_streams * MSIZE)); 115 if (calc == 0) { 116 /* out of space */ 117 return (calc); 118 } 119 120 /* what is the overhead of all these rwnd's */ 121 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len); 122 /* 123 * If the window gets too small due to ctrl-stuff, reduce it to 1, 124 * even it is 0. SWS engaged 125 */ 126 if (calc < stcb->asoc.my_rwnd_control_len) { 127 calc = 1; 128 } 129 return (calc); 130 } 131 132 133 134 /* 135 * Build out our readq entry based on the incoming packet. 136 */ 137 struct sctp_queued_to_read * 138 sctp_build_readq_entry(struct sctp_tcb *stcb, 139 struct sctp_nets *net, 140 uint32_t tsn, uint32_t ppid, 141 uint32_t context, uint16_t sid, 142 uint32_t mid, uint8_t flags, 143 struct mbuf *dm) 144 { 145 struct sctp_queued_to_read *read_queue_e = NULL; 146 147 sctp_alloc_a_readq(stcb, read_queue_e); 148 if (read_queue_e == NULL) { 149 goto failed_build; 150 } 151 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read)); 152 read_queue_e->sinfo_stream = sid; 153 read_queue_e->sinfo_flags = (flags << 8); 154 read_queue_e->sinfo_ppid = ppid; 155 read_queue_e->sinfo_context = context; 156 read_queue_e->sinfo_tsn = tsn; 157 read_queue_e->sinfo_cumtsn = tsn; 158 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb); 159 read_queue_e->mid = mid; 160 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff; 161 TAILQ_INIT(&read_queue_e->reasm); 162 read_queue_e->whoFrom = net; 163 atomic_add_int(&net->ref_count, 1); 164 read_queue_e->data = dm; 165 read_queue_e->stcb = stcb; 166 read_queue_e->port_from = stcb->rport; 167 failed_build: 168 return (read_queue_e); 169 } 170 171 struct mbuf * 172 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo) 173 { 174 struct sctp_extrcvinfo *seinfo; 175 struct sctp_sndrcvinfo *outinfo; 176 struct sctp_rcvinfo *rcvinfo; 177 struct sctp_nxtinfo *nxtinfo; 178 struct cmsghdr *cmh; 179 struct mbuf *ret; 180 int len; 181 int use_extended; 182 int provide_nxt; 183 184 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 185 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 186 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 187 /* user does not want any ancillary data */ 188 return (NULL); 189 } 190 191 len = 0; 192 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) { 193 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo)); 194 } 195 seinfo = (struct sctp_extrcvinfo *)sinfo; 196 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) && 197 (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) { 198 provide_nxt = 1; 199 len += CMSG_SPACE(sizeof(struct sctp_nxtinfo)); 200 } else { 201 provide_nxt = 0; 202 } 203 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) { 204 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) { 205 use_extended = 1; 206 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo)); 207 } else { 208 use_extended = 0; 209 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo)); 210 } 211 } else { 212 use_extended = 0; 213 } 214 215 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 216 if (ret == NULL) { 217 /* No space */ 218 return (ret); 219 } 220 SCTP_BUF_LEN(ret) = 0; 221 222 /* We need a CMSG header followed by the struct */ 223 cmh = mtod(ret, struct cmsghdr *); 224 /* 225 * Make sure that there is no un-initialized padding between the 226 * cmsg header and cmsg data and after the cmsg data. 227 */ 228 memset(cmh, 0, len); 229 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) { 230 cmh->cmsg_level = IPPROTO_SCTP; 231 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo)); 232 cmh->cmsg_type = SCTP_RCVINFO; 233 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh); 234 rcvinfo->rcv_sid = sinfo->sinfo_stream; 235 rcvinfo->rcv_ssn = sinfo->sinfo_ssn; 236 rcvinfo->rcv_flags = sinfo->sinfo_flags; 237 rcvinfo->rcv_ppid = sinfo->sinfo_ppid; 238 rcvinfo->rcv_tsn = sinfo->sinfo_tsn; 239 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn; 240 rcvinfo->rcv_context = sinfo->sinfo_context; 241 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id; 242 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo))); 243 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo)); 244 } 245 if (provide_nxt) { 246 cmh->cmsg_level = IPPROTO_SCTP; 247 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo)); 248 cmh->cmsg_type = SCTP_NXTINFO; 249 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh); 250 nxtinfo->nxt_sid = seinfo->serinfo_next_stream; 251 nxtinfo->nxt_flags = 0; 252 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) { 253 nxtinfo->nxt_flags |= SCTP_UNORDERED; 254 } 255 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) { 256 nxtinfo->nxt_flags |= SCTP_NOTIFICATION; 257 } 258 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) { 259 nxtinfo->nxt_flags |= SCTP_COMPLETE; 260 } 261 nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid; 262 nxtinfo->nxt_length = seinfo->serinfo_next_length; 263 nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid; 264 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo))); 265 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo)); 266 } 267 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) { 268 cmh->cmsg_level = IPPROTO_SCTP; 269 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh); 270 if (use_extended) { 271 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo)); 272 cmh->cmsg_type = SCTP_EXTRCV; 273 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo)); 274 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo)); 275 } else { 276 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo)); 277 cmh->cmsg_type = SCTP_SNDRCV; 278 *outinfo = *sinfo; 279 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo)); 280 } 281 } 282 return (ret); 283 } 284 285 286 static void 287 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn) 288 { 289 uint32_t gap, i, cumackp1; 290 int fnd = 0; 291 int in_r = 0, in_nr = 0; 292 293 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { 294 return; 295 } 296 cumackp1 = asoc->cumulative_tsn + 1; 297 if (SCTP_TSN_GT(cumackp1, tsn)) { 298 /* 299 * this tsn is behind the cum ack and thus we don't need to 300 * worry about it being moved from one to the other. 301 */ 302 return; 303 } 304 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn); 305 in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap); 306 in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap); 307 if ((in_r == 0) && (in_nr == 0)) { 308 #ifdef INVARIANTS 309 panic("Things are really messed up now"); 310 #else 311 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn); 312 sctp_print_mapping_array(asoc); 313 #endif 314 } 315 if (in_nr == 0) 316 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 317 if (in_r) 318 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap); 319 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 320 asoc->highest_tsn_inside_nr_map = tsn; 321 } 322 if (tsn == asoc->highest_tsn_inside_map) { 323 /* We must back down to see what the new highest is */ 324 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) { 325 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn); 326 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) { 327 asoc->highest_tsn_inside_map = i; 328 fnd = 1; 329 break; 330 } 331 } 332 if (!fnd) { 333 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1; 334 } 335 } 336 } 337 338 static int 339 sctp_place_control_in_stream(struct sctp_stream_in *strm, 340 struct sctp_association *asoc, 341 struct sctp_queued_to_read *control) 342 { 343 struct sctp_queued_to_read *at; 344 struct sctp_readhead *q; 345 uint8_t flags, unordered; 346 347 flags = (control->sinfo_flags >> 8); 348 unordered = flags & SCTP_DATA_UNORDERED; 349 if (unordered) { 350 q = &strm->uno_inqueue; 351 if (asoc->idata_supported == 0) { 352 if (!TAILQ_EMPTY(q)) { 353 /* 354 * Only one stream can be here in old style 355 * -- abort 356 */ 357 return (-1); 358 } 359 TAILQ_INSERT_TAIL(q, control, next_instrm); 360 control->on_strm_q = SCTP_ON_UNORDERED; 361 return (0); 362 } 363 } else { 364 q = &strm->inqueue; 365 } 366 if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 367 control->end_added = 1; 368 control->first_frag_seen = 1; 369 control->last_frag_seen = 1; 370 } 371 if (TAILQ_EMPTY(q)) { 372 /* Empty queue */ 373 TAILQ_INSERT_HEAD(q, control, next_instrm); 374 if (unordered) { 375 control->on_strm_q = SCTP_ON_UNORDERED; 376 } else { 377 control->on_strm_q = SCTP_ON_ORDERED; 378 } 379 return (0); 380 } else { 381 TAILQ_FOREACH(at, q, next_instrm) { 382 if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) { 383 /* 384 * one in queue is bigger than the new one, 385 * insert before this one 386 */ 387 TAILQ_INSERT_BEFORE(at, control, next_instrm); 388 if (unordered) { 389 control->on_strm_q = SCTP_ON_UNORDERED; 390 } else { 391 control->on_strm_q = SCTP_ON_ORDERED; 392 } 393 break; 394 } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) { 395 /* 396 * Gak, He sent me a duplicate msg id 397 * number?? return -1 to abort. 398 */ 399 return (-1); 400 } else { 401 if (TAILQ_NEXT(at, next_instrm) == NULL) { 402 /* 403 * We are at the end, insert it 404 * after this one 405 */ 406 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 407 sctp_log_strm_del(control, at, 408 SCTP_STR_LOG_FROM_INSERT_TL); 409 } 410 TAILQ_INSERT_AFTER(q, at, control, next_instrm); 411 if (unordered) { 412 control->on_strm_q = SCTP_ON_UNORDERED; 413 } else { 414 control->on_strm_q = SCTP_ON_ORDERED; 415 } 416 break; 417 } 418 } 419 } 420 } 421 return (0); 422 } 423 424 static void 425 sctp_abort_in_reasm(struct sctp_tcb *stcb, 426 struct sctp_queued_to_read *control, 427 struct sctp_tmit_chunk *chk, 428 int *abort_flag, int opspot) 429 { 430 char msg[SCTP_DIAG_INFO_LEN]; 431 struct mbuf *oper; 432 433 if (stcb->asoc.idata_supported) { 434 snprintf(msg, sizeof(msg), 435 "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x", 436 opspot, 437 control->fsn_included, 438 chk->rec.data.tsn, 439 chk->rec.data.sid, 440 chk->rec.data.fsn, chk->rec.data.mid); 441 } else { 442 snprintf(msg, sizeof(msg), 443 "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x", 444 opspot, 445 control->fsn_included, 446 chk->rec.data.tsn, 447 chk->rec.data.sid, 448 chk->rec.data.fsn, 449 (uint16_t)chk->rec.data.mid); 450 } 451 oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 452 sctp_m_freem(chk->data); 453 chk->data = NULL; 454 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 455 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1; 456 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); 457 *abort_flag = 1; 458 } 459 460 static void 461 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control) 462 { 463 /* 464 * The control could not be placed and must be cleaned. 465 */ 466 struct sctp_tmit_chunk *chk, *nchk; 467 468 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) { 469 TAILQ_REMOVE(&control->reasm, chk, sctp_next); 470 if (chk->data) 471 sctp_m_freem(chk->data); 472 chk->data = NULL; 473 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 474 } 475 sctp_free_remote_addr(control->whoFrom); 476 if (control->data) { 477 sctp_m_freem(control->data); 478 control->data = NULL; 479 } 480 sctp_free_a_readq(stcb, control); 481 } 482 483 /* 484 * Queue the chunk either right into the socket buffer if it is the next one 485 * to go OR put it in the correct place in the delivery queue. If we do 486 * append to the so_buf, keep doing so until we are out of order as 487 * long as the control's entered are non-fragmented. 488 */ 489 static void 490 sctp_queue_data_to_stream(struct sctp_tcb *stcb, 491 struct sctp_association *asoc, 492 struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm) 493 { 494 /* 495 * FIX-ME maybe? What happens when the ssn wraps? If we are getting 496 * all the data in one stream this could happen quite rapidly. One 497 * could use the TSN to keep track of things, but this scheme breaks 498 * down in the other type of stream usage that could occur. Send a 499 * single msg to stream 0, send 4Billion messages to stream 1, now 500 * send a message to stream 0. You have a situation where the TSN 501 * has wrapped but not in the stream. Is this worth worrying about 502 * or should we just change our queue sort at the bottom to be by 503 * TSN. 504 * 505 * Could it also be legal for a peer to send ssn 1 with TSN 2 and 506 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN 507 * assignment this could happen... and I don't see how this would be 508 * a violation. So for now I am undecided an will leave the sort by 509 * SSN alone. Maybe a hybred approach is the answer 510 * 511 */ 512 struct sctp_queued_to_read *at; 513 int queue_needed; 514 uint32_t nxt_todel; 515 struct mbuf *op_err; 516 struct sctp_stream_in *strm; 517 char msg[SCTP_DIAG_INFO_LEN]; 518 519 strm = &asoc->strmin[control->sinfo_stream]; 520 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 521 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD); 522 } 523 if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) { 524 /* The incoming sseq is behind where we last delivered? */ 525 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n", 526 strm->last_mid_delivered, control->mid); 527 /* 528 * throw it in the stream so it gets cleaned up in 529 * association destruction 530 */ 531 TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm); 532 if (asoc->idata_supported) { 533 snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x", 534 strm->last_mid_delivered, control->sinfo_tsn, 535 control->sinfo_stream, control->mid); 536 } else { 537 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", 538 (uint16_t)strm->last_mid_delivered, 539 control->sinfo_tsn, 540 control->sinfo_stream, 541 (uint16_t)control->mid); 542 } 543 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 544 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2; 545 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 546 *abort_flag = 1; 547 return; 548 549 } 550 queue_needed = 1; 551 asoc->size_on_all_streams += control->length; 552 sctp_ucount_incr(asoc->cnt_on_all_streams); 553 nxt_todel = strm->last_mid_delivered + 1; 554 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) { 555 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 556 struct socket *so; 557 558 so = SCTP_INP_SO(stcb->sctp_ep); 559 atomic_add_int(&stcb->asoc.refcnt, 1); 560 SCTP_TCB_UNLOCK(stcb); 561 SCTP_SOCKET_LOCK(so, 1); 562 SCTP_TCB_LOCK(stcb); 563 atomic_subtract_int(&stcb->asoc.refcnt, 1); 564 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 565 SCTP_SOCKET_UNLOCK(so, 1); 566 return; 567 } 568 #endif 569 /* can be delivered right away? */ 570 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 571 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL); 572 } 573 /* EY it wont be queued if it could be delivered directly */ 574 queue_needed = 0; 575 if (asoc->size_on_all_streams >= control->length) { 576 asoc->size_on_all_streams -= control->length; 577 } else { 578 #ifdef INVARIANTS 579 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 580 #else 581 asoc->size_on_all_streams = 0; 582 #endif 583 } 584 sctp_ucount_decr(asoc->cnt_on_all_streams); 585 strm->last_mid_delivered++; 586 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 587 sctp_add_to_readq(stcb->sctp_ep, stcb, 588 control, 589 &stcb->sctp_socket->so_rcv, 1, 590 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED); 591 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) { 592 /* all delivered */ 593 nxt_todel = strm->last_mid_delivered + 1; 594 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) && 595 (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) { 596 if (control->on_strm_q == SCTP_ON_ORDERED) { 597 TAILQ_REMOVE(&strm->inqueue, control, next_instrm); 598 if (asoc->size_on_all_streams >= control->length) { 599 asoc->size_on_all_streams -= control->length; 600 } else { 601 #ifdef INVARIANTS 602 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 603 #else 604 asoc->size_on_all_streams = 0; 605 #endif 606 } 607 sctp_ucount_decr(asoc->cnt_on_all_streams); 608 #ifdef INVARIANTS 609 } else { 610 panic("Huh control: %p is on_strm_q: %d", 611 control, control->on_strm_q); 612 #endif 613 } 614 control->on_strm_q = 0; 615 strm->last_mid_delivered++; 616 /* 617 * We ignore the return of deliver_data here 618 * since we always can hold the chunk on the 619 * d-queue. And we have a finite number that 620 * can be delivered from the strq. 621 */ 622 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 623 sctp_log_strm_del(control, NULL, 624 SCTP_STR_LOG_FROM_IMMED_DEL); 625 } 626 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 627 sctp_add_to_readq(stcb->sctp_ep, stcb, 628 control, 629 &stcb->sctp_socket->so_rcv, 1, 630 SCTP_READ_LOCK_NOT_HELD, 631 SCTP_SO_LOCKED); 632 continue; 633 } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) { 634 *need_reasm = 1; 635 } 636 break; 637 } 638 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 639 SCTP_SOCKET_UNLOCK(so, 1); 640 #endif 641 } 642 if (queue_needed) { 643 /* 644 * Ok, we did not deliver this guy, find the correct place 645 * to put it on the queue. 646 */ 647 if (sctp_place_control_in_stream(strm, asoc, control)) { 648 snprintf(msg, sizeof(msg), 649 "Queue to str MID: %u duplicate", 650 control->mid); 651 sctp_clean_up_control(stcb, control); 652 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 653 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3; 654 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 655 *abort_flag = 1; 656 } 657 } 658 } 659 660 661 static void 662 sctp_setup_tail_pointer(struct sctp_queued_to_read *control) 663 { 664 struct mbuf *m, *prev = NULL; 665 struct sctp_tcb *stcb; 666 667 stcb = control->stcb; 668 control->held_length = 0; 669 control->length = 0; 670 m = control->data; 671 while (m) { 672 if (SCTP_BUF_LEN(m) == 0) { 673 /* Skip mbufs with NO length */ 674 if (prev == NULL) { 675 /* First one */ 676 control->data = sctp_m_free(m); 677 m = control->data; 678 } else { 679 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 680 m = SCTP_BUF_NEXT(prev); 681 } 682 if (m == NULL) { 683 control->tail_mbuf = prev; 684 } 685 continue; 686 } 687 prev = m; 688 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 689 if (control->on_read_q) { 690 /* 691 * On read queue so we must increment the SB stuff, 692 * we assume caller has done any locks of SB. 693 */ 694 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m); 695 } 696 m = SCTP_BUF_NEXT(m); 697 } 698 if (prev) { 699 control->tail_mbuf = prev; 700 } 701 } 702 703 static void 704 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added) 705 { 706 struct mbuf *prev = NULL; 707 struct sctp_tcb *stcb; 708 709 stcb = control->stcb; 710 if (stcb == NULL) { 711 #ifdef INVARIANTS 712 panic("Control broken"); 713 #else 714 return; 715 #endif 716 } 717 if (control->tail_mbuf == NULL) { 718 /* TSNH */ 719 control->data = m; 720 sctp_setup_tail_pointer(control); 721 return; 722 } 723 control->tail_mbuf->m_next = m; 724 while (m) { 725 if (SCTP_BUF_LEN(m) == 0) { 726 /* Skip mbufs with NO length */ 727 if (prev == NULL) { 728 /* First one */ 729 control->tail_mbuf->m_next = sctp_m_free(m); 730 m = control->tail_mbuf->m_next; 731 } else { 732 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 733 m = SCTP_BUF_NEXT(prev); 734 } 735 if (m == NULL) { 736 control->tail_mbuf = prev; 737 } 738 continue; 739 } 740 prev = m; 741 if (control->on_read_q) { 742 /* 743 * On read queue so we must increment the SB stuff, 744 * we assume caller has done any locks of SB. 745 */ 746 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m); 747 } 748 *added += SCTP_BUF_LEN(m); 749 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 750 m = SCTP_BUF_NEXT(m); 751 } 752 if (prev) { 753 control->tail_mbuf = prev; 754 } 755 } 756 757 static void 758 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control) 759 { 760 memset(nc, 0, sizeof(struct sctp_queued_to_read)); 761 nc->sinfo_stream = control->sinfo_stream; 762 nc->mid = control->mid; 763 TAILQ_INIT(&nc->reasm); 764 nc->top_fsn = control->top_fsn; 765 nc->mid = control->mid; 766 nc->sinfo_flags = control->sinfo_flags; 767 nc->sinfo_ppid = control->sinfo_ppid; 768 nc->sinfo_context = control->sinfo_context; 769 nc->fsn_included = 0xffffffff; 770 nc->sinfo_tsn = control->sinfo_tsn; 771 nc->sinfo_cumtsn = control->sinfo_cumtsn; 772 nc->sinfo_assoc_id = control->sinfo_assoc_id; 773 nc->whoFrom = control->whoFrom; 774 atomic_add_int(&nc->whoFrom->ref_count, 1); 775 nc->stcb = control->stcb; 776 nc->port_from = control->port_from; 777 } 778 779 static void 780 sctp_reset_a_control(struct sctp_queued_to_read *control, 781 struct sctp_inpcb *inp, uint32_t tsn) 782 { 783 control->fsn_included = tsn; 784 if (control->on_read_q) { 785 /* 786 * We have to purge it from there, hopefully this will work 787 * :-) 788 */ 789 TAILQ_REMOVE(&inp->read_queue, control, next); 790 control->on_read_q = 0; 791 } 792 } 793 794 static int 795 sctp_handle_old_unordered_data(struct sctp_tcb *stcb, 796 struct sctp_association *asoc, 797 struct sctp_stream_in *strm, 798 struct sctp_queued_to_read *control, 799 uint32_t pd_point, 800 int inp_read_lock_held) 801 { 802 /* 803 * Special handling for the old un-ordered data chunk. All the 804 * chunks/TSN's go to mid 0. So we have to do the old style watching 805 * to see if we have it all. If you return one, no other control 806 * entries on the un-ordered queue will be looked at. In theory 807 * there should be no others entries in reality, unless the guy is 808 * sending both unordered NDATA and unordered DATA... 809 */ 810 struct sctp_tmit_chunk *chk, *lchk, *tchk; 811 uint32_t fsn; 812 struct sctp_queued_to_read *nc; 813 int cnt_added; 814 815 if (control->first_frag_seen == 0) { 816 /* Nothing we can do, we have not seen the first piece yet */ 817 return (1); 818 } 819 /* Collapse any we can */ 820 cnt_added = 0; 821 restart: 822 fsn = control->fsn_included + 1; 823 /* Now what can we add? */ 824 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) { 825 if (chk->rec.data.fsn == fsn) { 826 /* Ok lets add it */ 827 sctp_alloc_a_readq(stcb, nc); 828 if (nc == NULL) { 829 break; 830 } 831 memset(nc, 0, sizeof(struct sctp_queued_to_read)); 832 TAILQ_REMOVE(&control->reasm, chk, sctp_next); 833 sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD); 834 fsn++; 835 cnt_added++; 836 chk = NULL; 837 if (control->end_added) { 838 /* We are done */ 839 if (!TAILQ_EMPTY(&control->reasm)) { 840 /* 841 * Ok we have to move anything left 842 * on the control queue to a new 843 * control. 844 */ 845 sctp_build_readq_entry_from_ctl(nc, control); 846 tchk = TAILQ_FIRST(&control->reasm); 847 if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 848 TAILQ_REMOVE(&control->reasm, tchk, sctp_next); 849 if (asoc->size_on_reasm_queue >= tchk->send_size) { 850 asoc->size_on_reasm_queue -= tchk->send_size; 851 } else { 852 #ifdef INVARIANTS 853 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size); 854 #else 855 asoc->size_on_reasm_queue = 0; 856 #endif 857 } 858 sctp_ucount_decr(asoc->cnt_on_reasm_queue); 859 nc->first_frag_seen = 1; 860 nc->fsn_included = tchk->rec.data.fsn; 861 nc->data = tchk->data; 862 nc->sinfo_ppid = tchk->rec.data.ppid; 863 nc->sinfo_tsn = tchk->rec.data.tsn; 864 sctp_mark_non_revokable(asoc, tchk->rec.data.tsn); 865 tchk->data = NULL; 866 sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED); 867 sctp_setup_tail_pointer(nc); 868 tchk = TAILQ_FIRST(&control->reasm); 869 } 870 /* Spin the rest onto the queue */ 871 while (tchk) { 872 TAILQ_REMOVE(&control->reasm, tchk, sctp_next); 873 TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next); 874 tchk = TAILQ_FIRST(&control->reasm); 875 } 876 /* 877 * Now lets add it to the queue 878 * after removing control 879 */ 880 TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm); 881 nc->on_strm_q = SCTP_ON_UNORDERED; 882 if (control->on_strm_q) { 883 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); 884 control->on_strm_q = 0; 885 } 886 } 887 if (control->pdapi_started) { 888 strm->pd_api_started = 0; 889 control->pdapi_started = 0; 890 } 891 if (control->on_strm_q) { 892 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); 893 control->on_strm_q = 0; 894 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); 895 } 896 if (control->on_read_q == 0) { 897 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 898 &stcb->sctp_socket->so_rcv, control->end_added, 899 inp_read_lock_held, SCTP_SO_NOT_LOCKED); 900 } 901 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 902 if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) { 903 /* 904 * Switch to the new guy and 905 * continue 906 */ 907 control = nc; 908 goto restart; 909 } else { 910 if (nc->on_strm_q == 0) { 911 sctp_free_a_readq(stcb, nc); 912 } 913 } 914 return (1); 915 } else { 916 sctp_free_a_readq(stcb, nc); 917 } 918 } else { 919 /* Can't add more */ 920 break; 921 } 922 } 923 if (cnt_added && strm->pd_api_started) { 924 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 925 } 926 if ((control->length > pd_point) && (strm->pd_api_started == 0)) { 927 strm->pd_api_started = 1; 928 control->pdapi_started = 1; 929 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 930 &stcb->sctp_socket->so_rcv, control->end_added, 931 inp_read_lock_held, SCTP_SO_NOT_LOCKED); 932 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 933 return (0); 934 } else { 935 return (1); 936 } 937 } 938 939 static void 940 sctp_inject_old_unordered_data(struct sctp_tcb *stcb, 941 struct sctp_association *asoc, 942 struct sctp_queued_to_read *control, 943 struct sctp_tmit_chunk *chk, 944 int *abort_flag) 945 { 946 struct sctp_tmit_chunk *at; 947 int inserted; 948 949 /* 950 * Here we need to place the chunk into the control structure sorted 951 * in the correct order. 952 */ 953 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 954 /* Its the very first one. */ 955 SCTPDBG(SCTP_DEBUG_XXX, 956 "chunk is a first fsn: %u becomes fsn_included\n", 957 chk->rec.data.fsn); 958 at = TAILQ_FIRST(&control->reasm); 959 if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) { 960 /* 961 * The first chunk in the reassembly is a smaller 962 * TSN than this one, even though this has a first, 963 * it must be from a subsequent msg. 964 */ 965 goto place_chunk; 966 } 967 if (control->first_frag_seen) { 968 /* 969 * In old un-ordered we can reassembly on one 970 * control multiple messages. As long as the next 971 * FIRST is greater then the old first (TSN i.e. FSN 972 * wise) 973 */ 974 struct mbuf *tdata; 975 uint32_t tmp; 976 977 if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) { 978 /* 979 * Easy way the start of a new guy beyond 980 * the lowest 981 */ 982 goto place_chunk; 983 } 984 if ((chk->rec.data.fsn == control->fsn_included) || 985 (control->pdapi_started)) { 986 /* 987 * Ok this should not happen, if it does we 988 * started the pd-api on the higher TSN 989 * (since the equals part is a TSN failure 990 * it must be that). 991 * 992 * We are completly hosed in that case since 993 * I have no way to recover. This really 994 * will only happen if we can get more TSN's 995 * higher before the pd-api-point. 996 */ 997 sctp_abort_in_reasm(stcb, control, chk, 998 abort_flag, 999 SCTP_FROM_SCTP_INDATA + SCTP_LOC_4); 1000 1001 return; 1002 } 1003 /* 1004 * Ok we have two firsts and the one we just got is 1005 * smaller than the one we previously placed.. yuck! 1006 * We must swap them out. 1007 */ 1008 /* swap the mbufs */ 1009 tdata = control->data; 1010 control->data = chk->data; 1011 chk->data = tdata; 1012 /* Save the lengths */ 1013 chk->send_size = control->length; 1014 /* Recompute length of control and tail pointer */ 1015 sctp_setup_tail_pointer(control); 1016 /* Fix the FSN included */ 1017 tmp = control->fsn_included; 1018 control->fsn_included = chk->rec.data.fsn; 1019 chk->rec.data.fsn = tmp; 1020 /* Fix the TSN included */ 1021 tmp = control->sinfo_tsn; 1022 control->sinfo_tsn = chk->rec.data.tsn; 1023 chk->rec.data.tsn = tmp; 1024 /* Fix the PPID included */ 1025 tmp = control->sinfo_ppid; 1026 control->sinfo_ppid = chk->rec.data.ppid; 1027 chk->rec.data.ppid = tmp; 1028 /* Fix tail pointer */ 1029 goto place_chunk; 1030 } 1031 control->first_frag_seen = 1; 1032 control->fsn_included = chk->rec.data.fsn; 1033 control->top_fsn = chk->rec.data.fsn; 1034 control->sinfo_tsn = chk->rec.data.tsn; 1035 control->sinfo_ppid = chk->rec.data.ppid; 1036 control->data = chk->data; 1037 sctp_mark_non_revokable(asoc, chk->rec.data.tsn); 1038 chk->data = NULL; 1039 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 1040 sctp_setup_tail_pointer(control); 1041 return; 1042 } 1043 place_chunk: 1044 inserted = 0; 1045 TAILQ_FOREACH(at, &control->reasm, sctp_next) { 1046 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) { 1047 /* 1048 * This one in queue is bigger than the new one, 1049 * insert the new one before at. 1050 */ 1051 asoc->size_on_reasm_queue += chk->send_size; 1052 sctp_ucount_incr(asoc->cnt_on_reasm_queue); 1053 inserted = 1; 1054 TAILQ_INSERT_BEFORE(at, chk, sctp_next); 1055 break; 1056 } else if (at->rec.data.fsn == chk->rec.data.fsn) { 1057 /* 1058 * They sent a duplicate fsn number. This really 1059 * should not happen since the FSN is a TSN and it 1060 * should have been dropped earlier. 1061 */ 1062 sctp_abort_in_reasm(stcb, control, chk, 1063 abort_flag, 1064 SCTP_FROM_SCTP_INDATA + SCTP_LOC_5); 1065 return; 1066 } 1067 1068 } 1069 if (inserted == 0) { 1070 /* Its at the end */ 1071 asoc->size_on_reasm_queue += chk->send_size; 1072 sctp_ucount_incr(asoc->cnt_on_reasm_queue); 1073 control->top_fsn = chk->rec.data.fsn; 1074 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next); 1075 } 1076 } 1077 1078 static int 1079 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc, 1080 struct sctp_stream_in *strm, int inp_read_lock_held) 1081 { 1082 /* 1083 * Given a stream, strm, see if any of the SSN's on it that are 1084 * fragmented are ready to deliver. If so go ahead and place them on 1085 * the read queue. In so placing if we have hit the end, then we 1086 * need to remove them from the stream's queue. 1087 */ 1088 struct sctp_queued_to_read *control, *nctl = NULL; 1089 uint32_t next_to_del; 1090 uint32_t pd_point; 1091 int ret = 0; 1092 1093 if (stcb->sctp_socket) { 1094 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT, 1095 stcb->sctp_ep->partial_delivery_point); 1096 } else { 1097 pd_point = stcb->sctp_ep->partial_delivery_point; 1098 } 1099 control = TAILQ_FIRST(&strm->uno_inqueue); 1100 1101 if ((control != NULL) && 1102 (asoc->idata_supported == 0)) { 1103 /* Special handling needed for "old" data format */ 1104 if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) { 1105 goto done_un; 1106 } 1107 } 1108 if (strm->pd_api_started) { 1109 /* Can't add more */ 1110 return (0); 1111 } 1112 while (control) { 1113 SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n", 1114 control, control->end_added, control->mid, control->top_fsn, control->fsn_included); 1115 nctl = TAILQ_NEXT(control, next_instrm); 1116 if (control->end_added) { 1117 /* We just put the last bit on */ 1118 if (control->on_strm_q) { 1119 #ifdef INVARIANTS 1120 if (control->on_strm_q != SCTP_ON_UNORDERED) { 1121 panic("Huh control: %p on_q: %d -- not unordered?", 1122 control, control->on_strm_q); 1123 } 1124 #endif 1125 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); 1126 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); 1127 control->on_strm_q = 0; 1128 } 1129 if (control->on_read_q == 0) { 1130 sctp_add_to_readq(stcb->sctp_ep, stcb, 1131 control, 1132 &stcb->sctp_socket->so_rcv, control->end_added, 1133 inp_read_lock_held, SCTP_SO_NOT_LOCKED); 1134 } 1135 } else { 1136 /* Can we do a PD-API for this un-ordered guy? */ 1137 if ((control->length >= pd_point) && (strm->pd_api_started == 0)) { 1138 strm->pd_api_started = 1; 1139 control->pdapi_started = 1; 1140 sctp_add_to_readq(stcb->sctp_ep, stcb, 1141 control, 1142 &stcb->sctp_socket->so_rcv, control->end_added, 1143 inp_read_lock_held, SCTP_SO_NOT_LOCKED); 1144 1145 break; 1146 } 1147 } 1148 control = nctl; 1149 } 1150 done_un: 1151 control = TAILQ_FIRST(&strm->inqueue); 1152 if (strm->pd_api_started) { 1153 /* Can't add more */ 1154 return (0); 1155 } 1156 if (control == NULL) { 1157 return (ret); 1158 } 1159 if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) { 1160 /* 1161 * Ok the guy at the top was being partially delivered 1162 * completed, so we remove it. Note the pd_api flag was 1163 * taken off when the chunk was merged on in 1164 * sctp_queue_data_for_reasm below. 1165 */ 1166 nctl = TAILQ_NEXT(control, next_instrm); 1167 SCTPDBG(SCTP_DEBUG_XXX, 1168 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n", 1169 control, control->end_added, control->mid, 1170 control->top_fsn, control->fsn_included, 1171 strm->last_mid_delivered); 1172 if (control->end_added) { 1173 if (control->on_strm_q) { 1174 #ifdef INVARIANTS 1175 if (control->on_strm_q != SCTP_ON_ORDERED) { 1176 panic("Huh control: %p on_q: %d -- not ordered?", 1177 control, control->on_strm_q); 1178 } 1179 #endif 1180 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); 1181 TAILQ_REMOVE(&strm->inqueue, control, next_instrm); 1182 if (asoc->size_on_all_streams >= control->length) { 1183 asoc->size_on_all_streams -= control->length; 1184 } else { 1185 #ifdef INVARIANTS 1186 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 1187 #else 1188 asoc->size_on_all_streams = 0; 1189 #endif 1190 } 1191 sctp_ucount_decr(asoc->cnt_on_all_streams); 1192 control->on_strm_q = 0; 1193 } 1194 if (strm->pd_api_started && control->pdapi_started) { 1195 control->pdapi_started = 0; 1196 strm->pd_api_started = 0; 1197 } 1198 if (control->on_read_q == 0) { 1199 sctp_add_to_readq(stcb->sctp_ep, stcb, 1200 control, 1201 &stcb->sctp_socket->so_rcv, control->end_added, 1202 inp_read_lock_held, SCTP_SO_NOT_LOCKED); 1203 } 1204 control = nctl; 1205 } 1206 } 1207 if (strm->pd_api_started) { 1208 /* 1209 * Can't add more must have gotten an un-ordered above being 1210 * partially delivered. 1211 */ 1212 return (0); 1213 } 1214 deliver_more: 1215 next_to_del = strm->last_mid_delivered + 1; 1216 if (control) { 1217 SCTPDBG(SCTP_DEBUG_XXX, 1218 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n", 1219 control, control->end_added, control->mid, control->top_fsn, control->fsn_included, 1220 next_to_del); 1221 nctl = TAILQ_NEXT(control, next_instrm); 1222 if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) && 1223 (control->first_frag_seen)) { 1224 int done; 1225 1226 /* Ok we can deliver it onto the stream. */ 1227 if (control->end_added) { 1228 /* We are done with it afterwards */ 1229 if (control->on_strm_q) { 1230 #ifdef INVARIANTS 1231 if (control->on_strm_q != SCTP_ON_ORDERED) { 1232 panic("Huh control: %p on_q: %d -- not ordered?", 1233 control, control->on_strm_q); 1234 } 1235 #endif 1236 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); 1237 TAILQ_REMOVE(&strm->inqueue, control, next_instrm); 1238 if (asoc->size_on_all_streams >= control->length) { 1239 asoc->size_on_all_streams -= control->length; 1240 } else { 1241 #ifdef INVARIANTS 1242 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 1243 #else 1244 asoc->size_on_all_streams = 0; 1245 #endif 1246 } 1247 sctp_ucount_decr(asoc->cnt_on_all_streams); 1248 control->on_strm_q = 0; 1249 } 1250 ret++; 1251 } 1252 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 1253 /* 1254 * A singleton now slipping through - mark 1255 * it non-revokable too 1256 */ 1257 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 1258 } else if (control->end_added == 0) { 1259 /* 1260 * Check if we can defer adding until its 1261 * all there 1262 */ 1263 if ((control->length < pd_point) || (strm->pd_api_started)) { 1264 /* 1265 * Don't need it or cannot add more 1266 * (one being delivered that way) 1267 */ 1268 goto out; 1269 } 1270 } 1271 done = (control->end_added) && (control->last_frag_seen); 1272 if (control->on_read_q == 0) { 1273 if (!done) { 1274 if (asoc->size_on_all_streams >= control->length) { 1275 asoc->size_on_all_streams -= control->length; 1276 } else { 1277 #ifdef INVARIANTS 1278 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 1279 #else 1280 asoc->size_on_all_streams = 0; 1281 #endif 1282 } 1283 strm->pd_api_started = 1; 1284 control->pdapi_started = 1; 1285 } 1286 sctp_add_to_readq(stcb->sctp_ep, stcb, 1287 control, 1288 &stcb->sctp_socket->so_rcv, control->end_added, 1289 inp_read_lock_held, SCTP_SO_NOT_LOCKED); 1290 } 1291 strm->last_mid_delivered = next_to_del; 1292 if (done) { 1293 control = nctl; 1294 goto deliver_more; 1295 } 1296 } 1297 } 1298 out: 1299 return (ret); 1300 } 1301 1302 1303 uint32_t 1304 sctp_add_chk_to_control(struct sctp_queued_to_read *control, 1305 struct sctp_stream_in *strm, 1306 struct sctp_tcb *stcb, struct sctp_association *asoc, 1307 struct sctp_tmit_chunk *chk, int hold_rlock) 1308 { 1309 /* 1310 * Given a control and a chunk, merge the data from the chk onto the 1311 * control and free up the chunk resources. 1312 */ 1313 uint32_t added = 0; 1314 int i_locked = 0; 1315 1316 if (control->on_read_q && (hold_rlock == 0)) { 1317 /* 1318 * Its being pd-api'd so we must do some locks. 1319 */ 1320 SCTP_INP_READ_LOCK(stcb->sctp_ep); 1321 i_locked = 1; 1322 } 1323 if (control->data == NULL) { 1324 control->data = chk->data; 1325 sctp_setup_tail_pointer(control); 1326 } else { 1327 sctp_add_to_tail_pointer(control, chk->data, &added); 1328 } 1329 control->fsn_included = chk->rec.data.fsn; 1330 asoc->size_on_reasm_queue -= chk->send_size; 1331 sctp_ucount_decr(asoc->cnt_on_reasm_queue); 1332 sctp_mark_non_revokable(asoc, chk->rec.data.tsn); 1333 chk->data = NULL; 1334 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 1335 control->first_frag_seen = 1; 1336 control->sinfo_tsn = chk->rec.data.tsn; 1337 control->sinfo_ppid = chk->rec.data.ppid; 1338 } 1339 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 1340 /* Its complete */ 1341 if ((control->on_strm_q) && (control->on_read_q)) { 1342 if (control->pdapi_started) { 1343 control->pdapi_started = 0; 1344 strm->pd_api_started = 0; 1345 } 1346 if (control->on_strm_q == SCTP_ON_UNORDERED) { 1347 /* Unordered */ 1348 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); 1349 control->on_strm_q = 0; 1350 } else if (control->on_strm_q == SCTP_ON_ORDERED) { 1351 /* Ordered */ 1352 TAILQ_REMOVE(&strm->inqueue, control, next_instrm); 1353 /* 1354 * Don't need to decrement 1355 * size_on_all_streams, since control is on 1356 * the read queue. 1357 */ 1358 sctp_ucount_decr(asoc->cnt_on_all_streams); 1359 control->on_strm_q = 0; 1360 #ifdef INVARIANTS 1361 } else if (control->on_strm_q) { 1362 panic("Unknown state on ctrl: %p on_strm_q: %d", control, 1363 control->on_strm_q); 1364 #endif 1365 } 1366 } 1367 control->end_added = 1; 1368 control->last_frag_seen = 1; 1369 } 1370 if (i_locked) { 1371 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 1372 } 1373 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 1374 return (added); 1375 } 1376 1377 /* 1378 * Dump onto the re-assembly queue, in its proper place. After dumping on the 1379 * queue, see if anthing can be delivered. If so pull it off (or as much as 1380 * we can. If we run out of space then we must dump what we can and set the 1381 * appropriate flag to say we queued what we could. 1382 */ 1383 static void 1384 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc, 1385 struct sctp_queued_to_read *control, 1386 struct sctp_tmit_chunk *chk, 1387 int created_control, 1388 int *abort_flag, uint32_t tsn) 1389 { 1390 uint32_t next_fsn; 1391 struct sctp_tmit_chunk *at, *nat; 1392 struct sctp_stream_in *strm; 1393 int do_wakeup, unordered; 1394 uint32_t lenadded; 1395 1396 strm = &asoc->strmin[control->sinfo_stream]; 1397 /* 1398 * For old un-ordered data chunks. 1399 */ 1400 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 1401 unordered = 1; 1402 } else { 1403 unordered = 0; 1404 } 1405 /* Must be added to the stream-in queue */ 1406 if (created_control) { 1407 if (unordered == 0) { 1408 sctp_ucount_incr(asoc->cnt_on_all_streams); 1409 } 1410 if (sctp_place_control_in_stream(strm, asoc, control)) { 1411 /* Duplicate SSN? */ 1412 sctp_abort_in_reasm(stcb, control, chk, 1413 abort_flag, 1414 SCTP_FROM_SCTP_INDATA + SCTP_LOC_6); 1415 sctp_clean_up_control(stcb, control); 1416 return; 1417 } 1418 if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) { 1419 /* 1420 * Ok we created this control and now lets validate 1421 * that its legal i.e. there is a B bit set, if not 1422 * and we have up to the cum-ack then its invalid. 1423 */ 1424 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) { 1425 sctp_abort_in_reasm(stcb, control, chk, 1426 abort_flag, 1427 SCTP_FROM_SCTP_INDATA + SCTP_LOC_7); 1428 return; 1429 } 1430 } 1431 } 1432 if ((asoc->idata_supported == 0) && (unordered == 1)) { 1433 sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag); 1434 return; 1435 } 1436 /* 1437 * Ok we must queue the chunk into the reasembly portion: o if its 1438 * the first it goes to the control mbuf. o if its not first but the 1439 * next in sequence it goes to the control, and each succeeding one 1440 * in order also goes. o if its not in order we place it on the list 1441 * in its place. 1442 */ 1443 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 1444 /* Its the very first one. */ 1445 SCTPDBG(SCTP_DEBUG_XXX, 1446 "chunk is a first fsn: %u becomes fsn_included\n", 1447 chk->rec.data.fsn); 1448 if (control->first_frag_seen) { 1449 /* 1450 * Error on senders part, they either sent us two 1451 * data chunks with FIRST, or they sent two 1452 * un-ordered chunks that were fragmented at the 1453 * same time in the same stream. 1454 */ 1455 sctp_abort_in_reasm(stcb, control, chk, 1456 abort_flag, 1457 SCTP_FROM_SCTP_INDATA + SCTP_LOC_8); 1458 return; 1459 } 1460 control->first_frag_seen = 1; 1461 control->sinfo_ppid = chk->rec.data.ppid; 1462 control->sinfo_tsn = chk->rec.data.tsn; 1463 control->fsn_included = chk->rec.data.fsn; 1464 control->data = chk->data; 1465 sctp_mark_non_revokable(asoc, chk->rec.data.tsn); 1466 chk->data = NULL; 1467 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 1468 sctp_setup_tail_pointer(control); 1469 asoc->size_on_all_streams += control->length; 1470 } else { 1471 /* Place the chunk in our list */ 1472 int inserted = 0; 1473 1474 if (control->last_frag_seen == 0) { 1475 /* Still willing to raise highest FSN seen */ 1476 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) { 1477 SCTPDBG(SCTP_DEBUG_XXX, 1478 "We have a new top_fsn: %u\n", 1479 chk->rec.data.fsn); 1480 control->top_fsn = chk->rec.data.fsn; 1481 } 1482 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 1483 SCTPDBG(SCTP_DEBUG_XXX, 1484 "The last fsn is now in place fsn: %u\n", 1485 chk->rec.data.fsn); 1486 control->last_frag_seen = 1; 1487 if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) { 1488 SCTPDBG(SCTP_DEBUG_XXX, 1489 "New fsn: %u is not at top_fsn: %u -- abort\n", 1490 chk->rec.data.fsn, 1491 control->top_fsn); 1492 sctp_abort_in_reasm(stcb, control, chk, 1493 abort_flag, 1494 SCTP_FROM_SCTP_INDATA + SCTP_LOC_9); 1495 return; 1496 } 1497 } 1498 if (asoc->idata_supported || control->first_frag_seen) { 1499 /* 1500 * For IDATA we always check since we know 1501 * that the first fragment is 0. For old 1502 * DATA we have to receive the first before 1503 * we know the first FSN (which is the TSN). 1504 */ 1505 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) { 1506 /* 1507 * We have already delivered up to 1508 * this so its a dup 1509 */ 1510 sctp_abort_in_reasm(stcb, control, chk, 1511 abort_flag, 1512 SCTP_FROM_SCTP_INDATA + SCTP_LOC_10); 1513 return; 1514 } 1515 } 1516 } else { 1517 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 1518 /* Second last? huh? */ 1519 SCTPDBG(SCTP_DEBUG_XXX, 1520 "Duplicate last fsn: %u (top: %u) -- abort\n", 1521 chk->rec.data.fsn, control->top_fsn); 1522 sctp_abort_in_reasm(stcb, control, 1523 chk, abort_flag, 1524 SCTP_FROM_SCTP_INDATA + SCTP_LOC_11); 1525 return; 1526 } 1527 if (asoc->idata_supported || control->first_frag_seen) { 1528 /* 1529 * For IDATA we always check since we know 1530 * that the first fragment is 0. For old 1531 * DATA we have to receive the first before 1532 * we know the first FSN (which is the TSN). 1533 */ 1534 1535 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) { 1536 /* 1537 * We have already delivered up to 1538 * this so its a dup 1539 */ 1540 SCTPDBG(SCTP_DEBUG_XXX, 1541 "New fsn: %u is already seen in included_fsn: %u -- abort\n", 1542 chk->rec.data.fsn, control->fsn_included); 1543 sctp_abort_in_reasm(stcb, control, chk, 1544 abort_flag, 1545 SCTP_FROM_SCTP_INDATA + SCTP_LOC_12); 1546 return; 1547 } 1548 } 1549 /* 1550 * validate not beyond top FSN if we have seen last 1551 * one 1552 */ 1553 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) { 1554 SCTPDBG(SCTP_DEBUG_XXX, 1555 "New fsn: %u is beyond or at top_fsn: %u -- abort\n", 1556 chk->rec.data.fsn, 1557 control->top_fsn); 1558 sctp_abort_in_reasm(stcb, control, chk, 1559 abort_flag, 1560 SCTP_FROM_SCTP_INDATA + SCTP_LOC_13); 1561 return; 1562 } 1563 } 1564 /* 1565 * If we reach here, we need to place the new chunk in the 1566 * reassembly for this control. 1567 */ 1568 SCTPDBG(SCTP_DEBUG_XXX, 1569 "chunk is a not first fsn: %u needs to be inserted\n", 1570 chk->rec.data.fsn); 1571 TAILQ_FOREACH(at, &control->reasm, sctp_next) { 1572 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) { 1573 /* 1574 * This one in queue is bigger than the new 1575 * one, insert the new one before at. 1576 */ 1577 SCTPDBG(SCTP_DEBUG_XXX, 1578 "Insert it before fsn: %u\n", 1579 at->rec.data.fsn); 1580 asoc->size_on_reasm_queue += chk->send_size; 1581 sctp_ucount_incr(asoc->cnt_on_reasm_queue); 1582 TAILQ_INSERT_BEFORE(at, chk, sctp_next); 1583 inserted = 1; 1584 break; 1585 } else if (at->rec.data.fsn == chk->rec.data.fsn) { 1586 /* 1587 * Gak, He sent me a duplicate str seq 1588 * number 1589 */ 1590 /* 1591 * foo bar, I guess I will just free this 1592 * new guy, should we abort too? FIX ME 1593 * MAYBE? Or it COULD be that the SSN's have 1594 * wrapped. Maybe I should compare to TSN 1595 * somehow... sigh for now just blow away 1596 * the chunk! 1597 */ 1598 SCTPDBG(SCTP_DEBUG_XXX, 1599 "Duplicate to fsn: %u -- abort\n", 1600 at->rec.data.fsn); 1601 sctp_abort_in_reasm(stcb, control, 1602 chk, abort_flag, 1603 SCTP_FROM_SCTP_INDATA + SCTP_LOC_14); 1604 return; 1605 } 1606 } 1607 if (inserted == 0) { 1608 /* Goes on the end */ 1609 SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n", 1610 chk->rec.data.fsn); 1611 asoc->size_on_reasm_queue += chk->send_size; 1612 sctp_ucount_incr(asoc->cnt_on_reasm_queue); 1613 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next); 1614 } 1615 } 1616 /* 1617 * Ok lets see if we can suck any up into the control structure that 1618 * are in seq if it makes sense. 1619 */ 1620 do_wakeup = 0; 1621 /* 1622 * If the first fragment has not been seen there is no sense in 1623 * looking. 1624 */ 1625 if (control->first_frag_seen) { 1626 next_fsn = control->fsn_included + 1; 1627 TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) { 1628 if (at->rec.data.fsn == next_fsn) { 1629 /* We can add this one now to the control */ 1630 SCTPDBG(SCTP_DEBUG_XXX, 1631 "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n", 1632 control, at, 1633 at->rec.data.fsn, 1634 next_fsn, control->fsn_included); 1635 TAILQ_REMOVE(&control->reasm, at, sctp_next); 1636 lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD); 1637 if (control->on_read_q) { 1638 do_wakeup = 1; 1639 } else { 1640 /* 1641 * We only add to the 1642 * size-on-all-streams if its not on 1643 * the read q. The read q flag will 1644 * cause a sballoc so its accounted 1645 * for there. 1646 */ 1647 asoc->size_on_all_streams += lenadded; 1648 } 1649 next_fsn++; 1650 if (control->end_added && control->pdapi_started) { 1651 if (strm->pd_api_started) { 1652 strm->pd_api_started = 0; 1653 control->pdapi_started = 0; 1654 } 1655 if (control->on_read_q == 0) { 1656 sctp_add_to_readq(stcb->sctp_ep, stcb, 1657 control, 1658 &stcb->sctp_socket->so_rcv, control->end_added, 1659 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 1660 } 1661 break; 1662 } 1663 } else { 1664 break; 1665 } 1666 } 1667 } 1668 if (do_wakeup) { 1669 /* Need to wakeup the reader */ 1670 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 1671 } 1672 } 1673 1674 static struct sctp_queued_to_read * 1675 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported) 1676 { 1677 struct sctp_queued_to_read *control; 1678 1679 if (ordered) { 1680 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) { 1681 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) { 1682 break; 1683 } 1684 } 1685 } else { 1686 if (idata_supported) { 1687 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) { 1688 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) { 1689 break; 1690 } 1691 } 1692 } else { 1693 control = TAILQ_FIRST(&strm->uno_inqueue); 1694 } 1695 } 1696 return (control); 1697 } 1698 1699 static int 1700 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc, 1701 struct mbuf **m, int offset, int chk_length, 1702 struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag, 1703 int *break_flag, int last_chunk, uint8_t chk_type) 1704 { 1705 struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */ 1706 uint32_t tsn, fsn, gap, mid; 1707 struct mbuf *dmbuf; 1708 int the_len; 1709 int need_reasm_check = 0; 1710 uint16_t sid; 1711 struct mbuf *op_err; 1712 char msg[SCTP_DIAG_INFO_LEN]; 1713 struct sctp_queued_to_read *control, *ncontrol; 1714 uint32_t ppid; 1715 uint8_t chk_flags; 1716 struct sctp_stream_reset_list *liste; 1717 int ordered; 1718 size_t clen; 1719 int created_control = 0; 1720 1721 if (chk_type == SCTP_IDATA) { 1722 struct sctp_idata_chunk *chunk, chunk_buf; 1723 1724 chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset, 1725 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf); 1726 chk_flags = chunk->ch.chunk_flags; 1727 clen = sizeof(struct sctp_idata_chunk); 1728 tsn = ntohl(chunk->dp.tsn); 1729 sid = ntohs(chunk->dp.sid); 1730 mid = ntohl(chunk->dp.mid); 1731 if (chk_flags & SCTP_DATA_FIRST_FRAG) { 1732 fsn = 0; 1733 ppid = chunk->dp.ppid_fsn.ppid; 1734 } else { 1735 fsn = ntohl(chunk->dp.ppid_fsn.fsn); 1736 ppid = 0xffffffff; /* Use as an invalid value. */ 1737 } 1738 } else { 1739 struct sctp_data_chunk *chunk, chunk_buf; 1740 1741 chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset, 1742 sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf); 1743 chk_flags = chunk->ch.chunk_flags; 1744 clen = sizeof(struct sctp_data_chunk); 1745 tsn = ntohl(chunk->dp.tsn); 1746 sid = ntohs(chunk->dp.sid); 1747 mid = (uint32_t)(ntohs(chunk->dp.ssn)); 1748 fsn = tsn; 1749 ppid = chunk->dp.ppid; 1750 } 1751 if ((size_t)chk_length == clen) { 1752 /* 1753 * Need to send an abort since we had a empty data chunk. 1754 */ 1755 op_err = sctp_generate_no_user_data_cause(tsn); 1756 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14; 1757 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 1758 *abort_flag = 1; 1759 return (0); 1760 } 1761 if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) { 1762 asoc->send_sack = 1; 1763 } 1764 ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0); 1765 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 1766 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS); 1767 } 1768 if (stcb == NULL) { 1769 return (0); 1770 } 1771 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn); 1772 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) { 1773 /* It is a duplicate */ 1774 SCTP_STAT_INCR(sctps_recvdupdata); 1775 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { 1776 /* Record a dup for the next outbound sack */ 1777 asoc->dup_tsns[asoc->numduptsns] = tsn; 1778 asoc->numduptsns++; 1779 } 1780 asoc->send_sack = 1; 1781 return (0); 1782 } 1783 /* Calculate the number of TSN's between the base and this TSN */ 1784 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn); 1785 if (gap >= (SCTP_MAPPING_ARRAY << 3)) { 1786 /* Can't hold the bit in the mapping at max array, toss it */ 1787 return (0); 1788 } 1789 if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) { 1790 SCTP_TCB_LOCK_ASSERT(stcb); 1791 if (sctp_expand_mapping_array(asoc, gap)) { 1792 /* Can't expand, drop it */ 1793 return (0); 1794 } 1795 } 1796 if (SCTP_TSN_GT(tsn, *high_tsn)) { 1797 *high_tsn = tsn; 1798 } 1799 /* See if we have received this one already */ 1800 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) || 1801 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) { 1802 SCTP_STAT_INCR(sctps_recvdupdata); 1803 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { 1804 /* Record a dup for the next outbound sack */ 1805 asoc->dup_tsns[asoc->numduptsns] = tsn; 1806 asoc->numduptsns++; 1807 } 1808 asoc->send_sack = 1; 1809 return (0); 1810 } 1811 /* 1812 * Check to see about the GONE flag, duplicates would cause a sack 1813 * to be sent up above 1814 */ 1815 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 1816 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 1817 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) { 1818 /* 1819 * wait a minute, this guy is gone, there is no longer a 1820 * receiver. Send peer an ABORT! 1821 */ 1822 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 1823 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 1824 *abort_flag = 1; 1825 return (0); 1826 } 1827 /* 1828 * Now before going further we see if there is room. If NOT then we 1829 * MAY let one through only IF this TSN is the one we are waiting 1830 * for on a partial delivery API. 1831 */ 1832 1833 /* Is the stream valid? */ 1834 if (sid >= asoc->streamincnt) { 1835 struct sctp_error_invalid_stream *cause; 1836 1837 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream), 1838 0, M_NOWAIT, 1, MT_DATA); 1839 if (op_err != NULL) { 1840 /* add some space up front so prepend will work well */ 1841 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 1842 cause = mtod(op_err, struct sctp_error_invalid_stream *); 1843 /* 1844 * Error causes are just param's and this one has 1845 * two back to back phdr, one with the error type 1846 * and size, the other with the streamid and a rsvd 1847 */ 1848 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream); 1849 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM); 1850 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream)); 1851 cause->stream_id = htons(sid); 1852 cause->reserved = htons(0); 1853 sctp_queue_op_err(stcb, op_err); 1854 } 1855 SCTP_STAT_INCR(sctps_badsid); 1856 SCTP_TCB_LOCK_ASSERT(stcb); 1857 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 1858 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1859 asoc->highest_tsn_inside_nr_map = tsn; 1860 } 1861 if (tsn == (asoc->cumulative_tsn + 1)) { 1862 /* Update cum-ack */ 1863 asoc->cumulative_tsn = tsn; 1864 } 1865 return (0); 1866 } 1867 /* 1868 * If its a fragmented message, lets see if we can find the control 1869 * on the reassembly queues. 1870 */ 1871 if ((chk_type == SCTP_IDATA) && 1872 ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) && 1873 (fsn == 0)) { 1874 /* 1875 * The first *must* be fsn 0, and other (middle/end) pieces 1876 * can *not* be fsn 0. XXX: This can happen in case of a 1877 * wrap around. Ignore is for now. 1878 */ 1879 snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", 1880 mid, chk_flags); 1881 goto err_out; 1882 } 1883 control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported); 1884 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n", 1885 chk_flags, control); 1886 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 1887 /* See if we can find the re-assembly entity */ 1888 if (control != NULL) { 1889 /* We found something, does it belong? */ 1890 if (ordered && (mid != control->mid)) { 1891 snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid); 1892 err_out: 1893 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 1894 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15; 1895 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 1896 *abort_flag = 1; 1897 return (0); 1898 } 1899 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) { 1900 /* 1901 * We can't have a switched order with an 1902 * unordered chunk 1903 */ 1904 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)", 1905 tsn); 1906 goto err_out; 1907 } 1908 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) { 1909 /* 1910 * We can't have a switched unordered with a 1911 * ordered chunk 1912 */ 1913 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)", 1914 tsn); 1915 goto err_out; 1916 } 1917 } 1918 } else { 1919 /* 1920 * Its a complete segment. Lets validate we don't have a 1921 * re-assembly going on with the same Stream/Seq (for 1922 * ordered) or in the same Stream for unordered. 1923 */ 1924 if (control != NULL) { 1925 if (ordered || asoc->idata_supported) { 1926 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n", 1927 chk_flags, mid); 1928 snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid); 1929 goto err_out; 1930 } else { 1931 if ((tsn == control->fsn_included + 1) && 1932 (control->end_added == 0)) { 1933 snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included); 1934 goto err_out; 1935 } else { 1936 control = NULL; 1937 } 1938 } 1939 } 1940 } 1941 /* now do the tests */ 1942 if (((asoc->cnt_on_all_streams + 1943 asoc->cnt_on_reasm_queue + 1944 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) || 1945 (((int)asoc->my_rwnd) <= 0)) { 1946 /* 1947 * When we have NO room in the rwnd we check to make sure 1948 * the reader is doing its job... 1949 */ 1950 if (stcb->sctp_socket->so_rcv.sb_cc) { 1951 /* some to read, wake-up */ 1952 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1953 struct socket *so; 1954 1955 so = SCTP_INP_SO(stcb->sctp_ep); 1956 atomic_add_int(&stcb->asoc.refcnt, 1); 1957 SCTP_TCB_UNLOCK(stcb); 1958 SCTP_SOCKET_LOCK(so, 1); 1959 SCTP_TCB_LOCK(stcb); 1960 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1961 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 1962 /* assoc was freed while we were unlocked */ 1963 SCTP_SOCKET_UNLOCK(so, 1); 1964 return (0); 1965 } 1966 #endif 1967 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 1968 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1969 SCTP_SOCKET_UNLOCK(so, 1); 1970 #endif 1971 } 1972 /* now is it in the mapping array of what we have accepted? */ 1973 if (chk_type == SCTP_DATA) { 1974 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) && 1975 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1976 /* Nope not in the valid range dump it */ 1977 dump_packet: 1978 sctp_set_rwnd(stcb, asoc); 1979 if ((asoc->cnt_on_all_streams + 1980 asoc->cnt_on_reasm_queue + 1981 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) { 1982 SCTP_STAT_INCR(sctps_datadropchklmt); 1983 } else { 1984 SCTP_STAT_INCR(sctps_datadroprwnd); 1985 } 1986 *break_flag = 1; 1987 return (0); 1988 } 1989 } else { 1990 if (control == NULL) { 1991 goto dump_packet; 1992 } 1993 if (SCTP_TSN_GT(fsn, control->top_fsn)) { 1994 goto dump_packet; 1995 } 1996 } 1997 } 1998 #ifdef SCTP_ASOCLOG_OF_TSNS 1999 SCTP_TCB_LOCK_ASSERT(stcb); 2000 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) { 2001 asoc->tsn_in_at = 0; 2002 asoc->tsn_in_wrapped = 1; 2003 } 2004 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn; 2005 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid; 2006 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid; 2007 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length; 2008 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags; 2009 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb; 2010 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at; 2011 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1; 2012 asoc->tsn_in_at++; 2013 #endif 2014 /* 2015 * Before we continue lets validate that we are not being fooled by 2016 * an evil attacker. We can only have Nk chunks based on our TSN 2017 * spread allowed by the mapping array N * 8 bits, so there is no 2018 * way our stream sequence numbers could have wrapped. We of course 2019 * only validate the FIRST fragment so the bit must be set. 2020 */ 2021 if ((chk_flags & SCTP_DATA_FIRST_FRAG) && 2022 (TAILQ_EMPTY(&asoc->resetHead)) && 2023 (chk_flags & SCTP_DATA_UNORDERED) == 0 && 2024 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) { 2025 /* The incoming sseq is behind where we last delivered? */ 2026 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n", 2027 mid, asoc->strmin[sid].last_mid_delivered); 2028 2029 if (asoc->idata_supported) { 2030 snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x", 2031 asoc->strmin[sid].last_mid_delivered, 2032 tsn, 2033 sid, 2034 mid); 2035 } else { 2036 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", 2037 (uint16_t)asoc->strmin[sid].last_mid_delivered, 2038 tsn, 2039 sid, 2040 (uint16_t)mid); 2041 } 2042 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2043 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16; 2044 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 2045 *abort_flag = 1; 2046 return (0); 2047 } 2048 if (chk_type == SCTP_IDATA) { 2049 the_len = (chk_length - sizeof(struct sctp_idata_chunk)); 2050 } else { 2051 the_len = (chk_length - sizeof(struct sctp_data_chunk)); 2052 } 2053 if (last_chunk == 0) { 2054 if (chk_type == SCTP_IDATA) { 2055 dmbuf = SCTP_M_COPYM(*m, 2056 (offset + sizeof(struct sctp_idata_chunk)), 2057 the_len, M_NOWAIT); 2058 } else { 2059 dmbuf = SCTP_M_COPYM(*m, 2060 (offset + sizeof(struct sctp_data_chunk)), 2061 the_len, M_NOWAIT); 2062 } 2063 #ifdef SCTP_MBUF_LOGGING 2064 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 2065 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY); 2066 } 2067 #endif 2068 } else { 2069 /* We can steal the last chunk */ 2070 int l_len; 2071 2072 dmbuf = *m; 2073 /* lop off the top part */ 2074 if (chk_type == SCTP_IDATA) { 2075 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk))); 2076 } else { 2077 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk))); 2078 } 2079 if (SCTP_BUF_NEXT(dmbuf) == NULL) { 2080 l_len = SCTP_BUF_LEN(dmbuf); 2081 } else { 2082 /* 2083 * need to count up the size hopefully does not hit 2084 * this to often :-0 2085 */ 2086 struct mbuf *lat; 2087 2088 l_len = 0; 2089 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) { 2090 l_len += SCTP_BUF_LEN(lat); 2091 } 2092 } 2093 if (l_len > the_len) { 2094 /* Trim the end round bytes off too */ 2095 m_adj(dmbuf, -(l_len - the_len)); 2096 } 2097 } 2098 if (dmbuf == NULL) { 2099 SCTP_STAT_INCR(sctps_nomem); 2100 return (0); 2101 } 2102 /* 2103 * Now no matter what, we need a control, get one if we don't have 2104 * one (we may have gotten it above when we found the message was 2105 * fragmented 2106 */ 2107 if (control == NULL) { 2108 sctp_alloc_a_readq(stcb, control); 2109 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 2110 ppid, 2111 sid, 2112 chk_flags, 2113 NULL, fsn, mid); 2114 if (control == NULL) { 2115 SCTP_STAT_INCR(sctps_nomem); 2116 return (0); 2117 } 2118 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 2119 struct mbuf *mm; 2120 2121 control->data = dmbuf; 2122 for (mm = control->data; mm; mm = mm->m_next) { 2123 control->length += SCTP_BUF_LEN(mm); 2124 } 2125 control->tail_mbuf = NULL; 2126 control->end_added = 1; 2127 control->last_frag_seen = 1; 2128 control->first_frag_seen = 1; 2129 control->fsn_included = fsn; 2130 control->top_fsn = fsn; 2131 } 2132 created_control = 1; 2133 } 2134 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n", 2135 chk_flags, ordered, mid, control); 2136 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG && 2137 TAILQ_EMPTY(&asoc->resetHead) && 2138 ((ordered == 0) || 2139 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) && 2140 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) { 2141 /* Candidate for express delivery */ 2142 /* 2143 * Its not fragmented, No PD-API is up, Nothing in the 2144 * delivery queue, Its un-ordered OR ordered and the next to 2145 * deliver AND nothing else is stuck on the stream queue, 2146 * And there is room for it in the socket buffer. Lets just 2147 * stuff it up the buffer.... 2148 */ 2149 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 2150 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 2151 asoc->highest_tsn_inside_nr_map = tsn; 2152 } 2153 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n", 2154 control, mid); 2155 2156 sctp_add_to_readq(stcb->sctp_ep, stcb, 2157 control, &stcb->sctp_socket->so_rcv, 2158 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 2159 2160 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) { 2161 /* for ordered, bump what we delivered */ 2162 asoc->strmin[sid].last_mid_delivered++; 2163 } 2164 SCTP_STAT_INCR(sctps_recvexpress); 2165 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 2166 sctp_log_strm_del_alt(stcb, tsn, mid, sid, 2167 SCTP_STR_LOG_FROM_EXPRS_DEL); 2168 } 2169 control = NULL; 2170 goto finish_express_del; 2171 } 2172 2173 /* Now will we need a chunk too? */ 2174 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 2175 sctp_alloc_a_chunk(stcb, chk); 2176 if (chk == NULL) { 2177 /* No memory so we drop the chunk */ 2178 SCTP_STAT_INCR(sctps_nomem); 2179 if (last_chunk == 0) { 2180 /* we copied it, free the copy */ 2181 sctp_m_freem(dmbuf); 2182 } 2183 return (0); 2184 } 2185 chk->rec.data.tsn = tsn; 2186 chk->no_fr_allowed = 0; 2187 chk->rec.data.fsn = fsn; 2188 chk->rec.data.mid = mid; 2189 chk->rec.data.sid = sid; 2190 chk->rec.data.ppid = ppid; 2191 chk->rec.data.context = stcb->asoc.context; 2192 chk->rec.data.doing_fast_retransmit = 0; 2193 chk->rec.data.rcv_flags = chk_flags; 2194 chk->asoc = asoc; 2195 chk->send_size = the_len; 2196 chk->whoTo = net; 2197 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n", 2198 chk, 2199 control, mid); 2200 atomic_add_int(&net->ref_count, 1); 2201 chk->data = dmbuf; 2202 } 2203 /* Set the appropriate TSN mark */ 2204 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { 2205 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 2206 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 2207 asoc->highest_tsn_inside_nr_map = tsn; 2208 } 2209 } else { 2210 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); 2211 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) { 2212 asoc->highest_tsn_inside_map = tsn; 2213 } 2214 } 2215 /* Now is it complete (i.e. not fragmented)? */ 2216 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 2217 /* 2218 * Special check for when streams are resetting. We could be 2219 * more smart about this and check the actual stream to see 2220 * if it is not being reset.. that way we would not create a 2221 * HOLB when amongst streams being reset and those not being 2222 * reset. 2223 * 2224 */ 2225 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2226 SCTP_TSN_GT(tsn, liste->tsn)) { 2227 /* 2228 * yep its past where we need to reset... go ahead 2229 * and queue it. 2230 */ 2231 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) { 2232 /* first one on */ 2233 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2234 } else { 2235 struct sctp_queued_to_read *lcontrol, *nlcontrol; 2236 unsigned char inserted = 0; 2237 2238 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) { 2239 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) { 2240 2241 continue; 2242 } else { 2243 /* found it */ 2244 TAILQ_INSERT_BEFORE(lcontrol, control, next); 2245 inserted = 1; 2246 break; 2247 } 2248 } 2249 if (inserted == 0) { 2250 /* 2251 * must be put at end, use prevP 2252 * (all setup from loop) to setup 2253 * nextP. 2254 */ 2255 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2256 } 2257 } 2258 goto finish_express_del; 2259 } 2260 if (chk_flags & SCTP_DATA_UNORDERED) { 2261 /* queue directly into socket buffer */ 2262 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n", 2263 control, mid); 2264 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 2265 sctp_add_to_readq(stcb->sctp_ep, stcb, 2266 control, 2267 &stcb->sctp_socket->so_rcv, 1, 2268 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 2269 2270 } else { 2271 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control, 2272 mid); 2273 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2274 if (*abort_flag) { 2275 if (last_chunk) { 2276 *m = NULL; 2277 } 2278 return (0); 2279 } 2280 } 2281 goto finish_express_del; 2282 } 2283 /* If we reach here its a reassembly */ 2284 need_reasm_check = 1; 2285 SCTPDBG(SCTP_DEBUG_XXX, 2286 "Queue data to stream for reasm control: %p MID: %u\n", 2287 control, mid); 2288 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn); 2289 if (*abort_flag) { 2290 /* 2291 * the assoc is now gone and chk was put onto the reasm 2292 * queue, which has all been freed. 2293 */ 2294 if (last_chunk) { 2295 *m = NULL; 2296 } 2297 return (0); 2298 } 2299 finish_express_del: 2300 /* Here we tidy up things */ 2301 if (tsn == (asoc->cumulative_tsn + 1)) { 2302 /* Update cum-ack */ 2303 asoc->cumulative_tsn = tsn; 2304 } 2305 if (last_chunk) { 2306 *m = NULL; 2307 } 2308 if (ordered) { 2309 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); 2310 } else { 2311 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); 2312 } 2313 SCTP_STAT_INCR(sctps_recvdata); 2314 /* Set it present please */ 2315 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 2316 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN); 2317 } 2318 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2319 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2320 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); 2321 } 2322 if (need_reasm_check) { 2323 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD); 2324 need_reasm_check = 0; 2325 } 2326 /* check the special flag for stream resets */ 2327 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2328 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) { 2329 /* 2330 * we have finished working through the backlogged TSN's now 2331 * time to reset streams. 1: call reset function. 2: free 2332 * pending_reply space 3: distribute any chunks in 2333 * pending_reply_queue. 2334 */ 2335 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams); 2336 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); 2337 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED); 2338 SCTP_FREE(liste, SCTP_M_STRESET); 2339 /* sa_ignore FREED_MEMORY */ 2340 liste = TAILQ_FIRST(&asoc->resetHead); 2341 if (TAILQ_EMPTY(&asoc->resetHead)) { 2342 /* All can be removed */ 2343 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) { 2344 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next); 2345 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2346 if (*abort_flag) { 2347 return (0); 2348 } 2349 if (need_reasm_check) { 2350 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD); 2351 need_reasm_check = 0; 2352 } 2353 } 2354 } else { 2355 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) { 2356 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) { 2357 break; 2358 } 2359 /* 2360 * if control->sinfo_tsn is <= liste->tsn we 2361 * can process it which is the NOT of 2362 * control->sinfo_tsn > liste->tsn 2363 */ 2364 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next); 2365 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2366 if (*abort_flag) { 2367 return (0); 2368 } 2369 if (need_reasm_check) { 2370 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD); 2371 need_reasm_check = 0; 2372 } 2373 } 2374 } 2375 } 2376 return (1); 2377 } 2378 2379 static const int8_t sctp_map_lookup_tab[256] = { 2380 0, 1, 0, 2, 0, 1, 0, 3, 2381 0, 1, 0, 2, 0, 1, 0, 4, 2382 0, 1, 0, 2, 0, 1, 0, 3, 2383 0, 1, 0, 2, 0, 1, 0, 5, 2384 0, 1, 0, 2, 0, 1, 0, 3, 2385 0, 1, 0, 2, 0, 1, 0, 4, 2386 0, 1, 0, 2, 0, 1, 0, 3, 2387 0, 1, 0, 2, 0, 1, 0, 6, 2388 0, 1, 0, 2, 0, 1, 0, 3, 2389 0, 1, 0, 2, 0, 1, 0, 4, 2390 0, 1, 0, 2, 0, 1, 0, 3, 2391 0, 1, 0, 2, 0, 1, 0, 5, 2392 0, 1, 0, 2, 0, 1, 0, 3, 2393 0, 1, 0, 2, 0, 1, 0, 4, 2394 0, 1, 0, 2, 0, 1, 0, 3, 2395 0, 1, 0, 2, 0, 1, 0, 7, 2396 0, 1, 0, 2, 0, 1, 0, 3, 2397 0, 1, 0, 2, 0, 1, 0, 4, 2398 0, 1, 0, 2, 0, 1, 0, 3, 2399 0, 1, 0, 2, 0, 1, 0, 5, 2400 0, 1, 0, 2, 0, 1, 0, 3, 2401 0, 1, 0, 2, 0, 1, 0, 4, 2402 0, 1, 0, 2, 0, 1, 0, 3, 2403 0, 1, 0, 2, 0, 1, 0, 6, 2404 0, 1, 0, 2, 0, 1, 0, 3, 2405 0, 1, 0, 2, 0, 1, 0, 4, 2406 0, 1, 0, 2, 0, 1, 0, 3, 2407 0, 1, 0, 2, 0, 1, 0, 5, 2408 0, 1, 0, 2, 0, 1, 0, 3, 2409 0, 1, 0, 2, 0, 1, 0, 4, 2410 0, 1, 0, 2, 0, 1, 0, 3, 2411 0, 1, 0, 2, 0, 1, 0, 8 2412 }; 2413 2414 2415 void 2416 sctp_slide_mapping_arrays(struct sctp_tcb *stcb) 2417 { 2418 /* 2419 * Now we also need to check the mapping array in a couple of ways. 2420 * 1) Did we move the cum-ack point? 2421 * 2422 * When you first glance at this you might think that all entries 2423 * that make up the position of the cum-ack would be in the 2424 * nr-mapping array only.. i.e. things up to the cum-ack are always 2425 * deliverable. Thats true with one exception, when its a fragmented 2426 * message we may not deliver the data until some threshold (or all 2427 * of it) is in place. So we must OR the nr_mapping_array and 2428 * mapping_array to get a true picture of the cum-ack. 2429 */ 2430 struct sctp_association *asoc; 2431 int at; 2432 uint8_t val; 2433 int slide_from, slide_end, lgap, distance; 2434 uint32_t old_cumack, old_base, old_highest, highest_tsn; 2435 2436 asoc = &stcb->asoc; 2437 2438 old_cumack = asoc->cumulative_tsn; 2439 old_base = asoc->mapping_array_base_tsn; 2440 old_highest = asoc->highest_tsn_inside_map; 2441 /* 2442 * We could probably improve this a small bit by calculating the 2443 * offset of the current cum-ack as the starting point. 2444 */ 2445 at = 0; 2446 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) { 2447 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from]; 2448 if (val == 0xff) { 2449 at += 8; 2450 } else { 2451 /* there is a 0 bit */ 2452 at += sctp_map_lookup_tab[val]; 2453 break; 2454 } 2455 } 2456 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1); 2457 2458 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) && 2459 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) { 2460 #ifdef INVARIANTS 2461 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map", 2462 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2463 #else 2464 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n", 2465 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2466 sctp_print_mapping_array(asoc); 2467 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2468 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 2469 } 2470 asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2471 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn; 2472 #endif 2473 } 2474 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2475 highest_tsn = asoc->highest_tsn_inside_nr_map; 2476 } else { 2477 highest_tsn = asoc->highest_tsn_inside_map; 2478 } 2479 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) { 2480 /* The complete array was completed by a single FR */ 2481 /* highest becomes the cum-ack */ 2482 int clr; 2483 #ifdef INVARIANTS 2484 unsigned int i; 2485 #endif 2486 2487 /* clear the array */ 2488 clr = ((at + 7) >> 3); 2489 if (clr > asoc->mapping_array_size) { 2490 clr = asoc->mapping_array_size; 2491 } 2492 memset(asoc->mapping_array, 0, clr); 2493 memset(asoc->nr_mapping_array, 0, clr); 2494 #ifdef INVARIANTS 2495 for (i = 0; i < asoc->mapping_array_size; i++) { 2496 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) { 2497 SCTP_PRINTF("Error Mapping array's not clean at clear\n"); 2498 sctp_print_mapping_array(asoc); 2499 } 2500 } 2501 #endif 2502 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; 2503 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2504 } else if (at >= 8) { 2505 /* we can slide the mapping array down */ 2506 /* slide_from holds where we hit the first NON 0xff byte */ 2507 2508 /* 2509 * now calculate the ceiling of the move using our highest 2510 * TSN value 2511 */ 2512 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn); 2513 slide_end = (lgap >> 3); 2514 if (slide_end < slide_from) { 2515 sctp_print_mapping_array(asoc); 2516 #ifdef INVARIANTS 2517 panic("impossible slide"); 2518 #else 2519 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n", 2520 lgap, slide_end, slide_from, at); 2521 return; 2522 #endif 2523 } 2524 if (slide_end > asoc->mapping_array_size) { 2525 #ifdef INVARIANTS 2526 panic("would overrun buffer"); 2527 #else 2528 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n", 2529 asoc->mapping_array_size, slide_end); 2530 slide_end = asoc->mapping_array_size; 2531 #endif 2532 } 2533 distance = (slide_end - slide_from) + 1; 2534 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2535 sctp_log_map(old_base, old_cumack, old_highest, 2536 SCTP_MAP_PREPARE_SLIDE); 2537 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end, 2538 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM); 2539 } 2540 if (distance + slide_from > asoc->mapping_array_size || 2541 distance < 0) { 2542 /* 2543 * Here we do NOT slide forward the array so that 2544 * hopefully when more data comes in to fill it up 2545 * we will be able to slide it forward. Really I 2546 * don't think this should happen :-0 2547 */ 2548 2549 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2550 sctp_log_map((uint32_t)distance, (uint32_t)slide_from, 2551 (uint32_t)asoc->mapping_array_size, 2552 SCTP_MAP_SLIDE_NONE); 2553 } 2554 } else { 2555 int ii; 2556 2557 for (ii = 0; ii < distance; ii++) { 2558 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii]; 2559 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii]; 2560 2561 } 2562 for (ii = distance; ii < asoc->mapping_array_size; ii++) { 2563 asoc->mapping_array[ii] = 0; 2564 asoc->nr_mapping_array[ii] = 0; 2565 } 2566 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) { 2567 asoc->highest_tsn_inside_map += (slide_from << 3); 2568 } 2569 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) { 2570 asoc->highest_tsn_inside_nr_map += (slide_from << 3); 2571 } 2572 asoc->mapping_array_base_tsn += (slide_from << 3); 2573 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2574 sctp_log_map(asoc->mapping_array_base_tsn, 2575 asoc->cumulative_tsn, asoc->highest_tsn_inside_map, 2576 SCTP_MAP_SLIDE_RESULT); 2577 } 2578 } 2579 } 2580 } 2581 2582 void 2583 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap) 2584 { 2585 struct sctp_association *asoc; 2586 uint32_t highest_tsn; 2587 int is_a_gap; 2588 2589 sctp_slide_mapping_arrays(stcb); 2590 asoc = &stcb->asoc; 2591 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2592 highest_tsn = asoc->highest_tsn_inside_nr_map; 2593 } else { 2594 highest_tsn = asoc->highest_tsn_inside_map; 2595 } 2596 /* Is there a gap now? */ 2597 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 2598 2599 /* 2600 * Now we need to see if we need to queue a sack or just start the 2601 * timer (if allowed). 2602 */ 2603 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) { 2604 /* 2605 * Ok special case, in SHUTDOWN-SENT case. here we maker 2606 * sure SACK timer is off and instead send a SHUTDOWN and a 2607 * SACK 2608 */ 2609 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2610 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 2611 stcb->sctp_ep, stcb, NULL, 2612 SCTP_FROM_SCTP_INDATA + SCTP_LOC_17); 2613 } 2614 sctp_send_shutdown(stcb, 2615 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination)); 2616 if (is_a_gap) { 2617 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 2618 } 2619 } else { 2620 /* 2621 * CMT DAC algorithm: increase number of packets received 2622 * since last ack 2623 */ 2624 stcb->asoc.cmt_dac_pkts_rcvd++; 2625 2626 if ((stcb->asoc.send_sack == 1) || /* We need to send a 2627 * SACK */ 2628 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no 2629 * longer is one */ 2630 (stcb->asoc.numduptsns) || /* we have dup's */ 2631 (is_a_gap) || /* is still a gap */ 2632 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */ 2633 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */ 2634 ) { 2635 2636 if ((stcb->asoc.sctp_cmt_on_off > 0) && 2637 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) && 2638 (stcb->asoc.send_sack == 0) && 2639 (stcb->asoc.numduptsns == 0) && 2640 (stcb->asoc.delayed_ack) && 2641 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) { 2642 2643 /* 2644 * CMT DAC algorithm: With CMT, delay acks 2645 * even in the face of 2646 * 2647 * reordering. Therefore, if acks that do 2648 * not have to be sent because of the above 2649 * reasons, will be delayed. That is, acks 2650 * that would have been sent due to gap 2651 * reports will be delayed with DAC. Start 2652 * the delayed ack timer. 2653 */ 2654 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2655 stcb->sctp_ep, stcb, NULL); 2656 } else { 2657 /* 2658 * Ok we must build a SACK since the timer 2659 * is pending, we got our first packet OR 2660 * there are gaps or duplicates. 2661 */ 2662 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 2663 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 2664 } 2665 } else { 2666 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2667 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2668 stcb->sctp_ep, stcb, NULL); 2669 } 2670 } 2671 } 2672 } 2673 2674 int 2675 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, 2676 struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2677 struct sctp_nets *net, uint32_t *high_tsn) 2678 { 2679 struct sctp_chunkhdr *ch, chunk_buf; 2680 struct sctp_association *asoc; 2681 int num_chunks = 0; /* number of control chunks processed */ 2682 int stop_proc = 0; 2683 int break_flag, last_chunk; 2684 int abort_flag = 0, was_a_gap; 2685 struct mbuf *m; 2686 uint32_t highest_tsn; 2687 uint16_t chk_length; 2688 2689 /* set the rwnd */ 2690 sctp_set_rwnd(stcb, &stcb->asoc); 2691 2692 m = *mm; 2693 SCTP_TCB_LOCK_ASSERT(stcb); 2694 asoc = &stcb->asoc; 2695 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2696 highest_tsn = asoc->highest_tsn_inside_nr_map; 2697 } else { 2698 highest_tsn = asoc->highest_tsn_inside_map; 2699 } 2700 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 2701 /* 2702 * setup where we got the last DATA packet from for any SACK that 2703 * may need to go out. Don't bump the net. This is done ONLY when a 2704 * chunk is assigned. 2705 */ 2706 asoc->last_data_chunk_from = net; 2707 2708 /*- 2709 * Now before we proceed we must figure out if this is a wasted 2710 * cluster... i.e. it is a small packet sent in and yet the driver 2711 * underneath allocated a full cluster for it. If so we must copy it 2712 * to a smaller mbuf and free up the cluster mbuf. This will help 2713 * with cluster starvation. Note for __Panda__ we don't do this 2714 * since it has clusters all the way down to 64 bytes. 2715 */ 2716 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) { 2717 /* we only handle mbufs that are singletons.. not chains */ 2718 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA); 2719 if (m) { 2720 /* ok lets see if we can copy the data up */ 2721 caddr_t *from, *to; 2722 2723 /* get the pointers and copy */ 2724 to = mtod(m, caddr_t *); 2725 from = mtod((*mm), caddr_t *); 2726 memcpy(to, from, SCTP_BUF_LEN((*mm))); 2727 /* copy the length and free up the old */ 2728 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm)); 2729 sctp_m_freem(*mm); 2730 /* success, back copy */ 2731 *mm = m; 2732 } else { 2733 /* We are in trouble in the mbuf world .. yikes */ 2734 m = *mm; 2735 } 2736 } 2737 /* get pointer to the first chunk header */ 2738 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 2739 sizeof(struct sctp_chunkhdr), 2740 (uint8_t *)&chunk_buf); 2741 if (ch == NULL) { 2742 return (1); 2743 } 2744 /* 2745 * process all DATA chunks... 2746 */ 2747 *high_tsn = asoc->cumulative_tsn; 2748 break_flag = 0; 2749 asoc->data_pkts_seen++; 2750 while (stop_proc == 0) { 2751 /* validate chunk length */ 2752 chk_length = ntohs(ch->chunk_length); 2753 if (length - *offset < chk_length) { 2754 /* all done, mutulated chunk */ 2755 stop_proc = 1; 2756 continue; 2757 } 2758 if ((asoc->idata_supported == 1) && 2759 (ch->chunk_type == SCTP_DATA)) { 2760 struct mbuf *op_err; 2761 char msg[SCTP_DIAG_INFO_LEN]; 2762 2763 snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated"); 2764 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2765 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18; 2766 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2767 return (2); 2768 } 2769 if ((asoc->idata_supported == 0) && 2770 (ch->chunk_type == SCTP_IDATA)) { 2771 struct mbuf *op_err; 2772 char msg[SCTP_DIAG_INFO_LEN]; 2773 2774 snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated"); 2775 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2776 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19; 2777 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2778 return (2); 2779 } 2780 if ((ch->chunk_type == SCTP_DATA) || 2781 (ch->chunk_type == SCTP_IDATA)) { 2782 uint16_t clen; 2783 2784 if (ch->chunk_type == SCTP_DATA) { 2785 clen = sizeof(struct sctp_data_chunk); 2786 } else { 2787 clen = sizeof(struct sctp_idata_chunk); 2788 } 2789 if (chk_length < clen) { 2790 /* 2791 * Need to send an abort since we had a 2792 * invalid data chunk. 2793 */ 2794 struct mbuf *op_err; 2795 char msg[SCTP_DIAG_INFO_LEN]; 2796 2797 snprintf(msg, sizeof(msg), "%s chunk of length %u", 2798 ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA", 2799 chk_length); 2800 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2801 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20; 2802 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2803 return (2); 2804 } 2805 #ifdef SCTP_AUDITING_ENABLED 2806 sctp_audit_log(0xB1, 0); 2807 #endif 2808 if (SCTP_SIZE32(chk_length) == (length - *offset)) { 2809 last_chunk = 1; 2810 } else { 2811 last_chunk = 0; 2812 } 2813 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, 2814 chk_length, net, high_tsn, &abort_flag, &break_flag, 2815 last_chunk, ch->chunk_type)) { 2816 num_chunks++; 2817 } 2818 if (abort_flag) 2819 return (2); 2820 2821 if (break_flag) { 2822 /* 2823 * Set because of out of rwnd space and no 2824 * drop rep space left. 2825 */ 2826 stop_proc = 1; 2827 continue; 2828 } 2829 } else { 2830 /* not a data chunk in the data region */ 2831 switch (ch->chunk_type) { 2832 case SCTP_INITIATION: 2833 case SCTP_INITIATION_ACK: 2834 case SCTP_SELECTIVE_ACK: 2835 case SCTP_NR_SELECTIVE_ACK: 2836 case SCTP_HEARTBEAT_REQUEST: 2837 case SCTP_HEARTBEAT_ACK: 2838 case SCTP_ABORT_ASSOCIATION: 2839 case SCTP_SHUTDOWN: 2840 case SCTP_SHUTDOWN_ACK: 2841 case SCTP_OPERATION_ERROR: 2842 case SCTP_COOKIE_ECHO: 2843 case SCTP_COOKIE_ACK: 2844 case SCTP_ECN_ECHO: 2845 case SCTP_ECN_CWR: 2846 case SCTP_SHUTDOWN_COMPLETE: 2847 case SCTP_AUTHENTICATION: 2848 case SCTP_ASCONF_ACK: 2849 case SCTP_PACKET_DROPPED: 2850 case SCTP_STREAM_RESET: 2851 case SCTP_FORWARD_CUM_TSN: 2852 case SCTP_ASCONF: 2853 { 2854 /* 2855 * Now, what do we do with KNOWN 2856 * chunks that are NOT in the right 2857 * place? 2858 * 2859 * For now, I do nothing but ignore 2860 * them. We may later want to add 2861 * sysctl stuff to switch out and do 2862 * either an ABORT() or possibly 2863 * process them. 2864 */ 2865 struct mbuf *op_err; 2866 char msg[SCTP_DIAG_INFO_LEN]; 2867 2868 snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x", 2869 ch->chunk_type); 2870 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2871 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2872 return (2); 2873 } 2874 default: 2875 /* 2876 * Unknown chunk type: use bit rules after 2877 * checking length 2878 */ 2879 if (chk_length < sizeof(struct sctp_chunkhdr)) { 2880 /* 2881 * Need to send an abort since we 2882 * had a invalid chunk. 2883 */ 2884 struct mbuf *op_err; 2885 char msg[SCTP_DIAG_INFO_LEN]; 2886 2887 snprintf(msg, sizeof(msg), "Chunk of length %u", 2888 chk_length); 2889 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2890 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20; 2891 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2892 return (2); 2893 } 2894 if (ch->chunk_type & 0x40) { 2895 /* Add a error report to the queue */ 2896 struct mbuf *op_err; 2897 struct sctp_gen_error_cause *cause; 2898 2899 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause), 2900 0, M_NOWAIT, 1, MT_DATA); 2901 if (op_err != NULL) { 2902 cause = mtod(op_err, struct sctp_gen_error_cause *); 2903 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK); 2904 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause))); 2905 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause); 2906 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT); 2907 if (SCTP_BUF_NEXT(op_err) != NULL) { 2908 sctp_queue_op_err(stcb, op_err); 2909 } else { 2910 sctp_m_freem(op_err); 2911 } 2912 } 2913 } 2914 if ((ch->chunk_type & 0x80) == 0) { 2915 /* discard the rest of this packet */ 2916 stop_proc = 1; 2917 } /* else skip this bad chunk and 2918 * continue... */ 2919 break; 2920 } /* switch of chunk type */ 2921 } 2922 *offset += SCTP_SIZE32(chk_length); 2923 if ((*offset >= length) || stop_proc) { 2924 /* no more data left in the mbuf chain */ 2925 stop_proc = 1; 2926 continue; 2927 } 2928 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 2929 sizeof(struct sctp_chunkhdr), 2930 (uint8_t *)&chunk_buf); 2931 if (ch == NULL) { 2932 *offset = length; 2933 stop_proc = 1; 2934 continue; 2935 } 2936 } 2937 if (break_flag) { 2938 /* 2939 * we need to report rwnd overrun drops. 2940 */ 2941 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0); 2942 } 2943 if (num_chunks) { 2944 /* 2945 * Did we get data, if so update the time for auto-close and 2946 * give peer credit for being alive. 2947 */ 2948 SCTP_STAT_INCR(sctps_recvpktwithdata); 2949 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 2950 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 2951 stcb->asoc.overall_error_count, 2952 0, 2953 SCTP_FROM_SCTP_INDATA, 2954 __LINE__); 2955 } 2956 stcb->asoc.overall_error_count = 0; 2957 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd); 2958 } 2959 /* now service all of the reassm queue if needed */ 2960 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) { 2961 /* Assure that we ack right away */ 2962 stcb->asoc.send_sack = 1; 2963 } 2964 /* Start a sack timer or QUEUE a SACK for sending */ 2965 sctp_sack_check(stcb, was_a_gap); 2966 return (0); 2967 } 2968 2969 static int 2970 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn, 2971 uint16_t frag_strt, uint16_t frag_end, int nr_sacking, 2972 int *num_frs, 2973 uint32_t *biggest_newly_acked_tsn, 2974 uint32_t *this_sack_lowest_newack, 2975 int *rto_ok) 2976 { 2977 struct sctp_tmit_chunk *tp1; 2978 unsigned int theTSN; 2979 int j, wake_him = 0, circled = 0; 2980 2981 /* Recover the tp1 we last saw */ 2982 tp1 = *p_tp1; 2983 if (tp1 == NULL) { 2984 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 2985 } 2986 for (j = frag_strt; j <= frag_end; j++) { 2987 theTSN = j + last_tsn; 2988 while (tp1) { 2989 if (tp1->rec.data.doing_fast_retransmit) 2990 (*num_frs) += 1; 2991 2992 /*- 2993 * CMT: CUCv2 algorithm. For each TSN being 2994 * processed from the sent queue, track the 2995 * next expected pseudo-cumack, or 2996 * rtx_pseudo_cumack, if required. Separate 2997 * cumack trackers for first transmissions, 2998 * and retransmissions. 2999 */ 3000 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 3001 (tp1->whoTo->find_pseudo_cumack == 1) && 3002 (tp1->snd_count == 1)) { 3003 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn; 3004 tp1->whoTo->find_pseudo_cumack = 0; 3005 } 3006 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 3007 (tp1->whoTo->find_rtx_pseudo_cumack == 1) && 3008 (tp1->snd_count > 1)) { 3009 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn; 3010 tp1->whoTo->find_rtx_pseudo_cumack = 0; 3011 } 3012 if (tp1->rec.data.tsn == theTSN) { 3013 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 3014 /*- 3015 * must be held until 3016 * cum-ack passes 3017 */ 3018 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3019 /*- 3020 * If it is less than RESEND, it is 3021 * now no-longer in flight. 3022 * Higher values may already be set 3023 * via previous Gap Ack Blocks... 3024 * i.e. ACKED or RESEND. 3025 */ 3026 if (SCTP_TSN_GT(tp1->rec.data.tsn, 3027 *biggest_newly_acked_tsn)) { 3028 *biggest_newly_acked_tsn = tp1->rec.data.tsn; 3029 } 3030 /*- 3031 * CMT: SFR algo (and HTNA) - set 3032 * saw_newack to 1 for dest being 3033 * newly acked. update 3034 * this_sack_highest_newack if 3035 * appropriate. 3036 */ 3037 if (tp1->rec.data.chunk_was_revoked == 0) 3038 tp1->whoTo->saw_newack = 1; 3039 3040 if (SCTP_TSN_GT(tp1->rec.data.tsn, 3041 tp1->whoTo->this_sack_highest_newack)) { 3042 tp1->whoTo->this_sack_highest_newack = 3043 tp1->rec.data.tsn; 3044 } 3045 /*- 3046 * CMT DAC algo: also update 3047 * this_sack_lowest_newack 3048 */ 3049 if (*this_sack_lowest_newack == 0) { 3050 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3051 sctp_log_sack(*this_sack_lowest_newack, 3052 last_tsn, 3053 tp1->rec.data.tsn, 3054 0, 3055 0, 3056 SCTP_LOG_TSN_ACKED); 3057 } 3058 *this_sack_lowest_newack = tp1->rec.data.tsn; 3059 } 3060 /*- 3061 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp 3062 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set 3063 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be 3064 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack. 3065 * Separate pseudo_cumack trackers for first transmissions and 3066 * retransmissions. 3067 */ 3068 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) { 3069 if (tp1->rec.data.chunk_was_revoked == 0) { 3070 tp1->whoTo->new_pseudo_cumack = 1; 3071 } 3072 tp1->whoTo->find_pseudo_cumack = 1; 3073 } 3074 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 3075 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); 3076 } 3077 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) { 3078 if (tp1->rec.data.chunk_was_revoked == 0) { 3079 tp1->whoTo->new_pseudo_cumack = 1; 3080 } 3081 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3082 } 3083 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3084 sctp_log_sack(*biggest_newly_acked_tsn, 3085 last_tsn, 3086 tp1->rec.data.tsn, 3087 frag_strt, 3088 frag_end, 3089 SCTP_LOG_TSN_ACKED); 3090 } 3091 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3092 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP, 3093 tp1->whoTo->flight_size, 3094 tp1->book_size, 3095 (uint32_t)(uintptr_t)tp1->whoTo, 3096 tp1->rec.data.tsn); 3097 } 3098 sctp_flight_size_decrease(tp1); 3099 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3100 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3101 tp1); 3102 } 3103 sctp_total_flight_decrease(stcb, tp1); 3104 3105 tp1->whoTo->net_ack += tp1->send_size; 3106 if (tp1->snd_count < 2) { 3107 /*- 3108 * True non-retransmitted chunk 3109 */ 3110 tp1->whoTo->net_ack2 += tp1->send_size; 3111 3112 /*- 3113 * update RTO too ? 3114 */ 3115 if (tp1->do_rtt) { 3116 if (*rto_ok && 3117 sctp_calculate_rto(stcb, 3118 &stcb->asoc, 3119 tp1->whoTo, 3120 &tp1->sent_rcv_time, 3121 SCTP_RTT_FROM_DATA)) { 3122 *rto_ok = 0; 3123 } 3124 if (tp1->whoTo->rto_needed == 0) { 3125 tp1->whoTo->rto_needed = 1; 3126 } 3127 tp1->do_rtt = 0; 3128 } 3129 } 3130 3131 } 3132 if (tp1->sent <= SCTP_DATAGRAM_RESEND) { 3133 if (SCTP_TSN_GT(tp1->rec.data.tsn, 3134 stcb->asoc.this_sack_highest_gap)) { 3135 stcb->asoc.this_sack_highest_gap = 3136 tp1->rec.data.tsn; 3137 } 3138 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3139 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt); 3140 #ifdef SCTP_AUDITING_ENABLED 3141 sctp_audit_log(0xB2, 3142 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff)); 3143 #endif 3144 } 3145 } 3146 /*- 3147 * All chunks NOT UNSENT fall through here and are marked 3148 * (leave PR-SCTP ones that are to skip alone though) 3149 */ 3150 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) && 3151 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { 3152 tp1->sent = SCTP_DATAGRAM_MARKED; 3153 } 3154 if (tp1->rec.data.chunk_was_revoked) { 3155 /* deflate the cwnd */ 3156 tp1->whoTo->cwnd -= tp1->book_size; 3157 tp1->rec.data.chunk_was_revoked = 0; 3158 } 3159 /* NR Sack code here */ 3160 if (nr_sacking && 3161 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { 3162 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) { 3163 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--; 3164 #ifdef INVARIANTS 3165 } else { 3166 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); 3167 #endif 3168 } 3169 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) && 3170 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && 3171 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) { 3172 stcb->asoc.trigger_reset = 1; 3173 } 3174 tp1->sent = SCTP_DATAGRAM_NR_ACKED; 3175 if (tp1->data) { 3176 /* 3177 * sa_ignore 3178 * NO_NULL_CHK 3179 */ 3180 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 3181 sctp_m_freem(tp1->data); 3182 tp1->data = NULL; 3183 } 3184 wake_him++; 3185 } 3186 } 3187 break; 3188 } /* if (tp1->tsn == theTSN) */ 3189 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) { 3190 break; 3191 } 3192 tp1 = TAILQ_NEXT(tp1, sctp_next); 3193 if ((tp1 == NULL) && (circled == 0)) { 3194 circled++; 3195 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 3196 } 3197 } /* end while (tp1) */ 3198 if (tp1 == NULL) { 3199 circled = 0; 3200 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 3201 } 3202 /* In case the fragments were not in order we must reset */ 3203 } /* end for (j = fragStart */ 3204 *p_tp1 = tp1; 3205 return (wake_him); /* Return value only used for nr-sack */ 3206 } 3207 3208 3209 static int 3210 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc, 3211 uint32_t last_tsn, uint32_t *biggest_tsn_acked, 3212 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack, 3213 int num_seg, int num_nr_seg, int *rto_ok) 3214 { 3215 struct sctp_gap_ack_block *frag, block; 3216 struct sctp_tmit_chunk *tp1; 3217 int i; 3218 int num_frs = 0; 3219 int chunk_freed; 3220 int non_revocable; 3221 uint16_t frag_strt, frag_end, prev_frag_end; 3222 3223 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3224 prev_frag_end = 0; 3225 chunk_freed = 0; 3226 3227 for (i = 0; i < (num_seg + num_nr_seg); i++) { 3228 if (i == num_seg) { 3229 prev_frag_end = 0; 3230 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3231 } 3232 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset, 3233 sizeof(struct sctp_gap_ack_block), (uint8_t *)&block); 3234 *offset += sizeof(block); 3235 if (frag == NULL) { 3236 return (chunk_freed); 3237 } 3238 frag_strt = ntohs(frag->start); 3239 frag_end = ntohs(frag->end); 3240 3241 if (frag_strt > frag_end) { 3242 /* This gap report is malformed, skip it. */ 3243 continue; 3244 } 3245 if (frag_strt <= prev_frag_end) { 3246 /* This gap report is not in order, so restart. */ 3247 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3248 } 3249 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) { 3250 *biggest_tsn_acked = last_tsn + frag_end; 3251 } 3252 if (i < num_seg) { 3253 non_revocable = 0; 3254 } else { 3255 non_revocable = 1; 3256 } 3257 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end, 3258 non_revocable, &num_frs, biggest_newly_acked_tsn, 3259 this_sack_lowest_newack, rto_ok)) { 3260 chunk_freed = 1; 3261 } 3262 prev_frag_end = frag_end; 3263 } 3264 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3265 if (num_frs) 3266 sctp_log_fr(*biggest_tsn_acked, 3267 *biggest_newly_acked_tsn, 3268 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS); 3269 } 3270 return (chunk_freed); 3271 } 3272 3273 static void 3274 sctp_check_for_revoked(struct sctp_tcb *stcb, 3275 struct sctp_association *asoc, uint32_t cumack, 3276 uint32_t biggest_tsn_acked) 3277 { 3278 struct sctp_tmit_chunk *tp1; 3279 3280 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3281 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) { 3282 /* 3283 * ok this guy is either ACK or MARKED. If it is 3284 * ACKED it has been previously acked but not this 3285 * time i.e. revoked. If it is MARKED it was ACK'ed 3286 * again. 3287 */ 3288 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) { 3289 break; 3290 } 3291 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 3292 /* it has been revoked */ 3293 tp1->sent = SCTP_DATAGRAM_SENT; 3294 tp1->rec.data.chunk_was_revoked = 1; 3295 /* 3296 * We must add this stuff back in to assure 3297 * timers and such get started. 3298 */ 3299 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3300 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 3301 tp1->whoTo->flight_size, 3302 tp1->book_size, 3303 (uint32_t)(uintptr_t)tp1->whoTo, 3304 tp1->rec.data.tsn); 3305 } 3306 sctp_flight_size_increase(tp1); 3307 sctp_total_flight_increase(stcb, tp1); 3308 /* 3309 * We inflate the cwnd to compensate for our 3310 * artificial inflation of the flight_size. 3311 */ 3312 tp1->whoTo->cwnd += tp1->book_size; 3313 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3314 sctp_log_sack(asoc->last_acked_seq, 3315 cumack, 3316 tp1->rec.data.tsn, 3317 0, 3318 0, 3319 SCTP_LOG_TSN_REVOKED); 3320 } 3321 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) { 3322 /* it has been re-acked in this SACK */ 3323 tp1->sent = SCTP_DATAGRAM_ACKED; 3324 } 3325 } 3326 if (tp1->sent == SCTP_DATAGRAM_UNSENT) 3327 break; 3328 } 3329 } 3330 3331 3332 static void 3333 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, 3334 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved) 3335 { 3336 struct sctp_tmit_chunk *tp1; 3337 int strike_flag = 0; 3338 struct timeval now; 3339 int tot_retrans = 0; 3340 uint32_t sending_seq; 3341 struct sctp_nets *net; 3342 int num_dests_sacked = 0; 3343 3344 /* 3345 * select the sending_seq, this is either the next thing ready to be 3346 * sent but not transmitted, OR, the next seq we assign. 3347 */ 3348 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 3349 if (tp1 == NULL) { 3350 sending_seq = asoc->sending_seq; 3351 } else { 3352 sending_seq = tp1->rec.data.tsn; 3353 } 3354 3355 /* CMT DAC algo: finding out if SACK is a mixed SACK */ 3356 if ((asoc->sctp_cmt_on_off > 0) && 3357 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3358 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3359 if (net->saw_newack) 3360 num_dests_sacked++; 3361 } 3362 } 3363 if (stcb->asoc.prsctp_supported) { 3364 (void)SCTP_GETTIME_TIMEVAL(&now); 3365 } 3366 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3367 strike_flag = 0; 3368 if (tp1->no_fr_allowed) { 3369 /* this one had a timeout or something */ 3370 continue; 3371 } 3372 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3373 if (tp1->sent < SCTP_DATAGRAM_RESEND) 3374 sctp_log_fr(biggest_tsn_newly_acked, 3375 tp1->rec.data.tsn, 3376 tp1->sent, 3377 SCTP_FR_LOG_CHECK_STRIKE); 3378 } 3379 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) || 3380 tp1->sent == SCTP_DATAGRAM_UNSENT) { 3381 /* done */ 3382 break; 3383 } 3384 if (stcb->asoc.prsctp_supported) { 3385 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3386 /* Is it expired? */ 3387 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3388 /* Yes so drop it */ 3389 if (tp1->data != NULL) { 3390 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, 3391 SCTP_SO_NOT_LOCKED); 3392 } 3393 continue; 3394 } 3395 } 3396 3397 } 3398 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) && 3399 !(accum_moved && asoc->fast_retran_loss_recovery)) { 3400 /* we are beyond the tsn in the sack */ 3401 break; 3402 } 3403 if (tp1->sent >= SCTP_DATAGRAM_RESEND) { 3404 /* either a RESEND, ACKED, or MARKED */ 3405 /* skip */ 3406 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) { 3407 /* Continue strikin FWD-TSN chunks */ 3408 tp1->rec.data.fwd_tsn_cnt++; 3409 } 3410 continue; 3411 } 3412 /* 3413 * CMT : SFR algo (covers part of DAC and HTNA as well) 3414 */ 3415 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) { 3416 /* 3417 * No new acks were receieved for data sent to this 3418 * dest. Therefore, according to the SFR algo for 3419 * CMT, no data sent to this dest can be marked for 3420 * FR using this SACK. 3421 */ 3422 continue; 3423 } else if (tp1->whoTo && 3424 SCTP_TSN_GT(tp1->rec.data.tsn, 3425 tp1->whoTo->this_sack_highest_newack) && 3426 !(accum_moved && asoc->fast_retran_loss_recovery)) { 3427 /* 3428 * CMT: New acks were receieved for data sent to 3429 * this dest. But no new acks were seen for data 3430 * sent after tp1. Therefore, according to the SFR 3431 * algo for CMT, tp1 cannot be marked for FR using 3432 * this SACK. This step covers part of the DAC algo 3433 * and the HTNA algo as well. 3434 */ 3435 continue; 3436 } 3437 /* 3438 * Here we check to see if we were have already done a FR 3439 * and if so we see if the biggest TSN we saw in the sack is 3440 * smaller than the recovery point. If so we don't strike 3441 * the tsn... otherwise we CAN strike the TSN. 3442 */ 3443 /* 3444 * @@@ JRI: Check for CMT if (accum_moved && 3445 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 3446 * 0)) { 3447 */ 3448 if (accum_moved && asoc->fast_retran_loss_recovery) { 3449 /* 3450 * Strike the TSN if in fast-recovery and cum-ack 3451 * moved. 3452 */ 3453 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3454 sctp_log_fr(biggest_tsn_newly_acked, 3455 tp1->rec.data.tsn, 3456 tp1->sent, 3457 SCTP_FR_LOG_STRIKE_CHUNK); 3458 } 3459 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3460 tp1->sent++; 3461 } 3462 if ((asoc->sctp_cmt_on_off > 0) && 3463 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3464 /* 3465 * CMT DAC algorithm: If SACK flag is set to 3466 * 0, then lowest_newack test will not pass 3467 * because it would have been set to the 3468 * cumack earlier. If not already to be 3469 * rtx'd, If not a mixed sack and if tp1 is 3470 * not between two sacked TSNs, then mark by 3471 * one more. NOTE that we are marking by one 3472 * additional time since the SACK DAC flag 3473 * indicates that two packets have been 3474 * received after this missing TSN. 3475 */ 3476 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3477 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) { 3478 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3479 sctp_log_fr(16 + num_dests_sacked, 3480 tp1->rec.data.tsn, 3481 tp1->sent, 3482 SCTP_FR_LOG_STRIKE_CHUNK); 3483 } 3484 tp1->sent++; 3485 } 3486 } 3487 } else if ((tp1->rec.data.doing_fast_retransmit) && 3488 (asoc->sctp_cmt_on_off == 0)) { 3489 /* 3490 * For those that have done a FR we must take 3491 * special consideration if we strike. I.e the 3492 * biggest_newly_acked must be higher than the 3493 * sending_seq at the time we did the FR. 3494 */ 3495 if ( 3496 #ifdef SCTP_FR_TO_ALTERNATE 3497 /* 3498 * If FR's go to new networks, then we must only do 3499 * this for singly homed asoc's. However if the FR's 3500 * go to the same network (Armando's work) then its 3501 * ok to FR multiple times. 3502 */ 3503 (asoc->numnets < 2) 3504 #else 3505 (1) 3506 #endif 3507 ) { 3508 3509 if (SCTP_TSN_GE(biggest_tsn_newly_acked, 3510 tp1->rec.data.fast_retran_tsn)) { 3511 /* 3512 * Strike the TSN, since this ack is 3513 * beyond where things were when we 3514 * did a FR. 3515 */ 3516 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3517 sctp_log_fr(biggest_tsn_newly_acked, 3518 tp1->rec.data.tsn, 3519 tp1->sent, 3520 SCTP_FR_LOG_STRIKE_CHUNK); 3521 } 3522 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3523 tp1->sent++; 3524 } 3525 strike_flag = 1; 3526 if ((asoc->sctp_cmt_on_off > 0) && 3527 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3528 /* 3529 * CMT DAC algorithm: If 3530 * SACK flag is set to 0, 3531 * then lowest_newack test 3532 * will not pass because it 3533 * would have been set to 3534 * the cumack earlier. If 3535 * not already to be rtx'd, 3536 * If not a mixed sack and 3537 * if tp1 is not between two 3538 * sacked TSNs, then mark by 3539 * one more. NOTE that we 3540 * are marking by one 3541 * additional time since the 3542 * SACK DAC flag indicates 3543 * that two packets have 3544 * been received after this 3545 * missing TSN. 3546 */ 3547 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 3548 (num_dests_sacked == 1) && 3549 SCTP_TSN_GT(this_sack_lowest_newack, 3550 tp1->rec.data.tsn)) { 3551 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3552 sctp_log_fr(32 + num_dests_sacked, 3553 tp1->rec.data.tsn, 3554 tp1->sent, 3555 SCTP_FR_LOG_STRIKE_CHUNK); 3556 } 3557 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3558 tp1->sent++; 3559 } 3560 } 3561 } 3562 } 3563 } 3564 /* 3565 * JRI: TODO: remove code for HTNA algo. CMT's SFR 3566 * algo covers HTNA. 3567 */ 3568 } else if (SCTP_TSN_GT(tp1->rec.data.tsn, 3569 biggest_tsn_newly_acked)) { 3570 /* 3571 * We don't strike these: This is the HTNA 3572 * algorithm i.e. we don't strike If our TSN is 3573 * larger than the Highest TSN Newly Acked. 3574 */ 3575 ; 3576 } else { 3577 /* Strike the TSN */ 3578 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3579 sctp_log_fr(biggest_tsn_newly_acked, 3580 tp1->rec.data.tsn, 3581 tp1->sent, 3582 SCTP_FR_LOG_STRIKE_CHUNK); 3583 } 3584 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3585 tp1->sent++; 3586 } 3587 if ((asoc->sctp_cmt_on_off > 0) && 3588 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3589 /* 3590 * CMT DAC algorithm: If SACK flag is set to 3591 * 0, then lowest_newack test will not pass 3592 * because it would have been set to the 3593 * cumack earlier. If not already to be 3594 * rtx'd, If not a mixed sack and if tp1 is 3595 * not between two sacked TSNs, then mark by 3596 * one more. NOTE that we are marking by one 3597 * additional time since the SACK DAC flag 3598 * indicates that two packets have been 3599 * received after this missing TSN. 3600 */ 3601 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3602 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) { 3603 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3604 sctp_log_fr(48 + num_dests_sacked, 3605 tp1->rec.data.tsn, 3606 tp1->sent, 3607 SCTP_FR_LOG_STRIKE_CHUNK); 3608 } 3609 tp1->sent++; 3610 } 3611 } 3612 } 3613 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3614 struct sctp_nets *alt; 3615 3616 /* fix counts and things */ 3617 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3618 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND, 3619 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0), 3620 tp1->book_size, 3621 (uint32_t)(uintptr_t)tp1->whoTo, 3622 tp1->rec.data.tsn); 3623 } 3624 if (tp1->whoTo) { 3625 tp1->whoTo->net_ack++; 3626 sctp_flight_size_decrease(tp1); 3627 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3628 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3629 tp1); 3630 } 3631 } 3632 3633 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 3634 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, 3635 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3636 } 3637 /* add back to the rwnd */ 3638 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3639 3640 /* remove from the total flight */ 3641 sctp_total_flight_decrease(stcb, tp1); 3642 3643 if ((stcb->asoc.prsctp_supported) && 3644 (PR_SCTP_RTX_ENABLED(tp1->flags))) { 3645 /* 3646 * Has it been retransmitted tv_sec times? - 3647 * we store the retran count there. 3648 */ 3649 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) { 3650 /* Yes, so drop it */ 3651 if (tp1->data != NULL) { 3652 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, 3653 SCTP_SO_NOT_LOCKED); 3654 } 3655 /* Make sure to flag we had a FR */ 3656 if (tp1->whoTo != NULL) { 3657 tp1->whoTo->net_ack++; 3658 } 3659 continue; 3660 } 3661 } 3662 /* 3663 * SCTP_PRINTF("OK, we are now ready to FR this 3664 * guy\n"); 3665 */ 3666 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3667 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count, 3668 0, SCTP_FR_MARKED); 3669 } 3670 if (strike_flag) { 3671 /* This is a subsequent FR */ 3672 SCTP_STAT_INCR(sctps_sendmultfastretrans); 3673 } 3674 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3675 if (asoc->sctp_cmt_on_off > 0) { 3676 /* 3677 * CMT: Using RTX_SSTHRESH policy for CMT. 3678 * If CMT is being used, then pick dest with 3679 * largest ssthresh for any retransmission. 3680 */ 3681 tp1->no_fr_allowed = 1; 3682 alt = tp1->whoTo; 3683 /* sa_ignore NO_NULL_CHK */ 3684 if (asoc->sctp_cmt_pf > 0) { 3685 /* 3686 * JRS 5/18/07 - If CMT PF is on, 3687 * use the PF version of 3688 * find_alt_net() 3689 */ 3690 alt = sctp_find_alternate_net(stcb, alt, 2); 3691 } else { 3692 /* 3693 * JRS 5/18/07 - If only CMT is on, 3694 * use the CMT version of 3695 * find_alt_net() 3696 */ 3697 /* sa_ignore NO_NULL_CHK */ 3698 alt = sctp_find_alternate_net(stcb, alt, 1); 3699 } 3700 if (alt == NULL) { 3701 alt = tp1->whoTo; 3702 } 3703 /* 3704 * CUCv2: If a different dest is picked for 3705 * the retransmission, then new 3706 * (rtx-)pseudo_cumack needs to be tracked 3707 * for orig dest. Let CUCv2 track new (rtx-) 3708 * pseudo-cumack always. 3709 */ 3710 if (tp1->whoTo) { 3711 tp1->whoTo->find_pseudo_cumack = 1; 3712 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3713 } 3714 3715 } else { /* CMT is OFF */ 3716 3717 #ifdef SCTP_FR_TO_ALTERNATE 3718 /* Can we find an alternate? */ 3719 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); 3720 #else 3721 /* 3722 * default behavior is to NOT retransmit 3723 * FR's to an alternate. Armando Caro's 3724 * paper details why. 3725 */ 3726 alt = tp1->whoTo; 3727 #endif 3728 } 3729 3730 tp1->rec.data.doing_fast_retransmit = 1; 3731 tot_retrans++; 3732 /* mark the sending seq for possible subsequent FR's */ 3733 /* 3734 * SCTP_PRINTF("Marking TSN for FR new value %x\n", 3735 * (uint32_t)tpi->rec.data.tsn); 3736 */ 3737 if (TAILQ_EMPTY(&asoc->send_queue)) { 3738 /* 3739 * If the queue of send is empty then its 3740 * the next sequence number that will be 3741 * assigned so we subtract one from this to 3742 * get the one we last sent. 3743 */ 3744 tp1->rec.data.fast_retran_tsn = sending_seq; 3745 } else { 3746 /* 3747 * If there are chunks on the send queue 3748 * (unsent data that has made it from the 3749 * stream queues but not out the door, we 3750 * take the first one (which will have the 3751 * lowest TSN) and subtract one to get the 3752 * one we last sent. 3753 */ 3754 struct sctp_tmit_chunk *ttt; 3755 3756 ttt = TAILQ_FIRST(&asoc->send_queue); 3757 tp1->rec.data.fast_retran_tsn = 3758 ttt->rec.data.tsn; 3759 } 3760 3761 if (tp1->do_rtt) { 3762 /* 3763 * this guy had a RTO calculation pending on 3764 * it, cancel it 3765 */ 3766 if ((tp1->whoTo != NULL) && 3767 (tp1->whoTo->rto_needed == 0)) { 3768 tp1->whoTo->rto_needed = 1; 3769 } 3770 tp1->do_rtt = 0; 3771 } 3772 if (alt != tp1->whoTo) { 3773 /* yes, there is an alternate. */ 3774 sctp_free_remote_addr(tp1->whoTo); 3775 /* sa_ignore FREED_MEMORY */ 3776 tp1->whoTo = alt; 3777 atomic_add_int(&alt->ref_count, 1); 3778 } 3779 } 3780 } 3781 } 3782 3783 struct sctp_tmit_chunk * 3784 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb, 3785 struct sctp_association *asoc) 3786 { 3787 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL; 3788 struct timeval now; 3789 int now_filled = 0; 3790 3791 if (asoc->prsctp_supported == 0) { 3792 return (NULL); 3793 } 3794 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 3795 if (tp1->sent != SCTP_FORWARD_TSN_SKIP && 3796 tp1->sent != SCTP_DATAGRAM_RESEND && 3797 tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 3798 /* no chance to advance, out of here */ 3799 break; 3800 } 3801 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 3802 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || 3803 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { 3804 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 3805 asoc->advanced_peer_ack_point, 3806 tp1->rec.data.tsn, 0, 0); 3807 } 3808 } 3809 if (!PR_SCTP_ENABLED(tp1->flags)) { 3810 /* 3811 * We can't fwd-tsn past any that are reliable aka 3812 * retransmitted until the asoc fails. 3813 */ 3814 break; 3815 } 3816 if (!now_filled) { 3817 (void)SCTP_GETTIME_TIMEVAL(&now); 3818 now_filled = 1; 3819 } 3820 /* 3821 * now we got a chunk which is marked for another 3822 * retransmission to a PR-stream but has run out its chances 3823 * already maybe OR has been marked to skip now. Can we skip 3824 * it if its a resend? 3825 */ 3826 if (tp1->sent == SCTP_DATAGRAM_RESEND && 3827 (PR_SCTP_TTL_ENABLED(tp1->flags))) { 3828 /* 3829 * Now is this one marked for resend and its time is 3830 * now up? 3831 */ 3832 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3833 /* Yes so drop it */ 3834 if (tp1->data) { 3835 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 3836 1, SCTP_SO_NOT_LOCKED); 3837 } 3838 } else { 3839 /* 3840 * No, we are done when hit one for resend 3841 * whos time as not expired. 3842 */ 3843 break; 3844 } 3845 } 3846 /* 3847 * Ok now if this chunk is marked to drop it we can clean up 3848 * the chunk, advance our peer ack point and we can check 3849 * the next chunk. 3850 */ 3851 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || 3852 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { 3853 /* advance PeerAckPoint goes forward */ 3854 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) { 3855 asoc->advanced_peer_ack_point = tp1->rec.data.tsn; 3856 a_adv = tp1; 3857 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) { 3858 /* No update but we do save the chk */ 3859 a_adv = tp1; 3860 } 3861 } else { 3862 /* 3863 * If it is still in RESEND we can advance no 3864 * further 3865 */ 3866 break; 3867 } 3868 } 3869 return (a_adv); 3870 } 3871 3872 static int 3873 sctp_fs_audit(struct sctp_association *asoc) 3874 { 3875 struct sctp_tmit_chunk *chk; 3876 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0; 3877 int ret; 3878 #ifndef INVARIANTS 3879 int entry_flight, entry_cnt; 3880 #endif 3881 3882 ret = 0; 3883 #ifndef INVARIANTS 3884 entry_flight = asoc->total_flight; 3885 entry_cnt = asoc->total_flight_count; 3886 #endif 3887 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt) 3888 return (0); 3889 3890 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 3891 if (chk->sent < SCTP_DATAGRAM_RESEND) { 3892 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n", 3893 chk->rec.data.tsn, 3894 chk->send_size, 3895 chk->snd_count); 3896 inflight++; 3897 } else if (chk->sent == SCTP_DATAGRAM_RESEND) { 3898 resend++; 3899 } else if (chk->sent < SCTP_DATAGRAM_ACKED) { 3900 inbetween++; 3901 } else if (chk->sent > SCTP_DATAGRAM_ACKED) { 3902 above++; 3903 } else { 3904 acked++; 3905 } 3906 } 3907 3908 if ((inflight > 0) || (inbetween > 0)) { 3909 #ifdef INVARIANTS 3910 panic("Flight size-express incorrect? \n"); 3911 #else 3912 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n", 3913 entry_flight, entry_cnt); 3914 3915 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n", 3916 inflight, inbetween, resend, above, acked); 3917 ret = 1; 3918 #endif 3919 } 3920 return (ret); 3921 } 3922 3923 3924 static void 3925 sctp_window_probe_recovery(struct sctp_tcb *stcb, 3926 struct sctp_association *asoc, 3927 struct sctp_tmit_chunk *tp1) 3928 { 3929 tp1->window_probe = 0; 3930 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) { 3931 /* TSN's skipped we do NOT move back. */ 3932 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD, 3933 tp1->whoTo ? tp1->whoTo->flight_size : 0, 3934 tp1->book_size, 3935 (uint32_t)(uintptr_t)tp1->whoTo, 3936 tp1->rec.data.tsn); 3937 return; 3938 } 3939 /* First setup this by shrinking flight */ 3940 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3941 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3942 tp1); 3943 } 3944 sctp_flight_size_decrease(tp1); 3945 sctp_total_flight_decrease(stcb, tp1); 3946 /* Now mark for resend */ 3947 tp1->sent = SCTP_DATAGRAM_RESEND; 3948 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 3949 3950 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3951 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP, 3952 tp1->whoTo->flight_size, 3953 tp1->book_size, 3954 (uint32_t)(uintptr_t)tp1->whoTo, 3955 tp1->rec.data.tsn); 3956 } 3957 } 3958 3959 void 3960 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, 3961 uint32_t rwnd, int *abort_now, int ecne_seen) 3962 { 3963 struct sctp_nets *net; 3964 struct sctp_association *asoc; 3965 struct sctp_tmit_chunk *tp1, *tp2; 3966 uint32_t old_rwnd; 3967 int win_probe_recovery = 0; 3968 int win_probe_recovered = 0; 3969 int j, done_once = 0; 3970 int rto_ok = 1; 3971 uint32_t send_s; 3972 3973 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 3974 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack, 3975 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 3976 } 3977 SCTP_TCB_LOCK_ASSERT(stcb); 3978 #ifdef SCTP_ASOCLOG_OF_TSNS 3979 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack; 3980 stcb->asoc.cumack_log_at++; 3981 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 3982 stcb->asoc.cumack_log_at = 0; 3983 } 3984 #endif 3985 asoc = &stcb->asoc; 3986 old_rwnd = asoc->peers_rwnd; 3987 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) { 3988 /* old ack */ 3989 return; 3990 } else if (asoc->last_acked_seq == cumack) { 3991 /* Window update sack */ 3992 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 3993 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 3994 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 3995 /* SWS sender side engages */ 3996 asoc->peers_rwnd = 0; 3997 } 3998 if (asoc->peers_rwnd > old_rwnd) { 3999 goto again; 4000 } 4001 return; 4002 } 4003 4004 /* First setup for CC stuff */ 4005 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4006 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) { 4007 /* Drag along the window_tsn for cwr's */ 4008 net->cwr_window_tsn = cumack; 4009 } 4010 net->prev_cwnd = net->cwnd; 4011 net->net_ack = 0; 4012 net->net_ack2 = 0; 4013 4014 /* 4015 * CMT: Reset CUC and Fast recovery algo variables before 4016 * SACK processing 4017 */ 4018 net->new_pseudo_cumack = 0; 4019 net->will_exit_fast_recovery = 0; 4020 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 4021 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 4022 } 4023 } 4024 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 4025 tp1 = TAILQ_LAST(&asoc->sent_queue, 4026 sctpchunk_listhead); 4027 send_s = tp1->rec.data.tsn + 1; 4028 } else { 4029 send_s = asoc->sending_seq; 4030 } 4031 if (SCTP_TSN_GE(cumack, send_s)) { 4032 struct mbuf *op_err; 4033 char msg[SCTP_DIAG_INFO_LEN]; 4034 4035 *abort_now = 1; 4036 /* XXX */ 4037 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x", 4038 cumack, send_s); 4039 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 4040 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21; 4041 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4042 return; 4043 } 4044 asoc->this_sack_highest_gap = cumack; 4045 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4046 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4047 stcb->asoc.overall_error_count, 4048 0, 4049 SCTP_FROM_SCTP_INDATA, 4050 __LINE__); 4051 } 4052 stcb->asoc.overall_error_count = 0; 4053 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) { 4054 /* process the new consecutive TSN first */ 4055 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 4056 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) { 4057 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 4058 SCTP_PRINTF("Warning, an unsent is now acked?\n"); 4059 } 4060 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4061 /* 4062 * If it is less than ACKED, it is 4063 * now no-longer in flight. Higher 4064 * values may occur during marking 4065 */ 4066 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4067 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4068 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4069 tp1->whoTo->flight_size, 4070 tp1->book_size, 4071 (uint32_t)(uintptr_t)tp1->whoTo, 4072 tp1->rec.data.tsn); 4073 } 4074 sctp_flight_size_decrease(tp1); 4075 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 4076 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 4077 tp1); 4078 } 4079 /* sa_ignore NO_NULL_CHK */ 4080 sctp_total_flight_decrease(stcb, tp1); 4081 } 4082 tp1->whoTo->net_ack += tp1->send_size; 4083 if (tp1->snd_count < 2) { 4084 /* 4085 * True non-retransmitted 4086 * chunk 4087 */ 4088 tp1->whoTo->net_ack2 += 4089 tp1->send_size; 4090 4091 /* update RTO too? */ 4092 if (tp1->do_rtt) { 4093 if (rto_ok && 4094 sctp_calculate_rto(stcb, 4095 &stcb->asoc, 4096 tp1->whoTo, 4097 &tp1->sent_rcv_time, 4098 SCTP_RTT_FROM_DATA)) { 4099 rto_ok = 0; 4100 } 4101 if (tp1->whoTo->rto_needed == 0) { 4102 tp1->whoTo->rto_needed = 1; 4103 } 4104 tp1->do_rtt = 0; 4105 } 4106 } 4107 /* 4108 * CMT: CUCv2 algorithm. From the 4109 * cumack'd TSNs, for each TSN being 4110 * acked for the first time, set the 4111 * following variables for the 4112 * corresp destination. 4113 * new_pseudo_cumack will trigger a 4114 * cwnd update. 4115 * find_(rtx_)pseudo_cumack will 4116 * trigger search for the next 4117 * expected (rtx-)pseudo-cumack. 4118 */ 4119 tp1->whoTo->new_pseudo_cumack = 1; 4120 tp1->whoTo->find_pseudo_cumack = 1; 4121 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4122 4123 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 4124 /* sa_ignore NO_NULL_CHK */ 4125 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); 4126 } 4127 } 4128 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4129 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4130 } 4131 if (tp1->rec.data.chunk_was_revoked) { 4132 /* deflate the cwnd */ 4133 tp1->whoTo->cwnd -= tp1->book_size; 4134 tp1->rec.data.chunk_was_revoked = 0; 4135 } 4136 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4137 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) { 4138 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--; 4139 #ifdef INVARIANTS 4140 } else { 4141 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); 4142 #endif 4143 } 4144 } 4145 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) && 4146 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && 4147 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) { 4148 asoc->trigger_reset = 1; 4149 } 4150 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4151 if (tp1->data) { 4152 /* sa_ignore NO_NULL_CHK */ 4153 sctp_free_bufspace(stcb, asoc, tp1, 1); 4154 sctp_m_freem(tp1->data); 4155 tp1->data = NULL; 4156 } 4157 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4158 sctp_log_sack(asoc->last_acked_seq, 4159 cumack, 4160 tp1->rec.data.tsn, 4161 0, 4162 0, 4163 SCTP_LOG_FREE_SENT); 4164 } 4165 asoc->sent_queue_cnt--; 4166 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 4167 } else { 4168 break; 4169 } 4170 } 4171 4172 } 4173 /* sa_ignore NO_NULL_CHK */ 4174 if (stcb->sctp_socket) { 4175 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4176 struct socket *so; 4177 4178 #endif 4179 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4180 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4181 /* sa_ignore NO_NULL_CHK */ 4182 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK); 4183 } 4184 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4185 so = SCTP_INP_SO(stcb->sctp_ep); 4186 atomic_add_int(&stcb->asoc.refcnt, 1); 4187 SCTP_TCB_UNLOCK(stcb); 4188 SCTP_SOCKET_LOCK(so, 1); 4189 SCTP_TCB_LOCK(stcb); 4190 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4191 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4192 /* assoc was freed while we were unlocked */ 4193 SCTP_SOCKET_UNLOCK(so, 1); 4194 return; 4195 } 4196 #endif 4197 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4198 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4199 SCTP_SOCKET_UNLOCK(so, 1); 4200 #endif 4201 } else { 4202 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4203 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK); 4204 } 4205 } 4206 4207 /* JRS - Use the congestion control given in the CC module */ 4208 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) { 4209 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4210 if (net->net_ack2 > 0) { 4211 /* 4212 * Karn's rule applies to clearing error 4213 * count, this is optional. 4214 */ 4215 net->error_count = 0; 4216 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { 4217 /* addr came good */ 4218 net->dest_state |= SCTP_ADDR_REACHABLE; 4219 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 4220 0, (void *)net, SCTP_SO_NOT_LOCKED); 4221 } 4222 if (net == stcb->asoc.primary_destination) { 4223 if (stcb->asoc.alternate) { 4224 /* 4225 * release the alternate, 4226 * primary is good 4227 */ 4228 sctp_free_remote_addr(stcb->asoc.alternate); 4229 stcb->asoc.alternate = NULL; 4230 } 4231 } 4232 if (net->dest_state & SCTP_ADDR_PF) { 4233 net->dest_state &= ~SCTP_ADDR_PF; 4234 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, 4235 stcb->sctp_ep, stcb, net, 4236 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22); 4237 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 4238 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 4239 /* Done with this net */ 4240 net->net_ack = 0; 4241 } 4242 /* restore any doubled timers */ 4243 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 4244 if (net->RTO < stcb->asoc.minrto) { 4245 net->RTO = stcb->asoc.minrto; 4246 } 4247 if (net->RTO > stcb->asoc.maxrto) { 4248 net->RTO = stcb->asoc.maxrto; 4249 } 4250 } 4251 } 4252 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0); 4253 } 4254 asoc->last_acked_seq = cumack; 4255 4256 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4257 /* nothing left in-flight */ 4258 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4259 net->flight_size = 0; 4260 net->partial_bytes_acked = 0; 4261 } 4262 asoc->total_flight = 0; 4263 asoc->total_flight_count = 0; 4264 } 4265 4266 /* RWND update */ 4267 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 4268 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 4269 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4270 /* SWS sender side engages */ 4271 asoc->peers_rwnd = 0; 4272 } 4273 if (asoc->peers_rwnd > old_rwnd) { 4274 win_probe_recovery = 1; 4275 } 4276 /* Now assure a timer where data is queued at */ 4277 again: 4278 j = 0; 4279 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4280 if (win_probe_recovery && (net->window_probe)) { 4281 win_probe_recovered = 1; 4282 /* 4283 * Find first chunk that was used with window probe 4284 * and clear the sent 4285 */ 4286 /* sa_ignore FREED_MEMORY */ 4287 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4288 if (tp1->window_probe) { 4289 /* move back to data send queue */ 4290 sctp_window_probe_recovery(stcb, asoc, tp1); 4291 break; 4292 } 4293 } 4294 } 4295 if (net->flight_size) { 4296 j++; 4297 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net); 4298 if (net->window_probe) { 4299 net->window_probe = 0; 4300 } 4301 } else { 4302 if (net->window_probe) { 4303 /* 4304 * In window probes we must assure a timer 4305 * is still running there 4306 */ 4307 net->window_probe = 0; 4308 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4309 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net); 4310 } 4311 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4312 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4313 stcb, net, 4314 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23); 4315 } 4316 } 4317 } 4318 if ((j == 0) && 4319 (!TAILQ_EMPTY(&asoc->sent_queue)) && 4320 (asoc->sent_queue_retran_cnt == 0) && 4321 (win_probe_recovered == 0) && 4322 (done_once == 0)) { 4323 /* 4324 * huh, this should not happen unless all packets are 4325 * PR-SCTP and marked to skip of course. 4326 */ 4327 if (sctp_fs_audit(asoc)) { 4328 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4329 net->flight_size = 0; 4330 } 4331 asoc->total_flight = 0; 4332 asoc->total_flight_count = 0; 4333 asoc->sent_queue_retran_cnt = 0; 4334 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4335 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4336 sctp_flight_size_increase(tp1); 4337 sctp_total_flight_increase(stcb, tp1); 4338 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4339 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 4340 } 4341 } 4342 } 4343 done_once = 1; 4344 goto again; 4345 } 4346 /**********************************/ 4347 /* Now what about shutdown issues */ 4348 /**********************************/ 4349 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4350 /* nothing left on sendqueue.. consider done */ 4351 /* clean up */ 4352 if ((asoc->stream_queue_cnt == 1) && 4353 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4354 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 4355 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) { 4356 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT); 4357 } 4358 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4359 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 4360 (asoc->stream_queue_cnt == 1) && 4361 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 4362 struct mbuf *op_err; 4363 4364 *abort_now = 1; 4365 /* XXX */ 4366 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 4367 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; 4368 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4369 return; 4370 } 4371 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4372 (asoc->stream_queue_cnt == 0)) { 4373 struct sctp_nets *netp; 4374 4375 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4376 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4377 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4378 } 4379 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT); 4380 sctp_stop_timers_for_shutdown(stcb); 4381 if (asoc->alternate) { 4382 netp = asoc->alternate; 4383 } else { 4384 netp = asoc->primary_destination; 4385 } 4386 sctp_send_shutdown(stcb, netp); 4387 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4388 stcb->sctp_ep, stcb, netp); 4389 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4390 stcb->sctp_ep, stcb, netp); 4391 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4392 (asoc->stream_queue_cnt == 0)) { 4393 struct sctp_nets *netp; 4394 4395 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4396 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT); 4397 sctp_stop_timers_for_shutdown(stcb); 4398 if (asoc->alternate) { 4399 netp = asoc->alternate; 4400 } else { 4401 netp = asoc->primary_destination; 4402 } 4403 sctp_send_shutdown_ack(stcb, netp); 4404 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4405 stcb->sctp_ep, stcb, netp); 4406 } 4407 } 4408 /*********************************************/ 4409 /* Here we perform PR-SCTP procedures */ 4410 /* (section 4.2) */ 4411 /*********************************************/ 4412 /* C1. update advancedPeerAckPoint */ 4413 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) { 4414 asoc->advanced_peer_ack_point = cumack; 4415 } 4416 /* PR-Sctp issues need to be addressed too */ 4417 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { 4418 struct sctp_tmit_chunk *lchk; 4419 uint32_t old_adv_peer_ack_point; 4420 4421 old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 4422 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 4423 /* C3. See if we need to send a Fwd-TSN */ 4424 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) { 4425 /* 4426 * ISSUE with ECN, see FWD-TSN processing. 4427 */ 4428 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 4429 send_forward_tsn(stcb, asoc); 4430 } else if (lchk) { 4431 /* try to FR fwd-tsn's that get lost too */ 4432 if (lchk->rec.data.fwd_tsn_cnt >= 3) { 4433 send_forward_tsn(stcb, asoc); 4434 } 4435 } 4436 } 4437 if (lchk) { 4438 /* Assure a timer is up */ 4439 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4440 stcb->sctp_ep, stcb, lchk->whoTo); 4441 } 4442 } 4443 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 4444 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 4445 rwnd, 4446 stcb->asoc.peers_rwnd, 4447 stcb->asoc.total_flight, 4448 stcb->asoc.total_output_queue_size); 4449 } 4450 } 4451 4452 void 4453 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, 4454 struct sctp_tcb *stcb, 4455 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup, 4456 int *abort_now, uint8_t flags, 4457 uint32_t cum_ack, uint32_t rwnd, int ecne_seen) 4458 { 4459 struct sctp_association *asoc; 4460 struct sctp_tmit_chunk *tp1, *tp2; 4461 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack; 4462 uint16_t wake_him = 0; 4463 uint32_t send_s = 0; 4464 long j; 4465 int accum_moved = 0; 4466 int will_exit_fast_recovery = 0; 4467 uint32_t a_rwnd, old_rwnd; 4468 int win_probe_recovery = 0; 4469 int win_probe_recovered = 0; 4470 struct sctp_nets *net = NULL; 4471 int done_once; 4472 int rto_ok = 1; 4473 uint8_t reneged_all = 0; 4474 uint8_t cmt_dac_flag; 4475 4476 /* 4477 * we take any chance we can to service our queues since we cannot 4478 * get awoken when the socket is read from :< 4479 */ 4480 /* 4481 * Now perform the actual SACK handling: 1) Verify that it is not an 4482 * old sack, if so discard. 2) If there is nothing left in the send 4483 * queue (cum-ack is equal to last acked) then you have a duplicate 4484 * too, update any rwnd change and verify no timers are running. 4485 * then return. 3) Process any new consequtive data i.e. cum-ack 4486 * moved process these first and note that it moved. 4) Process any 4487 * sack blocks. 5) Drop any acked from the queue. 6) Check for any 4488 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left, 4489 * sync up flightsizes and things, stop all timers and also check 4490 * for shutdown_pending state. If so then go ahead and send off the 4491 * shutdown. If in shutdown recv, send off the shutdown-ack and 4492 * start that timer, Ret. 9) Strike any non-acked things and do FR 4493 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp 4494 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK 4495 * if in shutdown_recv state. 4496 */ 4497 SCTP_TCB_LOCK_ASSERT(stcb); 4498 /* CMT DAC algo */ 4499 this_sack_lowest_newack = 0; 4500 SCTP_STAT_INCR(sctps_slowpath_sack); 4501 last_tsn = cum_ack; 4502 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC; 4503 #ifdef SCTP_ASOCLOG_OF_TSNS 4504 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack; 4505 stcb->asoc.cumack_log_at++; 4506 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 4507 stcb->asoc.cumack_log_at = 0; 4508 } 4509 #endif 4510 a_rwnd = rwnd; 4511 4512 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 4513 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack, 4514 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 4515 } 4516 4517 old_rwnd = stcb->asoc.peers_rwnd; 4518 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4519 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4520 stcb->asoc.overall_error_count, 4521 0, 4522 SCTP_FROM_SCTP_INDATA, 4523 __LINE__); 4524 } 4525 stcb->asoc.overall_error_count = 0; 4526 asoc = &stcb->asoc; 4527 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4528 sctp_log_sack(asoc->last_acked_seq, 4529 cum_ack, 4530 0, 4531 num_seg, 4532 num_dup, 4533 SCTP_LOG_NEW_SACK); 4534 } 4535 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) { 4536 uint16_t i; 4537 uint32_t *dupdata, dblock; 4538 4539 for (i = 0; i < num_dup; i++) { 4540 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t), 4541 sizeof(uint32_t), (uint8_t *)&dblock); 4542 if (dupdata == NULL) { 4543 break; 4544 } 4545 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); 4546 } 4547 } 4548 /* reality check */ 4549 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 4550 tp1 = TAILQ_LAST(&asoc->sent_queue, 4551 sctpchunk_listhead); 4552 send_s = tp1->rec.data.tsn + 1; 4553 } else { 4554 tp1 = NULL; 4555 send_s = asoc->sending_seq; 4556 } 4557 if (SCTP_TSN_GE(cum_ack, send_s)) { 4558 struct mbuf *op_err; 4559 char msg[SCTP_DIAG_INFO_LEN]; 4560 4561 /* 4562 * no way, we have not even sent this TSN out yet. Peer is 4563 * hopelessly messed up with us. 4564 */ 4565 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n", 4566 cum_ack, send_s); 4567 if (tp1) { 4568 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n", 4569 tp1->rec.data.tsn, (void *)tp1); 4570 } 4571 hopeless_peer: 4572 *abort_now = 1; 4573 /* XXX */ 4574 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x", 4575 cum_ack, send_s); 4576 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 4577 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 4578 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4579 return; 4580 } 4581 /**********************/ 4582 /* 1) check the range */ 4583 /**********************/ 4584 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) { 4585 /* acking something behind */ 4586 return; 4587 } 4588 4589 /* update the Rwnd of the peer */ 4590 if (TAILQ_EMPTY(&asoc->sent_queue) && 4591 TAILQ_EMPTY(&asoc->send_queue) && 4592 (asoc->stream_queue_cnt == 0)) { 4593 /* nothing left on send/sent and strmq */ 4594 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4595 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4596 asoc->peers_rwnd, 0, 0, a_rwnd); 4597 } 4598 asoc->peers_rwnd = a_rwnd; 4599 if (asoc->sent_queue_retran_cnt) { 4600 asoc->sent_queue_retran_cnt = 0; 4601 } 4602 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4603 /* SWS sender side engages */ 4604 asoc->peers_rwnd = 0; 4605 } 4606 /* stop any timers */ 4607 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4608 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4609 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); 4610 net->partial_bytes_acked = 0; 4611 net->flight_size = 0; 4612 } 4613 asoc->total_flight = 0; 4614 asoc->total_flight_count = 0; 4615 return; 4616 } 4617 /* 4618 * We init netAckSz and netAckSz2 to 0. These are used to track 2 4619 * things. The total byte count acked is tracked in netAckSz AND 4620 * netAck2 is used to track the total bytes acked that are un- 4621 * amibguious and were never retransmitted. We track these on a per 4622 * destination address basis. 4623 */ 4624 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4625 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) { 4626 /* Drag along the window_tsn for cwr's */ 4627 net->cwr_window_tsn = cum_ack; 4628 } 4629 net->prev_cwnd = net->cwnd; 4630 net->net_ack = 0; 4631 net->net_ack2 = 0; 4632 4633 /* 4634 * CMT: Reset CUC and Fast recovery algo variables before 4635 * SACK processing 4636 */ 4637 net->new_pseudo_cumack = 0; 4638 net->will_exit_fast_recovery = 0; 4639 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 4640 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 4641 } 4642 4643 /* 4644 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has 4645 * to be greater than the cumack. Also reset saw_newack to 0 4646 * for all dests. 4647 */ 4648 net->saw_newack = 0; 4649 net->this_sack_highest_newack = last_tsn; 4650 } 4651 /* process the new consecutive TSN first */ 4652 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4653 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) { 4654 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 4655 accum_moved = 1; 4656 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4657 /* 4658 * If it is less than ACKED, it is 4659 * now no-longer in flight. Higher 4660 * values may occur during marking 4661 */ 4662 if ((tp1->whoTo->dest_state & 4663 SCTP_ADDR_UNCONFIRMED) && 4664 (tp1->snd_count < 2)) { 4665 /* 4666 * If there was no retran 4667 * and the address is 4668 * un-confirmed and we sent 4669 * there and are now 4670 * sacked.. its confirmed, 4671 * mark it so. 4672 */ 4673 tp1->whoTo->dest_state &= 4674 ~SCTP_ADDR_UNCONFIRMED; 4675 } 4676 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4677 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4678 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4679 tp1->whoTo->flight_size, 4680 tp1->book_size, 4681 (uint32_t)(uintptr_t)tp1->whoTo, 4682 tp1->rec.data.tsn); 4683 } 4684 sctp_flight_size_decrease(tp1); 4685 sctp_total_flight_decrease(stcb, tp1); 4686 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 4687 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 4688 tp1); 4689 } 4690 } 4691 tp1->whoTo->net_ack += tp1->send_size; 4692 4693 /* CMT SFR and DAC algos */ 4694 this_sack_lowest_newack = tp1->rec.data.tsn; 4695 tp1->whoTo->saw_newack = 1; 4696 4697 if (tp1->snd_count < 2) { 4698 /* 4699 * True non-retransmitted 4700 * chunk 4701 */ 4702 tp1->whoTo->net_ack2 += 4703 tp1->send_size; 4704 4705 /* update RTO too? */ 4706 if (tp1->do_rtt) { 4707 if (rto_ok && 4708 sctp_calculate_rto(stcb, 4709 &stcb->asoc, 4710 tp1->whoTo, 4711 &tp1->sent_rcv_time, 4712 SCTP_RTT_FROM_DATA)) { 4713 rto_ok = 0; 4714 } 4715 if (tp1->whoTo->rto_needed == 0) { 4716 tp1->whoTo->rto_needed = 1; 4717 } 4718 tp1->do_rtt = 0; 4719 } 4720 } 4721 /* 4722 * CMT: CUCv2 algorithm. From the 4723 * cumack'd TSNs, for each TSN being 4724 * acked for the first time, set the 4725 * following variables for the 4726 * corresp destination. 4727 * new_pseudo_cumack will trigger a 4728 * cwnd update. 4729 * find_(rtx_)pseudo_cumack will 4730 * trigger search for the next 4731 * expected (rtx-)pseudo-cumack. 4732 */ 4733 tp1->whoTo->new_pseudo_cumack = 1; 4734 tp1->whoTo->find_pseudo_cumack = 1; 4735 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4736 4737 4738 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4739 sctp_log_sack(asoc->last_acked_seq, 4740 cum_ack, 4741 tp1->rec.data.tsn, 4742 0, 4743 0, 4744 SCTP_LOG_TSN_ACKED); 4745 } 4746 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 4747 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); 4748 } 4749 } 4750 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4751 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4752 #ifdef SCTP_AUDITING_ENABLED 4753 sctp_audit_log(0xB3, 4754 (asoc->sent_queue_retran_cnt & 0x000000ff)); 4755 #endif 4756 } 4757 if (tp1->rec.data.chunk_was_revoked) { 4758 /* deflate the cwnd */ 4759 tp1->whoTo->cwnd -= tp1->book_size; 4760 tp1->rec.data.chunk_was_revoked = 0; 4761 } 4762 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4763 tp1->sent = SCTP_DATAGRAM_ACKED; 4764 } 4765 } 4766 } else { 4767 break; 4768 } 4769 } 4770 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; 4771 /* always set this up to cum-ack */ 4772 asoc->this_sack_highest_gap = last_tsn; 4773 4774 if ((num_seg > 0) || (num_nr_seg > 0)) { 4775 4776 /* 4777 * thisSackHighestGap will increase while handling NEW 4778 * segments this_sack_highest_newack will increase while 4779 * handling NEWLY ACKED chunks. this_sack_lowest_newack is 4780 * used for CMT DAC algo. saw_newack will also change. 4781 */ 4782 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked, 4783 &biggest_tsn_newly_acked, &this_sack_lowest_newack, 4784 num_seg, num_nr_seg, &rto_ok)) { 4785 wake_him++; 4786 } 4787 /* 4788 * validate the biggest_tsn_acked in the gap acks if strict 4789 * adherence is wanted. 4790 */ 4791 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) { 4792 /* 4793 * peer is either confused or we are under attack. 4794 * We must abort. 4795 */ 4796 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n", 4797 biggest_tsn_acked, send_s); 4798 goto hopeless_peer; 4799 } 4800 } 4801 /*******************************************/ 4802 /* cancel ALL T3-send timer if accum moved */ 4803 /*******************************************/ 4804 if (asoc->sctp_cmt_on_off > 0) { 4805 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4806 if (net->new_pseudo_cumack) 4807 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4808 stcb, net, 4809 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27); 4810 4811 } 4812 } else { 4813 if (accum_moved) { 4814 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4815 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4816 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28); 4817 } 4818 } 4819 } 4820 /********************************************/ 4821 /* drop the acked chunks from the sentqueue */ 4822 /********************************************/ 4823 asoc->last_acked_seq = cum_ack; 4824 4825 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 4826 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) { 4827 break; 4828 } 4829 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4830 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) { 4831 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--; 4832 #ifdef INVARIANTS 4833 } else { 4834 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); 4835 #endif 4836 } 4837 } 4838 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) && 4839 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && 4840 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) { 4841 asoc->trigger_reset = 1; 4842 } 4843 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4844 if (PR_SCTP_ENABLED(tp1->flags)) { 4845 if (asoc->pr_sctp_cnt != 0) 4846 asoc->pr_sctp_cnt--; 4847 } 4848 asoc->sent_queue_cnt--; 4849 if (tp1->data) { 4850 /* sa_ignore NO_NULL_CHK */ 4851 sctp_free_bufspace(stcb, asoc, tp1, 1); 4852 sctp_m_freem(tp1->data); 4853 tp1->data = NULL; 4854 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) { 4855 asoc->sent_queue_cnt_removeable--; 4856 } 4857 } 4858 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4859 sctp_log_sack(asoc->last_acked_seq, 4860 cum_ack, 4861 tp1->rec.data.tsn, 4862 0, 4863 0, 4864 SCTP_LOG_FREE_SENT); 4865 } 4866 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 4867 wake_him++; 4868 } 4869 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) { 4870 #ifdef INVARIANTS 4871 panic("Warning flight size is positive and should be 0"); 4872 #else 4873 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n", 4874 asoc->total_flight); 4875 #endif 4876 asoc->total_flight = 0; 4877 } 4878 4879 /* sa_ignore NO_NULL_CHK */ 4880 if ((wake_him) && (stcb->sctp_socket)) { 4881 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4882 struct socket *so; 4883 4884 #endif 4885 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4886 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4887 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK); 4888 } 4889 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4890 so = SCTP_INP_SO(stcb->sctp_ep); 4891 atomic_add_int(&stcb->asoc.refcnt, 1); 4892 SCTP_TCB_UNLOCK(stcb); 4893 SCTP_SOCKET_LOCK(so, 1); 4894 SCTP_TCB_LOCK(stcb); 4895 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4896 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4897 /* assoc was freed while we were unlocked */ 4898 SCTP_SOCKET_UNLOCK(so, 1); 4899 return; 4900 } 4901 #endif 4902 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4903 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4904 SCTP_SOCKET_UNLOCK(so, 1); 4905 #endif 4906 } else { 4907 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4908 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK); 4909 } 4910 } 4911 4912 if (asoc->fast_retran_loss_recovery && accum_moved) { 4913 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) { 4914 /* Setup so we will exit RFC2582 fast recovery */ 4915 will_exit_fast_recovery = 1; 4916 } 4917 } 4918 /* 4919 * Check for revoked fragments: 4920 * 4921 * if Previous sack - Had no frags then we can't have any revoked if 4922 * Previous sack - Had frag's then - If we now have frags aka 4923 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked 4924 * some of them. else - The peer revoked all ACKED fragments, since 4925 * we had some before and now we have NONE. 4926 */ 4927 4928 if (num_seg) { 4929 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked); 4930 asoc->saw_sack_with_frags = 1; 4931 } else if (asoc->saw_sack_with_frags) { 4932 int cnt_revoked = 0; 4933 4934 /* Peer revoked all dg's marked or acked */ 4935 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4936 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 4937 tp1->sent = SCTP_DATAGRAM_SENT; 4938 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4939 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 4940 tp1->whoTo->flight_size, 4941 tp1->book_size, 4942 (uint32_t)(uintptr_t)tp1->whoTo, 4943 tp1->rec.data.tsn); 4944 } 4945 sctp_flight_size_increase(tp1); 4946 sctp_total_flight_increase(stcb, tp1); 4947 tp1->rec.data.chunk_was_revoked = 1; 4948 /* 4949 * To ensure that this increase in 4950 * flightsize, which is artificial, does not 4951 * throttle the sender, we also increase the 4952 * cwnd artificially. 4953 */ 4954 tp1->whoTo->cwnd += tp1->book_size; 4955 cnt_revoked++; 4956 } 4957 } 4958 if (cnt_revoked) { 4959 reneged_all = 1; 4960 } 4961 asoc->saw_sack_with_frags = 0; 4962 } 4963 if (num_nr_seg > 0) 4964 asoc->saw_sack_with_nr_frags = 1; 4965 else 4966 asoc->saw_sack_with_nr_frags = 0; 4967 4968 /* JRS - Use the congestion control given in the CC module */ 4969 if (ecne_seen == 0) { 4970 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4971 if (net->net_ack2 > 0) { 4972 /* 4973 * Karn's rule applies to clearing error 4974 * count, this is optional. 4975 */ 4976 net->error_count = 0; 4977 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { 4978 /* addr came good */ 4979 net->dest_state |= SCTP_ADDR_REACHABLE; 4980 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 4981 0, (void *)net, SCTP_SO_NOT_LOCKED); 4982 } 4983 4984 if (net == stcb->asoc.primary_destination) { 4985 if (stcb->asoc.alternate) { 4986 /* 4987 * release the alternate, 4988 * primary is good 4989 */ 4990 sctp_free_remote_addr(stcb->asoc.alternate); 4991 stcb->asoc.alternate = NULL; 4992 } 4993 } 4994 4995 if (net->dest_state & SCTP_ADDR_PF) { 4996 net->dest_state &= ~SCTP_ADDR_PF; 4997 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, 4998 stcb->sctp_ep, stcb, net, 4999 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29); 5000 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 5001 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 5002 /* Done with this net */ 5003 net->net_ack = 0; 5004 } 5005 /* restore any doubled timers */ 5006 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 5007 if (net->RTO < stcb->asoc.minrto) { 5008 net->RTO = stcb->asoc.minrto; 5009 } 5010 if (net->RTO > stcb->asoc.maxrto) { 5011 net->RTO = stcb->asoc.maxrto; 5012 } 5013 } 5014 } 5015 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery); 5016 } 5017 5018 if (TAILQ_EMPTY(&asoc->sent_queue)) { 5019 /* nothing left in-flight */ 5020 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5021 /* stop all timers */ 5022 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 5023 stcb, net, 5024 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30); 5025 net->flight_size = 0; 5026 net->partial_bytes_acked = 0; 5027 } 5028 asoc->total_flight = 0; 5029 asoc->total_flight_count = 0; 5030 } 5031 5032 /**********************************/ 5033 /* Now what about shutdown issues */ 5034 /**********************************/ 5035 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 5036 /* nothing left on sendqueue.. consider done */ 5037 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 5038 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 5039 asoc->peers_rwnd, 0, 0, a_rwnd); 5040 } 5041 asoc->peers_rwnd = a_rwnd; 5042 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 5043 /* SWS sender side engages */ 5044 asoc->peers_rwnd = 0; 5045 } 5046 /* clean up */ 5047 if ((asoc->stream_queue_cnt == 1) && 5048 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 5049 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 5050 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) { 5051 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT); 5052 } 5053 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 5054 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 5055 (asoc->stream_queue_cnt == 1) && 5056 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 5057 struct mbuf *op_err; 5058 5059 *abort_now = 1; 5060 /* XXX */ 5061 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 5062 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; 5063 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 5064 return; 5065 } 5066 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 5067 (asoc->stream_queue_cnt == 0)) { 5068 struct sctp_nets *netp; 5069 5070 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 5071 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 5072 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 5073 } 5074 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT); 5075 sctp_stop_timers_for_shutdown(stcb); 5076 if (asoc->alternate) { 5077 netp = asoc->alternate; 5078 } else { 5079 netp = asoc->primary_destination; 5080 } 5081 sctp_send_shutdown(stcb, netp); 5082 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 5083 stcb->sctp_ep, stcb, netp); 5084 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 5085 stcb->sctp_ep, stcb, netp); 5086 return; 5087 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) && 5088 (asoc->stream_queue_cnt == 0)) { 5089 struct sctp_nets *netp; 5090 5091 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 5092 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT); 5093 sctp_stop_timers_for_shutdown(stcb); 5094 if (asoc->alternate) { 5095 netp = asoc->alternate; 5096 } else { 5097 netp = asoc->primary_destination; 5098 } 5099 sctp_send_shutdown_ack(stcb, netp); 5100 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 5101 stcb->sctp_ep, stcb, netp); 5102 return; 5103 } 5104 } 5105 /* 5106 * Now here we are going to recycle net_ack for a different use... 5107 * HEADS UP. 5108 */ 5109 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5110 net->net_ack = 0; 5111 } 5112 5113 /* 5114 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking 5115 * to be done. Setting this_sack_lowest_newack to the cum_ack will 5116 * automatically ensure that. 5117 */ 5118 if ((asoc->sctp_cmt_on_off > 0) && 5119 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && 5120 (cmt_dac_flag == 0)) { 5121 this_sack_lowest_newack = cum_ack; 5122 } 5123 if ((num_seg > 0) || (num_nr_seg > 0)) { 5124 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, 5125 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved); 5126 } 5127 /* JRS - Use the congestion control given in the CC module */ 5128 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc); 5129 5130 /* Now are we exiting loss recovery ? */ 5131 if (will_exit_fast_recovery) { 5132 /* Ok, we must exit fast recovery */ 5133 asoc->fast_retran_loss_recovery = 0; 5134 } 5135 if ((asoc->sat_t3_loss_recovery) && 5136 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) { 5137 /* end satellite t3 loss recovery */ 5138 asoc->sat_t3_loss_recovery = 0; 5139 } 5140 /* 5141 * CMT Fast recovery 5142 */ 5143 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5144 if (net->will_exit_fast_recovery) { 5145 /* Ok, we must exit fast recovery */ 5146 net->fast_retran_loss_recovery = 0; 5147 } 5148 } 5149 5150 /* Adjust and set the new rwnd value */ 5151 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 5152 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 5153 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd); 5154 } 5155 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd, 5156 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 5157 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 5158 /* SWS sender side engages */ 5159 asoc->peers_rwnd = 0; 5160 } 5161 if (asoc->peers_rwnd > old_rwnd) { 5162 win_probe_recovery = 1; 5163 } 5164 5165 /* 5166 * Now we must setup so we have a timer up for anyone with 5167 * outstanding data. 5168 */ 5169 done_once = 0; 5170 again: 5171 j = 0; 5172 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5173 if (win_probe_recovery && (net->window_probe)) { 5174 win_probe_recovered = 1; 5175 /*- 5176 * Find first chunk that was used with 5177 * window probe and clear the event. Put 5178 * it back into the send queue as if has 5179 * not been sent. 5180 */ 5181 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5182 if (tp1->window_probe) { 5183 sctp_window_probe_recovery(stcb, asoc, tp1); 5184 break; 5185 } 5186 } 5187 } 5188 if (net->flight_size) { 5189 j++; 5190 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5191 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5192 stcb->sctp_ep, stcb, net); 5193 } 5194 if (net->window_probe) { 5195 net->window_probe = 0; 5196 } 5197 } else { 5198 if (net->window_probe) { 5199 /* 5200 * In window probes we must assure a timer 5201 * is still running there 5202 */ 5203 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5204 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5205 stcb->sctp_ep, stcb, net); 5206 5207 } 5208 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5209 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 5210 stcb, net, 5211 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32); 5212 } 5213 } 5214 } 5215 if ((j == 0) && 5216 (!TAILQ_EMPTY(&asoc->sent_queue)) && 5217 (asoc->sent_queue_retran_cnt == 0) && 5218 (win_probe_recovered == 0) && 5219 (done_once == 0)) { 5220 /* 5221 * huh, this should not happen unless all packets are 5222 * PR-SCTP and marked to skip of course. 5223 */ 5224 if (sctp_fs_audit(asoc)) { 5225 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5226 net->flight_size = 0; 5227 } 5228 asoc->total_flight = 0; 5229 asoc->total_flight_count = 0; 5230 asoc->sent_queue_retran_cnt = 0; 5231 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5232 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5233 sctp_flight_size_increase(tp1); 5234 sctp_total_flight_increase(stcb, tp1); 5235 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 5236 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 5237 } 5238 } 5239 } 5240 done_once = 1; 5241 goto again; 5242 } 5243 /*********************************************/ 5244 /* Here we perform PR-SCTP procedures */ 5245 /* (section 4.2) */ 5246 /*********************************************/ 5247 /* C1. update advancedPeerAckPoint */ 5248 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) { 5249 asoc->advanced_peer_ack_point = cum_ack; 5250 } 5251 /* C2. try to further move advancedPeerAckPoint ahead */ 5252 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { 5253 struct sctp_tmit_chunk *lchk; 5254 uint32_t old_adv_peer_ack_point; 5255 5256 old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 5257 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 5258 /* C3. See if we need to send a Fwd-TSN */ 5259 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) { 5260 /* 5261 * ISSUE with ECN, see FWD-TSN processing. 5262 */ 5263 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 5264 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 5265 0xee, cum_ack, asoc->advanced_peer_ack_point, 5266 old_adv_peer_ack_point); 5267 } 5268 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 5269 send_forward_tsn(stcb, asoc); 5270 } else if (lchk) { 5271 /* try to FR fwd-tsn's that get lost too */ 5272 if (lchk->rec.data.fwd_tsn_cnt >= 3) { 5273 send_forward_tsn(stcb, asoc); 5274 } 5275 } 5276 } 5277 if (lchk) { 5278 /* Assure a timer is up */ 5279 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5280 stcb->sctp_ep, stcb, lchk->whoTo); 5281 } 5282 } 5283 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 5284 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 5285 a_rwnd, 5286 stcb->asoc.peers_rwnd, 5287 stcb->asoc.total_flight, 5288 stcb->asoc.total_output_queue_size); 5289 } 5290 } 5291 5292 void 5293 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag) 5294 { 5295 /* Copy cum-ack */ 5296 uint32_t cum_ack, a_rwnd; 5297 5298 cum_ack = ntohl(cp->cumulative_tsn_ack); 5299 /* Arrange so a_rwnd does NOT change */ 5300 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight; 5301 5302 /* Now call the express sack handling */ 5303 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0); 5304 } 5305 5306 static void 5307 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, 5308 struct sctp_stream_in *strmin) 5309 { 5310 struct sctp_queued_to_read *control, *ncontrol; 5311 struct sctp_association *asoc; 5312 uint32_t mid; 5313 int need_reasm_check = 0; 5314 5315 asoc = &stcb->asoc; 5316 mid = strmin->last_mid_delivered; 5317 /* 5318 * First deliver anything prior to and including the stream no that 5319 * came in. 5320 */ 5321 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) { 5322 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) { 5323 /* this is deliverable now */ 5324 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 5325 if (control->on_strm_q) { 5326 if (control->on_strm_q == SCTP_ON_ORDERED) { 5327 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm); 5328 } else if (control->on_strm_q == SCTP_ON_UNORDERED) { 5329 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm); 5330 #ifdef INVARIANTS 5331 } else { 5332 panic("strmin: %p ctl: %p unknown %d", 5333 strmin, control, control->on_strm_q); 5334 #endif 5335 } 5336 control->on_strm_q = 0; 5337 } 5338 /* subtract pending on streams */ 5339 if (asoc->size_on_all_streams >= control->length) { 5340 asoc->size_on_all_streams -= control->length; 5341 } else { 5342 #ifdef INVARIANTS 5343 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 5344 #else 5345 asoc->size_on_all_streams = 0; 5346 #endif 5347 } 5348 sctp_ucount_decr(asoc->cnt_on_all_streams); 5349 /* deliver it to at least the delivery-q */ 5350 if (stcb->sctp_socket) { 5351 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 5352 sctp_add_to_readq(stcb->sctp_ep, stcb, 5353 control, 5354 &stcb->sctp_socket->so_rcv, 5355 1, SCTP_READ_LOCK_HELD, 5356 SCTP_SO_NOT_LOCKED); 5357 } 5358 } else { 5359 /* Its a fragmented message */ 5360 if (control->first_frag_seen) { 5361 /* 5362 * Make it so this is next to 5363 * deliver, we restore later 5364 */ 5365 strmin->last_mid_delivered = control->mid - 1; 5366 need_reasm_check = 1; 5367 break; 5368 } 5369 } 5370 } else { 5371 /* no more delivery now. */ 5372 break; 5373 } 5374 } 5375 if (need_reasm_check) { 5376 int ret; 5377 5378 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD); 5379 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) { 5380 /* Restore the next to deliver unless we are ahead */ 5381 strmin->last_mid_delivered = mid; 5382 } 5383 if (ret == 0) { 5384 /* Left the front Partial one on */ 5385 return; 5386 } 5387 need_reasm_check = 0; 5388 } 5389 /* 5390 * now we must deliver things in queue the normal way if any are 5391 * now ready. 5392 */ 5393 mid = strmin->last_mid_delivered + 1; 5394 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) { 5395 if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) { 5396 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 5397 /* this is deliverable now */ 5398 if (control->on_strm_q) { 5399 if (control->on_strm_q == SCTP_ON_ORDERED) { 5400 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm); 5401 } else if (control->on_strm_q == SCTP_ON_UNORDERED) { 5402 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm); 5403 #ifdef INVARIANTS 5404 } else { 5405 panic("strmin: %p ctl: %p unknown %d", 5406 strmin, control, control->on_strm_q); 5407 #endif 5408 } 5409 control->on_strm_q = 0; 5410 } 5411 /* subtract pending on streams */ 5412 if (asoc->size_on_all_streams >= control->length) { 5413 asoc->size_on_all_streams -= control->length; 5414 } else { 5415 #ifdef INVARIANTS 5416 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 5417 #else 5418 asoc->size_on_all_streams = 0; 5419 #endif 5420 } 5421 sctp_ucount_decr(asoc->cnt_on_all_streams); 5422 /* deliver it to at least the delivery-q */ 5423 strmin->last_mid_delivered = control->mid; 5424 if (stcb->sctp_socket) { 5425 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 5426 sctp_add_to_readq(stcb->sctp_ep, stcb, 5427 control, 5428 &stcb->sctp_socket->so_rcv, 1, 5429 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED); 5430 5431 } 5432 mid = strmin->last_mid_delivered + 1; 5433 } else { 5434 /* Its a fragmented message */ 5435 if (control->first_frag_seen) { 5436 /* 5437 * Make it so this is next to 5438 * deliver 5439 */ 5440 strmin->last_mid_delivered = control->mid - 1; 5441 need_reasm_check = 1; 5442 break; 5443 } 5444 } 5445 } else { 5446 break; 5447 } 5448 } 5449 if (need_reasm_check) { 5450 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD); 5451 } 5452 } 5453 5454 5455 5456 static void 5457 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb, 5458 struct sctp_association *asoc, 5459 uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn) 5460 { 5461 struct sctp_queued_to_read *control; 5462 struct sctp_stream_in *strm; 5463 struct sctp_tmit_chunk *chk, *nchk; 5464 int cnt_removed = 0; 5465 5466 /* 5467 * For now large messages held on the stream reasm that are complete 5468 * will be tossed too. We could in theory do more work to spin 5469 * through and stop after dumping one msg aka seeing the start of a 5470 * new msg at the head, and call the delivery function... to see if 5471 * it can be delivered... But for now we just dump everything on the 5472 * queue. 5473 */ 5474 strm = &asoc->strmin[stream]; 5475 control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported); 5476 if (control == NULL) { 5477 /* Not found */ 5478 return; 5479 } 5480 if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) { 5481 return; 5482 } 5483 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) { 5484 /* Purge hanging chunks */ 5485 if (!asoc->idata_supported && (ordered == 0)) { 5486 if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) { 5487 break; 5488 } 5489 } 5490 cnt_removed++; 5491 TAILQ_REMOVE(&control->reasm, chk, sctp_next); 5492 if (asoc->size_on_reasm_queue >= chk->send_size) { 5493 asoc->size_on_reasm_queue -= chk->send_size; 5494 } else { 5495 #ifdef INVARIANTS 5496 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size); 5497 #else 5498 asoc->size_on_reasm_queue = 0; 5499 #endif 5500 } 5501 sctp_ucount_decr(asoc->cnt_on_reasm_queue); 5502 if (chk->data) { 5503 sctp_m_freem(chk->data); 5504 chk->data = NULL; 5505 } 5506 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 5507 } 5508 if (!TAILQ_EMPTY(&control->reasm)) { 5509 /* This has to be old data, unordered */ 5510 if (control->data) { 5511 sctp_m_freem(control->data); 5512 control->data = NULL; 5513 } 5514 sctp_reset_a_control(control, stcb->sctp_ep, cumtsn); 5515 chk = TAILQ_FIRST(&control->reasm); 5516 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 5517 TAILQ_REMOVE(&control->reasm, chk, sctp_next); 5518 sctp_add_chk_to_control(control, strm, stcb, asoc, 5519 chk, SCTP_READ_LOCK_HELD); 5520 } 5521 sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD); 5522 return; 5523 } 5524 if (control->on_strm_q == SCTP_ON_ORDERED) { 5525 TAILQ_REMOVE(&strm->inqueue, control, next_instrm); 5526 if (asoc->size_on_all_streams >= control->length) { 5527 asoc->size_on_all_streams -= control->length; 5528 } else { 5529 #ifdef INVARIANTS 5530 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 5531 #else 5532 asoc->size_on_all_streams = 0; 5533 #endif 5534 } 5535 sctp_ucount_decr(asoc->cnt_on_all_streams); 5536 control->on_strm_q = 0; 5537 } else if (control->on_strm_q == SCTP_ON_UNORDERED) { 5538 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); 5539 control->on_strm_q = 0; 5540 #ifdef INVARIANTS 5541 } else if (control->on_strm_q) { 5542 panic("strm: %p ctl: %p unknown %d", 5543 strm, control, control->on_strm_q); 5544 #endif 5545 } 5546 control->on_strm_q = 0; 5547 if (control->on_read_q == 0) { 5548 sctp_free_remote_addr(control->whoFrom); 5549 if (control->data) { 5550 sctp_m_freem(control->data); 5551 control->data = NULL; 5552 } 5553 sctp_free_a_readq(stcb, control); 5554 } 5555 } 5556 5557 void 5558 sctp_handle_forward_tsn(struct sctp_tcb *stcb, 5559 struct sctp_forward_tsn_chunk *fwd, 5560 int *abort_flag, struct mbuf *m, int offset) 5561 { 5562 /* The pr-sctp fwd tsn */ 5563 /* 5564 * here we will perform all the data receiver side steps for 5565 * processing FwdTSN, as required in by pr-sctp draft: 5566 * 5567 * Assume we get FwdTSN(x): 5568 * 5569 * 1) update local cumTSN to x 2) try to further advance cumTSN to x 5570 * + others we have 3) examine and update re-ordering queue on 5571 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to 5572 * report where we are. 5573 */ 5574 struct sctp_association *asoc; 5575 uint32_t new_cum_tsn, gap; 5576 unsigned int i, fwd_sz, m_size; 5577 uint32_t str_seq; 5578 struct sctp_stream_in *strm; 5579 struct sctp_queued_to_read *control, *sv; 5580 5581 asoc = &stcb->asoc; 5582 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) { 5583 SCTPDBG(SCTP_DEBUG_INDATA1, 5584 "Bad size too small/big fwd-tsn\n"); 5585 return; 5586 } 5587 m_size = (stcb->asoc.mapping_array_size << 3); 5588 /*************************************************************/ 5589 /* 1. Here we update local cumTSN and shift the bitmap array */ 5590 /*************************************************************/ 5591 new_cum_tsn = ntohl(fwd->new_cumulative_tsn); 5592 5593 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) { 5594 /* Already got there ... */ 5595 return; 5596 } 5597 /* 5598 * now we know the new TSN is more advanced, let's find the actual 5599 * gap 5600 */ 5601 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn); 5602 asoc->cumulative_tsn = new_cum_tsn; 5603 if (gap >= m_size) { 5604 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) { 5605 struct mbuf *op_err; 5606 char msg[SCTP_DIAG_INFO_LEN]; 5607 5608 /* 5609 * out of range (of single byte chunks in the rwnd I 5610 * give out). This must be an attacker. 5611 */ 5612 *abort_flag = 1; 5613 snprintf(msg, sizeof(msg), 5614 "New cum ack %8.8x too high, highest TSN %8.8x", 5615 new_cum_tsn, asoc->highest_tsn_inside_map); 5616 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 5617 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33; 5618 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 5619 return; 5620 } 5621 SCTP_STAT_INCR(sctps_fwdtsn_map_over); 5622 5623 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); 5624 asoc->mapping_array_base_tsn = new_cum_tsn + 1; 5625 asoc->highest_tsn_inside_map = new_cum_tsn; 5626 5627 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size); 5628 asoc->highest_tsn_inside_nr_map = new_cum_tsn; 5629 5630 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 5631 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 5632 } 5633 } else { 5634 SCTP_TCB_LOCK_ASSERT(stcb); 5635 for (i = 0; i <= gap; i++) { 5636 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) && 5637 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) { 5638 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i); 5639 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) { 5640 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i; 5641 } 5642 } 5643 } 5644 } 5645 /*************************************************************/ 5646 /* 2. Clear up re-assembly queue */ 5647 /*************************************************************/ 5648 5649 /* This is now done as part of clearing up the stream/seq */ 5650 if (asoc->idata_supported == 0) { 5651 uint16_t sid; 5652 5653 /* Flush all the un-ordered data based on cum-tsn */ 5654 SCTP_INP_READ_LOCK(stcb->sctp_ep); 5655 for (sid = 0; sid < asoc->streamincnt; sid++) { 5656 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn); 5657 } 5658 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5659 } 5660 /*******************************************************/ 5661 /* 3. Update the PR-stream re-ordering queues and fix */ 5662 /* delivery issues as needed. */ 5663 /*******************************************************/ 5664 fwd_sz -= sizeof(*fwd); 5665 if (m && fwd_sz) { 5666 /* New method. */ 5667 unsigned int num_str; 5668 uint32_t mid, cur_mid; 5669 uint16_t sid; 5670 uint16_t ordered, flags; 5671 struct sctp_strseq *stseq, strseqbuf; 5672 struct sctp_strseq_mid *stseq_m, strseqbuf_m; 5673 5674 offset += sizeof(*fwd); 5675 5676 SCTP_INP_READ_LOCK(stcb->sctp_ep); 5677 if (asoc->idata_supported) { 5678 num_str = fwd_sz / sizeof(struct sctp_strseq_mid); 5679 } else { 5680 num_str = fwd_sz / sizeof(struct sctp_strseq); 5681 } 5682 for (i = 0; i < num_str; i++) { 5683 if (asoc->idata_supported) { 5684 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset, 5685 sizeof(struct sctp_strseq_mid), 5686 (uint8_t *)&strseqbuf_m); 5687 offset += sizeof(struct sctp_strseq_mid); 5688 if (stseq_m == NULL) { 5689 break; 5690 } 5691 sid = ntohs(stseq_m->sid); 5692 mid = ntohl(stseq_m->mid); 5693 flags = ntohs(stseq_m->flags); 5694 if (flags & PR_SCTP_UNORDERED_FLAG) { 5695 ordered = 0; 5696 } else { 5697 ordered = 1; 5698 } 5699 } else { 5700 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset, 5701 sizeof(struct sctp_strseq), 5702 (uint8_t *)&strseqbuf); 5703 offset += sizeof(struct sctp_strseq); 5704 if (stseq == NULL) { 5705 break; 5706 } 5707 sid = ntohs(stseq->sid); 5708 mid = (uint32_t)ntohs(stseq->ssn); 5709 ordered = 1; 5710 } 5711 /* Convert */ 5712 5713 /* now process */ 5714 5715 /* 5716 * Ok we now look for the stream/seq on the read 5717 * queue where its not all delivered. If we find it 5718 * we transmute the read entry into a PDI_ABORTED. 5719 */ 5720 if (sid >= asoc->streamincnt) { 5721 /* screwed up streams, stop! */ 5722 break; 5723 } 5724 if ((asoc->str_of_pdapi == sid) && 5725 (asoc->ssn_of_pdapi == mid)) { 5726 /* 5727 * If this is the one we were partially 5728 * delivering now then we no longer are. 5729 * Note this will change with the reassembly 5730 * re-write. 5731 */ 5732 asoc->fragmented_delivery_inprogress = 0; 5733 } 5734 strm = &asoc->strmin[sid]; 5735 for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) { 5736 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn); 5737 } 5738 TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) { 5739 if ((control->sinfo_stream == sid) && 5740 (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) { 5741 str_seq = (sid << 16) | (0x0000ffff & mid); 5742 control->pdapi_aborted = 1; 5743 sv = stcb->asoc.control_pdapi; 5744 control->end_added = 1; 5745 if (control->on_strm_q == SCTP_ON_ORDERED) { 5746 TAILQ_REMOVE(&strm->inqueue, control, next_instrm); 5747 if (asoc->size_on_all_streams >= control->length) { 5748 asoc->size_on_all_streams -= control->length; 5749 } else { 5750 #ifdef INVARIANTS 5751 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 5752 #else 5753 asoc->size_on_all_streams = 0; 5754 #endif 5755 } 5756 sctp_ucount_decr(asoc->cnt_on_all_streams); 5757 } else if (control->on_strm_q == SCTP_ON_UNORDERED) { 5758 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); 5759 #ifdef INVARIANTS 5760 } else if (control->on_strm_q) { 5761 panic("strm: %p ctl: %p unknown %d", 5762 strm, control, control->on_strm_q); 5763 #endif 5764 } 5765 control->on_strm_q = 0; 5766 stcb->asoc.control_pdapi = control; 5767 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION, 5768 stcb, 5769 SCTP_PARTIAL_DELIVERY_ABORTED, 5770 (void *)&str_seq, 5771 SCTP_SO_NOT_LOCKED); 5772 stcb->asoc.control_pdapi = sv; 5773 break; 5774 } else if ((control->sinfo_stream == sid) && 5775 SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) { 5776 /* We are past our victim SSN */ 5777 break; 5778 } 5779 } 5780 if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) { 5781 /* Update the sequence number */ 5782 strm->last_mid_delivered = mid; 5783 } 5784 /* now kick the stream the new way */ 5785 /* sa_ignore NO_NULL_CHK */ 5786 sctp_kick_prsctp_reorder_queue(stcb, strm); 5787 } 5788 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 5789 } 5790 /* 5791 * Now slide thing forward. 5792 */ 5793 sctp_slide_mapping_arrays(stcb); 5794 } 5795