/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_lock_bsd.h>
/*
 * NOTES: On the outbound side of things we need to check the SACK timer to
 * see if a SACK should be generated into the chunk queue (if we have data
 * to send, it can be bundled with it).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */
static void
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int lock_held);


void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    uint32_t calc = 0;

    /*
     * This is really set wrong with respect to a one-to-many socket,
     * since sb_cc is the count that everyone has put up. When we
     * rewrite sctp_soreceive then we will fix this so that ONLY this
     * association's data is taken into account.
     */
    if (stcb->sctp_socket == NULL) {
        return (calc);
    }
    if (stcb->asoc.sb_cc == 0 &&
        asoc->size_on_reasm_queue == 0 &&
        asoc->size_on_all_streams == 0) {
        /* Full rwnd granted */
        calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
        return (calc);
    }
    /* get actual space */
    calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
    /*
     * Take out what has NOT been put on the socket queue and which we
     * still hold for putting up.
     */
    calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
        asoc->cnt_on_reasm_queue * MSIZE));
    calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
        asoc->cnt_on_all_streams * MSIZE));
    if (calc == 0) {
        /* out of space */
        return (calc);
    }
    /* what is the overhead of all these rwnd's */
    calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
    /*
     * If the window gets too small due to ctrl-stuff, reduce it to 1,
     * even if it is 0 (SWS engaged).
     */
    if (calc < stcb->asoc.my_rwnd_control_len) {
        calc = 1;
    }
    return (calc);
}
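/*
 * Illustrative walk-through (hypothetical numbers, not taken from a real
 * trace): assume SCTP_SB_LIMIT_RCV() is 64 kB, sctp_sbspace() currently
 * reports 40000 bytes, 3000 bytes sit on the reassembly queue in 5 chunks,
 * and nothing is on the stream queues. With MSIZE = 256 the calculation
 * above yields roughly
 *
 *     calc = 40000 - (3000 + 5 * 256) = 35720
 *
 * before the my_rwnd_control_len overhead is subtracted. The per-chunk
 * MSIZE term charges the peer for the mbuf overhead each queued chunk
 * costs us, so the advertised window shrinks faster than the raw byte
 * counts alone would suggest.
 */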
/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint32_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
    struct sctp_queued_to_read *read_queue_e = NULL;

    sctp_alloc_a_readq(stcb, read_queue_e);
    if (read_queue_e == NULL) {
        goto failed_build;
    }
    memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
    read_queue_e->sinfo_stream = stream_no;
    read_queue_e->sinfo_ssn = stream_seq;
    read_queue_e->sinfo_flags = (flags << 8);
    read_queue_e->sinfo_ppid = ppid;
    read_queue_e->sinfo_context = context;
    read_queue_e->sinfo_tsn = tsn;
    read_queue_e->sinfo_cumtsn = tsn;
    read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
    read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
    TAILQ_INIT(&read_queue_e->reasm);
    read_queue_e->whoFrom = net;
    atomic_add_int(&net->ref_count, 1);
    read_queue_e->data = dm;
    read_queue_e->stcb = stcb;
    read_queue_e->port_from = stcb->rport;
failed_build:
    return (read_queue_e);
}

struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
    struct sctp_extrcvinfo *seinfo;
    struct sctp_sndrcvinfo *outinfo;
    struct sctp_rcvinfo *rcvinfo;
    struct sctp_nxtinfo *nxtinfo;
    struct cmsghdr *cmh;
    struct mbuf *ret;
    int len;
    int use_extended;
    int provide_nxt;

    if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
        sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
        sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
        /* user does not want any ancillary data */
        return (NULL);
    }
    len = 0;
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
        len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
    }
    seinfo = (struct sctp_extrcvinfo *)sinfo;
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
        (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
        provide_nxt = 1;
        len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
    } else {
        provide_nxt = 0;
    }
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
        if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
            use_extended = 1;
            len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
        } else {
            use_extended = 0;
            len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
        }
    } else {
        use_extended = 0;
    }

    ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
    if (ret == NULL) {
        /* No space */
        return (ret);
    }
    SCTP_BUF_LEN(ret) = 0;

    /* We need a CMSG header followed by the struct */
    cmh = mtod(ret, struct cmsghdr *);
    /*
     * Make sure that there is no un-initialized padding between the
     * cmsg header and cmsg data and after the cmsg data.
     */
    memset(cmh, 0, len);
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
        cmh->cmsg_level = IPPROTO_SCTP;
        cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
        cmh->cmsg_type = SCTP_RCVINFO;
        rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
        rcvinfo->rcv_sid = sinfo->sinfo_stream;
        rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
        rcvinfo->rcv_flags = sinfo->sinfo_flags;
        rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
        rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
        rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
        rcvinfo->rcv_context = sinfo->sinfo_context;
        rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
        cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
        SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
    }
    if (provide_nxt) {
        cmh->cmsg_level = IPPROTO_SCTP;
        cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
        cmh->cmsg_type = SCTP_NXTINFO;
        nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
        nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
        nxtinfo->nxt_flags = 0;
        if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
            nxtinfo->nxt_flags |= SCTP_UNORDERED;
        }
        if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
            nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
        }
        if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
            nxtinfo->nxt_flags |= SCTP_COMPLETE;
        }
        nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
        nxtinfo->nxt_length = seinfo->serinfo_next_length;
        nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
        cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
        SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
    }
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
        cmh->cmsg_level = IPPROTO_SCTP;
        outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
        if (use_extended) {
            cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
            cmh->cmsg_type = SCTP_EXTRCV;
            memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
            SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
        } else {
            cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
            cmh->cmsg_type = SCTP_SNDRCV;
            *outinfo = *sinfo;
            SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
        }
    }
    return (ret);
}
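/*
 * Consumer-side sketch (user space, illustrative only): a receiver that
 * enabled SCTP_RECVRCVINFO would walk the ancillary data produced above
 * roughly like this:
 *
 *	struct cmsghdr *cmsg;
 *	struct sctp_rcvinfo rinfo;
 *
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
 *	    cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if (cmsg->cmsg_level == IPPROTO_SCTP &&
 *		    cmsg->cmsg_type == SCTP_RCVINFO) {
 *			memcpy(&rinfo, CMSG_DATA(cmsg), sizeof(rinfo));
 *		}
 *	}
 *
 * The memset() of the whole buffer above both keeps CMSG_NXTHDR() safe to
 * use (padding between entries is zeroed) and avoids leaking uninitialized
 * kernel memory to user space.
 */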
static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
    uint32_t gap, i, cumackp1;
    int fnd = 0;
    int in_r = 0, in_nr = 0;

    if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
        return;
    }
    cumackp1 = asoc->cumulative_tsn + 1;
    if (SCTP_TSN_GT(cumackp1, tsn)) {
        /*
         * This TSN is behind the cum-ack and thus we don't need to
         * worry about it being moved from one to the other.
         */
        return;
    }
    SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
    in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
    in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
    if ((in_r == 0) && (in_nr == 0)) {
#ifdef INVARIANTS
        panic("Things are really messed up now");
#else
        SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
        sctp_print_mapping_array(asoc);
#endif
    }
    if (in_nr == 0)
        SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
    if (in_r)
        SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
    if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
        asoc->highest_tsn_inside_nr_map = tsn;
    }
    if (tsn == asoc->highest_tsn_inside_map) {
        /* We must back down to see what the new highest is */
        for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
            SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
            if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
                asoc->highest_tsn_inside_map = i;
                fnd = 1;
                break;
            }
        }
        if (!fnd) {
            asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
        }
    }
}
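/*
 * Worked example (hypothetical values): with mapping_array_base_tsn =
 * 0xfffffffe and tsn = 0x00000001, SCTP_CALC_TSN_TO_GAP() yields gap = 3,
 * since TSN arithmetic is modulo 2^32 and the gap is the unsigned
 * difference tsn - base. Bit 3 in the bitmap therefore stands for that
 * wrapped TSN. Moving a bit from mapping_array to nr_mapping_array, as
 * done above, marks the TSN as one the stack promises not to renege on,
 * even if sctp_do_drain later frees queued data.
 */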
static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
    struct sctp_queued_to_read *at;
    struct sctp_readhead *q;
    uint8_t bits, unordered;

    bits = (control->sinfo_flags >> 8);
    unordered = bits & SCTP_DATA_UNORDERED;
    if (unordered) {
        q = &strm->uno_inqueue;
        if (asoc->idata_supported == 0) {
            if (!TAILQ_EMPTY(q)) {
                /*
                 * Only one stream can be here in old style
                 * -- abort
                 */
                return (-1);
            }
            TAILQ_INSERT_TAIL(q, control, next_instrm);
            control->on_strm_q = SCTP_ON_UNORDERED;
            return (0);
        }
    } else {
        q = &strm->inqueue;
    }
    if ((bits & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
        control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
    }
    if (TAILQ_EMPTY(q)) {
        /* Empty queue */
        TAILQ_INSERT_HEAD(q, control, next_instrm);
        if (unordered) {
            control->on_strm_q = SCTP_ON_UNORDERED;
        } else {
            control->on_strm_q = SCTP_ON_ORDERED;
        }
        return (0);
    } else {
        TAILQ_FOREACH(at, q, next_instrm) {
            if (SCTP_TSN_GT(at->msg_id, control->msg_id)) {
                /*
                 * The one in the queue is bigger than the
                 * new one, insert the new one before it.
                 */
                TAILQ_INSERT_BEFORE(at, control, next_instrm);
                if (unordered) {
                    control->on_strm_q = SCTP_ON_UNORDERED;
                } else {
                    control->on_strm_q = SCTP_ON_ORDERED;
                }
                break;
            } else if (at->msg_id == control->msg_id) {
                /*
                 * Gak, the peer sent us a duplicate msg_id.
                 * Return -1 to abort.
                 */
                return (-1);
            } else {
                if (TAILQ_NEXT(at, next_instrm) == NULL) {
                    /*
                     * We are at the end, insert it
                     * after this one
                     */
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
                        sctp_log_strm_del(control, at,
                            SCTP_STR_LOG_FROM_INSERT_TL);
                    }
                    TAILQ_INSERT_AFTER(q,
                        at, control, next_instrm);
                    if (unordered) {
                        control->on_strm_q = SCTP_ON_UNORDERED;
                    } else {
                        control->on_strm_q = SCTP_ON_ORDERED;
                    }
                    break;
                }
            }
        }
    }
    return (0);
}
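/*
 * Ordering note (illustrative): the loop above keeps each stream queue
 * sorted by msg_id using serial-number comparison, so with existing
 * entries {5, 6, 9} an arriving msg_id 7 lands between 6 and 9, while a
 * second msg_id 6 aborts the association. Because SCTP_TSN_GT() is a
 * modulo-2^32 comparison, a msg_id of 2 arriving while the queue holds
 * 0xfffffffe is treated as "bigger" and goes to the tail, which is the
 * behavior wanted across a sequence-number wrap.
 */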
static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
    char msg[SCTP_DIAG_INFO_LEN];
    struct mbuf *oper;

    if (stcb->asoc.idata_supported) {
        snprintf(msg, sizeof(msg),
            "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
            opspot,
            control->fsn_included,
            chk->rec.data.TSN_seq,
            chk->rec.data.stream_number,
            chk->rec.data.fsn_num, chk->rec.data.stream_seq);
    } else {
        snprintf(msg, sizeof(msg),
            "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
            opspot,
            control->fsn_included,
            chk->rec.data.TSN_seq,
            chk->rec.data.stream_number,
            chk->rec.data.fsn_num,
            (uint16_t)chk->rec.data.stream_seq);
    }
    oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
    sctp_m_freem(chk->data);
    chk->data = NULL;
    sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
    sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
    *abort_flag = 1;
}

static void
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
    /*
     * The control could not be placed and must be cleaned.
     */
    struct sctp_tmit_chunk *chk, *nchk;

    TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
        TAILQ_REMOVE(&control->reasm, chk, sctp_next);
        if (chk->data)
            sctp_m_freem(chk->data);
        chk->data = NULL;
        sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
    }
    sctp_free_a_readq(stcb, control);
}
/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go, OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order, as long as
 * the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
    /*
     * FIX-ME maybe? What happens when the ssn wraps? If we are getting
     * all the data in one stream this could happen quite rapidly. One
     * could use the TSN to keep track of things, but this scheme breaks
     * down in the other type of stream usage that could occur. Send a
     * single msg to stream 0, send 4 billion messages to stream 1, now
     * send a message to stream 0. You have a situation where the TSN
     * has wrapped but not in the stream. Is this worth worrying about,
     * or should we just change our queue sort at the bottom to be by
     * TSN?
     *
     * Could it also be legal for a peer to send ssn 1 with TSN 2 and
     * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
     * assignment this could happen... and I don't see how this would be
     * a violation. So for now I am undecided and will leave the sort by
     * SSN alone. Maybe a hybrid approach is the answer.
     */
    struct sctp_queued_to_read *at;
    int queue_needed;
    uint32_t nxt_todel;
    struct mbuf *op_err;
    char msg[SCTP_DIAG_INFO_LEN];

    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
        sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
    }
    if (SCTP_MSGID_GT((!asoc->idata_supported), strm->last_sequence_delivered, control->sinfo_ssn)) {
        /* The incoming sseq is behind where we last delivered? */
        SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
            control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
        /*
         * throw it in the stream so it gets cleaned up in
         * association destruction
         */
        TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
        snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
            strm->last_sequence_delivered, control->sinfo_tsn,
            control->sinfo_stream, control->sinfo_ssn);
        op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
        sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
        *abort_flag = 1;
        return;

    }
    if ((SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) && (asoc->idata_supported == 0)) {
        goto protocol_error;
    }
    queue_needed = 1;
    asoc->size_on_all_streams += control->length;
    sctp_ucount_incr(asoc->cnt_on_all_streams);
    nxt_todel = strm->last_sequence_delivered + 1;
    if (nxt_todel == control->sinfo_ssn) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
        struct socket *so;

        so = SCTP_INP_SO(stcb->sctp_ep);
        atomic_add_int(&stcb->asoc.refcnt, 1);
        SCTP_TCB_UNLOCK(stcb);
        SCTP_SOCKET_LOCK(so, 1);
        SCTP_TCB_LOCK(stcb);
        atomic_subtract_int(&stcb->asoc.refcnt, 1);
        if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
            SCTP_SOCKET_UNLOCK(so, 1);
            return;
        }
#endif
        /* can be delivered right away? */
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
            sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
        }
        /* EY it won't be queued if it could be delivered directly */
        queue_needed = 0;
        asoc->size_on_all_streams -= control->length;
        sctp_ucount_decr(asoc->cnt_on_all_streams);
        strm->last_sequence_delivered++;
        sctp_mark_non_revokable(asoc, control->sinfo_tsn);
        sctp_add_to_readq(stcb->sctp_ep, stcb,
            control,
            &stcb->sctp_socket->so_rcv, 1,
            SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
        TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
            /* all delivered */
            nxt_todel = strm->last_sequence_delivered + 1;
            if ((nxt_todel == control->sinfo_ssn) &&
                (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
                asoc->size_on_all_streams -= control->length;
                sctp_ucount_decr(asoc->cnt_on_all_streams);
                if (control->on_strm_q == SCTP_ON_ORDERED) {
                    TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
#ifdef INVARIANTS
                } else {
                    panic("Huh control: %p is on_strm_q: %d",
                        control, control->on_strm_q);
#endif
                }
                control->on_strm_q = 0;
                strm->last_sequence_delivered++;
                /*
                 * We ignore the return of deliver_data here
                 * since we always can hold the chunk on the
                 * d-queue. And we have a finite number that
                 * can be delivered from the strq.
                 */
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
                    sctp_log_strm_del(control, NULL,
                        SCTP_STR_LOG_FROM_IMMED_DEL);
                }
                sctp_mark_non_revokable(asoc, control->sinfo_tsn);
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, 1,
                    SCTP_READ_LOCK_NOT_HELD,
                    SCTP_SO_LOCKED);
                continue;
            } else if (nxt_todel == control->sinfo_ssn) {
                *need_reasm = 1;
            }
            break;
        }
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
        SCTP_SOCKET_UNLOCK(so, 1);
#endif
    }
    if (queue_needed) {
        /*
         * Ok, we did not deliver this guy, find the correct place
         * to put it on the queue.
         */
        if (sctp_place_control_in_stream(strm, asoc, control)) {
            snprintf(msg, sizeof(msg),
                "Queue to str msg_id: %u duplicate",
                control->msg_id);
            sctp_clean_up_control(stcb, control);
            op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
            stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
            sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
            *abort_flag = 1;
        }
    }
}
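/*
 * Delivery example (hypothetical): suppose last_sequence_delivered is 7
 * and ordered messages with SSNs 8, 9, and 11 are queued complete. The
 * fast path above pushes 8 straight to the socket buffer, the follow-up
 * loop drains 9, and 11 stays queued because 10 is still missing; once
 * 10 arrives the same machinery delivers 10 and 11 in one pass. Only
 * non-fragmented entries are drained this way; a fragmented SSN that is
 * next in line sets *need_reasm instead.
 */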
static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
    struct mbuf *m, *prev = NULL;
    struct sctp_tcb *stcb;

    stcb = control->stcb;
    control->held_length = 0;
    control->length = 0;
    m = control->data;
    while (m) {
        if (SCTP_BUF_LEN(m) == 0) {
            /* Skip mbufs with NO length */
            if (prev == NULL) {
                /* First one */
                control->data = sctp_m_free(m);
                m = control->data;
            } else {
                SCTP_BUF_NEXT(prev) = sctp_m_free(m);
                m = SCTP_BUF_NEXT(prev);
            }
            if (m == NULL) {
                control->tail_mbuf = prev;
            }
            continue;
        }
        prev = m;
        atomic_add_int(&control->length, SCTP_BUF_LEN(m));
        if (control->on_read_q) {
            /*
             * On the read queue, so we must increment the SB
             * accounting; we assume the caller has done any
             * locking of the SB.
             */
            sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
        }
        m = SCTP_BUF_NEXT(m);
    }
    if (prev) {
        control->tail_mbuf = prev;
    }
}

static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m)
{
    struct mbuf *prev = NULL;
    struct sctp_tcb *stcb;

    stcb = control->stcb;
    if (stcb == NULL) {
#ifdef INVARIANTS
        panic("Control broken");
#else
        return;
#endif
    }
    if (control->tail_mbuf == NULL) {
        /* TSNH */
        control->data = m;
        sctp_setup_tail_pointer(control);
        return;
    }
    control->tail_mbuf->m_next = m;
    while (m) {
        if (SCTP_BUF_LEN(m) == 0) {
            /* Skip mbufs with NO length */
            if (prev == NULL) {
                /* First one */
                control->tail_mbuf->m_next = sctp_m_free(m);
                m = control->tail_mbuf->m_next;
            } else {
                SCTP_BUF_NEXT(prev) = sctp_m_free(m);
                m = SCTP_BUF_NEXT(prev);
            }
            if (m == NULL) {
                control->tail_mbuf = prev;
            }
            continue;
        }
        prev = m;
        if (control->on_read_q) {
            /*
             * On the read queue, so we must increment the SB
             * accounting; we assume the caller has done any
             * locking of the SB.
             */
            sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
        }
        atomic_add_int(&control->length, SCTP_BUF_LEN(m));
        m = SCTP_BUF_NEXT(m);
    }
    if (prev) {
        control->tail_mbuf = prev;
    }
}
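/*
 * Chain-walk example (hypothetical): given the mbuf chain
 * [100 bytes] -> [0 bytes] -> [60 bytes], the helpers above free the
 * empty middle mbuf, leave control->length at 160, and point
 * control->tail_mbuf at the 60-byte mbuf. Pruning zero-length mbufs as
 * we go keeps tail_mbuf usable as an O(1) append point and avoids
 * charging the socket buffer (via sctp_sballoc()) for mbufs that carry
 * no data.
 */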
static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
    memset(nc, 0, sizeof(struct sctp_queued_to_read));
    nc->sinfo_stream = control->sinfo_stream;
    nc->sinfo_ssn = control->sinfo_ssn;
    TAILQ_INIT(&nc->reasm);
    nc->top_fsn = control->top_fsn;
    nc->msg_id = control->msg_id;
    nc->sinfo_flags = control->sinfo_flags;
    nc->sinfo_ppid = control->sinfo_ppid;
    nc->sinfo_context = control->sinfo_context;
    nc->fsn_included = 0xffffffff;
    nc->sinfo_tsn = control->sinfo_tsn;
    nc->sinfo_cumtsn = control->sinfo_cumtsn;
    nc->sinfo_assoc_id = control->sinfo_assoc_id;
    nc->whoFrom = control->whoFrom;
    atomic_add_int(&nc->whoFrom->ref_count, 1);
    nc->stcb = control->stcb;
    nc->port_from = control->port_from;
}

static void
sctp_reset_a_control(struct sctp_queued_to_read *control,
    struct sctp_inpcb *inp, uint32_t tsn)
{
    control->fsn_included = tsn;
    if (control->on_read_q) {
        /*
         * We have to purge it from there, hopefully this will work
         * :-)
         */
        TAILQ_REMOVE(&inp->read_queue, control, next);
        control->on_read_q = 0;
    }
}

static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
    /*
     * Special handling for the old un-ordered data chunk. All the
     * chunks/TSNs go to msg_id 0. So we have to do the old style
     * watching to see if we have it all. If you return one, no other
     * control entries on the un-ordered queue will be looked at. In
     * theory there should be no other entries in reality, unless the
     * sender is transmitting both unordered NDATA and unordered DATA...
     */
    struct sctp_tmit_chunk *chk, *lchk, *tchk;
    uint32_t fsn;
    struct sctp_queued_to_read *nc;
    int cnt_added;

    if (control->first_frag_seen == 0) {
        /* Nothing we can do, we have not seen the first piece yet */
        return (1);
    }
    /* Collapse any we can */
    cnt_added = 0;
restart:
    fsn = control->fsn_included + 1;
    /* Now what can we add? */
    TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
        if (chk->rec.data.fsn_num == fsn) {
            /* Ok lets add it */
            sctp_alloc_a_readq(stcb, nc);
            if (nc == NULL) {
                break;
            }
            memset(nc, 0, sizeof(struct sctp_queued_to_read));
            TAILQ_REMOVE(&control->reasm, chk, sctp_next);
            sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
            fsn++;
            cnt_added++;
            chk = NULL;
            if (control->end_added) {
                /* We are done */
                if (!TAILQ_EMPTY(&control->reasm)) {
                    /*
                     * Ok we have to move anything left
                     * on the control queue to a new
                     * control.
                     */
                    sctp_build_readq_entry_from_ctl(nc, control);
                    tchk = TAILQ_FIRST(&control->reasm);
                    if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
                        TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
                        asoc->size_on_reasm_queue -= tchk->send_size;
                        sctp_ucount_decr(asoc->cnt_on_reasm_queue);
                        nc->first_frag_seen = 1;
                        nc->fsn_included = tchk->rec.data.fsn_num;
                        nc->data = tchk->data;
                        nc->sinfo_ppid = tchk->rec.data.payloadtype;
                        nc->sinfo_tsn = tchk->rec.data.TSN_seq;
                        sctp_mark_non_revokable(asoc, tchk->rec.data.TSN_seq);
                        tchk->data = NULL;
                        sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
                        sctp_setup_tail_pointer(nc);
                        tchk = TAILQ_FIRST(&control->reasm);
                    }
                    /* Spin the rest onto the queue */
                    while (tchk) {
                        TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
                        TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
                        tchk = TAILQ_FIRST(&control->reasm);
                    }
                    /*
                     * Now lets add it to the queue
                     * after removing control
                     */
                    TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
                    nc->on_strm_q = SCTP_ON_UNORDERED;
                    if (control->on_strm_q) {
                        TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
                        control->on_strm_q = 0;
                    }
                }
                if (control->pdapi_started) {
                    strm->pd_api_started = 0;
                    control->pdapi_started = 0;
                }
                if (control->on_strm_q) {
                    TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
                    control->on_strm_q = 0;
                    SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
                }
                if (control->on_read_q == 0) {
                    sctp_add_to_readq(stcb->sctp_ep, stcb, control,
                        &stcb->sctp_socket->so_rcv, control->end_added,
                        inp_read_lock_held, SCTP_SO_NOT_LOCKED);
                }
                sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
                if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
                    /*
                     * Switch to the new guy and
                     * continue
                     */
                    control = nc;
                    goto restart;
                } else {
                    if (nc->on_strm_q == 0) {
                        sctp_free_a_readq(stcb, nc);
                    }
                }
                return (1);
            } else {
                sctp_free_a_readq(stcb, nc);
            }
        } else {
            /* Can't add more */
            break;
        }
    }
    if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
        strm->pd_api_started = 1;
        control->pdapi_started = 1;
        sctp_add_to_readq(stcb->sctp_ep, stcb, control,
            &stcb->sctp_socket->so_rcv, control->end_added,
            inp_read_lock_held, SCTP_SO_NOT_LOCKED);
        sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
        return (0);
    } else {
        return (1);
    }
}
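/*
 * Collapse walkthrough (hypothetical): if control has fsn_included = 10
 * and the reasm list holds FSNs {11, 12, 15}, the loop above merges 11
 * and 12 into the control and stops at 15 (out of sequence). If 12
 * carried the E-bit, the completed message is handed to the read queue
 * and the leftover 15 is re-homed onto a freshly built control ("nc"),
 * so a following message on the same old-style unordered queue can keep
 * reassembling independently.
 */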
static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
    struct sctp_tmit_chunk *at;
    int inserted;

    /*
     * Here we need to place the chunk into the control structure sorted
     * in the correct order.
     */
    if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
        /* Its the very first one. */
        SCTPDBG(SCTP_DEBUG_XXX,
            "chunk is a first fsn: %u becomes fsn_included\n",
            chk->rec.data.fsn_num);
        if (control->first_frag_seen) {
            /*
             * In old un-ordered mode we can reassemble multiple
             * messages on one control, as long as the next
             * FIRST is greater than the old first (TSN, i.e.
             * FSN, wise).
             */
            struct mbuf *tdata;
            uint32_t tmp;

            if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->fsn_included)) {
                /*
                 * Easy way: the start of a new guy beyond
                 * the lowest
                 */
                goto place_chunk;
            }
            if ((chk->rec.data.fsn_num == control->fsn_included) ||
                (control->pdapi_started)) {
                /*
                 * Ok this should not happen; if it does, we
                 * started the pd-api on the higher TSN
                 * (since the equals part is a TSN failure
                 * it must be that).
                 *
                 * We are completely hosed in that case since
                 * there is no way to recover. This really
                 * will only happen if we can get more TSNs
                 * higher before the pd-api-point.
                 */
                sctp_abort_in_reasm(stcb, control, chk,
                    abort_flag,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);

                return;
            }
            /*
             * Ok we have two firsts and the one we just got is
             * smaller than the one we previously placed..
             * yuck! We must swap them out.
             */
            /* swap the mbufs */
            tdata = control->data;
            control->data = chk->data;
            chk->data = tdata;
            /* Save the lengths */
            chk->send_size = control->length;
            /* Recompute length of control and tail pointer */
            sctp_setup_tail_pointer(control);
            /* Fix the FSN included */
            tmp = control->fsn_included;
            control->fsn_included = chk->rec.data.fsn_num;
            chk->rec.data.fsn_num = tmp;
            /* Fix the TSN included */
            tmp = control->sinfo_tsn;
            control->sinfo_tsn = chk->rec.data.TSN_seq;
            chk->rec.data.TSN_seq = tmp;
            /* Fix the PPID included */
            tmp = control->sinfo_ppid;
            control->sinfo_ppid = chk->rec.data.payloadtype;
            chk->rec.data.payloadtype = tmp;
            /* Fix tail pointer */
            goto place_chunk;
        }
        control->first_frag_seen = 1;
        control->top_fsn = control->fsn_included = chk->rec.data.fsn_num;
        control->sinfo_tsn = chk->rec.data.TSN_seq;
        control->sinfo_ppid = chk->rec.data.payloadtype;
        control->data = chk->data;
        sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
        chk->data = NULL;
        sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
        sctp_setup_tail_pointer(control);
        return;
    }
place_chunk:
    inserted = 0;
    TAILQ_FOREACH(at, &control->reasm, sctp_next) {
        if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
            /*
             * This one in queue is bigger than the new one,
             * insert the new one before at.
             */
            asoc->size_on_reasm_queue += chk->send_size;
            sctp_ucount_incr(asoc->cnt_on_reasm_queue);
            inserted = 1;
            TAILQ_INSERT_BEFORE(at, chk, sctp_next);
            break;
        } else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
            /*
             * They sent a duplicate fsn number. This really
             * should not happen since the FSN is a TSN and it
             * should have been dropped earlier.
             */
            sctp_abort_in_reasm(stcb, control, chk,
                abort_flag,
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
            return;
        }
    }
    if (inserted == 0) {
        /* Its at the end */
        asoc->size_on_reasm_queue += chk->send_size;
        sctp_ucount_incr(asoc->cnt_on_reasm_queue);
        control->top_fsn = chk->rec.data.fsn_num;
        TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
    }
}
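/*
 * Swap rationale (illustrative): in old-style unordered reassembly the
 * FSN is just the TSN, so two FIRST fragments can legitimately coexist
 * on one control. If the control already anchors FSN 20 and a FIRST
 * with FSN 12 arrives, the code above swaps data, FSN, TSN, and PPID so
 * the control always anchors the lowest FIRST (12), and the displaced
 * first (20) is re-queued as an ordinary chunk via place_chunk.
 */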
static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm, int inp_read_lock_held)
{
    /*
     * Given a stream, strm, see if any of the SSNs on it that are
     * fragmented are ready to deliver. If so, go ahead and place them
     * on the read queue. In so placing, if we have hit the end, then
     * we need to remove them from the stream's queue.
     */
    struct sctp_queued_to_read *control, *nctl = NULL;
    uint32_t next_to_del;
    uint32_t pd_point;
    int ret = 0;

    if (stcb->sctp_socket) {
        pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
            stcb->sctp_ep->partial_delivery_point);
    } else {
        pd_point = stcb->sctp_ep->partial_delivery_point;
    }
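    /*
     * Example (hypothetical numbers): with a 64 kB receive buffer and a
     * SCTP_PARTIAL_DELIVERY_SHIFT of 2 (a value picked purely for the
     * example), the first branch above caps pd_point at 16 kB, further
     * limited by the endpoint's configured partial_delivery_point. A
     * partially reassembled message only starts the partial-delivery
     * API once it has accumulated at least pd_point bytes, so small
     * gaps never tie up the read queue.
     */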
    control = TAILQ_FIRST(&strm->uno_inqueue);

    if ((control) &&
        (asoc->idata_supported == 0)) {
        /* Special handling needed for "old" data format */
        if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
            goto done_un;
        }
    }
    if (strm->pd_api_started) {
        /* Can't add more */
        return (0);
    }
    while (control) {
        SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
            control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included);
        nctl = TAILQ_NEXT(control, next_instrm);
        if (control->end_added) {
            /* We just put the last bit on */
            if (control->on_strm_q) {
#ifdef INVARIANTS
                if (control->on_strm_q != SCTP_ON_UNORDERED) {
                    panic("Huh control: %p on_q: %d -- not unordered?",
                        control, control->on_strm_q);
                }
#endif
                SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
                TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
                control->on_strm_q = 0;
            }
            if (control->on_read_q == 0) {
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, control->end_added,
                    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
            }
        } else {
            /* Can we do a PD-API for this un-ordered guy? */
            if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
                strm->pd_api_started = 1;
                control->pdapi_started = 1;
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, control->end_added,
                    inp_read_lock_held, SCTP_SO_NOT_LOCKED);

                break;
            }
        }
        control = nctl;
    }
done_un:
    control = TAILQ_FIRST(&strm->inqueue);
    if (strm->pd_api_started) {
        /* Can't add more */
        return (0);
    }
    if (control == NULL) {
        return (ret);
    }
    if (strm->last_sequence_delivered == control->sinfo_ssn) {
        /*
         * Ok the guy at the top was being partially delivered and
         * has completed, so we remove it. Note the pd_api flag was
         * taken off when the chunk was merged on in
         * sctp_queue_data_for_reasm below.
         */
        nctl = TAILQ_NEXT(control, next_instrm);
        SCTPDBG(SCTP_DEBUG_XXX,
            "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
            control, control->end_added, control->sinfo_ssn,
            control->top_fsn, control->fsn_included,
            strm->last_sequence_delivered);
        if (control->end_added) {
            if (control->on_strm_q) {
#ifdef INVARIANTS
                if (control->on_strm_q != SCTP_ON_ORDERED) {
                    panic("Huh control: %p on_q: %d -- not ordered?",
                        control, control->on_strm_q);
                }
#endif
                SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
                TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
                control->on_strm_q = 0;
            }
            if (strm->pd_api_started && control->pdapi_started) {
                control->pdapi_started = 0;
                strm->pd_api_started = 0;
            }
            if (control->on_read_q == 0) {
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, control->end_added,
                    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
            }
            control = nctl;
        }
    }
    if (strm->pd_api_started) {
        /*
         * Can't add more; we must have gotten an un-ordered one
         * above that is being partially delivered.
         */
        return (0);
    }
deliver_more:
    next_to_del = strm->last_sequence_delivered + 1;
    if (control) {
        SCTPDBG(SCTP_DEBUG_XXX,
            "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
            control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included,
            next_to_del);
        nctl = TAILQ_NEXT(control, next_instrm);
        if ((control->sinfo_ssn == next_to_del) &&
            (control->first_frag_seen)) {
            int done;

            /* Ok we can deliver it onto the stream. */
            if (control->end_added) {
                /* We are done with it afterwards */
                if (control->on_strm_q) {
#ifdef INVARIANTS
                    if (control->on_strm_q != SCTP_ON_ORDERED) {
                        panic("Huh control: %p on_q: %d -- not ordered?",
                            control, control->on_strm_q);
                    }
#endif
                    SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
                    TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
                    control->on_strm_q = 0;
                }
                ret++;
            }
            if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
                /*
                 * A singleton now slipping through - mark
                 * it non-revokable too
                 */
                sctp_mark_non_revokable(asoc, control->sinfo_tsn);
            } else if (control->end_added == 0) {
                /*
                 * Check if we can defer adding until it is
                 * all there
                 */
                if ((control->length < pd_point) || (strm->pd_api_started)) {
                    /*
                     * Don't need it or cannot add more
                     * (one being delivered that way)
                     */
                    goto out;
                }
            }
            done = (control->end_added) && (control->last_frag_seen);
            if (control->on_read_q == 0) {
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, control->end_added,
                    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
            }
            strm->last_sequence_delivered = next_to_del;
            if (done) {
                control = nctl;
                goto deliver_more;
            } else {
                /* We are now doing PD API */
                strm->pd_api_started = 1;
                control->pdapi_started = 1;
            }
        }
    }
out:
    return (ret);
}


void
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock)
{
    /*
     * Given a control and a chunk, merge the data from the chunk onto
     * the control and free up the chunk resources.
     */
    int i_locked = 0;

    if (control->on_read_q && (hold_rlock == 0)) {
        /*
         * Its being pd-api'd so we must do some locks.
         */
        SCTP_INP_READ_LOCK(stcb->sctp_ep);
        i_locked = 1;
    }
    if (control->data == NULL) {
        control->data = chk->data;
        sctp_setup_tail_pointer(control);
    } else {
        sctp_add_to_tail_pointer(control, chk->data);
    }
    control->fsn_included = chk->rec.data.fsn_num;
    asoc->size_on_reasm_queue -= chk->send_size;
    sctp_ucount_decr(asoc->cnt_on_reasm_queue);
    sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
    chk->data = NULL;
    if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
        control->first_frag_seen = 1;
    }
    if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
        /* Its complete */
        if ((control->on_strm_q) && (control->on_read_q)) {
            if (control->pdapi_started) {
                control->pdapi_started = 0;
                strm->pd_api_started = 0;
            }
            if (control->on_strm_q == SCTP_ON_UNORDERED) {
                /* Unordered */
                TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
                control->on_strm_q = 0;
            } else if (control->on_strm_q == SCTP_ON_ORDERED) {
                /* Ordered */
                TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
                control->on_strm_q = 0;
#ifdef INVARIANTS
            } else if (control->on_strm_q) {
                panic("Unknown state on ctrl: %p on_strm_q: %d", control,
                    control->on_strm_q);
#endif
            }
        }
        control->end_added = 1;
        control->last_frag_seen = 1;
    }
    if (i_locked) {
        SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
    }
    sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
}
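/*
 * Accounting note (illustrative): merging, say, a 1200-byte chunk moves
 * those bytes from the reassembly-queue counters (size_on_reasm_queue,
 * cnt_on_reasm_queue) onto the control's own length, so the same data is
 * never counted twice when sctp_calc_rwnd() figures the advertised
 * window. The read-queue lock is only taken here when the control is
 * already visible to a reader via the partial-delivery API.
 */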
/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so, pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int created_control,
    int *abort_flag, uint32_t tsn)
{
    uint32_t next_fsn;
    struct sctp_tmit_chunk *at, *nat;
    int do_wakeup, unordered;

    /*
     * For old un-ordered data chunks.
     */
    if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
        unordered = 1;
    } else {
        unordered = 0;
    }
    /* Must be added to the stream-in queue */
    if (created_control) {
        if (sctp_place_control_in_stream(strm, asoc, control)) {
            /* Duplicate SSN? */
            sctp_clean_up_control(stcb, control);
            sctp_abort_in_reasm(stcb, control, chk,
                abort_flag,
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
            return;
        }
        if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
            /*
             * Ok we created this control and now let's
             * validate that it is legal, i.e. there is a B bit
             * set; if not, and we have up to the cum-ack, then
             * it is invalid.
             */
            if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
                sctp_abort_in_reasm(stcb, control, chk,
                    abort_flag,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
                return;
            }
        }
    }
    if ((asoc->idata_supported == 0) && (unordered == 1)) {
        sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
        return;
    }
    /*
     * Ok we must queue the chunk into the reassembly portion:
     *  o if it is the first, it goes to the control mbuf;
     *  o if it is not first but the next in sequence, it goes to the
     *    control, and each succeeding one in order also goes;
     *  o if it is not in order, we place it on the list in its place.
     */
    if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
        /* Its the very first one. */
        SCTPDBG(SCTP_DEBUG_XXX,
            "chunk is a first fsn: %u becomes fsn_included\n",
            chk->rec.data.fsn_num);
        if (control->first_frag_seen) {
            /*
             * Error on sender's part: they either sent us two
             * data chunks with FIRST, or they sent two
             * un-ordered chunks that were fragmented at the
             * same time in the same stream.
             */
            sctp_abort_in_reasm(stcb, control, chk,
                abort_flag,
                SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
            return;
        }
        control->first_frag_seen = 1;
        control->fsn_included = chk->rec.data.fsn_num;
        control->data = chk->data;
        sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
        chk->data = NULL;
        sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
        sctp_setup_tail_pointer(control);
    } else {
        /* Place the chunk in our list */
        int inserted = 0;

        if (control->last_frag_seen == 0) {
            /* Still willing to raise highest FSN seen */
            if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
                SCTPDBG(SCTP_DEBUG_XXX,
                    "We have a new top_fsn: %u\n",
                    chk->rec.data.fsn_num);
                control->top_fsn = chk->rec.data.fsn_num;
            }
            if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
                SCTPDBG(SCTP_DEBUG_XXX,
                    "The last fsn is now in place fsn: %u\n",
                    chk->rec.data.fsn_num);
                control->last_frag_seen = 1;
            }
            if (asoc->idata_supported || control->first_frag_seen) {
                /*
                 * For IDATA we always check since we know
                 * that the first fragment is 0. For old
                 * DATA we have to receive the first before
                 * we know the first FSN (which is the TSN).
                 */
                if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
                    /*
                     * We have already delivered up to
                     * this so it is a dup
                     */
                    sctp_abort_in_reasm(stcb, control, chk,
                        abort_flag,
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
                    return;
                }
            }
        } else {
            if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
                /* Second last? huh? */
                SCTPDBG(SCTP_DEBUG_XXX,
                    "Duplicate last fsn: %u (top: %u) -- abort\n",
                    chk->rec.data.fsn_num, control->top_fsn);
                sctp_abort_in_reasm(stcb, control,
                    chk, abort_flag,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
                return;
            }
            if (asoc->idata_supported || control->first_frag_seen) {
                /*
                 * For IDATA we always check since we know
                 * that the first fragment is 0. For old
                 * DATA we have to receive the first before
                 * we know the first FSN (which is the TSN).
                 */
                if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
                    /*
                     * We have already delivered up to
                     * this so it is a dup
                     */
                    SCTPDBG(SCTP_DEBUG_XXX,
                        "New fsn: %u is already seen in included_fsn: %u -- abort\n",
                        chk->rec.data.fsn_num, control->fsn_included);
                    sctp_abort_in_reasm(stcb, control, chk,
                        abort_flag,
                        SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
                    return;
                }
            }
            /*
             * validate not beyond top FSN if we have seen last
             * one
             */
            if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
                SCTPDBG(SCTP_DEBUG_XXX,
                    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
                    chk->rec.data.fsn_num,
                    control->top_fsn);
                sctp_abort_in_reasm(stcb, control, chk,
                    abort_flag,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
                return;
            }
        }
        /*
         * If we reach here, we need to place the new chunk in the
         * reassembly for this control.
         */
        SCTPDBG(SCTP_DEBUG_XXX,
            "chunk is a not first fsn: %u needs to be inserted\n",
            chk->rec.data.fsn_num);
        TAILQ_FOREACH(at, &control->reasm, sctp_next) {
            if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
                /*
                 * This one in queue is bigger than the new
                 * one, insert the new one before at.
                 */
                SCTPDBG(SCTP_DEBUG_XXX,
                    "Insert it before fsn: %u\n",
                    at->rec.data.fsn_num);
                asoc->size_on_reasm_queue += chk->send_size;
                sctp_ucount_incr(asoc->cnt_on_reasm_queue);
                TAILQ_INSERT_BEFORE(at, chk, sctp_next);
                inserted = 1;
                break;
            } else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
                /*
                 * Gak, he sent me a duplicate stream
                 * sequence number.
                 */
                /*
                 * foo bar, I guess I will just free this
                 * new guy, should we abort too? FIX ME
                 * MAYBE? Or it COULD be that the SSNs have
                 * wrapped. Maybe I should compare to TSN
                 * somehow... sigh, for now just blow away
                 * the chunk!
                 */
                SCTPDBG(SCTP_DEBUG_XXX,
                    "Duplicate to fsn: %u -- abort\n",
                    at->rec.data.fsn_num);
                sctp_abort_in_reasm(stcb, control,
                    chk, abort_flag,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
                return;
            }
        }
        if (inserted == 0) {
            /* Goes on the end */
            SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
                chk->rec.data.fsn_num);
            asoc->size_on_reasm_queue += chk->send_size;
            sctp_ucount_incr(asoc->cnt_on_reasm_queue);
            TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
        }
    }
    /*
     * Ok let's see if we can suck any up into the control structure
     * that are in sequence, if it makes sense.
     */
    do_wakeup = 0;
    /*
     * If the first fragment has not been seen there is no sense in
     * looking.
     */
    if (control->first_frag_seen) {
        next_fsn = control->fsn_included + 1;
        TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
            if (at->rec.data.fsn_num == next_fsn) {
                /* We can add this one now to the control */
                SCTPDBG(SCTP_DEBUG_XXX,
                    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
                    control, at,
                    at->rec.data.fsn_num,
                    next_fsn, control->fsn_included);
                TAILQ_REMOVE(&control->reasm, at, sctp_next);
                sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
                if (control->on_read_q) {
                    do_wakeup = 1;
                }
                next_fsn++;
                if (control->end_added && control->pdapi_started) {
                    if (strm->pd_api_started) {
                        strm->pd_api_started = 0;
                        control->pdapi_started = 0;
                    }
                    if (control->on_read_q == 0) {
                        sctp_add_to_readq(stcb->sctp_ep, stcb,
                            control,
                            &stcb->sctp_socket->so_rcv, control->end_added,
                            SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
                        do_wakeup = 1;
                    }
                    break;
                }
            } else {
                break;
            }
        }
    }
    if (do_wakeup) {
        /* Need to wakeup the reader */
        sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
    }
}

static struct sctp_queued_to_read *
sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t msg_id, int ordered, int old)
{
    struct sctp_queued_to_read *control;

    if (ordered) {
        TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
            if (control->msg_id == msg_id) {
                break;
            }
        }
    } else {
        if (old) {
            control = TAILQ_FIRST(&strm->uno_inqueue);
            return (control);
        }
        TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
            if (control->msg_id == msg_id) {
                break;
            }
        }
    }
    return (control);
}

static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, int chk_length,
    struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
    int *break_flag, int last_chunk, uint8_t chtype)
{
    /* Process a data chunk */
    /* struct sctp_tmit_chunk *chk; */
    struct sctp_data_chunk *ch;
    struct sctp_idata_chunk *nch, chunk_buf;
    struct sctp_tmit_chunk *chk;
    uint32_t tsn, fsn, gap, msg_id;
    struct mbuf *dmbuf;
    int the_len;
    int need_reasm_check = 0;
    uint16_t strmno;
    struct mbuf *op_err;
    char msg[SCTP_DIAG_INFO_LEN];
    struct sctp_queued_to_read *control = NULL;
    uint32_t protocol_id;
    uint8_t chunk_flags;
    struct sctp_stream_reset_list *liste;
    struct sctp_stream_in *strm;
    int ordered;
    size_t clen;
    int created_control = 0;
    uint8_t old_data;

    chk = NULL;
    if (chtype == SCTP_IDATA) {
        nch = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
            sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
        ch = (struct sctp_data_chunk *)nch;
        clen = sizeof(struct sctp_idata_chunk);
        tsn = ntohl(ch->dp.tsn);
        msg_id = ntohl(nch->dp.msg_id);
        protocol_id = nch->dp.ppid_fsn.protocol_id;
        if (ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG)
            fsn = 0;
        else
            fsn = ntohl(nch->dp.ppid_fsn.fsn);
        old_data = 0;
    } else {
        ch = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
            sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
        tsn = ntohl(ch->dp.tsn);
        protocol_id = ch->dp.protocol_id;
        clen = sizeof(struct sctp_data_chunk);
        fsn = tsn;
        msg_id = (uint32_t)(ntohs(ch->dp.stream_sequence));
        nch = NULL;
        old_data = 1;
    }
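    /*
     * Header recap (informational): a DATA chunk carries TSN, SID, a
     * 16-bit SSN, and PPID, so for old DATA the FSN used in reassembly
     * is simply the TSN. An I-DATA chunk instead carries a 32-bit MID
     * and a PPID/FSN union, where a FIRST fragment uses the field as
     * PPID and every other fragment uses it as an explicit FSN starting
     * at 0, which is why fsn is forced to 0 for FIRST fragments above.
     */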
    chunk_flags = ch->ch.chunk_flags;
    if ((size_t)chk_length == clen) {
        /*
         * Need to send an abort since we had an empty data chunk.
         */
        op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
        sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
        *abort_flag = 1;
        return (0);
    }
    if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
        asoc->send_sack = 1;
    }
    ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
        sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
    }
    if (stcb == NULL) {
        return (0);
    }
    SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
    if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
        /* It is a duplicate */
        SCTP_STAT_INCR(sctps_recvdupdata);
        if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
            /* Record a dup for the next outbound sack */
            asoc->dup_tsns[asoc->numduptsns] = tsn;
            asoc->numduptsns++;
        }
        asoc->send_sack = 1;
        return (0);
    }
    /* Calculate the number of TSN's between the base and this TSN */
    SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
    if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
        /* Can't hold the bit in the mapping at max array, toss it */
        return (0);
    }
    if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
        SCTP_TCB_LOCK_ASSERT(stcb);
        if (sctp_expand_mapping_array(asoc, gap)) {
            /* Can't expand, drop it */
            return (0);
        }
    }
    if (SCTP_TSN_GT(tsn, *high_tsn)) {
        *high_tsn = tsn;
    }
    /* See if we have received this one already */
    if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
        SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
        SCTP_STAT_INCR(sctps_recvdupdata);
        if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
            /* Record a dup for the next outbound sack */
            asoc->dup_tsns[asoc->numduptsns] = tsn;
            asoc->numduptsns++;
        }
        asoc->send_sack = 1;
        return (0);
    }
    /*
     * Check to see about the GONE flag; duplicates would cause a SACK
     * to be sent up above.
     */
    if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
        (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
        (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
        /*
         * Wait a minute, this guy is gone; there is no longer a
         * receiver. Send peer an ABORT!
         */
        op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
        sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
        *abort_flag = 1;
        return (0);
    }
    /*
     * Now before going further we see if there is room. If NOT then we
     * MAY let one through only IF this TSN is the one we are waiting
     * for on a partial delivery API.
     */

    /* Is the stream valid? */
    strmno = ntohs(ch->dp.stream_id);

    if (strmno >= asoc->streamincnt) {
        struct sctp_error_invalid_stream *cause;

        op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
            0, M_NOWAIT, 1, MT_DATA);
        if (op_err != NULL) {
            /* add some space up front so prepend will work well */
            SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
            cause = mtod(op_err, struct sctp_error_invalid_stream *);
            /*
             * Error causes are just param's and this one has
             * two back-to-back phdr: one with the error type
             * and size, the other with the stream id and a rsvd
             * field.
             */
            SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
            cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
            cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
            cause->stream_id = ch->dp.stream_id;
            cause->reserved = htons(0);
            sctp_queue_op_err(stcb, op_err);
        }
        SCTP_STAT_INCR(sctps_badsid);
        SCTP_TCB_LOCK_ASSERT(stcb);
        SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
        if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
            asoc->highest_tsn_inside_nr_map = tsn;
        }
        if (tsn == (asoc->cumulative_tsn + 1)) {
            /* Update cum-ack */
            asoc->cumulative_tsn = tsn;
        }
        return (0);
    }
    strm = &asoc->strmin[strmno];
    /*
     * If it is a fragmented message, let's see if we can find the
     * control on the reassembly queues.
     */
    if ((chtype == SCTP_IDATA) &&
        ((chunk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
        (fsn == 0)) {
        /*
         * The first *must* be fsn 0, and other (middle/end) pieces
         * can *not* be fsn 0. XXX: This can happen in case of a
         * wrap around. Ignore it for now.
         */
        snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
            msg_id, chunk_flags);
        goto err_out;
    }
    control = sctp_find_reasm_entry(strm, msg_id, ordered, old_data);
    SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
        chunk_flags, control);
    if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
        /* See if we can find the re-assembly entity */
        if (control != NULL) {
            /* We found something, does it belong? */
            if (ordered && (msg_id != control->sinfo_ssn)) {
                snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", msg_id);
err_out:
                op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
                stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
                sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
                *abort_flag = 1;
                return (0);
            }
            if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
                /*
                 * We can't have a switched order with an
                 * unordered chunk
                 */
                snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
                    tsn);
                goto err_out;
            }
            if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
                /*
                 * We can't have a switched unordered with an
                 * ordered chunk
                 */
                snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
                    tsn);
                goto err_out;
            }
        }
    } else {
        /*
         * It is a complete segment. Let's validate that we don't
         * have a re-assembly going on with the same Stream/Seq (for
         * ordered) or in the same Stream for unordered.
         */
1804 */ 1805 if (control != NULL) { 1806 if (ordered || (old_data == 0)) { 1807 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on msg_id: %u\n", 1808 chunk_flags, msg_id); 1809 snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", msg_id); 1810 goto err_out; 1811 } else { 1812 if ((tsn == control->fsn_included + 1) && 1813 (control->end_added == 0)) { 1814 snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included); 1815 goto err_out; 1816 } else { 1817 control = NULL; 1818 } 1819 } 1820 } 1821 } 1822 /* now do the tests */ 1823 if (((asoc->cnt_on_all_streams + 1824 asoc->cnt_on_reasm_queue + 1825 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) || 1826 (((int)asoc->my_rwnd) <= 0)) { 1827 /* 1828 * When we have NO room in the rwnd we check to make sure 1829 * the reader is doing its job... 1830 */ 1831 if (stcb->sctp_socket->so_rcv.sb_cc) { 1832 /* some to read, wake-up */ 1833 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1834 struct socket *so; 1835 1836 so = SCTP_INP_SO(stcb->sctp_ep); 1837 atomic_add_int(&stcb->asoc.refcnt, 1); 1838 SCTP_TCB_UNLOCK(stcb); 1839 SCTP_SOCKET_LOCK(so, 1); 1840 SCTP_TCB_LOCK(stcb); 1841 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1842 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 1843 /* assoc was freed while we were unlocked */ 1844 SCTP_SOCKET_UNLOCK(so, 1); 1845 return (0); 1846 } 1847 #endif 1848 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 1849 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1850 SCTP_SOCKET_UNLOCK(so, 1); 1851 #endif 1852 } 1853 /* now is it in the mapping array of what we have accepted? */ 1854 if (nch == NULL) { 1855 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) && 1856 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1857 /* Nope not in the valid range dump it */ 1858 dump_packet: 1859 sctp_set_rwnd(stcb, asoc); 1860 if ((asoc->cnt_on_all_streams + 1861 asoc->cnt_on_reasm_queue + 1862 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) { 1863 SCTP_STAT_INCR(sctps_datadropchklmt); 1864 } else { 1865 SCTP_STAT_INCR(sctps_datadroprwnd); 1866 } 1867 *break_flag = 1; 1868 return (0); 1869 } 1870 } else { 1871 if (control == NULL) { 1872 goto dump_packet; 1873 } 1874 if (SCTP_TSN_GT(fsn, control->top_fsn)) { 1875 goto dump_packet; 1876 } 1877 } 1878 } 1879 #ifdef SCTP_ASOCLOG_OF_TSNS 1880 SCTP_TCB_LOCK_ASSERT(stcb); 1881 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) { 1882 asoc->tsn_in_at = 0; 1883 asoc->tsn_in_wrapped = 1; 1884 } 1885 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn; 1886 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno; 1887 asoc->in_tsnlog[asoc->tsn_in_at].seq = msg_id; 1888 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length; 1889 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags; 1890 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb; 1891 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at; 1892 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1; 1893 asoc->tsn_in_at++; 1894 #endif 1895 /* 1896 * Before we continue lets validate that we are not being fooled by 1897 * an evil attacker. We can only have Nk chunks based on our TSN 1898 * spread allowed by the mapping array N * 8 bits, so there is no 1899 * way our stream sequence numbers could have wrapped. We of course 1900 * only validate the FIRST fragment so the bit must be set. 
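 * For example, with a 512-byte mapping array at most 512 * 8 = 4096
 * TSNs can be outstanding beyond the base, far fewer than the 65536
 * values of a 16-bit SSN, so a legitimate sender cannot wrap the
 * stream sequence space inside the window (sizes illustrative; the
 * array is grown on demand up to the SCTP_MAPPING_ARRAY limit).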
1901 */ 1902 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) && 1903 (TAILQ_EMPTY(&asoc->resetHead)) && 1904 (chunk_flags & SCTP_DATA_UNORDERED) == 0 && 1905 SCTP_MSGID_GE(old_data, asoc->strmin[strmno].last_sequence_delivered, msg_id)) { 1906 /* The incoming sseq is behind where we last delivered? */ 1907 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n", 1908 msg_id, asoc->strmin[strmno].last_sequence_delivered); 1909 1910 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", 1911 asoc->strmin[strmno].last_sequence_delivered, 1912 tsn, strmno, msg_id); 1913 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 1914 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16; 1915 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 1916 *abort_flag = 1; 1917 return (0); 1918 } 1919 /************************************ 1920 * From here down we may find ch-> invalid 1921 * so its a good idea NOT to use it. 1922 *************************************/ 1923 if (nch) { 1924 the_len = (chk_length - sizeof(struct sctp_idata_chunk)); 1925 } else { 1926 the_len = (chk_length - sizeof(struct sctp_data_chunk)); 1927 } 1928 if (last_chunk == 0) { 1929 if (nch) { 1930 dmbuf = SCTP_M_COPYM(*m, 1931 (offset + sizeof(struct sctp_idata_chunk)), 1932 the_len, M_NOWAIT); 1933 } else { 1934 dmbuf = SCTP_M_COPYM(*m, 1935 (offset + sizeof(struct sctp_data_chunk)), 1936 the_len, M_NOWAIT); 1937 } 1938 #ifdef SCTP_MBUF_LOGGING 1939 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 1940 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY); 1941 } 1942 #endif 1943 } else { 1944 /* We can steal the last chunk */ 1945 int l_len; 1946 1947 dmbuf = *m; 1948 /* lop off the top part */ 1949 if (nch) { 1950 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk))); 1951 } else { 1952 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk))); 1953 } 1954 if (SCTP_BUF_NEXT(dmbuf) == NULL) { 1955 l_len = SCTP_BUF_LEN(dmbuf); 1956 } else { 1957 /* 1958 * need to count up the size hopefully does not hit 1959 * this to often :-0 1960 */ 1961 struct mbuf *lat; 1962 1963 l_len = 0; 1964 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) { 1965 l_len += SCTP_BUF_LEN(lat); 1966 } 1967 } 1968 if (l_len > the_len) { 1969 /* Trim the end round bytes off too */ 1970 m_adj(dmbuf, -(l_len - the_len)); 1971 } 1972 } 1973 if (dmbuf == NULL) { 1974 SCTP_STAT_INCR(sctps_nomem); 1975 return (0); 1976 } 1977 /* 1978 * Now no matter what we need a control, get one if we don't have 1979 * one (we may have gotten it above when we found the message was 1980 * fragmented 1981 */ 1982 if (control == NULL) { 1983 sctp_alloc_a_readq(stcb, control); 1984 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 1985 protocol_id, 1986 strmno, msg_id, 1987 chunk_flags, 1988 NULL, fsn, msg_id); 1989 if (control == NULL) { 1990 SCTP_STAT_INCR(sctps_nomem); 1991 return (0); 1992 } 1993 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 1994 control->data = dmbuf; 1995 control->tail_mbuf = NULL; 1996 control->end_added = control->last_frag_seen = control->first_frag_seen = 1; 1997 control->top_fsn = control->fsn_included = fsn; 1998 } 1999 created_control = 1; 2000 } 2001 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d msgid: %u control: %p\n", 2002 chunk_flags, ordered, msg_id, control); 2003 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG && 2004 TAILQ_EMPTY(&asoc->resetHead) && 2005 ((ordered == 0) || 
2006 ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == msg_id && 2007 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) { 2008 /* Candidate for express delivery */ 2009 /* 2010 * Its not fragmented, No PD-API is up, Nothing in the 2011 * delivery queue, Its un-ordered OR ordered and the next to 2012 * deliver AND nothing else is stuck on the stream queue, 2013 * And there is room for it in the socket buffer. Lets just 2014 * stuff it up the buffer.... 2015 */ 2016 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 2017 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 2018 asoc->highest_tsn_inside_nr_map = tsn; 2019 } 2020 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (msg_id: %u)\n", 2021 control, msg_id); 2022 2023 sctp_add_to_readq(stcb->sctp_ep, stcb, 2024 control, &stcb->sctp_socket->so_rcv, 2025 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 2026 2027 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) { 2028 /* for ordered, bump what we delivered */ 2029 strm->last_sequence_delivered++; 2030 } 2031 SCTP_STAT_INCR(sctps_recvexpress); 2032 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 2033 sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno, 2034 SCTP_STR_LOG_FROM_EXPRS_DEL); 2035 } 2036 control = NULL; 2037 goto finish_express_del; 2038 } 2039 /* Now will we need a chunk too? */ 2040 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 2041 sctp_alloc_a_chunk(stcb, chk); 2042 if (chk == NULL) { 2043 /* No memory so we drop the chunk */ 2044 SCTP_STAT_INCR(sctps_nomem); 2045 if (last_chunk == 0) { 2046 /* we copied it, free the copy */ 2047 sctp_m_freem(dmbuf); 2048 } 2049 return (0); 2050 } 2051 chk->rec.data.TSN_seq = tsn; 2052 chk->no_fr_allowed = 0; 2053 chk->rec.data.fsn_num = fsn; 2054 chk->rec.data.stream_seq = msg_id; 2055 chk->rec.data.stream_number = strmno; 2056 chk->rec.data.payloadtype = protocol_id; 2057 chk->rec.data.context = stcb->asoc.context; 2058 chk->rec.data.doing_fast_retransmit = 0; 2059 chk->rec.data.rcv_flags = chunk_flags; 2060 chk->asoc = asoc; 2061 chk->send_size = the_len; 2062 chk->whoTo = net; 2063 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (msg_id: %u)\n", 2064 chk, 2065 control, msg_id); 2066 atomic_add_int(&net->ref_count, 1); 2067 chk->data = dmbuf; 2068 } 2069 /* Set the appropriate TSN mark */ 2070 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { 2071 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 2072 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 2073 asoc->highest_tsn_inside_nr_map = tsn; 2074 } 2075 } else { 2076 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); 2077 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) { 2078 asoc->highest_tsn_inside_map = tsn; 2079 } 2080 } 2081 /* Now is it complete (i.e. not fragmented)? */ 2082 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 2083 /* 2084 * Special check for when streams are resetting. We could be 2085 * more smart about this and check the actual stream to see 2086 * if it is not being reset.. that way we would not create a 2087 * HOLB when amongst streams being reset and those not being 2088 * reset. 2089 * 2090 */ 2091 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2092 SCTP_TSN_GT(tsn, liste->tsn)) { 2093 /* 2094 * yep its past where we need to reset... go ahead 2095 * and queue it. 
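 * Entries parked here are kept sorted by TSN so that, once the
 * reset completes, they can be replayed to the stream queues in
 * arrival order (see the resetHead processing further down).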
2096 */ 2097 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) { 2098 /* first one on */ 2099 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2100 } else { 2101 struct sctp_queued_to_read *ctlOn, *nctlOn; 2102 unsigned char inserted = 0; 2103 2104 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) { 2105 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) { 2106 2107 continue; 2108 } else { 2109 /* found it */ 2110 TAILQ_INSERT_BEFORE(ctlOn, control, next); 2111 inserted = 1; 2112 break; 2113 } 2114 } 2115 if (inserted == 0) { 2116 /* 2117 * must be put at end, use prevP 2118 * (all setup from loop) to setup 2119 * nextP. 2120 */ 2121 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2122 } 2123 } 2124 goto finish_express_del; 2125 } 2126 if (chunk_flags & SCTP_DATA_UNORDERED) { 2127 /* queue directly into socket buffer */ 2128 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p msg_id: %u\n", 2129 control, msg_id); 2130 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 2131 sctp_add_to_readq(stcb->sctp_ep, stcb, 2132 control, 2133 &stcb->sctp_socket->so_rcv, 1, 2134 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 2135 2136 } else { 2137 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering msg_id: %u\n", control, 2138 msg_id); 2139 sctp_queue_data_to_stream(stcb, strm, asoc, control, abort_flag, &need_reasm_check); 2140 if (*abort_flag) { 2141 if (last_chunk) { 2142 *m = NULL; 2143 } 2144 return (0); 2145 } 2146 } 2147 goto finish_express_del; 2148 } 2149 /* If we reach here its a reassembly */ 2150 need_reasm_check = 1; 2151 SCTPDBG(SCTP_DEBUG_XXX, 2152 "Queue data to stream for reasm control: %p msg_id: %u\n", 2153 control, msg_id); 2154 sctp_queue_data_for_reasm(stcb, asoc, strm, control, chk, created_control, abort_flag, tsn); 2155 if (*abort_flag) { 2156 /* 2157 * the assoc is now gone and chk was put onto the reasm 2158 * queue, which has all been freed. 2159 */ 2160 if (last_chunk) { 2161 *m = NULL; 2162 } 2163 return (0); 2164 } 2165 finish_express_del: 2166 /* Here we tidy up things */ 2167 if (tsn == (asoc->cumulative_tsn + 1)) { 2168 /* Update cum-ack */ 2169 asoc->cumulative_tsn = tsn; 2170 } 2171 if (last_chunk) { 2172 *m = NULL; 2173 } 2174 if (ordered) { 2175 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); 2176 } else { 2177 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); 2178 } 2179 SCTP_STAT_INCR(sctps_recvdata); 2180 /* Set it present please */ 2181 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 2182 sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno, SCTP_STR_LOG_FROM_MARK_TSN); 2183 } 2184 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2185 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2186 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); 2187 } 2188 /* check the special flag for stream resets */ 2189 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2190 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) { 2191 /* 2192 * we have finished working through the backlogged TSN's now 2193 * time to reset streams. 1: call reset function. 2: free 2194 * pending_reply space 3: distribute any chunks in 2195 * pending_reply_queue. 
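 * If another reset is still pending after this one, only queued
 * entries with TSNs at or before the next reset point are pushed
 * to their streams; the rest stay parked for the following pass.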
2196 */ 2197 struct sctp_queued_to_read *ctl, *nctl; 2198 2199 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams); 2200 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); 2201 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED); 2202 SCTP_FREE(liste, SCTP_M_STRESET); 2203 /* sa_ignore FREED_MEMORY */ 2204 liste = TAILQ_FIRST(&asoc->resetHead); 2205 if (TAILQ_EMPTY(&asoc->resetHead)) { 2206 /* All can be removed */ 2207 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) { 2208 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 2209 sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check); 2210 if (*abort_flag) { 2211 return (0); 2212 } 2213 } 2214 } else { 2215 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) { 2216 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) { 2217 break; 2218 } 2219 /* 2220 * if ctl->sinfo_tsn is <= liste->tsn we can 2221 * process it which is the NOT of 2222 * ctl->sinfo_tsn > liste->tsn 2223 */ 2224 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 2225 sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check); 2226 if (*abort_flag) { 2227 return (0); 2228 } 2229 } 2230 } 2231 /* 2232 * Now service re-assembly to pick up anything that has been 2233 * held on reassembly queue? 2234 */ 2235 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD); 2236 need_reasm_check = 0; 2237 } 2238 if (need_reasm_check) { 2239 /* Another one waits ? */ 2240 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD); 2241 } 2242 return (1); 2243 } 2244 2245 static const int8_t sctp_map_lookup_tab[256] = { 2246 0, 1, 0, 2, 0, 1, 0, 3, 2247 0, 1, 0, 2, 0, 1, 0, 4, 2248 0, 1, 0, 2, 0, 1, 0, 3, 2249 0, 1, 0, 2, 0, 1, 0, 5, 2250 0, 1, 0, 2, 0, 1, 0, 3, 2251 0, 1, 0, 2, 0, 1, 0, 4, 2252 0, 1, 0, 2, 0, 1, 0, 3, 2253 0, 1, 0, 2, 0, 1, 0, 6, 2254 0, 1, 0, 2, 0, 1, 0, 3, 2255 0, 1, 0, 2, 0, 1, 0, 4, 2256 0, 1, 0, 2, 0, 1, 0, 3, 2257 0, 1, 0, 2, 0, 1, 0, 5, 2258 0, 1, 0, 2, 0, 1, 0, 3, 2259 0, 1, 0, 2, 0, 1, 0, 4, 2260 0, 1, 0, 2, 0, 1, 0, 3, 2261 0, 1, 0, 2, 0, 1, 0, 7, 2262 0, 1, 0, 2, 0, 1, 0, 3, 2263 0, 1, 0, 2, 0, 1, 0, 4, 2264 0, 1, 0, 2, 0, 1, 0, 3, 2265 0, 1, 0, 2, 0, 1, 0, 5, 2266 0, 1, 0, 2, 0, 1, 0, 3, 2267 0, 1, 0, 2, 0, 1, 0, 4, 2268 0, 1, 0, 2, 0, 1, 0, 3, 2269 0, 1, 0, 2, 0, 1, 0, 6, 2270 0, 1, 0, 2, 0, 1, 0, 3, 2271 0, 1, 0, 2, 0, 1, 0, 4, 2272 0, 1, 0, 2, 0, 1, 0, 3, 2273 0, 1, 0, 2, 0, 1, 0, 5, 2274 0, 1, 0, 2, 0, 1, 0, 3, 2275 0, 1, 0, 2, 0, 1, 0, 4, 2276 0, 1, 0, 2, 0, 1, 0, 3, 2277 0, 1, 0, 2, 0, 1, 0, 8 2278 }; 2279 2280 2281 void 2282 sctp_slide_mapping_arrays(struct sctp_tcb *stcb) 2283 { 2284 /* 2285 * Now we also need to check the mapping array in a couple of ways. 2286 * 1) Did we move the cum-ack point? 2287 * 2288 * When you first glance at this you might think that all entries that 2289 * make up the position of the cum-ack would be in the nr-mapping 2290 * array only.. i.e. things up to the cum-ack are always 2291 * deliverable. Thats true with one exception, when its a fragmented 2292 * message we may not deliver the data until some threshold (or all 2293 * of it) is in place. So we must OR the nr_mapping_array and 2294 * mapping_array to get a true picture of the cum-ack. 
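 * The scan below ORs the two maps byte by byte. For a byte other
 * than 0xff, sctp_map_lookup_tab[val] is the number of consecutive
 * 1-bits starting at bit 0; e.g. val = 0x3f (00111111) yields 6 and
 * val = 0x0b (00001011) yields 2, so 'at' ends up as the count of
 * contiguously received TSNs above the array base. The gap offsets
 * themselves come from the modular subtraction in
 * SCTP_CALC_TSN_TO_GAP, e.g. base = 0xfffffffe and tsn = 0x00000001
 * gives gap = 3 across the wrap.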
2295 */ 2296 struct sctp_association *asoc; 2297 int at; 2298 uint8_t val; 2299 int slide_from, slide_end, lgap, distance; 2300 uint32_t old_cumack, old_base, old_highest, highest_tsn; 2301 2302 asoc = &stcb->asoc; 2303 2304 old_cumack = asoc->cumulative_tsn; 2305 old_base = asoc->mapping_array_base_tsn; 2306 old_highest = asoc->highest_tsn_inside_map; 2307 /* 2308 * We could probably improve this a small bit by calculating the 2309 * offset of the current cum-ack as the starting point. 2310 */ 2311 at = 0; 2312 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) { 2313 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from]; 2314 if (val == 0xff) { 2315 at += 8; 2316 } else { 2317 /* there is a 0 bit */ 2318 at += sctp_map_lookup_tab[val]; 2319 break; 2320 } 2321 } 2322 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1); 2323 2324 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) && 2325 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) { 2326 #ifdef INVARIANTS 2327 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map", 2328 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2329 #else 2330 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n", 2331 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2332 sctp_print_mapping_array(asoc); 2333 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2334 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 2335 } 2336 asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2337 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn; 2338 #endif 2339 } 2340 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2341 highest_tsn = asoc->highest_tsn_inside_nr_map; 2342 } else { 2343 highest_tsn = asoc->highest_tsn_inside_map; 2344 } 2345 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) { 2346 /* The complete array was completed by a single FR */ 2347 /* highest becomes the cum-ack */ 2348 int clr; 2349 2350 #ifdef INVARIANTS 2351 unsigned int i; 2352 2353 #endif 2354 2355 /* clear the array */ 2356 clr = ((at + 7) >> 3); 2357 if (clr > asoc->mapping_array_size) { 2358 clr = asoc->mapping_array_size; 2359 } 2360 memset(asoc->mapping_array, 0, clr); 2361 memset(asoc->nr_mapping_array, 0, clr); 2362 #ifdef INVARIANTS 2363 for (i = 0; i < asoc->mapping_array_size; i++) { 2364 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) { 2365 SCTP_PRINTF("Error Mapping array's not clean at clear\n"); 2366 sctp_print_mapping_array(asoc); 2367 } 2368 } 2369 #endif 2370 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; 2371 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2372 } else if (at >= 8) { 2373 /* we can slide the mapping array down */ 2374 /* slide_from holds where we hit the first NON 0xff byte */ 2375 2376 /* 2377 * now calculate the ceiling of the move using our highest 2378 * TSN value 2379 */ 2380 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn); 2381 slide_end = (lgap >> 3); 2382 if (slide_end < slide_from) { 2383 sctp_print_mapping_array(asoc); 2384 #ifdef INVARIANTS 2385 panic("impossible slide"); 2386 #else 2387 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? 
at: %d\n", 2388 lgap, slide_end, slide_from, at); 2389 return; 2390 #endif 2391 } 2392 if (slide_end > asoc->mapping_array_size) { 2393 #ifdef INVARIANTS 2394 panic("would overrun buffer"); 2395 #else 2396 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n", 2397 asoc->mapping_array_size, slide_end); 2398 slide_end = asoc->mapping_array_size; 2399 #endif 2400 } 2401 distance = (slide_end - slide_from) + 1; 2402 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2403 sctp_log_map(old_base, old_cumack, old_highest, 2404 SCTP_MAP_PREPARE_SLIDE); 2405 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end, 2406 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM); 2407 } 2408 if (distance + slide_from > asoc->mapping_array_size || 2409 distance < 0) { 2410 /* 2411 * Here we do NOT slide forward the array so that 2412 * hopefully when more data comes in to fill it up 2413 * we will be able to slide it forward. Really I 2414 * don't think this should happen :-0 2415 */ 2416 2417 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2418 sctp_log_map((uint32_t) distance, (uint32_t) slide_from, 2419 (uint32_t) asoc->mapping_array_size, 2420 SCTP_MAP_SLIDE_NONE); 2421 } 2422 } else { 2423 int ii; 2424 2425 for (ii = 0; ii < distance; ii++) { 2426 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii]; 2427 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii]; 2428 2429 } 2430 for (ii = distance; ii < asoc->mapping_array_size; ii++) { 2431 asoc->mapping_array[ii] = 0; 2432 asoc->nr_mapping_array[ii] = 0; 2433 } 2434 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) { 2435 asoc->highest_tsn_inside_map += (slide_from << 3); 2436 } 2437 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) { 2438 asoc->highest_tsn_inside_nr_map += (slide_from << 3); 2439 } 2440 asoc->mapping_array_base_tsn += (slide_from << 3); 2441 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2442 sctp_log_map(asoc->mapping_array_base_tsn, 2443 asoc->cumulative_tsn, asoc->highest_tsn_inside_map, 2444 SCTP_MAP_SLIDE_RESULT); 2445 } 2446 } 2447 } 2448 } 2449 2450 void 2451 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap) 2452 { 2453 struct sctp_association *asoc; 2454 uint32_t highest_tsn; 2455 2456 asoc = &stcb->asoc; 2457 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2458 highest_tsn = asoc->highest_tsn_inside_nr_map; 2459 } else { 2460 highest_tsn = asoc->highest_tsn_inside_map; 2461 } 2462 2463 /* 2464 * Now we need to see if we need to queue a sack or just start the 2465 * timer (if allowed). 2466 */ 2467 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2468 /* 2469 * Ok special case, in SHUTDOWN-SENT case. here we maker 2470 * sure SACK timer is off and instead send a SHUTDOWN and a 2471 * SACK 2472 */ 2473 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2474 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 2475 stcb->sctp_ep, stcb, NULL, 2476 SCTP_FROM_SCTP_INDATA + SCTP_LOC_17); 2477 } 2478 sctp_send_shutdown(stcb, 2479 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination)); 2480 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 2481 } else { 2482 int is_a_gap; 2483 2484 /* is there a gap now ? 
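 * (a gap exists whenever the highest TSN seen in either mapping
 * array lies beyond the cumulative ack point)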
*/ 2485 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 2486 2487 /* 2488 * CMT DAC algorithm: increase number of packets received 2489 * since last ack 2490 */ 2491 stcb->asoc.cmt_dac_pkts_rcvd++; 2492 2493 if ((stcb->asoc.send_sack == 1) || /* We need to send a 2494 * SACK */ 2495 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no 2496 * longer is one */ 2497 (stcb->asoc.numduptsns) || /* we have dup's */ 2498 (is_a_gap) || /* is still a gap */ 2499 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */ 2500 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */ 2501 ) { 2502 2503 if ((stcb->asoc.sctp_cmt_on_off > 0) && 2504 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) && 2505 (stcb->asoc.send_sack == 0) && 2506 (stcb->asoc.numduptsns == 0) && 2507 (stcb->asoc.delayed_ack) && 2508 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) { 2509 2510 /* 2511 * CMT DAC algorithm: With CMT, delay acks 2512 * even in the face of 2513 * 2514 * reordering. Therefore, if acks that do not 2515 * have to be sent because of the above 2516 * reasons, will be delayed. That is, acks 2517 * that would have been sent due to gap 2518 * reports will be delayed with DAC. Start 2519 * the delayed ack timer. 2520 */ 2521 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2522 stcb->sctp_ep, stcb, NULL); 2523 } else { 2524 /* 2525 * Ok we must build a SACK since the timer 2526 * is pending, we got our first packet OR 2527 * there are gaps or duplicates. 2528 */ 2529 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 2530 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 2531 } 2532 } else { 2533 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2534 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2535 stcb->sctp_ep, stcb, NULL); 2536 } 2537 } 2538 } 2539 } 2540 2541 int 2542 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, 2543 struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2544 struct sctp_nets *net, uint32_t * high_tsn) 2545 { 2546 struct sctp_chunkhdr *ch, chunk_buf; 2547 struct sctp_association *asoc; 2548 int num_chunks = 0; /* number of control chunks processed */ 2549 int stop_proc = 0; 2550 int chk_length, break_flag, last_chunk; 2551 int abort_flag = 0, was_a_gap; 2552 struct mbuf *m; 2553 uint32_t highest_tsn; 2554 2555 /* set the rwnd */ 2556 sctp_set_rwnd(stcb, &stcb->asoc); 2557 2558 m = *mm; 2559 SCTP_TCB_LOCK_ASSERT(stcb); 2560 asoc = &stcb->asoc; 2561 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2562 highest_tsn = asoc->highest_tsn_inside_nr_map; 2563 } else { 2564 highest_tsn = asoc->highest_tsn_inside_map; 2565 } 2566 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 2567 /* 2568 * setup where we got the last DATA packet from for any SACK that 2569 * may need to go out. Don't bump the net. This is done ONLY when a 2570 * chunk is assigned. 2571 */ 2572 asoc->last_data_chunk_from = net; 2573 2574 /*- 2575 * Now before we proceed we must figure out if this is a wasted 2576 * cluster... i.e. it is a small packet sent in and yet the driver 2577 * underneath allocated a full cluster for it. If so we must copy it 2578 * to a smaller mbuf and free up the cluster mbuf. This will help 2579 * with cluster starvation. Note for __Panda__ we don't do this 2580 * since it has clusters all the way down to 64 bytes. 2581 */ 2582 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) { 2583 /* we only handle mbufs that are singletons.. 
not chains */ 2584 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA); 2585 if (m) { 2586 /* ok lets see if we can copy the data up */ 2587 caddr_t *from, *to; 2588 2589 /* get the pointers and copy */ 2590 to = mtod(m, caddr_t *); 2591 from = mtod((*mm), caddr_t *); 2592 memcpy(to, from, SCTP_BUF_LEN((*mm))); 2593 /* copy the length and free up the old */ 2594 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm)); 2595 sctp_m_freem(*mm); 2596 /* success, back copy */ 2597 *mm = m; 2598 } else { 2599 /* We are in trouble in the mbuf world .. yikes */ 2600 m = *mm; 2601 } 2602 } 2603 /* get pointer to the first chunk header */ 2604 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 2605 sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf); 2606 if (ch == NULL) { 2607 return (1); 2608 } 2609 /* 2610 * process all DATA chunks... 2611 */ 2612 *high_tsn = asoc->cumulative_tsn; 2613 break_flag = 0; 2614 asoc->data_pkts_seen++; 2615 while (stop_proc == 0) { 2616 /* validate chunk length */ 2617 chk_length = ntohs(ch->chunk_length); 2618 if (length - *offset < chk_length) { 2619 /* all done, mutulated chunk */ 2620 stop_proc = 1; 2621 continue; 2622 } 2623 if ((asoc->idata_supported == 1) && 2624 (ch->chunk_type == SCTP_DATA)) { 2625 struct mbuf *op_err; 2626 char msg[SCTP_DIAG_INFO_LEN]; 2627 2628 snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated"); 2629 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2630 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18; 2631 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2632 return (2); 2633 } 2634 if ((asoc->idata_supported == 0) && 2635 (ch->chunk_type == SCTP_IDATA)) { 2636 struct mbuf *op_err; 2637 char msg[SCTP_DIAG_INFO_LEN]; 2638 2639 snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated"); 2640 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2641 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19; 2642 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2643 return (2); 2644 } 2645 if ((ch->chunk_type == SCTP_DATA) || 2646 (ch->chunk_type == SCTP_IDATA)) { 2647 int clen; 2648 2649 if (ch->chunk_type == SCTP_DATA) { 2650 clen = sizeof(struct sctp_data_chunk); 2651 } else { 2652 clen = sizeof(struct sctp_idata_chunk); 2653 } 2654 if (chk_length < clen) { 2655 /* 2656 * Need to send an abort since we had a 2657 * invalid data chunk. 2658 */ 2659 struct mbuf *op_err; 2660 char msg[SCTP_DIAG_INFO_LEN]; 2661 2662 snprintf(msg, sizeof(msg), "DATA chunk of length %d", 2663 chk_length); 2664 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2665 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20; 2666 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2667 return (2); 2668 } 2669 #ifdef SCTP_AUDITING_ENABLED 2670 sctp_audit_log(0xB1, 0); 2671 #endif 2672 if (SCTP_SIZE32(chk_length) == (length - *offset)) { 2673 last_chunk = 1; 2674 } else { 2675 last_chunk = 0; 2676 } 2677 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, 2678 chk_length, net, high_tsn, &abort_flag, &break_flag, 2679 last_chunk, ch->chunk_type)) { 2680 num_chunks++; 2681 } 2682 if (abort_flag) 2683 return (2); 2684 2685 if (break_flag) { 2686 /* 2687 * Set because of out of rwnd space and no 2688 * drop rep space left. 
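 * Setting stop_proc ends the chunk walk for this packet; the
 * break_flag also triggers a PACKET-DROPPED report below so the
 * peer learns the data was discarded for lack of rwnd.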
2689 */ 2690 stop_proc = 1; 2691 continue; 2692 } 2693 } else { 2694 /* not a data chunk in the data region */ 2695 switch (ch->chunk_type) { 2696 case SCTP_INITIATION: 2697 case SCTP_INITIATION_ACK: 2698 case SCTP_SELECTIVE_ACK: 2699 case SCTP_NR_SELECTIVE_ACK: 2700 case SCTP_HEARTBEAT_REQUEST: 2701 case SCTP_HEARTBEAT_ACK: 2702 case SCTP_ABORT_ASSOCIATION: 2703 case SCTP_SHUTDOWN: 2704 case SCTP_SHUTDOWN_ACK: 2705 case SCTP_OPERATION_ERROR: 2706 case SCTP_COOKIE_ECHO: 2707 case SCTP_COOKIE_ACK: 2708 case SCTP_ECN_ECHO: 2709 case SCTP_ECN_CWR: 2710 case SCTP_SHUTDOWN_COMPLETE: 2711 case SCTP_AUTHENTICATION: 2712 case SCTP_ASCONF_ACK: 2713 case SCTP_PACKET_DROPPED: 2714 case SCTP_STREAM_RESET: 2715 case SCTP_FORWARD_CUM_TSN: 2716 case SCTP_ASCONF: 2717 { 2718 /* 2719 * Now, what do we do with KNOWN 2720 * chunks that are NOT in the right 2721 * place? 2722 * 2723 * For now, I do nothing but ignore 2724 * them. We may later want to add 2725 * sysctl stuff to switch out and do 2726 * either an ABORT() or possibly 2727 * process them. 2728 */ 2729 struct mbuf *op_err; 2730 char msg[SCTP_DIAG_INFO_LEN]; 2731 2732 snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x", 2733 ch->chunk_type); 2734 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2735 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2736 return (2); 2737 } 2738 default: 2739 /* unknown chunk type, use bit rules */ 2740 if (ch->chunk_type & 0x40) { 2741 /* Add a error report to the queue */ 2742 struct mbuf *op_err; 2743 struct sctp_gen_error_cause *cause; 2744 2745 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause), 2746 0, M_NOWAIT, 1, MT_DATA); 2747 if (op_err != NULL) { 2748 cause = mtod(op_err, struct sctp_gen_error_cause *); 2749 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK); 2750 cause->length = htons((uint16_t) (chk_length + sizeof(struct sctp_gen_error_cause))); 2751 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause); 2752 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT); 2753 if (SCTP_BUF_NEXT(op_err) != NULL) { 2754 sctp_queue_op_err(stcb, op_err); 2755 } else { 2756 sctp_m_freem(op_err); 2757 } 2758 } 2759 } 2760 if ((ch->chunk_type & 0x80) == 0) { 2761 /* discard the rest of this packet */ 2762 stop_proc = 1; 2763 } /* else skip this bad chunk and 2764 * continue... */ 2765 break; 2766 } /* switch of chunk type */ 2767 } 2768 *offset += SCTP_SIZE32(chk_length); 2769 if ((*offset >= length) || stop_proc) { 2770 /* no more data left in the mbuf chain */ 2771 stop_proc = 1; 2772 continue; 2773 } 2774 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 2775 sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf); 2776 if (ch == NULL) { 2777 *offset = length; 2778 stop_proc = 1; 2779 continue; 2780 } 2781 } 2782 if (break_flag) { 2783 /* 2784 * we need to report rwnd overrun drops. 2785 */ 2786 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0); 2787 } 2788 if (num_chunks) { 2789 /* 2790 * Did we get data, if so update the time for auto-close and 2791 * give peer credit for being alive. 
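 * (we clear overall_error_count and stamp time_last_rcvd, which
 * the auto-close handling consults to decide whether the
 * association has gone idle)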
2792 */ 2793 SCTP_STAT_INCR(sctps_recvpktwithdata); 2794 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 2795 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 2796 stcb->asoc.overall_error_count, 2797 0, 2798 SCTP_FROM_SCTP_INDATA, 2799 __LINE__); 2800 } 2801 stcb->asoc.overall_error_count = 0; 2802 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd); 2803 } 2804 /* now service all of the reassm queue if needed */ 2805 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2806 /* Assure that we ack right away */ 2807 stcb->asoc.send_sack = 1; 2808 } 2809 /* Start a sack timer or QUEUE a SACK for sending */ 2810 sctp_sack_check(stcb, was_a_gap); 2811 return (0); 2812 } 2813 2814 static int 2815 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn, 2816 uint16_t frag_strt, uint16_t frag_end, int nr_sacking, 2817 int *num_frs, 2818 uint32_t * biggest_newly_acked_tsn, 2819 uint32_t * this_sack_lowest_newack, 2820 int *rto_ok) 2821 { 2822 struct sctp_tmit_chunk *tp1; 2823 unsigned int theTSN; 2824 int j, wake_him = 0, circled = 0; 2825 2826 /* Recover the tp1 we last saw */ 2827 tp1 = *p_tp1; 2828 if (tp1 == NULL) { 2829 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 2830 } 2831 for (j = frag_strt; j <= frag_end; j++) { 2832 theTSN = j + last_tsn; 2833 while (tp1) { 2834 if (tp1->rec.data.doing_fast_retransmit) 2835 (*num_frs) += 1; 2836 2837 /*- 2838 * CMT: CUCv2 algorithm. For each TSN being 2839 * processed from the sent queue, track the 2840 * next expected pseudo-cumack, or 2841 * rtx_pseudo_cumack, if required. Separate 2842 * cumack trackers for first transmissions, 2843 * and retransmissions. 2844 */ 2845 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 2846 (tp1->whoTo->find_pseudo_cumack == 1) && 2847 (tp1->snd_count == 1)) { 2848 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq; 2849 tp1->whoTo->find_pseudo_cumack = 0; 2850 } 2851 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 2852 (tp1->whoTo->find_rtx_pseudo_cumack == 1) && 2853 (tp1->snd_count > 1)) { 2854 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq; 2855 tp1->whoTo->find_rtx_pseudo_cumack = 0; 2856 } 2857 if (tp1->rec.data.TSN_seq == theTSN) { 2858 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 2859 /*- 2860 * must be held until 2861 * cum-ack passes 2862 */ 2863 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 2864 /*- 2865 * If it is less than RESEND, it is 2866 * now no-longer in flight. 2867 * Higher values may already be set 2868 * via previous Gap Ack Blocks... 2869 * i.e. ACKED or RESEND. 2870 */ 2871 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, 2872 *biggest_newly_acked_tsn)) { 2873 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq; 2874 } 2875 /*- 2876 * CMT: SFR algo (and HTNA) - set 2877 * saw_newack to 1 for dest being 2878 * newly acked. update 2879 * this_sack_highest_newack if 2880 * appropriate. 2881 */ 2882 if (tp1->rec.data.chunk_was_revoked == 0) 2883 tp1->whoTo->saw_newack = 1; 2884 2885 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, 2886 tp1->whoTo->this_sack_highest_newack)) { 2887 tp1->whoTo->this_sack_highest_newack = 2888 tp1->rec.data.TSN_seq; 2889 } 2890 /*- 2891 * CMT DAC algo: also update 2892 * this_sack_lowest_newack 2893 */ 2894 if (*this_sack_lowest_newack == 0) { 2895 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 2896 sctp_log_sack(*this_sack_lowest_newack, 2897 last_tsn, 2898 tp1->rec.data.TSN_seq, 2899 0, 2900 0, 2901 SCTP_LOG_TSN_ACKED); 2902 } 2903 *this_sack_lowest_newack = tp1->rec.data.TSN_seq; 2904 } 2905 /*- 2906 * CMT: CUCv2 algorithm. 
If (rtx-)pseudo-cumack for corresponding 2907 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set 2908 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be 2909 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack. 2910 * Separate pseudo_cumack trackers for first transmissions and 2911 * retransmissions. 2912 */ 2913 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) { 2914 if (tp1->rec.data.chunk_was_revoked == 0) { 2915 tp1->whoTo->new_pseudo_cumack = 1; 2916 } 2917 tp1->whoTo->find_pseudo_cumack = 1; 2918 } 2919 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 2920 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 2921 } 2922 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) { 2923 if (tp1->rec.data.chunk_was_revoked == 0) { 2924 tp1->whoTo->new_pseudo_cumack = 1; 2925 } 2926 tp1->whoTo->find_rtx_pseudo_cumack = 1; 2927 } 2928 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 2929 sctp_log_sack(*biggest_newly_acked_tsn, 2930 last_tsn, 2931 tp1->rec.data.TSN_seq, 2932 frag_strt, 2933 frag_end, 2934 SCTP_LOG_TSN_ACKED); 2935 } 2936 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 2937 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP, 2938 tp1->whoTo->flight_size, 2939 tp1->book_size, 2940 (uint32_t) (uintptr_t) tp1->whoTo, 2941 tp1->rec.data.TSN_seq); 2942 } 2943 sctp_flight_size_decrease(tp1); 2944 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 2945 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 2946 tp1); 2947 } 2948 sctp_total_flight_decrease(stcb, tp1); 2949 2950 tp1->whoTo->net_ack += tp1->send_size; 2951 if (tp1->snd_count < 2) { 2952 /*- 2953 * True non-retransmitted chunk 2954 */ 2955 tp1->whoTo->net_ack2 += tp1->send_size; 2956 2957 /*- 2958 * update RTO too ?
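 * Only a chunk that was transmitted exactly once gives a clean
 * sample (Karn's algorithm), and at most one sample is taken per
 * SACK, gated by *rto_ok.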
2959 */ 2960 if (tp1->do_rtt) { 2961 if (*rto_ok) { 2962 tp1->whoTo->RTO = 2963 sctp_calculate_rto(stcb, 2964 &stcb->asoc, 2965 tp1->whoTo, 2966 &tp1->sent_rcv_time, 2967 sctp_align_safe_nocopy, 2968 SCTP_RTT_FROM_DATA); 2969 *rto_ok = 0; 2970 } 2971 if (tp1->whoTo->rto_needed == 0) { 2972 tp1->whoTo->rto_needed = 1; 2973 } 2974 tp1->do_rtt = 0; 2975 } 2976 } 2977 } 2978 if (tp1->sent <= SCTP_DATAGRAM_RESEND) { 2979 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, 2980 stcb->asoc.this_sack_highest_gap)) { 2981 stcb->asoc.this_sack_highest_gap = 2982 tp1->rec.data.TSN_seq; 2983 } 2984 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 2985 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt); 2986 #ifdef SCTP_AUDITING_ENABLED 2987 sctp_audit_log(0xB2, 2988 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff)); 2989 #endif 2990 } 2991 } 2992 /*- 2993 * All chunks NOT UNSENT fall through here and are marked 2994 * (leave PR-SCTP ones that are to skip alone though) 2995 */ 2996 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) && 2997 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { 2998 tp1->sent = SCTP_DATAGRAM_MARKED; 2999 } 3000 if (tp1->rec.data.chunk_was_revoked) { 3001 /* deflate the cwnd */ 3002 tp1->whoTo->cwnd -= tp1->book_size; 3003 tp1->rec.data.chunk_was_revoked = 0; 3004 } 3005 /* NR Sack code here */ 3006 if (nr_sacking && 3007 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { 3008 if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) { 3009 stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--; 3010 #ifdef INVARIANTS 3011 } else { 3012 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number); 3013 #endif 3014 } 3015 if ((stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) && 3016 (stcb->asoc.strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) && 3017 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.stream_number].outqueue)) { 3018 stcb->asoc.trigger_reset = 1; 3019 } 3020 tp1->sent = SCTP_DATAGRAM_NR_ACKED; 3021 if (tp1->data) { 3022 /* 3023 * sa_ignore 3024 * NO_NULL_CHK 3025 */ 3026 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 3027 sctp_m_freem(tp1->data); 3028 tp1->data = NULL; 3029 } 3030 wake_him++; 3031 } 3032 } 3033 break; 3034 } /* if (tp1->TSN_seq == theTSN) */ 3035 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) { 3036 break; 3037 } 3038 tp1 = TAILQ_NEXT(tp1, sctp_next); 3039 if ((tp1 == NULL) && (circled == 0)) { 3040 circled++; 3041 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 3042 } 3043 } /* end while (tp1) */ 3044 if (tp1 == NULL) { 3045 circled = 0; 3046 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 3047 } 3048 /* In case the fragments were not in order we must reset */ 3049 } /* end for (j = fragStart */ 3050 *p_tp1 = tp1; 3051 return (wake_him); /* Return value only used for nr-sack */ 3052 } 3053 3054 3055 static int 3056 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc, 3057 uint32_t last_tsn, uint32_t * biggest_tsn_acked, 3058 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack, 3059 int num_seg, int num_nr_seg, int *rto_ok) 3060 { 3061 struct sctp_gap_ack_block *frag, block; 3062 struct sctp_tmit_chunk *tp1; 3063 int i; 3064 int num_frs = 0; 3065 int chunk_freed; 3066 int non_revocable; 3067 uint16_t frag_strt, frag_end, prev_frag_end; 3068 3069 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3070 prev_frag_end = 0; 3071 chunk_freed = 0; 3072 3073 for (i = 0; i < (num_seg + num_nr_seg); i++) { 3074 if (i == num_seg) { 3075 prev_frag_end = 0; 3076 tp1 = 
TAILQ_FIRST(&asoc->sent_queue); 3077 } 3078 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset, 3079 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block); 3080 *offset += sizeof(block); 3081 if (frag == NULL) { 3082 return (chunk_freed); 3083 } 3084 frag_strt = ntohs(frag->start); 3085 frag_end = ntohs(frag->end); 3086 3087 if (frag_strt > frag_end) { 3088 /* This gap report is malformed, skip it. */ 3089 continue; 3090 } 3091 if (frag_strt <= prev_frag_end) { 3092 /* This gap report is not in order, so restart. */ 3093 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3094 } 3095 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) { 3096 *biggest_tsn_acked = last_tsn + frag_end; 3097 } 3098 if (i < num_seg) { 3099 non_revocable = 0; 3100 } else { 3101 non_revocable = 1; 3102 } 3103 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end, 3104 non_revocable, &num_frs, biggest_newly_acked_tsn, 3105 this_sack_lowest_newack, rto_ok)) { 3106 chunk_freed = 1; 3107 } 3108 prev_frag_end = frag_end; 3109 } 3110 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3111 if (num_frs) 3112 sctp_log_fr(*biggest_tsn_acked, 3113 *biggest_newly_acked_tsn, 3114 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS); 3115 } 3116 return (chunk_freed); 3117 } 3118 3119 static void 3120 sctp_check_for_revoked(struct sctp_tcb *stcb, 3121 struct sctp_association *asoc, uint32_t cumack, 3122 uint32_t biggest_tsn_acked) 3123 { 3124 struct sctp_tmit_chunk *tp1; 3125 3126 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3127 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) { 3128 /* 3129 * ok this guy is either ACK or MARKED. If it is 3130 * ACKED it has been previously acked but not this 3131 * time i.e. revoked. If it is MARKED it was ACK'ed 3132 * again. 3133 */ 3134 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) { 3135 break; 3136 } 3137 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 3138 /* it has been revoked */ 3139 tp1->sent = SCTP_DATAGRAM_SENT; 3140 tp1->rec.data.chunk_was_revoked = 1; 3141 /* 3142 * We must add this stuff back in to assure 3143 * timers and such get started. 3144 */ 3145 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3146 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 3147 tp1->whoTo->flight_size, 3148 tp1->book_size, 3149 (uint32_t) (uintptr_t) tp1->whoTo, 3150 tp1->rec.data.TSN_seq); 3151 } 3152 sctp_flight_size_increase(tp1); 3153 sctp_total_flight_increase(stcb, tp1); 3154 /* 3155 * We inflate the cwnd to compensate for our 3156 * artificial inflation of the flight_size. 
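 * The matching deflate happens when the chunk is acked again: the
 * gap-ack processing checks chunk_was_revoked and pulls book_size
 * back out of the cwnd.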
3157 */ 3158 tp1->whoTo->cwnd += tp1->book_size; 3159 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3160 sctp_log_sack(asoc->last_acked_seq, 3161 cumack, 3162 tp1->rec.data.TSN_seq, 3163 0, 3164 0, 3165 SCTP_LOG_TSN_REVOKED); 3166 } 3167 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) { 3168 /* it has been re-acked in this SACK */ 3169 tp1->sent = SCTP_DATAGRAM_ACKED; 3170 } 3171 } 3172 if (tp1->sent == SCTP_DATAGRAM_UNSENT) 3173 break; 3174 } 3175 } 3176 3177 3178 static void 3179 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, 3180 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved) 3181 { 3182 struct sctp_tmit_chunk *tp1; 3183 int strike_flag = 0; 3184 struct timeval now; 3185 int tot_retrans = 0; 3186 uint32_t sending_seq; 3187 struct sctp_nets *net; 3188 int num_dests_sacked = 0; 3189 3190 /* 3191 * select the sending_seq, this is either the next thing ready to be 3192 * sent but not transmitted, OR, the next seq we assign. 3193 */ 3194 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 3195 if (tp1 == NULL) { 3196 sending_seq = asoc->sending_seq; 3197 } else { 3198 sending_seq = tp1->rec.data.TSN_seq; 3199 } 3200 3201 /* CMT DAC algo: finding out if SACK is a mixed SACK */ 3202 if ((asoc->sctp_cmt_on_off > 0) && 3203 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3204 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3205 if (net->saw_newack) 3206 num_dests_sacked++; 3207 } 3208 } 3209 if (stcb->asoc.prsctp_supported) { 3210 (void)SCTP_GETTIME_TIMEVAL(&now); 3211 } 3212 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3213 strike_flag = 0; 3214 if (tp1->no_fr_allowed) { 3215 /* this one had a timeout or something */ 3216 continue; 3217 } 3218 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3219 if (tp1->sent < SCTP_DATAGRAM_RESEND) 3220 sctp_log_fr(biggest_tsn_newly_acked, 3221 tp1->rec.data.TSN_seq, 3222 tp1->sent, 3223 SCTP_FR_LOG_CHECK_STRIKE); 3224 } 3225 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) || 3226 tp1->sent == SCTP_DATAGRAM_UNSENT) { 3227 /* done */ 3228 break; 3229 } 3230 if (stcb->asoc.prsctp_supported) { 3231 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3232 /* Is it expired? */ 3233 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3234 /* Yes so drop it */ 3235 if (tp1->data != NULL) { 3236 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, 3237 SCTP_SO_NOT_LOCKED); 3238 } 3239 continue; 3240 } 3241 } 3242 } 3243 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) { 3244 /* we are beyond the tsn in the sack */ 3245 break; 3246 } 3247 if (tp1->sent >= SCTP_DATAGRAM_RESEND) { 3248 /* either a RESEND, ACKED, or MARKED */ 3249 /* skip */ 3250 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) { 3251 /* Continue strikin FWD-TSN chunks */ 3252 tp1->rec.data.fwd_tsn_cnt++; 3253 } 3254 continue; 3255 } 3256 /* 3257 * CMT : SFR algo (covers part of DAC and HTNA as well) 3258 */ 3259 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) { 3260 /* 3261 * No new acks were receieved for data sent to this 3262 * dest. Therefore, according to the SFR algo for 3263 * CMT, no data sent to this dest can be marked for 3264 * FR using this SACK. 3265 */ 3266 continue; 3267 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq, 3268 tp1->whoTo->this_sack_highest_newack)) { 3269 /* 3270 * CMT: New acks were receieved for data sent to 3271 * this dest. But no new acks were seen for data 3272 * sent after tp1. 
Therefore, according to the SFR 3273 * algo for CMT, tp1 cannot be marked for FR using 3274 * this SACK. This step covers part of the DAC algo 3275 * and the HTNA algo as well. 3276 */ 3277 continue; 3278 } 3279 /* 3280 * Here we check to see if we were have already done a FR 3281 * and if so we see if the biggest TSN we saw in the sack is 3282 * smaller than the recovery point. If so we don't strike 3283 * the tsn... otherwise we CAN strike the TSN. 3284 */ 3285 /* 3286 * @@@ JRI: Check for CMT if (accum_moved && 3287 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 3288 * 0)) { 3289 */ 3290 if (accum_moved && asoc->fast_retran_loss_recovery) { 3291 /* 3292 * Strike the TSN if in fast-recovery and cum-ack 3293 * moved. 3294 */ 3295 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3296 sctp_log_fr(biggest_tsn_newly_acked, 3297 tp1->rec.data.TSN_seq, 3298 tp1->sent, 3299 SCTP_FR_LOG_STRIKE_CHUNK); 3300 } 3301 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3302 tp1->sent++; 3303 } 3304 if ((asoc->sctp_cmt_on_off > 0) && 3305 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3306 /* 3307 * CMT DAC algorithm: If SACK flag is set to 3308 * 0, then lowest_newack test will not pass 3309 * because it would have been set to the 3310 * cumack earlier. If not already to be 3311 * rtx'd, If not a mixed sack and if tp1 is 3312 * not between two sacked TSNs, then mark by 3313 * one more. NOTE that we are marking by one 3314 * additional time since the SACK DAC flag 3315 * indicates that two packets have been 3316 * received after this missing TSN. 3317 */ 3318 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3319 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) { 3320 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3321 sctp_log_fr(16 + num_dests_sacked, 3322 tp1->rec.data.TSN_seq, 3323 tp1->sent, 3324 SCTP_FR_LOG_STRIKE_CHUNK); 3325 } 3326 tp1->sent++; 3327 } 3328 } 3329 } else if ((tp1->rec.data.doing_fast_retransmit) && 3330 (asoc->sctp_cmt_on_off == 0)) { 3331 /* 3332 * For those that have done a FR we must take 3333 * special consideration if we strike. I.e the 3334 * biggest_newly_acked must be higher than the 3335 * sending_seq at the time we did the FR. 3336 */ 3337 if ( 3338 #ifdef SCTP_FR_TO_ALTERNATE 3339 /* 3340 * If FR's go to new networks, then we must only do 3341 * this for singly homed asoc's. However if the FR's 3342 * go to the same network (Armando's work) then its 3343 * ok to FR multiple times. 3344 */ 3345 (asoc->numnets < 2) 3346 #else 3347 (1) 3348 #endif 3349 ) { 3350 3351 if (SCTP_TSN_GE(biggest_tsn_newly_acked, 3352 tp1->rec.data.fast_retran_tsn)) { 3353 /* 3354 * Strike the TSN, since this ack is 3355 * beyond where things were when we 3356 * did a FR. 3357 */ 3358 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3359 sctp_log_fr(biggest_tsn_newly_acked, 3360 tp1->rec.data.TSN_seq, 3361 tp1->sent, 3362 SCTP_FR_LOG_STRIKE_CHUNK); 3363 } 3364 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3365 tp1->sent++; 3366 } 3367 strike_flag = 1; 3368 if ((asoc->sctp_cmt_on_off > 0) && 3369 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3370 /* 3371 * CMT DAC algorithm: If 3372 * SACK flag is set to 0, 3373 * then lowest_newack test 3374 * will not pass because it 3375 * would have been set to 3376 * the cumack earlier. If 3377 * not already to be rtx'd, 3378 * If not a mixed sack and 3379 * if tp1 is not between two 3380 * sacked TSNs, then mark by 3381 * one more. 
NOTE that we 3382 * are marking by one 3383 * additional time since the 3384 * SACK DAC flag indicates 3385 * that two packets have 3386 * been received after this 3387 * missing TSN. 3388 */ 3389 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 3390 (num_dests_sacked == 1) && 3391 SCTP_TSN_GT(this_sack_lowest_newack, 3392 tp1->rec.data.TSN_seq)) { 3393 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3394 sctp_log_fr(32 + num_dests_sacked, 3395 tp1->rec.data.TSN_seq, 3396 tp1->sent, 3397 SCTP_FR_LOG_STRIKE_CHUNK); 3398 } 3399 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3400 tp1->sent++; 3401 } 3402 } 3403 } 3404 } 3405 } 3406 /* 3407 * JRI: TODO: remove code for HTNA algo. CMT's SFR 3408 * algo covers HTNA. 3409 */ 3410 } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, 3411 biggest_tsn_newly_acked)) { 3412 /* 3413 * We don't strike these: This is the HTNA 3414 * algorithm i.e. we don't strike If our TSN is 3415 * larger than the Highest TSN Newly Acked. 3416 */ 3417 ; 3418 } else { 3419 /* Strike the TSN */ 3420 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3421 sctp_log_fr(biggest_tsn_newly_acked, 3422 tp1->rec.data.TSN_seq, 3423 tp1->sent, 3424 SCTP_FR_LOG_STRIKE_CHUNK); 3425 } 3426 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3427 tp1->sent++; 3428 } 3429 if ((asoc->sctp_cmt_on_off > 0) && 3430 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3431 /* 3432 * CMT DAC algorithm: If SACK flag is set to 3433 * 0, then lowest_newack test will not pass 3434 * because it would have been set to the 3435 * cumack earlier. If not already to be 3436 * rtx'd, If not a mixed sack and if tp1 is 3437 * not between two sacked TSNs, then mark by 3438 * one more. NOTE that we are marking by one 3439 * additional time since the SACK DAC flag 3440 * indicates that two packets have been 3441 * received after this missing TSN. 3442 */ 3443 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3444 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) { 3445 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3446 sctp_log_fr(48 + num_dests_sacked, 3447 tp1->rec.data.TSN_seq, 3448 tp1->sent, 3449 SCTP_FR_LOG_STRIKE_CHUNK); 3450 } 3451 tp1->sent++; 3452 } 3453 } 3454 } 3455 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3456 struct sctp_nets *alt; 3457 3458 /* fix counts and things */ 3459 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3460 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND, 3461 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0), 3462 tp1->book_size, 3463 (uint32_t) (uintptr_t) tp1->whoTo, 3464 tp1->rec.data.TSN_seq); 3465 } 3466 if (tp1->whoTo) { 3467 tp1->whoTo->net_ack++; 3468 sctp_flight_size_decrease(tp1); 3469 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3470 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3471 tp1); 3472 } 3473 } 3474 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 3475 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, 3476 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3477 } 3478 /* add back to the rwnd */ 3479 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3480 3481 /* remove from the total flight */ 3482 sctp_total_flight_decrease(stcb, tp1); 3483 3484 if ((stcb->asoc.prsctp_supported) && 3485 (PR_SCTP_RTX_ENABLED(tp1->flags))) { 3486 /* 3487 * Has it been retransmitted tv_sec times? - 3488 * we store the retran count there. 
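 * For the PR-SCTP RTX policy, timetodrop is overloaded: tv_sec
 * carries the maximum number of transmissions rather than a time,
 * which is why it is compared against snd_count here.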
3489 */ 3490 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) { 3491 /* Yes, so drop it */ 3492 if (tp1->data != NULL) { 3493 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, 3494 SCTP_SO_NOT_LOCKED); 3495 } 3496 /* Make sure to flag we had a FR */ 3497 tp1->whoTo->net_ack++; 3498 continue; 3499 } 3500 } 3501 /* 3502 * SCTP_PRINTF("OK, we are now ready to FR this 3503 * guy\n"); 3504 */ 3505 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3506 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count, 3507 0, SCTP_FR_MARKED); 3508 } 3509 if (strike_flag) { 3510 /* This is a subsequent FR */ 3511 SCTP_STAT_INCR(sctps_sendmultfastretrans); 3512 } 3513 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3514 if (asoc->sctp_cmt_on_off > 0) { 3515 /* 3516 * CMT: Using RTX_SSTHRESH policy for CMT. 3517 * If CMT is being used, then pick dest with 3518 * largest ssthresh for any retransmission. 3519 */ 3520 tp1->no_fr_allowed = 1; 3521 alt = tp1->whoTo; 3522 /* sa_ignore NO_NULL_CHK */ 3523 if (asoc->sctp_cmt_pf > 0) { 3524 /* 3525 * JRS 5/18/07 - If CMT PF is on, 3526 * use the PF version of 3527 * find_alt_net() 3528 */ 3529 alt = sctp_find_alternate_net(stcb, alt, 2); 3530 } else { 3531 /* 3532 * JRS 5/18/07 - If only CMT is on, 3533 * use the CMT version of 3534 * find_alt_net() 3535 */ 3536 /* sa_ignore NO_NULL_CHK */ 3537 alt = sctp_find_alternate_net(stcb, alt, 1); 3538 } 3539 if (alt == NULL) { 3540 alt = tp1->whoTo; 3541 } 3542 /* 3543 * CUCv2: If a different dest is picked for 3544 * the retransmission, then new 3545 * (rtx-)pseudo_cumack needs to be tracked 3546 * for orig dest. Let CUCv2 track new (rtx-) 3547 * pseudo-cumack always. 3548 */ 3549 if (tp1->whoTo) { 3550 tp1->whoTo->find_pseudo_cumack = 1; 3551 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3552 } 3553 } else {/* CMT is OFF */ 3554 3555 #ifdef SCTP_FR_TO_ALTERNATE 3556 /* Can we find an alternate? */ 3557 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); 3558 #else 3559 /* 3560 * default behavior is to NOT retransmit 3561 * FR's to an alternate. Armando Caro's 3562 * paper details why. 3563 */ 3564 alt = tp1->whoTo; 3565 #endif 3566 } 3567 3568 tp1->rec.data.doing_fast_retransmit = 1; 3569 tot_retrans++; 3570 /* mark the sending seq for possible subsequent FR's */ 3571 /* 3572 * SCTP_PRINTF("Marking TSN for FR new value %x\n", 3573 * (uint32_t)tpi->rec.data.TSN_seq); 3574 */ 3575 if (TAILQ_EMPTY(&asoc->send_queue)) { 3576 /* 3577 * If the queue of send is empty then its 3578 * the next sequence number that will be 3579 * assigned so we subtract one from this to 3580 * get the one we last sent. 3581 */ 3582 tp1->rec.data.fast_retran_tsn = sending_seq; 3583 } else { 3584 /* 3585 * If there are chunks on the send queue 3586 * (unsent data that has made it from the 3587 * stream queues but not out the door, we 3588 * take the first one (which will have the 3589 * lowest TSN) and subtract one to get the 3590 * one we last sent. 3591 */ 3592 struct sctp_tmit_chunk *ttt; 3593 3594 ttt = TAILQ_FIRST(&asoc->send_queue); 3595 tp1->rec.data.fast_retran_tsn = 3596 ttt->rec.data.TSN_seq; 3597 } 3598 3599 if (tp1->do_rtt) { 3600 /* 3601 * this guy had a RTO calculation pending on 3602 * it, cancel it 3603 */ 3604 if ((tp1->whoTo != NULL) && 3605 (tp1->whoTo->rto_needed == 0)) { 3606 tp1->whoTo->rto_needed = 1; 3607 } 3608 tp1->do_rtt = 0; 3609 } 3610 if (alt != tp1->whoTo) { 3611 /* yes, there is an alternate. 
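 * Switch the chunk over to it: release the reference on the old
 * destination and take one on the new, so the old path cannot be
 * freed while other chunks still point at it.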
         */
        sctp_free_remote_addr(tp1->whoTo);
        /* sa_ignore FREED_MEMORY */
        tp1->whoTo = alt;
        atomic_add_int(&alt->ref_count, 1);
}
}
}
}

struct sctp_tmit_chunk *
sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
        struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
        struct timeval now;
        int now_filled = 0;

        if (asoc->prsctp_supported == 0) {
                return (NULL);
        }
        TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
                if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
                    tp1->sent != SCTP_DATAGRAM_RESEND &&
                    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
                        /* no chance to advance, out of here */
                        break;
                }
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
                        if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
                            (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
                                sctp_misc_ints(SCTP_FWD_TSN_CHECK,
                                    asoc->advanced_peer_ack_point,
                                    tp1->rec.data.TSN_seq, 0, 0);
                        }
                }
                if (!PR_SCTP_ENABLED(tp1->flags)) {
                        /*
                         * We can't fwd-tsn past any that are reliable
                         * (aka retransmitted) until the asoc fails.
                         */
                        break;
                }
                if (!now_filled) {
                        (void)SCTP_GETTIME_TIMEVAL(&now);
                        now_filled = 1;
                }
                /*
                 * Now we have a chunk which is marked for another
                 * retransmission to a PR-SCTP stream but which may
                 * already have run out of chances, OR which has been
                 * marked to be skipped. Can we skip it if it is a
                 * resend?
                 */
                if (tp1->sent == SCTP_DATAGRAM_RESEND &&
                    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
                        /*
                         * Is this one marked for resend and is its
                         * time now up?
                         */
                        if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
                                /* Yes, so drop it */
                                if (tp1->data) {
                                        (void)sctp_release_pr_sctp_chunk(stcb, tp1,
                                            1, SCTP_SO_NOT_LOCKED);
                                }
                        } else {
                                /*
                                 * No; we are done once we hit one
                                 * marked for resend whose time has
                                 * not yet expired.
                                 */
                                break;
                        }
                }
                /*
                 * Ok, this chunk is marked to be dropped, so we can
                 * clean it up, advance our peer ack point, and check
                 * the next chunk.
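                 * A hypothetical example: with cum-ack 100 and the
                 * sent queue holding TSN 101 (SKIP), 102 (SKIP) and
                 * 103 (RESEND, TTL not yet expired), this loop
                 * advances advanced_peer_ack_point to 102 and then
                 * stops at 103.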
3688 */ 3689 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || 3690 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { 3691 /* advance PeerAckPoint goes forward */ 3692 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) { 3693 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq; 3694 a_adv = tp1; 3695 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) { 3696 /* No update but we do save the chk */ 3697 a_adv = tp1; 3698 } 3699 } else { 3700 /* 3701 * If it is still in RESEND we can advance no 3702 * further 3703 */ 3704 break; 3705 } 3706 } 3707 return (a_adv); 3708 } 3709 3710 static int 3711 sctp_fs_audit(struct sctp_association *asoc) 3712 { 3713 struct sctp_tmit_chunk *chk; 3714 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0; 3715 int ret; 3716 3717 #ifndef INVARIANTS 3718 int entry_flight, entry_cnt; 3719 3720 #endif 3721 3722 ret = 0; 3723 #ifndef INVARIANTS 3724 entry_flight = asoc->total_flight; 3725 entry_cnt = asoc->total_flight_count; 3726 #endif 3727 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt) 3728 return (0); 3729 3730 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 3731 if (chk->sent < SCTP_DATAGRAM_RESEND) { 3732 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n", 3733 chk->rec.data.TSN_seq, 3734 chk->send_size, 3735 chk->snd_count); 3736 inflight++; 3737 } else if (chk->sent == SCTP_DATAGRAM_RESEND) { 3738 resend++; 3739 } else if (chk->sent < SCTP_DATAGRAM_ACKED) { 3740 inbetween++; 3741 } else if (chk->sent > SCTP_DATAGRAM_ACKED) { 3742 above++; 3743 } else { 3744 acked++; 3745 } 3746 } 3747 3748 if ((inflight > 0) || (inbetween > 0)) { 3749 #ifdef INVARIANTS 3750 panic("Flight size-express incorrect? \n"); 3751 #else 3752 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n", 3753 entry_flight, entry_cnt); 3754 3755 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n", 3756 inflight, inbetween, resend, above, acked); 3757 ret = 1; 3758 #endif 3759 } 3760 return (ret); 3761 } 3762 3763 3764 static void 3765 sctp_window_probe_recovery(struct sctp_tcb *stcb, 3766 struct sctp_association *asoc, 3767 struct sctp_tmit_chunk *tp1) 3768 { 3769 tp1->window_probe = 0; 3770 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) { 3771 /* TSN's skipped we do NOT move back. */ 3772 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD, 3773 tp1->whoTo ? 
tp1->whoTo->flight_size : 0, 3774 tp1->book_size, 3775 (uint32_t) (uintptr_t) tp1->whoTo, 3776 tp1->rec.data.TSN_seq); 3777 return; 3778 } 3779 /* First setup this by shrinking flight */ 3780 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3781 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3782 tp1); 3783 } 3784 sctp_flight_size_decrease(tp1); 3785 sctp_total_flight_decrease(stcb, tp1); 3786 /* Now mark for resend */ 3787 tp1->sent = SCTP_DATAGRAM_RESEND; 3788 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 3789 3790 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3791 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP, 3792 tp1->whoTo->flight_size, 3793 tp1->book_size, 3794 (uint32_t) (uintptr_t) tp1->whoTo, 3795 tp1->rec.data.TSN_seq); 3796 } 3797 } 3798 3799 void 3800 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, 3801 uint32_t rwnd, int *abort_now, int ecne_seen) 3802 { 3803 struct sctp_nets *net; 3804 struct sctp_association *asoc; 3805 struct sctp_tmit_chunk *tp1, *tp2; 3806 uint32_t old_rwnd; 3807 int win_probe_recovery = 0; 3808 int win_probe_recovered = 0; 3809 int j, done_once = 0; 3810 int rto_ok = 1; 3811 uint32_t send_s; 3812 3813 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 3814 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack, 3815 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 3816 } 3817 SCTP_TCB_LOCK_ASSERT(stcb); 3818 #ifdef SCTP_ASOCLOG_OF_TSNS 3819 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack; 3820 stcb->asoc.cumack_log_at++; 3821 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 3822 stcb->asoc.cumack_log_at = 0; 3823 } 3824 #endif 3825 asoc = &stcb->asoc; 3826 old_rwnd = asoc->peers_rwnd; 3827 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) { 3828 /* old ack */ 3829 return; 3830 } else if (asoc->last_acked_seq == cumack) { 3831 /* Window update sack */ 3832 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 3833 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 3834 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 3835 /* SWS sender side engages */ 3836 asoc->peers_rwnd = 0; 3837 } 3838 if (asoc->peers_rwnd > old_rwnd) { 3839 goto again; 3840 } 3841 return; 3842 } 3843 /* First setup for CC stuff */ 3844 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3845 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) { 3846 /* Drag along the window_tsn for cwr's */ 3847 net->cwr_window_tsn = cumack; 3848 } 3849 net->prev_cwnd = net->cwnd; 3850 net->net_ack = 0; 3851 net->net_ack2 = 0; 3852 3853 /* 3854 * CMT: Reset CUC and Fast recovery algo variables before 3855 * SACK processing 3856 */ 3857 net->new_pseudo_cumack = 0; 3858 net->will_exit_fast_recovery = 0; 3859 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 3860 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 3861 } 3862 } 3863 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 3864 tp1 = TAILQ_LAST(&asoc->sent_queue, 3865 sctpchunk_listhead); 3866 send_s = tp1->rec.data.TSN_seq + 1; 3867 } else { 3868 send_s = asoc->sending_seq; 3869 } 3870 if (SCTP_TSN_GE(cumack, send_s)) { 3871 struct mbuf *op_err; 3872 char msg[SCTP_DIAG_INFO_LEN]; 3873 3874 *abort_now = 1; 3875 /* XXX */ 3876 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x", 3877 cumack, send_s); 3878 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 3879 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21; 3880 
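/*
 * The comparison above uses serial number arithmetic (roughly,
 * TSN a is "greater" than b when a != b and the unsigned
 * difference (a - b) is below 1U << 31), so the check stays
 * correct across the 32-bit TSN wrap. Tear the association
 * down; the caller sees *abort_now and stops processing this
 * SACK.
 */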
sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 3881 return; 3882 } 3883 asoc->this_sack_highest_gap = cumack; 3884 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 3885 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 3886 stcb->asoc.overall_error_count, 3887 0, 3888 SCTP_FROM_SCTP_INDATA, 3889 __LINE__); 3890 } 3891 stcb->asoc.overall_error_count = 0; 3892 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) { 3893 /* process the new consecutive TSN first */ 3894 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 3895 if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) { 3896 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 3897 SCTP_PRINTF("Warning, an unsent is now acked?\n"); 3898 } 3899 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 3900 /* 3901 * If it is less than ACKED, it is 3902 * now no-longer in flight. Higher 3903 * values may occur during marking 3904 */ 3905 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3906 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3907 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 3908 tp1->whoTo->flight_size, 3909 tp1->book_size, 3910 (uint32_t) (uintptr_t) tp1->whoTo, 3911 tp1->rec.data.TSN_seq); 3912 } 3913 sctp_flight_size_decrease(tp1); 3914 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3915 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3916 tp1); 3917 } 3918 /* sa_ignore NO_NULL_CHK */ 3919 sctp_total_flight_decrease(stcb, tp1); 3920 } 3921 tp1->whoTo->net_ack += tp1->send_size; 3922 if (tp1->snd_count < 2) { 3923 /* 3924 * True non-retransmited 3925 * chunk 3926 */ 3927 tp1->whoTo->net_ack2 += 3928 tp1->send_size; 3929 3930 /* update RTO too? */ 3931 if (tp1->do_rtt) { 3932 if (rto_ok) { 3933 tp1->whoTo->RTO = 3934 /* 3935 * sa_ignore 3936 * NO_NULL_CH 3937 * K 3938 */ 3939 sctp_calculate_rto(stcb, 3940 asoc, tp1->whoTo, 3941 &tp1->sent_rcv_time, 3942 sctp_align_safe_nocopy, 3943 SCTP_RTT_FROM_DATA); 3944 rto_ok = 0; 3945 } 3946 if (tp1->whoTo->rto_needed == 0) { 3947 tp1->whoTo->rto_needed = 1; 3948 } 3949 tp1->do_rtt = 0; 3950 } 3951 } 3952 /* 3953 * CMT: CUCv2 algorithm. From the 3954 * cumack'd TSNs, for each TSN being 3955 * acked for the first time, set the 3956 * following variables for the 3957 * corresp destination. 3958 * new_pseudo_cumack will trigger a 3959 * cwnd update. 3960 * find_(rtx_)pseudo_cumack will 3961 * trigger search for the next 3962 * expected (rtx-)pseudo-cumack. 
3963 */ 3964 tp1->whoTo->new_pseudo_cumack = 1; 3965 tp1->whoTo->find_pseudo_cumack = 1; 3966 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3967 3968 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 3969 /* sa_ignore NO_NULL_CHK */ 3970 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 3971 } 3972 } 3973 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3974 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 3975 } 3976 if (tp1->rec.data.chunk_was_revoked) { 3977 /* deflate the cwnd */ 3978 tp1->whoTo->cwnd -= tp1->book_size; 3979 tp1->rec.data.chunk_was_revoked = 0; 3980 } 3981 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 3982 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) { 3983 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--; 3984 #ifdef INVARIANTS 3985 } else { 3986 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number); 3987 #endif 3988 } 3989 } 3990 if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) && 3991 (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) && 3992 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) { 3993 asoc->trigger_reset = 1; 3994 } 3995 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 3996 if (tp1->data) { 3997 /* sa_ignore NO_NULL_CHK */ 3998 sctp_free_bufspace(stcb, asoc, tp1, 1); 3999 sctp_m_freem(tp1->data); 4000 tp1->data = NULL; 4001 } 4002 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4003 sctp_log_sack(asoc->last_acked_seq, 4004 cumack, 4005 tp1->rec.data.TSN_seq, 4006 0, 4007 0, 4008 SCTP_LOG_FREE_SENT); 4009 } 4010 asoc->sent_queue_cnt--; 4011 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 4012 } else { 4013 break; 4014 } 4015 } 4016 4017 } 4018 /* sa_ignore NO_NULL_CHK */ 4019 if (stcb->sctp_socket) { 4020 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4021 struct socket *so; 4022 4023 #endif 4024 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4025 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4026 /* sa_ignore NO_NULL_CHK */ 4027 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK); 4028 } 4029 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4030 so = SCTP_INP_SO(stcb->sctp_ep); 4031 atomic_add_int(&stcb->asoc.refcnt, 1); 4032 SCTP_TCB_UNLOCK(stcb); 4033 SCTP_SOCKET_LOCK(so, 1); 4034 SCTP_TCB_LOCK(stcb); 4035 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4036 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4037 /* assoc was freed while we were unlocked */ 4038 SCTP_SOCKET_UNLOCK(so, 1); 4039 return; 4040 } 4041 #endif 4042 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4043 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4044 SCTP_SOCKET_UNLOCK(so, 1); 4045 #endif 4046 } else { 4047 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4048 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK); 4049 } 4050 } 4051 4052 /* JRS - Use the congestion control given in the CC module */ 4053 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) { 4054 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4055 if (net->net_ack2 > 0) { 4056 /* 4057 * Karn's rule applies to clearing error 4058 * count, this is optional. 
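 * (net_ack2 only counts bytes from chunks that were never
 * retransmitted, so a non-zero value is an unambiguous
 * reachability signal in the sense of Karn's algorithm.)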
4059 */ 4060 net->error_count = 0; 4061 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { 4062 /* addr came good */ 4063 net->dest_state |= SCTP_ADDR_REACHABLE; 4064 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 4065 0, (void *)net, SCTP_SO_NOT_LOCKED); 4066 } 4067 if (net == stcb->asoc.primary_destination) { 4068 if (stcb->asoc.alternate) { 4069 /* 4070 * release the alternate, 4071 * primary is good 4072 */ 4073 sctp_free_remote_addr(stcb->asoc.alternate); 4074 stcb->asoc.alternate = NULL; 4075 } 4076 } 4077 if (net->dest_state & SCTP_ADDR_PF) { 4078 net->dest_state &= ~SCTP_ADDR_PF; 4079 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, 4080 stcb->sctp_ep, stcb, net, 4081 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22); 4082 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 4083 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 4084 /* Done with this net */ 4085 net->net_ack = 0; 4086 } 4087 /* restore any doubled timers */ 4088 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 4089 if (net->RTO < stcb->asoc.minrto) { 4090 net->RTO = stcb->asoc.minrto; 4091 } 4092 if (net->RTO > stcb->asoc.maxrto) { 4093 net->RTO = stcb->asoc.maxrto; 4094 } 4095 } 4096 } 4097 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0); 4098 } 4099 asoc->last_acked_seq = cumack; 4100 4101 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4102 /* nothing left in-flight */ 4103 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4104 net->flight_size = 0; 4105 net->partial_bytes_acked = 0; 4106 } 4107 asoc->total_flight = 0; 4108 asoc->total_flight_count = 0; 4109 } 4110 /* RWND update */ 4111 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 4112 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 4113 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4114 /* SWS sender side engages */ 4115 asoc->peers_rwnd = 0; 4116 } 4117 if (asoc->peers_rwnd > old_rwnd) { 4118 win_probe_recovery = 1; 4119 } 4120 /* Now assure a timer where data is queued at */ 4121 again: 4122 j = 0; 4123 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4124 int to_ticks; 4125 4126 if (win_probe_recovery && (net->window_probe)) { 4127 win_probe_recovered = 1; 4128 /* 4129 * Find first chunk that was used with window probe 4130 * and clear the sent 4131 */ 4132 /* sa_ignore FREED_MEMORY */ 4133 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4134 if (tp1->window_probe) { 4135 /* move back to data send queue */ 4136 sctp_window_probe_recovery(stcb, asoc, tp1); 4137 break; 4138 } 4139 } 4140 } 4141 if (net->RTO == 0) { 4142 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 4143 } else { 4144 to_ticks = MSEC_TO_TICKS(net->RTO); 4145 } 4146 if (net->flight_size) { 4147 j++; 4148 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks, 4149 sctp_timeout_handler, &net->rxt_timer); 4150 if (net->window_probe) { 4151 net->window_probe = 0; 4152 } 4153 } else { 4154 if (net->window_probe) { 4155 /* 4156 * In window probes we must assure a timer 4157 * is still running there 4158 */ 4159 net->window_probe = 0; 4160 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4161 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks, 4162 sctp_timeout_handler, &net->rxt_timer); 4163 } 4164 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4165 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4166 stcb, net, 4167 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23); 4168 } 4169 } 4170 } 4171 if ((j == 0) && 4172 (!TAILQ_EMPTY(&asoc->sent_queue)) && 4173 (asoc->sent_queue_retran_cnt == 0) && 4174 
(win_probe_recovered == 0) && 4175 (done_once == 0)) { 4176 /* 4177 * huh, this should not happen unless all packets are 4178 * PR-SCTP and marked to skip of course. 4179 */ 4180 if (sctp_fs_audit(asoc)) { 4181 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4182 net->flight_size = 0; 4183 } 4184 asoc->total_flight = 0; 4185 asoc->total_flight_count = 0; 4186 asoc->sent_queue_retran_cnt = 0; 4187 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4188 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4189 sctp_flight_size_increase(tp1); 4190 sctp_total_flight_increase(stcb, tp1); 4191 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4192 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 4193 } 4194 } 4195 } 4196 done_once = 1; 4197 goto again; 4198 } 4199 /**********************************/ 4200 /* Now what about shutdown issues */ 4201 /**********************************/ 4202 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4203 /* nothing left on sendqueue.. consider done */ 4204 /* clean up */ 4205 if ((asoc->stream_queue_cnt == 1) && 4206 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4207 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 4208 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) { 4209 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4210 } 4211 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4212 (asoc->stream_queue_cnt == 0)) { 4213 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4214 /* Need to abort here */ 4215 struct mbuf *op_err; 4216 4217 abort_out_now: 4218 *abort_now = 1; 4219 /* XXX */ 4220 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 4221 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; 4222 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4223 return; 4224 } else { 4225 struct sctp_nets *netp; 4226 4227 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 4228 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4229 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4230 } 4231 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 4232 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4233 sctp_stop_timers_for_shutdown(stcb); 4234 if (asoc->alternate) { 4235 netp = asoc->alternate; 4236 } else { 4237 netp = asoc->primary_destination; 4238 } 4239 sctp_send_shutdown(stcb, netp); 4240 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4241 stcb->sctp_ep, stcb, netp); 4242 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4243 stcb->sctp_ep, stcb, netp); 4244 } 4245 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4246 (asoc->stream_queue_cnt == 0)) { 4247 struct sctp_nets *netp; 4248 4249 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4250 goto abort_out_now; 4251 } 4252 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4253 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); 4254 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4255 sctp_stop_timers_for_shutdown(stcb); 4256 if (asoc->alternate) { 4257 netp = asoc->alternate; 4258 } else { 4259 netp = asoc->primary_destination; 4260 } 4261 sctp_send_shutdown_ack(stcb, netp); 4262 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4263 stcb->sctp_ep, stcb, netp); 4264 } 4265 } 4266 /*********************************************/ 4267 /* Here we perform PR-SCTP procedures */ 4268 /* (section 4.2) */ 4269 /*********************************************/ 4270 /* C1. 
update advancedPeerAckPoint */
if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
        asoc->advanced_peer_ack_point = cumack;
}
/* PR-SCTP issues need to be addressed too */
if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
        struct sctp_tmit_chunk *lchk;
        uint32_t old_adv_peer_ack_point;

        old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
        lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
        /* C3. See if we need to send a Fwd-TSN */
        if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
                /*
                 * ISSUE with ECN, see FWD-TSN processing.
                 */
                if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
                        send_forward_tsn(stcb, asoc);
                } else if (lchk) {
                        /* try to FR fwd-tsn's that get lost too */
                        if (lchk->rec.data.fwd_tsn_cnt >= 3) {
                                send_forward_tsn(stcb, asoc);
                        }
                }
        }
        if (lchk) {
                /* Assure a timer is up */
                sctp_timer_start(SCTP_TIMER_TYPE_SEND,
                    stcb->sctp_ep, stcb, lchk->whoTo);
        }
}
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
        sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
            rwnd,
            stcb->asoc.peers_rwnd,
            stcb->asoc.total_flight,
            stcb->asoc.total_output_queue_size);
}
}

void
sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
    struct sctp_tcb *stcb,
    uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
    int *abort_now, uint8_t flags,
    uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
{
        struct sctp_association *asoc;
        struct sctp_tmit_chunk *tp1, *tp2;
        uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
        uint16_t wake_him = 0;
        uint32_t send_s = 0;
        long j;
        int accum_moved = 0;
        int will_exit_fast_recovery = 0;
        uint32_t a_rwnd, old_rwnd;
        int win_probe_recovery = 0;
        int win_probe_recovered = 0;
        struct sctp_nets *net = NULL;
        int done_once;
        int rto_ok = 1;
        uint8_t reneged_all = 0;
        uint8_t cmt_dac_flag;

        /*
         * We take any chance we can to service our queues, since we
         * cannot get awoken when the socket is read from :<
         */
        /*
         * Now perform the actual SACK handling:
         * 1) Verify that it is not an old SACK; if so, discard it.
         * 2) If there is nothing left in the send queue (cum-ack is
         *    equal to last acked), then you have a duplicate too;
         *    update any rwnd change and verify that no timers are
         *    running, then return.
         * 3) Process any new consecutive data, i.e. the cum-ack
         *    moved; process these first and note that it moved.
         * 4) Process any SACK blocks.
         * 5) Drop any acked chunks from the queue.
         * 6) Check for any revoked blocks and mark them.
         * 7) Update the cwnd.
         * 8) If nothing is left, sync up the flight sizes and
         *    things, stop all timers, and also check for
         *    shutdown_pending state; if so, then go ahead and send
         *    off the shutdown. If in shutdown-received, send off the
         *    shutdown-ack, start that timer, and return.
         * 9) Strike any non-acked things and do the FR procedure if
         *    needed, being sure to set the FR flag.
         * 10) Do PR-SCTP procedures.
         * 11) Apply any FR penalties.
         * 12) Assure we will SACK if in shutdown_recv state.
4354 */ 4355 SCTP_TCB_LOCK_ASSERT(stcb); 4356 /* CMT DAC algo */ 4357 this_sack_lowest_newack = 0; 4358 SCTP_STAT_INCR(sctps_slowpath_sack); 4359 last_tsn = cum_ack; 4360 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC; 4361 #ifdef SCTP_ASOCLOG_OF_TSNS 4362 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack; 4363 stcb->asoc.cumack_log_at++; 4364 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 4365 stcb->asoc.cumack_log_at = 0; 4366 } 4367 #endif 4368 a_rwnd = rwnd; 4369 4370 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 4371 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack, 4372 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 4373 } 4374 old_rwnd = stcb->asoc.peers_rwnd; 4375 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4376 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4377 stcb->asoc.overall_error_count, 4378 0, 4379 SCTP_FROM_SCTP_INDATA, 4380 __LINE__); 4381 } 4382 stcb->asoc.overall_error_count = 0; 4383 asoc = &stcb->asoc; 4384 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4385 sctp_log_sack(asoc->last_acked_seq, 4386 cum_ack, 4387 0, 4388 num_seg, 4389 num_dup, 4390 SCTP_LOG_NEW_SACK); 4391 } 4392 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) { 4393 uint16_t i; 4394 uint32_t *dupdata, dblock; 4395 4396 for (i = 0; i < num_dup; i++) { 4397 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t), 4398 sizeof(uint32_t), (uint8_t *) & dblock); 4399 if (dupdata == NULL) { 4400 break; 4401 } 4402 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); 4403 } 4404 } 4405 /* reality check */ 4406 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 4407 tp1 = TAILQ_LAST(&asoc->sent_queue, 4408 sctpchunk_listhead); 4409 send_s = tp1->rec.data.TSN_seq + 1; 4410 } else { 4411 tp1 = NULL; 4412 send_s = asoc->sending_seq; 4413 } 4414 if (SCTP_TSN_GE(cum_ack, send_s)) { 4415 struct mbuf *op_err; 4416 char msg[SCTP_DIAG_INFO_LEN]; 4417 4418 /* 4419 * no way, we have not even sent this TSN out yet. Peer is 4420 * hopelessly messed up with us. 
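 * (The express cum-ack path performs the same sanity check;
 * either way we abort below with a PROTOCOL_VIOLATION cause.)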
         */
        SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
            cum_ack, send_s);
        if (tp1) {
                SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
                    tp1->rec.data.TSN_seq, (void *)tp1);
        }
hopeless_peer:
        *abort_now = 1;
        /* XXX */
        snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
            cum_ack, send_s);
        op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
        sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
        return;
}
/**********************/
/* 1) check the range */
/**********************/
if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
        /* acking something behind */
        return;
}
/* update the Rwnd of the peer */
if (TAILQ_EMPTY(&asoc->sent_queue) &&
    TAILQ_EMPTY(&asoc->send_queue) &&
    (asoc->stream_queue_cnt == 0)) {
        /* nothing left on send/sent and strmq */
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
                sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
                    asoc->peers_rwnd, 0, 0, a_rwnd);
        }
        asoc->peers_rwnd = a_rwnd;
        if (asoc->sent_queue_retran_cnt) {
                asoc->sent_queue_retran_cnt = 0;
        }
        if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
                /* SWS sender side engages */
                asoc->peers_rwnd = 0;
        }
        /* stop any timers */
        TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
                sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
                    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
                net->partial_bytes_acked = 0;
                net->flight_size = 0;
        }
        asoc->total_flight = 0;
        asoc->total_flight_count = 0;
        return;
}
/*
 * We initialize net_ack and net_ack2 to 0. These are used to
 * track two things: net_ack tracks the total bytes acked, and
 * net_ack2 tracks the total bytes acked that are unambiguous,
 * i.e. were never retransmitted. We track these on a
 * per-destination-address basis.
 */
TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
                /* Drag along the window_tsn for cwr's */
                net->cwr_window_tsn = cum_ack;
        }
        net->prev_cwnd = net->cwnd;
        net->net_ack = 0;
        net->net_ack2 = 0;

        /*
         * CMT: Reset CUC and Fast recovery algo variables before
         * SACK processing
         */
        net->new_pseudo_cumack = 0;
        net->will_exit_fast_recovery = 0;
        if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
                (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
        }
}
/* process the new consecutive TSN first */
TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
        if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
                if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
                        accum_moved = 1;
                        if (tp1->sent < SCTP_DATAGRAM_ACKED) {
                                /*
                                 * If it is less than ACKED, it is
                                 * now no longer in flight. Higher
                                 * values may occur during marking.
                                 */
                                if ((tp1->whoTo->dest_state &
                                    SCTP_ADDR_UNCONFIRMED) &&
                                    (tp1->snd_count < 2)) {
                                        /*
                                         * There was no retransmission,
                                         * the address is unconfirmed,
                                         * and we sent there and are
                                         * now SACKed: it's confirmed;
                                         * mark it so.
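                                         * (Only a never-retransmitted
                                         * chunk can confirm a path;
                                         * for a retransmission the
                                         * SACK could have been
                                         * triggered by the copy sent
                                         * to a different address.)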
4520 */ 4521 tp1->whoTo->dest_state &= 4522 ~SCTP_ADDR_UNCONFIRMED; 4523 } 4524 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4525 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4526 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4527 tp1->whoTo->flight_size, 4528 tp1->book_size, 4529 (uint32_t) (uintptr_t) tp1->whoTo, 4530 tp1->rec.data.TSN_seq); 4531 } 4532 sctp_flight_size_decrease(tp1); 4533 sctp_total_flight_decrease(stcb, tp1); 4534 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 4535 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 4536 tp1); 4537 } 4538 } 4539 tp1->whoTo->net_ack += tp1->send_size; 4540 4541 /* CMT SFR and DAC algos */ 4542 this_sack_lowest_newack = tp1->rec.data.TSN_seq; 4543 tp1->whoTo->saw_newack = 1; 4544 4545 if (tp1->snd_count < 2) { 4546 /* 4547 * True non-retransmited 4548 * chunk 4549 */ 4550 tp1->whoTo->net_ack2 += 4551 tp1->send_size; 4552 4553 /* update RTO too? */ 4554 if (tp1->do_rtt) { 4555 if (rto_ok) { 4556 tp1->whoTo->RTO = 4557 sctp_calculate_rto(stcb, 4558 asoc, tp1->whoTo, 4559 &tp1->sent_rcv_time, 4560 sctp_align_safe_nocopy, 4561 SCTP_RTT_FROM_DATA); 4562 rto_ok = 0; 4563 } 4564 if (tp1->whoTo->rto_needed == 0) { 4565 tp1->whoTo->rto_needed = 1; 4566 } 4567 tp1->do_rtt = 0; 4568 } 4569 } 4570 /* 4571 * CMT: CUCv2 algorithm. From the 4572 * cumack'd TSNs, for each TSN being 4573 * acked for the first time, set the 4574 * following variables for the 4575 * corresp destination. 4576 * new_pseudo_cumack will trigger a 4577 * cwnd update. 4578 * find_(rtx_)pseudo_cumack will 4579 * trigger search for the next 4580 * expected (rtx-)pseudo-cumack. 4581 */ 4582 tp1->whoTo->new_pseudo_cumack = 1; 4583 tp1->whoTo->find_pseudo_cumack = 1; 4584 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4585 4586 4587 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4588 sctp_log_sack(asoc->last_acked_seq, 4589 cum_ack, 4590 tp1->rec.data.TSN_seq, 4591 0, 4592 0, 4593 SCTP_LOG_TSN_ACKED); 4594 } 4595 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 4596 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 4597 } 4598 } 4599 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4600 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4601 #ifdef SCTP_AUDITING_ENABLED 4602 sctp_audit_log(0xB3, 4603 (asoc->sent_queue_retran_cnt & 0x000000ff)); 4604 #endif 4605 } 4606 if (tp1->rec.data.chunk_was_revoked) { 4607 /* deflate the cwnd */ 4608 tp1->whoTo->cwnd -= tp1->book_size; 4609 tp1->rec.data.chunk_was_revoked = 0; 4610 } 4611 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4612 tp1->sent = SCTP_DATAGRAM_ACKED; 4613 } 4614 } 4615 } else { 4616 break; 4617 } 4618 } 4619 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; 4620 /* always set this up to cum-ack */ 4621 asoc->this_sack_highest_gap = last_tsn; 4622 4623 if ((num_seg > 0) || (num_nr_seg > 0)) { 4624 4625 /* 4626 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has 4627 * to be greater than the cumack. Also reset saw_newack to 0 4628 * for all dests. 4629 */ 4630 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4631 net->saw_newack = 0; 4632 net->this_sack_highest_newack = last_tsn; 4633 } 4634 4635 /* 4636 * thisSackHighestGap will increase while handling NEW 4637 * segments this_sack_highest_newack will increase while 4638 * handling NEWLY ACKED chunks. this_sack_lowest_newack is 4639 * used for CMT DAC algo. saw_newack will also change. 
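 * A hypothetical example: with cum-ack 100 and gap blocks
 * covering 103-104 and 107, biggest_tsn_acked ends up at 107,
 * the destinations of the newly acked 103/104/107 get
 * saw_newack set, and TSNs 101-102 remain strike candidates.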
4640 */ 4641 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked, 4642 &biggest_tsn_newly_acked, &this_sack_lowest_newack, 4643 num_seg, num_nr_seg, &rto_ok)) { 4644 wake_him++; 4645 } 4646 /* 4647 * validate the biggest_tsn_acked in the gap acks if strict 4648 * adherence is wanted. 4649 */ 4650 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) { 4651 /* 4652 * peer is either confused or we are under attack. 4653 * We must abort. 4654 */ 4655 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n", 4656 biggest_tsn_acked, send_s); 4657 goto hopeless_peer; 4658 } 4659 } 4660 /*******************************************/ 4661 /* cancel ALL T3-send timer if accum moved */ 4662 /*******************************************/ 4663 if (asoc->sctp_cmt_on_off > 0) { 4664 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4665 if (net->new_pseudo_cumack) 4666 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4667 stcb, net, 4668 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27); 4669 4670 } 4671 } else { 4672 if (accum_moved) { 4673 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4674 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4675 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28); 4676 } 4677 } 4678 } 4679 /********************************************/ 4680 /* drop the acked chunks from the sentqueue */ 4681 /********************************************/ 4682 asoc->last_acked_seq = cum_ack; 4683 4684 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 4685 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) { 4686 break; 4687 } 4688 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4689 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) { 4690 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--; 4691 #ifdef INVARIANTS 4692 } else { 4693 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number); 4694 #endif 4695 } 4696 } 4697 if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) && 4698 (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) && 4699 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) { 4700 asoc->trigger_reset = 1; 4701 } 4702 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4703 if (PR_SCTP_ENABLED(tp1->flags)) { 4704 if (asoc->pr_sctp_cnt != 0) 4705 asoc->pr_sctp_cnt--; 4706 } 4707 asoc->sent_queue_cnt--; 4708 if (tp1->data) { 4709 /* sa_ignore NO_NULL_CHK */ 4710 sctp_free_bufspace(stcb, asoc, tp1, 1); 4711 sctp_m_freem(tp1->data); 4712 tp1->data = NULL; 4713 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) { 4714 asoc->sent_queue_cnt_removeable--; 4715 } 4716 } 4717 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4718 sctp_log_sack(asoc->last_acked_seq, 4719 cum_ack, 4720 tp1->rec.data.TSN_seq, 4721 0, 4722 0, 4723 SCTP_LOG_FREE_SENT); 4724 } 4725 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 4726 wake_him++; 4727 } 4728 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) { 4729 #ifdef INVARIANTS 4730 panic("Warning flight size is positive and should be 0"); 4731 #else 4732 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n", 4733 asoc->total_flight); 4734 #endif 4735 asoc->total_flight = 0; 4736 } 4737 /* sa_ignore NO_NULL_CHK */ 4738 if ((wake_him) && (stcb->sctp_socket)) { 4739 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4740 struct socket *so; 4741 4742 #endif 4743 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4744 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) 
{
                sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
        }
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
        so = SCTP_INP_SO(stcb->sctp_ep);
        atomic_add_int(&stcb->asoc.refcnt, 1);
        SCTP_TCB_UNLOCK(stcb);
        SCTP_SOCKET_LOCK(so, 1);
        SCTP_TCB_LOCK(stcb);
        atomic_subtract_int(&stcb->asoc.refcnt, 1);
        if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
                /* assoc was freed while we were unlocked */
                SCTP_SOCKET_UNLOCK(so, 1);
                return;
        }
#endif
        sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
        SCTP_SOCKET_UNLOCK(so, 1);
#endif
} else {
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
                sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
        }
}

if (asoc->fast_retran_loss_recovery && accum_moved) {
        if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
                /* Setup so we will exit RFC2582 fast recovery */
                will_exit_fast_recovery = 1;
        }
}
/*
 * Check for revoked fragments:
 *
 * If the previous SACK had no frags, we cannot have any revoked
 * ones. If the previous SACK had frags, then:
 * - if we now have frags (num_seg > 0), call
 *   sctp_check_for_revoked() to tell whether the peer revoked
 *   some of them;
 * - else the peer revoked all ACKED fragments, since we had some
 *   before and now we have NONE.
 */

if (num_seg) {
        sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
        asoc->saw_sack_with_frags = 1;
} else if (asoc->saw_sack_with_frags) {
        int cnt_revoked = 0;

        /* Peer revoked all datagrams marked or acked */
        TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
                if (tp1->sent == SCTP_DATAGRAM_ACKED) {
                        tp1->sent = SCTP_DATAGRAM_SENT;
                        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
                                sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
                                    tp1->whoTo->flight_size,
                                    tp1->book_size,
                                    (uint32_t) (uintptr_t) tp1->whoTo,
                                    tp1->rec.data.TSN_seq);
                        }
                        sctp_flight_size_increase(tp1);
                        sctp_total_flight_increase(stcb, tp1);
                        tp1->rec.data.chunk_was_revoked = 1;
                        /*
                         * To ensure that this increase in flight
                         * size, which is artificial, does not
                         * throttle the sender, we also increase the
                         * cwnd artificially.
                         */
                        tp1->whoTo->cwnd += tp1->book_size;
                        cnt_revoked++;
                }
        }
        if (cnt_revoked) {
                reneged_all = 1;
        }
        asoc->saw_sack_with_frags = 0;
}
if (num_nr_seg > 0)
        asoc->saw_sack_with_nr_frags = 1;
else
        asoc->saw_sack_with_nr_frags = 0;

/* JRS - Use the congestion control given in the CC module */
if (ecne_seen == 0) {
        TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
                if (net->net_ack2 > 0) {
                        /*
                         * Karn's rule applies to clearing the error
                         * count; this is optional.
4833 */ 4834 net->error_count = 0; 4835 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { 4836 /* addr came good */ 4837 net->dest_state |= SCTP_ADDR_REACHABLE; 4838 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 4839 0, (void *)net, SCTP_SO_NOT_LOCKED); 4840 } 4841 if (net == stcb->asoc.primary_destination) { 4842 if (stcb->asoc.alternate) { 4843 /* 4844 * release the alternate, 4845 * primary is good 4846 */ 4847 sctp_free_remote_addr(stcb->asoc.alternate); 4848 stcb->asoc.alternate = NULL; 4849 } 4850 } 4851 if (net->dest_state & SCTP_ADDR_PF) { 4852 net->dest_state &= ~SCTP_ADDR_PF; 4853 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, 4854 stcb->sctp_ep, stcb, net, 4855 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29); 4856 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 4857 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 4858 /* Done with this net */ 4859 net->net_ack = 0; 4860 } 4861 /* restore any doubled timers */ 4862 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 4863 if (net->RTO < stcb->asoc.minrto) { 4864 net->RTO = stcb->asoc.minrto; 4865 } 4866 if (net->RTO > stcb->asoc.maxrto) { 4867 net->RTO = stcb->asoc.maxrto; 4868 } 4869 } 4870 } 4871 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery); 4872 } 4873 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4874 /* nothing left in-flight */ 4875 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4876 /* stop all timers */ 4877 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4878 stcb, net, 4879 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30); 4880 net->flight_size = 0; 4881 net->partial_bytes_acked = 0; 4882 } 4883 asoc->total_flight = 0; 4884 asoc->total_flight_count = 0; 4885 } 4886 /**********************************/ 4887 /* Now what about shutdown issues */ 4888 /**********************************/ 4889 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4890 /* nothing left on sendqueue.. 
consider done */ 4891 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4892 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4893 asoc->peers_rwnd, 0, 0, a_rwnd); 4894 } 4895 asoc->peers_rwnd = a_rwnd; 4896 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4897 /* SWS sender side engages */ 4898 asoc->peers_rwnd = 0; 4899 } 4900 /* clean up */ 4901 if ((asoc->stream_queue_cnt == 1) && 4902 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4903 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 4904 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) { 4905 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4906 } 4907 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4908 (asoc->stream_queue_cnt == 0)) { 4909 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4910 /* Need to abort here */ 4911 struct mbuf *op_err; 4912 4913 abort_out_now: 4914 *abort_now = 1; 4915 /* XXX */ 4916 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 4917 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31; 4918 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4919 return; 4920 } else { 4921 struct sctp_nets *netp; 4922 4923 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 4924 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4925 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4926 } 4927 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 4928 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4929 sctp_stop_timers_for_shutdown(stcb); 4930 if (asoc->alternate) { 4931 netp = asoc->alternate; 4932 } else { 4933 netp = asoc->primary_destination; 4934 } 4935 sctp_send_shutdown(stcb, netp); 4936 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4937 stcb->sctp_ep, stcb, netp); 4938 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4939 stcb->sctp_ep, stcb, netp); 4940 } 4941 return; 4942 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4943 (asoc->stream_queue_cnt == 0)) { 4944 struct sctp_nets *netp; 4945 4946 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4947 goto abort_out_now; 4948 } 4949 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4950 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); 4951 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4952 sctp_stop_timers_for_shutdown(stcb); 4953 if (asoc->alternate) { 4954 netp = asoc->alternate; 4955 } else { 4956 netp = asoc->primary_destination; 4957 } 4958 sctp_send_shutdown_ack(stcb, netp); 4959 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4960 stcb->sctp_ep, stcb, netp); 4961 return; 4962 } 4963 } 4964 /* 4965 * Now here we are going to recycle net_ack for a different use... 4966 * HEADS UP. 4967 */ 4968 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4969 net->net_ack = 0; 4970 } 4971 4972 /* 4973 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking 4974 * to be done. Setting this_sack_lowest_newack to the cum_ack will 4975 * automatically ensure that. 4976 */ 4977 if ((asoc->sctp_cmt_on_off > 0) && 4978 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && 4979 (cmt_dac_flag == 0)) { 4980 this_sack_lowest_newack = cum_ack; 4981 } 4982 if ((num_seg > 0) || (num_nr_seg > 0)) { 4983 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, 4984 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved); 4985 } 4986 /* JRS - Use the congestion control given in the CC module */ 4987 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc); 4988 4989 /* Now are we exiting loss recovery ? 
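 * (We clear the flags only now, after the CC module has
 * already seen will_exit_fast_recovery in the
 * sctp_cwnd_update_after_sack() call above.)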
*/ 4990 if (will_exit_fast_recovery) { 4991 /* Ok, we must exit fast recovery */ 4992 asoc->fast_retran_loss_recovery = 0; 4993 } 4994 if ((asoc->sat_t3_loss_recovery) && 4995 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) { 4996 /* end satellite t3 loss recovery */ 4997 asoc->sat_t3_loss_recovery = 0; 4998 } 4999 /* 5000 * CMT Fast recovery 5001 */ 5002 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5003 if (net->will_exit_fast_recovery) { 5004 /* Ok, we must exit fast recovery */ 5005 net->fast_retran_loss_recovery = 0; 5006 } 5007 } 5008 5009 /* Adjust and set the new rwnd value */ 5010 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 5011 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 5012 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd); 5013 } 5014 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd, 5015 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 5016 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 5017 /* SWS sender side engages */ 5018 asoc->peers_rwnd = 0; 5019 } 5020 if (asoc->peers_rwnd > old_rwnd) { 5021 win_probe_recovery = 1; 5022 } 5023 /* 5024 * Now we must setup so we have a timer up for anyone with 5025 * outstanding data. 5026 */ 5027 done_once = 0; 5028 again: 5029 j = 0; 5030 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5031 if (win_probe_recovery && (net->window_probe)) { 5032 win_probe_recovered = 1; 5033 /*- 5034 * Find first chunk that was used with 5035 * window probe and clear the event. Put 5036 * it back into the send queue as if has 5037 * not been sent. 5038 */ 5039 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5040 if (tp1->window_probe) { 5041 sctp_window_probe_recovery(stcb, asoc, tp1); 5042 break; 5043 } 5044 } 5045 } 5046 if (net->flight_size) { 5047 j++; 5048 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5049 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5050 stcb->sctp_ep, stcb, net); 5051 } 5052 if (net->window_probe) { 5053 net->window_probe = 0; 5054 } 5055 } else { 5056 if (net->window_probe) { 5057 /* 5058 * In window probes we must assure a timer 5059 * is still running there 5060 */ 5061 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5062 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5063 stcb->sctp_ep, stcb, net); 5064 5065 } 5066 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5067 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 5068 stcb, net, 5069 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32); 5070 } 5071 } 5072 } 5073 if ((j == 0) && 5074 (!TAILQ_EMPTY(&asoc->sent_queue)) && 5075 (asoc->sent_queue_retran_cnt == 0) && 5076 (win_probe_recovered == 0) && 5077 (done_once == 0)) { 5078 /* 5079 * huh, this should not happen unless all packets are 5080 * PR-SCTP and marked to skip of course. 
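 * If sctp_fs_audit() does report an inconsistency, we rebuild
 * flight_size/total_flight from the sent queue below and then
 * retry once (guarded by done_once).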
5081 */ 5082 if (sctp_fs_audit(asoc)) { 5083 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5084 net->flight_size = 0; 5085 } 5086 asoc->total_flight = 0; 5087 asoc->total_flight_count = 0; 5088 asoc->sent_queue_retran_cnt = 0; 5089 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5090 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5091 sctp_flight_size_increase(tp1); 5092 sctp_total_flight_increase(stcb, tp1); 5093 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 5094 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 5095 } 5096 } 5097 } 5098 done_once = 1; 5099 goto again; 5100 } 5101 /*********************************************/ 5102 /* Here we perform PR-SCTP procedures */ 5103 /* (section 4.2) */ 5104 /*********************************************/ 5105 /* C1. update advancedPeerAckPoint */ 5106 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) { 5107 asoc->advanced_peer_ack_point = cum_ack; 5108 } 5109 /* C2. try to further move advancedPeerAckPoint ahead */ 5110 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { 5111 struct sctp_tmit_chunk *lchk; 5112 uint32_t old_adv_peer_ack_point; 5113 5114 old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 5115 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 5116 /* C3. See if we need to send a Fwd-TSN */ 5117 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) { 5118 /* 5119 * ISSUE with ECN, see FWD-TSN processing. 5120 */ 5121 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 5122 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 5123 0xee, cum_ack, asoc->advanced_peer_ack_point, 5124 old_adv_peer_ack_point); 5125 } 5126 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 5127 send_forward_tsn(stcb, asoc); 5128 } else if (lchk) { 5129 /* try to FR fwd-tsn's that get lost too */ 5130 if (lchk->rec.data.fwd_tsn_cnt >= 3) { 5131 send_forward_tsn(stcb, asoc); 5132 } 5133 } 5134 } 5135 if (lchk) { 5136 /* Assure a timer is up */ 5137 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5138 stcb->sctp_ep, stcb, lchk->whoTo); 5139 } 5140 } 5141 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 5142 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 5143 a_rwnd, 5144 stcb->asoc.peers_rwnd, 5145 stcb->asoc.total_flight, 5146 stcb->asoc.total_output_queue_size); 5147 } 5148 } 5149 5150 void 5151 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag) 5152 { 5153 /* Copy cum-ack */ 5154 uint32_t cum_ack, a_rwnd; 5155 5156 cum_ack = ntohl(cp->cumulative_tsn_ack); 5157 /* Arrange so a_rwnd does NOT change */ 5158 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight; 5159 5160 /* Now call the express sack handling */ 5161 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0); 5162 } 5163 5164 static void 5165 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, 5166 struct sctp_stream_in *strmin) 5167 { 5168 struct sctp_queued_to_read *ctl, *nctl; 5169 struct sctp_association *asoc; 5170 uint32_t tt; 5171 int need_reasm_check = 0, old; 5172 5173 asoc = &stcb->asoc; 5174 tt = strmin->last_sequence_delivered; 5175 if (asoc->idata_supported) { 5176 old = 0; 5177 } else { 5178 old = 1; 5179 } 5180 /* 5181 * First deliver anything prior to and including the stream no that 5182 * came in. 
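 * A hypothetical example: if a FWD-TSN advanced
 * last_sequence_delivered to 7, every queued complete message
 * with an SSN up to and including 7 is pushed to the read
 * queue here.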
5183 */ 5184 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) { 5185 if (SCTP_MSGID_GE(old, tt, ctl->sinfo_ssn)) { 5186 /* this is deliverable now */ 5187 if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 5188 if (ctl->on_strm_q) { 5189 if (ctl->on_strm_q == SCTP_ON_ORDERED) { 5190 TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm); 5191 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) { 5192 TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm); 5193 #ifdef INVARIANTS 5194 } else { 5195 panic("strmin: %p ctl: %p unknown %d", 5196 strmin, ctl, ctl->on_strm_q); 5197 #endif 5198 } 5199 ctl->on_strm_q = 0; 5200 } 5201 /* subtract pending on streams */ 5202 asoc->size_on_all_streams -= ctl->length; 5203 sctp_ucount_decr(asoc->cnt_on_all_streams); 5204 /* deliver it to at least the delivery-q */ 5205 if (stcb->sctp_socket) { 5206 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn); 5207 sctp_add_to_readq(stcb->sctp_ep, stcb, 5208 ctl, 5209 &stcb->sctp_socket->so_rcv, 5210 1, SCTP_READ_LOCK_HELD, 5211 SCTP_SO_NOT_LOCKED); 5212 } 5213 } else { 5214 /* Its a fragmented message */ 5215 if (ctl->first_frag_seen) { 5216 /* 5217 * Make it so this is next to 5218 * deliver, we restore later 5219 */ 5220 strmin->last_sequence_delivered = ctl->sinfo_ssn - 1; 5221 need_reasm_check = 1; 5222 break; 5223 } 5224 } 5225 } else { 5226 /* no more delivery now. */ 5227 break; 5228 } 5229 } 5230 if (need_reasm_check) { 5231 int ret; 5232 5233 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD); 5234 if (SCTP_MSGID_GT(old, tt, strmin->last_sequence_delivered)) { 5235 /* Restore the next to deliver unless we are ahead */ 5236 strmin->last_sequence_delivered = tt; 5237 } 5238 if (ret == 0) { 5239 /* Left the front Partial one on */ 5240 return; 5241 } 5242 need_reasm_check = 0; 5243 } 5244 /* 5245 * now we must deliver things in queue the normal way if any are 5246 * now ready. 
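 * That is, resume in-order delivery at
 * last_sequence_delivered + 1 and keep delivering while the
 * queue holds consecutive, complete messages.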
5247 */ 5248 tt = strmin->last_sequence_delivered + 1; 5249 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) { 5250 if (tt == ctl->sinfo_ssn) { 5251 if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 5252 /* this is deliverable now */ 5253 if (ctl->on_strm_q) { 5254 if (ctl->on_strm_q == SCTP_ON_ORDERED) { 5255 TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm); 5256 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) { 5257 TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm); 5258 #ifdef INVARIANTS 5259 } else { 5260 panic("strmin: %p ctl: %p unknown %d", 5261 strmin, ctl, ctl->on_strm_q); 5262 #endif 5263 } 5264 ctl->on_strm_q = 0; 5265 } 5266 /* subtract pending on streams */ 5267 asoc->size_on_all_streams -= ctl->length; 5268 sctp_ucount_decr(asoc->cnt_on_all_streams); 5269 /* deliver it to at least the delivery-q */ 5270 strmin->last_sequence_delivered = ctl->sinfo_ssn; 5271 if (stcb->sctp_socket) { 5272 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn); 5273 sctp_add_to_readq(stcb->sctp_ep, stcb, 5274 ctl, 5275 &stcb->sctp_socket->so_rcv, 1, 5276 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED); 5277 5278 } 5279 tt = strmin->last_sequence_delivered + 1; 5280 } else { 5281 /* Its a fragmented message */ 5282 if (ctl->first_frag_seen) { 5283 /* 5284 * Make it so this is next to 5285 * deliver 5286 */ 5287 strmin->last_sequence_delivered = ctl->sinfo_ssn - 1; 5288 need_reasm_check = 1; 5289 break; 5290 } 5291 } 5292 } else { 5293 break; 5294 } 5295 } 5296 if (need_reasm_check) { 5297 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD); 5298 } 5299 } 5300 5301 5302 5303 static void 5304 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb, 5305 struct sctp_association *asoc, 5306 uint16_t stream, uint32_t seq, int ordered, int old, uint32_t cumtsn) 5307 { 5308 struct sctp_queued_to_read *control; 5309 struct sctp_stream_in *strm; 5310 struct sctp_tmit_chunk *chk, *nchk; 5311 int cnt_removed = 0; 5312 5313 /* 5314 * For now large messages held on the stream reasm that are complete 5315 * will be tossed too. We could in theory do more work to spin 5316 * through and stop after dumping one msg aka seeing the start of a 5317 * new msg at the head, and call the delivery function... to see if 5318 * it can be delivered... But for now we just dump everything on the 5319 * queue. 
         */
        strm = &asoc->strmin[stream];
        control = sctp_find_reasm_entry(strm, (uint32_t) seq, ordered, old);
        if (control == NULL) {
                /* Not found */
                return;
        }
        if (old && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
                return;
        }
        TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
                /* Purge hanging chunks */
                if (old && (ordered == 0)) {
                        if (SCTP_TSN_GT(chk->rec.data.TSN_seq, cumtsn)) {
                                break;
                        }
                }
                cnt_removed++;
                TAILQ_REMOVE(&control->reasm, chk, sctp_next);
                asoc->size_on_reasm_queue -= chk->send_size;
                sctp_ucount_decr(asoc->cnt_on_reasm_queue);
                if (chk->data) {
                        sctp_m_freem(chk->data);
                        chk->data = NULL;
                }
                sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
        }
        if (!TAILQ_EMPTY(&control->reasm)) {
                /* This has to be old data, unordered */
                if (control->data) {
                        sctp_m_freem(control->data);
                        control->data = NULL;
                }
                sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
                chk = TAILQ_FIRST(&control->reasm);
                if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
                        TAILQ_REMOVE(&control->reasm, chk, sctp_next);
                        sctp_add_chk_to_control(control, strm, stcb, asoc,
                            chk, SCTP_READ_LOCK_HELD);
                }
                sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
                return;
        }
        if (control->on_strm_q == SCTP_ON_ORDERED) {
                TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
                control->on_strm_q = 0;
        } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
                TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
                control->on_strm_q = 0;
#ifdef INVARIANTS
        } else if (control->on_strm_q) {
                panic("strm: %p ctl: %p unknown %d",
                    strm, control, control->on_strm_q);
#endif
        }
        control->on_strm_q = 0;
        if (control->on_read_q == 0) {
                sctp_free_remote_addr(control->whoFrom);
                if (control->data) {
                        sctp_m_freem(control->data);
                        control->data = NULL;
                }
                sctp_free_a_readq(stcb, control);
        }
}

void
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
    struct sctp_forward_tsn_chunk *fwd,
    int *abort_flag, struct mbuf *m, int offset)
{
        /* The PR-SCTP fwd tsn */
        /*
         * Here we will perform all the data receiver side steps for
         * processing FwdTSN, as required by the PR-SCTP draft:
         *
         * Assume we get FwdTSN(x):
         *
         * 1) update local cumTSN to x
         * 2) try to further advance cumTSN to x + others we have
         * 3) examine and update the re-ordering queue on
         *    pr-in-streams
         * 4) clean up the re-assembly queue
         * 5) send a SACK to report where we are.
         */
        struct sctp_association *asoc;
        uint32_t new_cum_tsn, gap;
        unsigned int i, fwd_sz, m_size;
        uint32_t str_seq;
        struct sctp_stream_in *strm;
        struct sctp_queued_to_read *ctl, *sv;

        asoc = &stcb->asoc;
        if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
                SCTPDBG(SCTP_DEBUG_INDATA1,
                    "Bad size too small/big fwd-tsn\n");
                return;
        }
        m_size = (stcb->asoc.mapping_array_size << 3);
        /*************************************************************/
        /* 1. Here we update local cumTSN and shift the bitmap array */
        /*************************************************************/
        new_cum_tsn = ntohl(fwd->new_cumulative_tsn);

        if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
                /* Already got there ...
	/*
	 * now we know the new TSN is more advanced, let's find the actual
	 * gap
	 */
	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
	asoc->cumulative_tsn = new_cum_tsn;
	if (gap >= m_size) {
		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
			struct mbuf *op_err;
			char msg[SCTP_DIAG_INFO_LEN];

			/*
			 * out of range (of single byte chunks in the rwnd I
			 * give out). This must be an attacker.
			 */
			*abort_flag = 1;
			snprintf(msg, sizeof(msg),
			    "New cum ack %8.8x too high, highest TSN %8.8x",
			    new_cum_tsn, asoc->highest_tsn_inside_map);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			return;
		}
		SCTP_STAT_INCR(sctps_fwdtsn_map_over);

		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
		asoc->highest_tsn_inside_map = new_cum_tsn;

		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->highest_tsn_inside_nr_map = new_cum_tsn;

		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
	} else {
		SCTP_TCB_LOCK_ASSERT(stcb);
		for (i = 0; i <= gap; i++) {
			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
				}
			}
		}
	}
	/*************************************************************/
	/* 2. Clear up re-assembly queue                             */
	/*************************************************************/

	/* This is now done as part of clearing up the stream/seq */
	if (asoc->idata_supported == 0) {
		uint16_t sid;

		/* Flush all the un-ordered data based on cum-tsn */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		for (sid = 0; sid < asoc->streamincnt; sid++) {
			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, 1, new_cum_tsn);
		}
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	/*******************************************************/
	/* 3. Update the PR-stream re-ordering queues and fix  */
	/* delivery issues as needed.                          */
	/*******************************************************/
	fwd_sz -= sizeof(*fwd);
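	/*
	 * What remains of the chunk is an array of stream/sequence entries,
	 * sizeof(struct sctp_strseq) bytes each in the classic format and
	 * sizeof(struct sctp_strseq_mid) bytes each when I-DATA is in use;
	 * trailing bytes that do not form a complete entry are ignored by
	 * the division below.
	 */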
	if (m && fwd_sz) {
		/* New method. */
		unsigned int num_str;
		uint32_t sequence;
		uint16_t stream;
		uint16_t ordered, flags;
		int old;
		struct sctp_strseq *stseq, strseqbuf;
		struct sctp_strseq_mid *stseq_m, strseqbuf_m;

		offset += sizeof(*fwd);

		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		if (asoc->idata_supported) {
			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
			old = 0;
		} else {
			num_str = fwd_sz / sizeof(struct sctp_strseq);
			old = 1;
		}
		for (i = 0; i < num_str; i++) {
			if (asoc->idata_supported) {
				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
				    sizeof(struct sctp_strseq_mid),
				    (uint8_t *) &strseqbuf_m);
				offset += sizeof(struct sctp_strseq_mid);
				if (stseq_m == NULL) {
					break;
				}
				stream = ntohs(stseq_m->stream);
				sequence = ntohl(stseq_m->msg_id);
				flags = ntohs(stseq_m->flags);
				if (flags & PR_SCTP_UNORDERED_FLAG) {
					ordered = 0;
				} else {
					ordered = 1;
				}
			} else {
				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
				    sizeof(struct sctp_strseq),
				    (uint8_t *) &strseqbuf);
				offset += sizeof(struct sctp_strseq);
				if (stseq == NULL) {
					break;
				}
				stream = ntohs(stseq->stream);
				sequence = (uint32_t) ntohs(stseq->sequence);
				ordered = 1;
			}
			/* Convert */

			/* now process */

			/*
			 * Ok we now look for the stream/seq on the read
			 * queue where it's not all delivered. If we find
			 * it we transmute the read entry into a
			 * PDI_ABORTED.
			 */
			if (stream >= asoc->streamincnt) {
				/* screwed up streams, stop! */
				break;
			}
			if ((asoc->str_of_pdapi == stream) &&
			    (asoc->ssn_of_pdapi == sequence)) {
				/*
				 * If this is the one we were partially
				 * delivering now, then we no longer are.
				 * Note this will change with the
				 * reassembly re-write.
				 */
				asoc->fragmented_delivery_inprogress = 0;
			}
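			/*
			 * Flush everything up to and including the skipped
			 * message. For example, if last_sequence_delivered
			 * is 65534 and the FwdTSN names SSN 1 (classic
			 * 16-bit sequence space), the serial comparison in
			 * SCTP_MSGID_GE lets the loops below walk 65534,
			 * 65535, 0, 1 across the wrap.
			 */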
			strm = &asoc->strmin[stream];
			if (asoc->idata_supported == 0) {
				uint16_t strm_at;

				for (strm_at = strm->last_sequence_delivered; SCTP_MSGID_GE(1, sequence, strm_at); strm_at++) {
					sctp_flush_reassm_for_str_seq(stcb, asoc, stream, strm_at, ordered, old, new_cum_tsn);
				}
			} else {
				uint32_t strm_at;

				for (strm_at = strm->last_sequence_delivered; SCTP_MSGID_GE(0, sequence, strm_at); strm_at++) {
					sctp_flush_reassm_for_str_seq(stcb, asoc, stream, strm_at, ordered, old, new_cum_tsn);
				}
			}
			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
				if ((ctl->sinfo_stream == stream) &&
				    (ctl->sinfo_ssn == sequence)) {
					str_seq = (stream << 16) | (0x0000ffff & sequence);
					ctl->pdapi_aborted = 1;
					sv = stcb->asoc.control_pdapi;
					ctl->end_added = 1;
					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strm->inqueue, ctl, next_instrm);
					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strm->uno_inqueue, ctl, next_instrm);
#ifdef INVARIANTS
					} else if (ctl->on_strm_q) {
						panic("strm: %p ctl: %p unknown %d",
						    strm, ctl, ctl->on_strm_q);
#endif
					}
					ctl->on_strm_q = 0;
					stcb->asoc.control_pdapi = ctl;
					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
					    stcb,
					    SCTP_PARTIAL_DELIVERY_ABORTED,
					    (void *)&str_seq,
					    SCTP_SO_NOT_LOCKED);
					stcb->asoc.control_pdapi = sv;
					break;
				} else if ((ctl->sinfo_stream == stream) &&
				    SCTP_MSGID_GT(old, ctl->sinfo_ssn, sequence)) {
					/* We are past our victim SSN */
					break;
				}
			}
			if (SCTP_MSGID_GT(old, sequence, strm->last_sequence_delivered)) {
				/* Update the sequence number */
				strm->last_sequence_delivered = sequence;
			}
			/* now kick the stream the new way */
			/* sa_ignore NO_NULL_CHK */
			sctp_kick_prsctp_reorder_queue(stcb, strm);
		}
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	/*
	 * Now slide things forward.
	 */
	sctp_slide_mapping_arrays(stcb);
}
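/*
 * A minimal sketch of the userland view of the PDAPI abort generated above,
 * assuming the sctp_pdapi_event layout of the SCTP socket API (RFC 6458):
 * the (stream << 16) | sequence value passed via str_seq is unpacked into
 * the stream and sequence fields of the event, so a process subscribed to
 * partial delivery events would see roughly:
 *
 *	struct sctp_pdapi_event *pdapi;
 *
 *	pdapi->pdapi_indication == SCTP_PARTIAL_DELIVERY_ABORTED
 *	pdapi->pdapi_stream     == stream of the skipped message
 *	pdapi->pdapi_seq        == its sequence number
 */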