/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_lock_bsd.h>
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is) and will be sending it ... for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */
static void
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk);


void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}
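/*
 * Illustration (not part of the code below, just a worked example with
 * assumed numbers): with a 64000-byte receive buffer, 1000 bytes already
 * in the socket buffer, two chunks totaling 500 bytes on the reassembly
 * queue and nothing on the stream queues, the advertised rwnd comes out
 * to roughly 64000 - 1000 - (500 + 2 * MSIZE) minus the control overhead
 * tracked in my_rwnd_control_len; sctp_sbspace() and sctp_sbspace_sub()
 * clamp each step at zero.
 */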
/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket,
	 * since sb_cc is the count that everyone has put up. When we
	 * rewrite sctp_soreceive we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL) {
		return (calc);
	}
	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
	/*
	 * Take out what has NOT been put on the socket queue and what we
	 * still hold while waiting to put it up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));
	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to control overhead, reduce it
	 * to 1, even if it is 0. SWS avoidance engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
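/*
 * Note on the sinfo_flags encoding used throughout this file: the chunk
 * flags byte from the wire (the U/B/E bits) is kept in the upper byte of
 * sinfo_flags, which is why readers shift by 8, as in
 * (control->sinfo_flags >> 8), before testing SCTP_DATA_UNORDERED and
 * friends.
 */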
/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint32_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
	TAILQ_INIT(&read_queue_e->reasm);
	read_queue_e->whoFrom = net;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
failed_build:
	return (read_queue_e);
}

struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}
	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	/*
	 * Make sure that there is no un-initialized padding between the
	 * cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->serinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
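/*
 * The mbuf returned above carries up to three cmsgs laid out back to
 * back, each padded out to CMSG_SPACE of its payload:
 *
 *   [SCTP_RCVINFO] [SCTP_NXTINFO] [SCTP_SNDRCV or SCTP_EXTRCV]
 *
 * with any of the three omitted when the matching socket option is off
 * (or, for SCTP_NXTINFO, when no next-message information is available).
 */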
static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;
	int in_r = 0, in_nr = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * This TSN is behind the cum-ack and thus we don't need to
		 * worry about it being moved from one mapping array to the
		 * other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if ((in_r == 0) && (in_nr == 0)) {
#ifdef INVARIANTS
		panic("Things are really messed up now");
#else
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#endif
	}
	if (in_nr == 0)
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if (in_r)
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
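/*
 * A small worked example of the move above, with assumed numbers: if
 * mapping_array_base_tsn is 100 and tsn is 103, the gap is 3, so bit 3
 * is set in nr_mapping_array (non-revokable) and cleared in
 * mapping_array (revokable). If 103 was also the highest TSN in the
 * revokable map, the loop walks back from 102 to find the new highest.
 */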
static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;
	struct sctp_readhead *q;
	uint8_t bits, unordered;

	bits = (control->sinfo_flags >> 8);
	unordered = bits & SCTP_DATA_UNORDERED;
	if (unordered) {
		q = &strm->uno_inqueue;
		if (asoc->idata_supported == 0) {
			if (!TAILQ_EMPTY(q)) {
				/*
				 * Only one unordered entry can be here in
				 * old style -- abort.
				 */
				return (-1);
			}
			TAILQ_INSERT_TAIL(q, control, next_instrm);
			control->on_strm_q = SCTP_ON_UNORDERED;
			return (0);
		}
	} else {
		q = &strm->inqueue;
	}
	if ((bits & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		control->end_added = control->last_frag_seen = control->first_frag_seen = 1;
	}
	if (TAILQ_EMPTY(q)) {
		/* Empty queue */
		TAILQ_INSERT_HEAD(q, control, next_instrm);
		if (unordered) {
			control->on_strm_q = SCTP_ON_UNORDERED;
		} else {
			control->on_strm_q = SCTP_ON_ORDERED;
		}
		return (0);
	} else {
		TAILQ_FOREACH(at, q, next_instrm) {
			if (SCTP_TSN_GT(at->msg_id, control->msg_id)) {
				/*
				 * The one in the queue is bigger than the
				 * new one; insert the new one before it.
				 */
				TAILQ_INSERT_BEFORE(at, control, next_instrm);
				if (unordered) {
					control->on_strm_q = SCTP_ON_UNORDERED;
				} else {
					control->on_strm_q = SCTP_ON_ORDERED;
				}
				break;
			} else if (at->msg_id == control->msg_id) {
				/*
				 * Gak, the peer sent a duplicate msg id.
				 * Return -1 to abort.
				 */
				return (-1);
			} else {
				if (TAILQ_NEXT(at, next_instrm) == NULL) {
					/*
					 * We are at the end; insert it
					 * after this one.
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
					}
					TAILQ_INSERT_AFTER(q,
					    at, control, next_instrm);
					if (unordered) {
						control->on_strm_q = SCTP_ON_UNORDERED;
					} else {
						control->on_strm_q = SCTP_ON_ORDERED;
					}
					break;
				}
			}
		}
	}
	return (0);
}
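/*
 * sctp_abort_in_reasm() below is the common exit for unrecoverable
 * reassembly errors: it formats a diagnostic string (TSN/SID plus FSN
 * and MID, or FSN and SSN, depending on whether I-DATA is in use),
 * frees the offending chunk, and aborts the association with a
 * protocol-violation cause.
 */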
static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
	char msg[SCTP_DIAG_INFO_LEN];
	struct mbuf *oper;

	if (stcb->asoc.idata_supported) {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.TSN_seq,
		    chk->rec.data.stream_number,
		    chk->rec.data.fsn_num, chk->rec.data.stream_seq);
	} else {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.TSN_seq,
		    chk->rec.data.stream_number,
		    chk->rec.data.fsn_num,
		    (uint16_t)chk->rec.data.stream_seq);
	}
	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
	sctp_m_freem(chk->data);
	chk->data = NULL;
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
	*abort_flag = 1;
}

static void
clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
	/*
	 * The control could not be placed and must be cleaned.
	 */
	struct sctp_tmit_chunk *chk, *nchk;

	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (chk->data)
			sctp_m_freem(chk->data);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	sctp_free_a_readq(stcb, control);
}
/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order, as long
 * as the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MSGID_GT((!asoc->idata_supported), strm->last_sequence_delivered, control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * Throw it in the stream so it gets cleaned up in
		 * association destruction.
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
		    strm->last_sequence_delivered, control->sinfo_tsn,
		    control->sinfo_stream, control->sinfo_ssn);
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;

	}
	if ((SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) && (asoc->idata_supported == 0)) {
		goto protocol_error;
	}
	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_sequence_delivered + 1;
	if (nxt_todel == control->sinfo_ssn) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it could be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if ((nxt_todel == control->sinfo_ssn) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
#ifdef INVARIANTS
				} else {
					panic("Huh control: %p is on_strm_q: %d",
					    control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_LOCKED);
				continue;
			} else if (nxt_todel == control->sinfo_ssn) {
				*need_reasm = 1;
			}
			break;
		}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			snprintf(msg, sizeof(msg),
			    "Queue to str msg_id: %u duplicate",
			    control->msg_id);
			clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}
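/*
 * The two helpers below keep control->length and the socket-buffer
 * accounting consistent while walking an mbuf chain: zero-length mbufs
 * are freed on the spot, every non-empty mbuf is counted via
 * atomic_add_int() (and sctp_sballoc() if the control is already on the
 * read queue), and tail_mbuf is left pointing at the last mbuf so later
 * appends are O(1).
 */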
static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
	struct mbuf *m, *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	control->held_length = 0;
	control->length = 0;
	m = control->data;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * counts; we assume the caller holds any needed
			 * SB locks.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m)
{
	struct mbuf *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	if (stcb == NULL) {
#ifdef INVARIANTS
		panic("Control broken");
#else
		return;
#endif
	}
	if (control->tail_mbuf == NULL) {
		/* TSNH */
		control->data = m;
		sctp_setup_tail_pointer(control);
		return;
	}
	control->tail_mbuf->m_next = m;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->tail_mbuf->m_next = sctp_m_free(m);
				m = control->tail_mbuf->m_next;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * counts; we assume the caller holds any needed
			 * SB locks.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
	memset(nc, 0, sizeof(struct sctp_queued_to_read));
	nc->sinfo_stream = control->sinfo_stream;
	nc->sinfo_ssn = control->sinfo_ssn;
	TAILQ_INIT(&nc->reasm);
	nc->top_fsn = control->top_fsn;
	nc->msg_id = control->msg_id;
	nc->sinfo_flags = control->sinfo_flags;
	nc->sinfo_ppid = control->sinfo_ppid;
	nc->sinfo_context = control->sinfo_context;
	nc->fsn_included = 0xffffffff;
	nc->sinfo_tsn = control->sinfo_tsn;
	nc->sinfo_cumtsn = control->sinfo_cumtsn;
	nc->sinfo_assoc_id = control->sinfo_assoc_id;
	nc->whoFrom = control->whoFrom;
	atomic_add_int(&nc->whoFrom->ref_count, 1);
	nc->stcb = control->stcb;
	nc->port_from = control->port_from;
}
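/*
 * Worked example for the collapse loop in sctp_handle_old_data() below,
 * with assumed numbers: if control->fsn_included is 5 and the reasm
 * queue holds fragments with FSNs 6, 7 and 9, fragments 6 and 7 are
 * merged onto the control (fsn_included becomes 7) and 9 stays queued
 * until 8 arrives.
 */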
static int
sctp_handle_old_data(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control, uint32_t pd_point)
{
	/*
	 * Special handling for the old un-ordered data chunk. All the
	 * chunks/TSNs go to msg_id 0. So we have to do the old style
	 * watching to see if we have it all. If we return one, no other
	 * control entries on the un-ordered queue will be looked at. In
	 * theory there should be no other entries in reality, unless the
	 * guy is sending both unordered NDATA and unordered DATA...
	 */
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc = NULL;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn_num == fsn) {
			/* Ok lets add it */
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok, we have to move anything left
					 * on the control queue to a new
					 * control.
					 */
					sctp_alloc_a_readq(stcb, nc);
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn_num;
						nc->data = tchk->data;
						sctp_mark_non_revokable(asoc, tchk->rec.data.TSN_seq);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/*
					 * Now lets add it to the queue
					 * after removing control.
					 */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				if (control->pdapi_started) {
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
					    &stcb->sctp_socket->so_rcv, control->end_added,
					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc) && (nc->first_frag_seen)) {
					/*
					 * Switch to the new guy and
					 * continue.
					 */
					control = nc;
					nc = NULL;
					goto restart;
				}
				return (1);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		    &stcb->sctp_socket->so_rcv, control->end_added,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}
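/*
 * In old-style unordered reassembly a single control can accumulate
 * several messages, so sctp_inject_old_data_unordered() below may see a
 * second FIRST fragment with a lower FSN than the one already merged;
 * in that case the mbuf chains, lengths and included FSNs are swapped
 * so the control always describes the lowest-FSN message.
 */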
static void
sctp_inject_old_data_unordered(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted = 0;

	/*
	 * Here we need to place the chunk into the control structure,
	 * sorted in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* It's the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn_num);
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered we can reassemble multiple
			 * messages on one control, as long as the next
			 * FIRST is greater than the old first (TSN, i.e.
			 * FSN, wise).
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->fsn_included)) {
				/*
				 * Easy way: the start of a new guy beyond
				 * the lowest.
				 */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn_num == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok, this should not happen; if it does we
				 * started the pd-api on the higher TSN
				 * (since the equals part is a TSN failure
				 * it must be that).
				 *
				 * We are completely hosed in that case since
				 * I have no way to recover. This really will
				 * only happen if we can get more TSNs
				 * higher before the pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);

				return;
			}
			/*
			 * Ok, we have two firsts and the one we just got is
			 * smaller than the one we previously placed... yuck!
			 * We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Swap the lengths */
			tmp = control->length;
			control->length = chk->send_size;
			chk->send_size = tmp;
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn_num;
			chk->rec.data.fsn_num = tmp;
			goto place_chunk;
		}
		control->first_frag_seen = 1;
		control->top_fsn = control->fsn_included = chk->rec.data.fsn_num;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	if (TAILQ_EMPTY(&control->reasm)) {
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		return;
	}
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
			/*
			 * This one in the queue is bigger than the new one;
			 * insert the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
			/*
			 * They sent a duplicate fsn number. This really
			 * should not happen since the FSN is a TSN and it
			 * should have been dropped earlier.
			 * sctp_abort_in_reasm() frees chk and its data,
			 * so we must not free them here first.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}
	}
	if (inserted == 0) {
		/* It's at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn_num;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}
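/*
 * pd_point below is the partial-delivery threshold: the smaller of the
 * endpoint's configured partial_delivery_point and a fraction of the
 * receive buffer (SCTP_SB_LIMIT_RCV() shifted right by
 * SCTP_PARTIAL_DELIVERY_SHIFT). Once an incomplete message exceeds it,
 * delivery to the application may begin before all fragments arrive.
 */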
static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc, struct sctp_stream_in *strm)
{
	/*
	 * Given a stream, strm, see if any of the SSNs on it that are
	 * fragmented are ready to deliver. If so, go ahead and place them
	 * on the read queue. In so placing, if we have hit the end, then
	 * we need to remove them from the stream's queue.
	 */
	struct sctp_queued_to_read *control, *nctl = NULL;
	uint32_t next_to_del;
	uint32_t pd_point;
	int ret = 0;

	if (stcb->sctp_socket) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
		    stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = stcb->sctp_ep->partial_delivery_point;
	}
	control = TAILQ_FIRST(&strm->uno_inqueue);
	if ((control) &&
	    (asoc->idata_supported == 0)) {
		/* Special handling needed for "old" data format */
		if (sctp_handle_old_data(stcb, asoc, strm, control, pd_point)) {
			goto done_un;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	while (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
		    control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (control->end_added) {
			/* We just put the last bit on */
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_UNORDERED) {
					panic("Huh control: %p on_q: %d -- not unordered?",
					    control, control->on_strm_q);
				}
#endif
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Can we do a PD-API for this un-ordered guy? */
			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);

				break;
			}
		}
		control = nctl;
	}
done_un:
	control = TAILQ_FIRST(&strm->inqueue);
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	if (control == NULL) {
		return (ret);
	}
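	/*
	 * Now the ordered queue: first see whether the entry at the head
	 * is a partially delivered message that has just completed, then
	 * walk the queue delivering every message whose SSN is exactly
	 * last_sequence_delivered + 1.
	 */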
	if (strm->last_sequence_delivered == control->sinfo_ssn) {
		/*
		 * Ok, the guy at the top was being partially delivered and
		 * has completed, so we remove it. Note the pd_api flag was
		 * taken off when the chunk was merged on in
		 * sctp_queue_data_for_reasm below.
		 */
		nctl = TAILQ_NEXT(control, next_instrm);
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
		    control, control->end_added, control->sinfo_ssn,
		    control->top_fsn, control->fsn_included,
		    strm->last_sequence_delivered);
		if (control->end_added) {
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_ORDERED) {
					panic("Huh control: %p on_q: %d -- not ordered?",
					    control, control->on_strm_q);
				}
#endif
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				control->on_strm_q = 0;
			}
			if (strm->pd_api_started && control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			}
			control = nctl;
		}
	}
	if (strm->pd_api_started) {
		/*
		 * Can't add more; we must have gotten an un-ordered guy
		 * above that is being partially delivered.
		 */
		return (0);
	}
deliver_more:
	next_to_del = strm->last_sequence_delivered + 1;
	if (control) {
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
		    control, control->end_added, control->sinfo_ssn, control->top_fsn, control->fsn_included,
		    next_to_del);
		nctl = TAILQ_NEXT(control, next_instrm);
		if ((control->sinfo_ssn == next_to_del) &&
		    (control->first_frag_seen)) {
			int done;

			/* Ok we can deliver it onto the stream. */
			if (control->end_added) {
				/* We are done with it afterwards */
				if (control->on_strm_q) {
#ifdef INVARIANTS
					if (control->on_strm_q != SCTP_ON_ORDERED) {
						panic("Huh control: %p on_q: %d -- not ordered?",
						    control, control->on_strm_q);
					}
#endif
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					control->on_strm_q = 0;
				}
				ret++;
			}
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/*
				 * A singleton now slipping through - mark
				 * it non-revokable too.
				 */
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			} else if (control->end_added == 0) {
				/*
				 * Check if we can defer adding until it's
				 * all there.
				 */
				if ((control->length < pd_point) || (strm->pd_api_started)) {
					/*
					 * Don't need it, or cannot add more
					 * (one being delivered that way).
					 */
					goto out;
				}
			}
			done = (control->end_added) && (control->last_frag_seen);
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			}
			strm->last_sequence_delivered = next_to_del;
			if (done) {
				control = nctl;
				goto deliver_more;
			} else {
				/* We are now doing PD API */
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
			}
		}
	}
out:
	return (ret);
}
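/*
 * sctp_add_chk_to_control() is the single merge point used by both the
 * old-data and I-DATA reassembly paths: its callers have already
 * verified that chk carries the next FSN in sequence for this control.
 */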
void
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk)
{
	/*
	 * Given a control and a chunk, merge the data from the chk onto
	 * the control and free up the chunk resources.
	 */
	int i_locked = 0;

	if (control->on_read_q) {
		/*
		 * It's being pd-api'd, so we must take some locks.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		i_locked = 1;
	}
	if (control->data == NULL) {
		control->data = chk->data;
		sctp_setup_tail_pointer(control);
	} else {
		sctp_add_to_tail_pointer(control, chk->data);
	}
	control->fsn_included = chk->rec.data.fsn_num;
	asoc->size_on_reasm_queue -= chk->send_size;
	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
	sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
	chk->data = NULL;
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		control->first_frag_seen = 1;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		/* It's complete */
		if ((control->on_strm_q) && (control->on_read_q)) {
			if (control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				control->on_strm_q = 0;
#ifdef INVARIANTS
			} else if (control->on_strm_q) {
				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
				    control->on_strm_q);
#endif
			}
		}
		control->end_added = 1;
		control->last_frag_seen = 1;
	}
	if (i_locked) {
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
}
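/*
 * Fragment bookkeeping used below, illustrated with an assumed
 * three-fragment I-DATA message: the B (first) fragment carries FSN 0,
 * the middle fragment FSN 1 and the E (last) fragment FSN 2. For old
 * DATA chunks the TSN doubles as the FSN. top_fsn tracks the highest
 * FSN seen so far and fsn_included the highest FSN already merged onto
 * the control.
 */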
/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so, pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int created_control,
    int *abort_flag, uint32_t tsn)
{
	uint32_t next_fsn;
	struct sctp_tmit_chunk *at, *nat;
	int do_wakeup, unordered;

	/*
	 * For old un-ordered data chunks.
	 */
	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
		unordered = 1;
	} else {
		unordered = 0;
	}
	/* Must be added to the stream-in queue */
	if (created_control) {
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			/* Duplicate SSN? */
			clean_up_control(stcb, control);
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
			return;
		}
		if ((tsn == (asoc->cumulative_tsn + 1)) && (asoc->idata_supported == 0)) {
			/*
			 * Ok, we created this control and now let's
			 * validate that it's legal, i.e. there is a B bit
			 * set; if not, and we have up to the cum-ack, then
			 * it's invalid.
			 */
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
				return;
			}
		}
	}
	if ((asoc->idata_supported == 0) && (unordered == 1)) {
		sctp_inject_old_data_unordered(stcb, asoc, control, chk, abort_flag);
		return;
	}
	/*
	 * Ok, we must queue the chunk into the reassembly portion:
	 *  o if it's the first, it goes to the control mbuf;
	 *  o if it's not first but the next in sequence, it goes to the
	 *    control, and each succeeding one in order also goes;
	 *  o if it's not in order, we place it on the list in its place.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* It's the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn_num);
		if (control->first_frag_seen) {
			/*
			 * Error on the sender's part: they either sent us
			 * two data chunks with FIRST, or they sent two
			 * un-ordered chunks that were fragmented at the
			 * same time in the same stream.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
			return;
		}
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn_num;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
	} else {
		/* Place the chunk in our list */
		int inserted = 0;

		if (control->last_frag_seen == 0) {
			/* Still willing to raise highest FSN seen */
			if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "We have a new top_fsn: %u\n",
				    chk->rec.data.fsn_num);
				control->top_fsn = chk->rec.data.fsn_num;
			}
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "The last fsn is now in place fsn: %u\n",
				    chk->rec.data.fsn_num);
				control->last_frag_seen = 1;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For I-DATA we always check, since we know
				 * that the first fragment is 0. For old
				 * DATA we have to receive the first before
				 * we know the first FSN (which is the TSN).
				 */
				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
					/*
					 * We have already delivered up to
					 * this, so it's a dup.
					 */
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
					return;
				}
			}
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				/* Second last? huh? */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate last fsn: %u (top: %u) -- abort\n",
				    chk->rec.data.fsn_num, control->top_fsn);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
				return;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For I-DATA we always check, since we know
				 * that the first fragment is 0. For old
				 * DATA we have to receive the first before
				 * we know the first FSN (which is the TSN).
				 */
				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn_num)) {
					/*
					 * We have already delivered up to
					 * this, so it's a dup.
					 */
					SCTPDBG(SCTP_DEBUG_XXX,
					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
					    chk->rec.data.fsn_num, control->fsn_included);
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
					return;
				}
			}
			/*
			 * Validate that we are not beyond the top FSN if
			 * we have seen the last one.
			 */
			if (SCTP_TSN_GT(chk->rec.data.fsn_num, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
				    chk->rec.data.fsn_num,
				    control->top_fsn);
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
				return;
			}
		}
		/*
		 * If we reach here, we need to place the new chunk in the
		 * reassembly queue for this control.
		 */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a not first fsn: %u needs to be inserted\n",
		    chk->rec.data.fsn_num);
		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
			if (SCTP_TSN_GT(at->rec.data.fsn_num, chk->rec.data.fsn_num)) {
				/*
				 * This one in the queue is bigger than the
				 * new one; insert the new one before at.
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Insert it before fsn: %u\n",
				    at->rec.data.fsn_num);
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
				inserted = 1;
				break;
			} else if (at->rec.data.fsn_num == chk->rec.data.fsn_num) {
				/*
				 * Gak, the peer sent a duplicate fragment
				 * sequence number. It COULD be that the
				 * SSNs have wrapped; maybe we should
				 * compare to the TSN somehow... sigh. For
				 * now just blow away the chunk and abort.
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate to fsn: %u -- abort\n",
				    at->rec.data.fsn_num);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
				return;
			}
		}
		if (inserted == 0) {
			/* Goes on the end */
			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
			    chk->rec.data.fsn_num);
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
		}
	}
	/*
	 * Ok, let's see if we can suck any up into the control structure
	 * that are in sequence, if it makes sense.
	 */
	do_wakeup = 0;
	/*
	 * If the first fragment has not been seen there is no sense in
	 * looking.
	 */
	if (control->first_frag_seen) {
		next_fsn = control->fsn_included + 1;
		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
			if (at->rec.data.fsn_num == next_fsn) {
				/* We can add this one now to the control */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
				    control, at,
				    at->rec.data.fsn_num,
				    next_fsn, control->fsn_included);
				TAILQ_REMOVE(&control->reasm, at, sctp_next);
				sctp_add_chk_to_control(control, strm, stcb, asoc, at);
				if (control->on_read_q) {
					do_wakeup = 1;
				}
				next_fsn++;
				if (control->end_added && control->pdapi_started) {
					if (strm->pd_api_started) {
						strm->pd_api_started = 0;
						control->pdapi_started = 0;
					}
					if (control->on_read_q == 0) {
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    control,
						    &stcb->sctp_socket->so_rcv, control->end_added,
						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
						do_wakeup = 1;
					}
					break;
				}
			} else {
				break;
			}
		}
	}
	if (do_wakeup) {
		/* Need to wakeup the reader */
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
}

static struct sctp_queued_to_read *
find_reasm_entry(struct sctp_stream_in *strm, uint32_t msg_id, int ordered, int old)
{
	struct sctp_queued_to_read *control;

	if (ordered) {
		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
			if (control->msg_id == msg_id) {
				break;
			}
		}
	} else {
		if (old) {
			control = TAILQ_FIRST(&strm->uno_inqueue);
			return (control);
		}
		TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
			if (control->msg_id == msg_id) {
				break;
			}
		}
	}
	return (control);
}
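/*
 * sctp_process_a_data_chunk() handles both chunk formats. For reference
 * (from the parsing below): an old DATA chunk carries TSN, SID, a
 * 16-bit SSN and PPID, and its TSN doubles as the FSN; an I-DATA chunk
 * carries TSN, SID, a 32-bit MID (msg_id) and an explicit FSN, where a
 * FIRST fragment has FSN 0 by definition.
 */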
static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, int chk_length,
    struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
    int *break_flag, int last_chunk, uint8_t chtype)
{
	/* Process a data chunk */
	/* struct sctp_tmit_chunk *chk; */
	struct sctp_data_chunk *ch;
	struct sctp_idata_chunk *nch, chunk_buf;
	struct sctp_tmit_chunk *chk;
	uint32_t tsn, fsn, gap, msg_id;
	struct mbuf *dmbuf;
	int the_len;
	int need_reasm_check = 0;
	uint16_t strmno;
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];
	struct sctp_queued_to_read *control = NULL;
	uint32_t protocol_id;
	uint8_t chunk_flags;
	struct sctp_stream_reset_list *liste;
	struct sctp_stream_in *strm;
	int ordered;
	size_t clen;
	int created_control = 0;
	uint8_t old_data;

	chk = NULL;
	if (chtype == SCTP_IDATA) {
		nch = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
		    sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
		ch = (struct sctp_data_chunk *)nch;
		clen = sizeof(struct sctp_idata_chunk);
		tsn = ntohl(ch->dp.tsn);
		msg_id = ntohl(nch->dp.msg_id);
		if (ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG)
			fsn = 0;
		else
			fsn = ntohl(nch->dp.ppid_fsn.fsn);
		old_data = 0;
	} else {
		ch = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
		    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
		tsn = ntohl(ch->dp.tsn);
		clen = sizeof(struct sctp_data_chunk);
		fsn = tsn;
		msg_id = (uint32_t)(ntohs(ch->dp.stream_sequence));
		nch = NULL;
		old_data = 1;
	}
	chunk_flags = ch->ch.chunk_flags;
	if ((size_t)chk_length == clen) {
		/*
		 * Need to send an abort since we had an empty data chunk.
		 */
		op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
		asoc->send_sack = 1;
	}
	protocol_id = ch->dp.protocol_id;
	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
	}
	if (stcb == NULL) {
		return (0);
	}
	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
		/* It is a duplicate */
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/* Calculate the number of TSNs between the base and this TSN */
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
		/* Can't hold the bit in the mapping at max array; toss it */
		return (0);
	}
	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_expand_mapping_array(asoc, gap)) {
			/* Can't expand, drop it */
			return (0);
		}
	}
	if (SCTP_TSN_GT(tsn, *high_tsn)) {
		*high_tsn = tsn;
	}
	/* See if we have received this one already */
	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/*
	 * Check to see about the GONE flag; duplicates would cause a sack
	 * to be sent up above.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
		/*
		 * Wait a minute, this guy is gone; there is no longer a
		 * receiver. Send peer an ABORT!
		 */
		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
	/*
	 * Now before going further we see if there is room. If NOT then we
	 * MAY let one through only IF this TSN is the one we are waiting
	 * for on a partial delivery API.
	 */

	/* Is the stream valid? */
	strmno = ntohs(ch->dp.stream_id);
	if (strmno >= asoc->streamincnt) {
		struct sctp_error_invalid_stream *cause;

		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
		    0, M_NOWAIT, 1, MT_DATA);
		if (op_err != NULL) {
			/* add some space up front so prepend will work well */
			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
			cause = mtod(op_err, struct sctp_error_invalid_stream *);
			/*
			 * Error causes are just params, and this one has
			 * two back-to-back phdrs: one with the error type
			 * and size, the other with the stream id and a
			 * reserved field.
			 */
			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
			cause->stream_id = ch->dp.stream_id;
			cause->reserved = htons(0);
			sctp_queue_op_err(stcb, op_err);
		}
		SCTP_STAT_INCR(sctps_badsid);
		SCTP_TCB_LOCK_ASSERT(stcb);
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
			asoc->highest_tsn_inside_nr_map = tsn;
		}
		if (tsn == (asoc->cumulative_tsn + 1)) {
			/* Update cum-ack */
			asoc->cumulative_tsn = tsn;
		}
		return (0);
	}
	strm = &asoc->strmin[strmno];
	/*
	 * If it's a fragmented message, let's see if we can find the
	 * control on the reassembly queues.
	 */
	if ((chtype == SCTP_IDATA) && ((chunk_flags & SCTP_DATA_FIRST_FRAG) == 0) && (fsn == 0)) {
		/*
		 * The first *must* be fsn 0, and other (middle/end) pieces
		 * can *not* be fsn 0.
		 */
		snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
		    msg_id, chunk_flags);
		goto err_out;
	}
	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
		/* See if we can find the re-assembly entity */
		control = find_reasm_entry(strm, msg_id, ordered, old_data);
		SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
		    chunk_flags, control);
		if (control) {
			/* We found something, does it belong? */
			if (ordered && (msg_id != control->sinfo_ssn)) {
				snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", msg_id);
		err_out:
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
				return (0);
			}
			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
				/*
				 * We can't have a switched order with an
				 * unordered chunk.
				 */
				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
				    tsn);
				goto err_out;
			}
			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
				/*
				 * We can't have a switched unordered with a
				 * ordered chunk.
				 */
				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
				    tsn);
				goto err_out;
			}
		}
	} else {
		/*
		 * It's a complete segment. Let's validate we don't have a
		 * re-assembly going on with the same Stream/Seq (for
		 * ordered) or in the same Stream for unordered.
		 */
		SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for msg in case we have dup\n",
		    chunk_flags);
		if (find_reasm_entry(strm, msg_id, ordered, old_data)) {
			SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on msg_id: %u\n",
			    chunk_flags,
			    msg_id);
			snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected", msg_id);
			goto err_out;
		}
	}
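	/*
	 * The admission test below enforces two independent limits: a cap
	 * on the total number of chunks queued for the association
	 * (sctp_max_chunks_on_queue) and the receive window itself. Even
	 * when over the limit, a TSN that still falls inside the
	 * already-advertised mapping range is let through; anything beyond
	 * it is dropped and *break_flag is set.
	 */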
1752 */ 1753 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for msg in case we have dup\n", 1754 chunk_flags); 1755 if (find_reasm_entry(strm, msg_id, ordered, old_data)) { 1756 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on msg_id: %u\n", 1757 chunk_flags, 1758 msg_id); 1759 1760 goto err_out; 1761 } 1762 } 1763 /* now do the tests */ 1764 if (((asoc->cnt_on_all_streams + 1765 asoc->cnt_on_reasm_queue + 1766 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) || 1767 (((int)asoc->my_rwnd) <= 0)) { 1768 /* 1769 * When we have NO room in the rwnd we check to make sure 1770 * the reader is doing its job... 1771 */ 1772 if (stcb->sctp_socket->so_rcv.sb_cc) { 1773 /* some to read, wake-up */ 1774 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1775 struct socket *so; 1776 1777 so = SCTP_INP_SO(stcb->sctp_ep); 1778 atomic_add_int(&stcb->asoc.refcnt, 1); 1779 SCTP_TCB_UNLOCK(stcb); 1780 SCTP_SOCKET_LOCK(so, 1); 1781 SCTP_TCB_LOCK(stcb); 1782 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1783 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 1784 /* assoc was freed while we were unlocked */ 1785 SCTP_SOCKET_UNLOCK(so, 1); 1786 return (0); 1787 } 1788 #endif 1789 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 1790 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1791 SCTP_SOCKET_UNLOCK(so, 1); 1792 #endif 1793 } 1794 /* now is it in the mapping array of what we have accepted? */ 1795 if (nch == NULL) { 1796 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) && 1797 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1798 /* Nope not in the valid range dump it */ 1799 dump_packet: 1800 sctp_set_rwnd(stcb, asoc); 1801 if ((asoc->cnt_on_all_streams + 1802 asoc->cnt_on_reasm_queue + 1803 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) { 1804 SCTP_STAT_INCR(sctps_datadropchklmt); 1805 } else { 1806 SCTP_STAT_INCR(sctps_datadroprwnd); 1807 } 1808 *break_flag = 1; 1809 return (0); 1810 } 1811 } else { 1812 if (control == NULL) { 1813 goto dump_packet; 1814 } 1815 if (SCTP_TSN_GT(fsn, control->top_fsn)) { 1816 goto dump_packet; 1817 } 1818 } 1819 } 1820 #ifdef SCTP_ASOCLOG_OF_TSNS 1821 SCTP_TCB_LOCK_ASSERT(stcb); 1822 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) { 1823 asoc->tsn_in_at = 0; 1824 asoc->tsn_in_wrapped = 1; 1825 } 1826 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn; 1827 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno; 1828 asoc->in_tsnlog[asoc->tsn_in_at].seq = msg_id; 1829 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length; 1830 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags; 1831 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb; 1832 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at; 1833 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1; 1834 asoc->tsn_in_at++; 1835 #endif 1836 /* 1837 * Before we continue lets validate that we are not being fooled by 1838 * an evil attacker. We can only have Nk chunks based on our TSN 1839 * spread allowed by the mapping array N * 8 bits, so there is no 1840 * way our stream sequence numbers could have wrapped. We of course 1841 * only validate the FIRST fragment so the bit must be set. 1842 */ 1843 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) && 1844 (TAILQ_EMPTY(&asoc->resetHead)) && 1845 (chunk_flags & SCTP_DATA_UNORDERED) == 0 && 1846 SCTP_MSGID_GE(old_data, asoc->strmin[strmno].last_sequence_delivered, msg_id)) { 1847 /* The incoming sseq is behind where we last delivered? 
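*/

/*
 * Illustrative sketch only (#if 0, hypothetical "ex_" name): one
 * plausible reading of the SCTP_MSGID_GE() test above. DATA chunks
 * carry 16-bit SSNs and I-DATA carries 32-bit MIDs, so "behind or
 * equal" is a serial comparison in the matching width.
 */
#if 0
static int
ex_ssn_is_stale(int old_data, uint32_t last_delivered, uint32_t msg_id)
{
	if (old_data) {
		/* 16-bit serial space: stale when last_delivered >= msg_id */
		return ((uint16_t)(last_delivered - msg_id) < (1U << 15));
	}
	/* 32-bit serial space */
	return ((uint32_t)(last_delivered - msg_id) < (1U << 31));
}
#endif
/*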
*/ 1848 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n", 1849 msg_id, asoc->strmin[strmno].last_sequence_delivered); 1850 1851 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", 1852 asoc->strmin[strmno].last_sequence_delivered, 1853 tsn, strmno, msg_id); 1854 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 1855 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16; 1856 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 1857 *abort_flag = 1; 1858 return (0); 1859 } 1860 /************************************ 1861 * From here down we may find ch-> invalid 1862 * so its a good idea NOT to use it. 1863 *************************************/ 1864 if (nch) { 1865 the_len = (chk_length - sizeof(struct sctp_idata_chunk)); 1866 } else { 1867 the_len = (chk_length - sizeof(struct sctp_data_chunk)); 1868 } 1869 if (last_chunk == 0) { 1870 if (nch) { 1871 dmbuf = SCTP_M_COPYM(*m, 1872 (offset + sizeof(struct sctp_idata_chunk)), 1873 the_len, M_NOWAIT); 1874 } else { 1875 dmbuf = SCTP_M_COPYM(*m, 1876 (offset + sizeof(struct sctp_data_chunk)), 1877 the_len, M_NOWAIT); 1878 } 1879 #ifdef SCTP_MBUF_LOGGING 1880 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 1881 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY); 1882 } 1883 #endif 1884 } else { 1885 /* We can steal the last chunk */ 1886 int l_len; 1887 1888 dmbuf = *m; 1889 /* lop off the top part */ 1890 if (nch) { 1891 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk))); 1892 } else { 1893 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk))); 1894 } 1895 if (SCTP_BUF_NEXT(dmbuf) == NULL) { 1896 l_len = SCTP_BUF_LEN(dmbuf); 1897 } else { 1898 /* 1899 * need to count up the size hopefully does not hit 1900 * this to often :-0 1901 */ 1902 struct mbuf *lat; 1903 1904 l_len = 0; 1905 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) { 1906 l_len += SCTP_BUF_LEN(lat); 1907 } 1908 } 1909 if (l_len > the_len) { 1910 /* Trim the end round bytes off too */ 1911 m_adj(dmbuf, -(l_len - the_len)); 1912 } 1913 } 1914 if (dmbuf == NULL) { 1915 SCTP_STAT_INCR(sctps_nomem); 1916 return (0); 1917 } 1918 /* 1919 * Now no matter what we need a control, get one if we don't have 1920 * one (we may have gotten it above when we found the message was 1921 * fragmented 1922 */ 1923 if (control == NULL) { 1924 sctp_alloc_a_readq(stcb, control); 1925 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 1926 protocol_id, 1927 strmno, msg_id, 1928 chunk_flags, 1929 NULL, fsn, msg_id); 1930 if (control == NULL) { 1931 SCTP_STAT_INCR(sctps_nomem); 1932 return (0); 1933 } 1934 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 1935 control->data = dmbuf; 1936 control->tail_mbuf = NULL; 1937 control->end_added = control->last_frag_seen = control->first_frag_seen = 1; 1938 control->top_fsn = control->fsn_included = fsn; 1939 } 1940 created_control = 1; 1941 } 1942 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d msgid: %u control: %p\n", 1943 chunk_flags, ordered, msg_id, control); 1944 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG && 1945 TAILQ_EMPTY(&asoc->resetHead) && 1946 ((ordered == 0) || 1947 ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == msg_id && 1948 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) { 1949 /* Candidate for express delivery */ 1950 /* 1951 * Its not fragmented, No PD-API is up, Nothing in the 1952 * delivery queue, Its un-ordered OR ordered and 
the next to 1953 * deliver AND nothing else is stuck on the stream queue, 1954 * And there is room for it in the socket buffer. Lets just 1955 * stuff it up the buffer.... 1956 */ 1957 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 1958 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1959 asoc->highest_tsn_inside_nr_map = tsn; 1960 } 1961 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (msg_id: %u)\n", 1962 control, msg_id); 1963 1964 sctp_add_to_readq(stcb->sctp_ep, stcb, 1965 control, &stcb->sctp_socket->so_rcv, 1966 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 1967 1968 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) { 1969 /* for ordered, bump what we delivered */ 1970 strm->last_sequence_delivered++; 1971 } 1972 SCTP_STAT_INCR(sctps_recvexpress); 1973 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 1974 sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno, 1975 SCTP_STR_LOG_FROM_EXPRS_DEL); 1976 } 1977 control = NULL; 1978 goto finish_express_del; 1979 } 1980 /* Now will we need a chunk too? */ 1981 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 1982 sctp_alloc_a_chunk(stcb, chk); 1983 if (chk == NULL) { 1984 /* No memory so we drop the chunk */ 1985 SCTP_STAT_INCR(sctps_nomem); 1986 if (last_chunk == 0) { 1987 /* we copied it, free the copy */ 1988 sctp_m_freem(dmbuf); 1989 } 1990 return (0); 1991 } 1992 chk->rec.data.TSN_seq = tsn; 1993 chk->no_fr_allowed = 0; 1994 chk->rec.data.fsn_num = fsn; 1995 chk->rec.data.stream_seq = msg_id; 1996 chk->rec.data.stream_number = strmno; 1997 chk->rec.data.payloadtype = protocol_id; 1998 chk->rec.data.context = stcb->asoc.context; 1999 chk->rec.data.doing_fast_retransmit = 0; 2000 chk->rec.data.rcv_flags = chunk_flags; 2001 chk->asoc = asoc; 2002 chk->send_size = the_len; 2003 chk->whoTo = net; 2004 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (msg_id: %u)\n", 2005 chk, 2006 control, msg_id); 2007 atomic_add_int(&net->ref_count, 1); 2008 chk->data = dmbuf; 2009 } 2010 /* Set the appropriate TSN mark */ 2011 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { 2012 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 2013 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 2014 asoc->highest_tsn_inside_nr_map = tsn; 2015 } 2016 } else { 2017 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); 2018 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) { 2019 asoc->highest_tsn_inside_map = tsn; 2020 } 2021 } 2022 /* Now is it complete (i.e. not fragmented)? */ 2023 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 2024 /* 2025 * Special check for when streams are resetting. We could be 2026 * more smart about this and check the actual stream to see 2027 * if it is not being reset.. that way we would not create a 2028 * HOLB when amongst streams being reset and those not being 2029 * reset. 2030 * 2031 */ 2032 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2033 SCTP_TSN_GT(tsn, liste->tsn)) { 2034 /* 2035 * yep its past where we need to reset... go ahead 2036 * and queue it. 
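*/

/*
 * Illustrative sketch only (#if 0, hypothetical "ex_" name): the
 * sctp_do_drain branch above picks one of two maps for the same
 * mark-and-raise pattern. TSNs set in nr_mapping_array will be reported
 * as non-renegable, while TSNs in mapping_array remain revocable by us.
 */
#if 0
static void
ex_mark_tsn_present(struct sctp_association *asoc, uint32_t gap,
    uint32_t tsn, int renegable)
{
	if (renegable) {
		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map))
			asoc->highest_tsn_inside_map = tsn;
	} else {
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map))
			asoc->highest_tsn_inside_nr_map = tsn;
	}
}
#endif
/*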
2037 */ 2038 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) { 2039 /* first one on */ 2040 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2041 } else { 2042 struct sctp_queued_to_read *ctlOn, *nctlOn; 2043 unsigned char inserted = 0; 2044 2045 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) { 2046 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) { 2047 2048 continue; 2049 } else { 2050 /* found it */ 2051 TAILQ_INSERT_BEFORE(ctlOn, control, next); 2052 inserted = 1; 2053 break; 2054 } 2055 } 2056 if (inserted == 0) { 2057 /* 2058 * must be put at end, use prevP 2059 * (all setup from loop) to setup 2060 * nextP. 2061 */ 2062 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2063 } 2064 } 2065 goto finish_express_del; 2066 } 2067 if (chunk_flags & SCTP_DATA_UNORDERED) { 2068 /* queue directly into socket buffer */ 2069 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p msg_id: %u\n", 2070 control, msg_id); 2071 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 2072 sctp_add_to_readq(stcb->sctp_ep, stcb, 2073 control, 2074 &stcb->sctp_socket->so_rcv, 1, 2075 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 2076 2077 } else { 2078 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering msg_id: %u\n", control, 2079 msg_id); 2080 sctp_queue_data_to_stream(stcb, strm, asoc, control, abort_flag, &need_reasm_check); 2081 if (*abort_flag) { 2082 if (last_chunk) { 2083 *m = NULL; 2084 } 2085 return (0); 2086 } 2087 } 2088 goto finish_express_del; 2089 } 2090 /* If we reach here its a reassembly */ 2091 need_reasm_check = 1; 2092 SCTPDBG(SCTP_DEBUG_XXX, 2093 "Queue data to stream for reasm control: %p msg_id: %u\n", 2094 control, msg_id); 2095 sctp_queue_data_for_reasm(stcb, asoc, strm, control, chk, created_control, abort_flag, tsn); 2096 if (*abort_flag) { 2097 /* 2098 * the assoc is now gone and chk was put onto the reasm 2099 * queue, which has all been freed. 2100 */ 2101 if (last_chunk) { 2102 *m = NULL; 2103 } 2104 return (0); 2105 } 2106 finish_express_del: 2107 /* Here we tidy up things */ 2108 if (tsn == (asoc->cumulative_tsn + 1)) { 2109 /* Update cum-ack */ 2110 asoc->cumulative_tsn = tsn; 2111 } 2112 if (last_chunk) { 2113 *m = NULL; 2114 } 2115 if (ordered) { 2116 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); 2117 } else { 2118 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); 2119 } 2120 SCTP_STAT_INCR(sctps_recvdata); 2121 /* Set it present please */ 2122 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 2123 sctp_log_strm_del_alt(stcb, tsn, msg_id, strmno, SCTP_STR_LOG_FROM_MARK_TSN); 2124 } 2125 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2126 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2127 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); 2128 } 2129 /* check the special flag for stream resets */ 2130 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2131 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) { 2132 /* 2133 * we have finished working through the backlogged TSN's now 2134 * time to reset streams. 1: call reset function. 2: free 2135 * pending_reply space 3: distribute any chunks in 2136 * pending_reply_queue. 
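*/

/*
 * Illustrative sketch only (#if 0, hypothetical "ex_" name): assuming
 * TSNs in pending_reply_queue are unique, the insertion loop above is
 * equivalent to this keep-it-sorted-by-TSN helper.
 */
#if 0
static void
ex_insert_by_tsn(struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;

	/* walk to the first entry whose TSN lies after ours */
	TAILQ_FOREACH(at, &asoc->pending_reply_queue, next) {
		if (SCTP_TSN_GT(at->sinfo_tsn, control->sinfo_tsn)) {
			TAILQ_INSERT_BEFORE(at, control, next);
			return;
		}
	}
	/* nothing later was found: ours belongs at the tail */
	TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
}
#endif
/*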
2137 */ 2138 struct sctp_queued_to_read *ctl, *nctl; 2139 2140 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams); 2141 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); 2142 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED); 2143 SCTP_FREE(liste, SCTP_M_STRESET); 2144 /* sa_ignore FREED_MEMORY */ 2145 liste = TAILQ_FIRST(&asoc->resetHead); 2146 if (TAILQ_EMPTY(&asoc->resetHead)) { 2147 /* All can be removed */ 2148 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) { 2149 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 2150 sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check); 2151 if (*abort_flag) { 2152 return (0); 2153 } 2154 } 2155 } else { 2156 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) { 2157 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) { 2158 break; 2159 } 2160 /* 2161 * if ctl->sinfo_tsn is <= liste->tsn we can 2162 * process it which is the NOT of 2163 * ctl->sinfo_tsn > liste->tsn 2164 */ 2165 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 2166 sctp_queue_data_to_stream(stcb, strm, asoc, ctl, abort_flag, &need_reasm_check); 2167 if (*abort_flag) { 2168 return (0); 2169 } 2170 } 2171 } 2172 /* 2173 * Now service re-assembly to pick up anything that has been 2174 * held on reassembly queue? 2175 */ 2176 (void)sctp_deliver_reasm_check(stcb, asoc, strm); 2177 need_reasm_check = 0; 2178 } 2179 if (need_reasm_check) { 2180 /* Another one waits ? */ 2181 (void)sctp_deliver_reasm_check(stcb, asoc, strm); 2182 } 2183 return (1); 2184 } 2185 2186 static const int8_t sctp_map_lookup_tab[256] = { 2187 0, 1, 0, 2, 0, 1, 0, 3, 2188 0, 1, 0, 2, 0, 1, 0, 4, 2189 0, 1, 0, 2, 0, 1, 0, 3, 2190 0, 1, 0, 2, 0, 1, 0, 5, 2191 0, 1, 0, 2, 0, 1, 0, 3, 2192 0, 1, 0, 2, 0, 1, 0, 4, 2193 0, 1, 0, 2, 0, 1, 0, 3, 2194 0, 1, 0, 2, 0, 1, 0, 6, 2195 0, 1, 0, 2, 0, 1, 0, 3, 2196 0, 1, 0, 2, 0, 1, 0, 4, 2197 0, 1, 0, 2, 0, 1, 0, 3, 2198 0, 1, 0, 2, 0, 1, 0, 5, 2199 0, 1, 0, 2, 0, 1, 0, 3, 2200 0, 1, 0, 2, 0, 1, 0, 4, 2201 0, 1, 0, 2, 0, 1, 0, 3, 2202 0, 1, 0, 2, 0, 1, 0, 7, 2203 0, 1, 0, 2, 0, 1, 0, 3, 2204 0, 1, 0, 2, 0, 1, 0, 4, 2205 0, 1, 0, 2, 0, 1, 0, 3, 2206 0, 1, 0, 2, 0, 1, 0, 5, 2207 0, 1, 0, 2, 0, 1, 0, 3, 2208 0, 1, 0, 2, 0, 1, 0, 4, 2209 0, 1, 0, 2, 0, 1, 0, 3, 2210 0, 1, 0, 2, 0, 1, 0, 6, 2211 0, 1, 0, 2, 0, 1, 0, 3, 2212 0, 1, 0, 2, 0, 1, 0, 4, 2213 0, 1, 0, 2, 0, 1, 0, 3, 2214 0, 1, 0, 2, 0, 1, 0, 5, 2215 0, 1, 0, 2, 0, 1, 0, 3, 2216 0, 1, 0, 2, 0, 1, 0, 4, 2217 0, 1, 0, 2, 0, 1, 0, 3, 2218 0, 1, 0, 2, 0, 1, 0, 8 2219 }; 2220 2221 2222 void 2223 sctp_slide_mapping_arrays(struct sctp_tcb *stcb) 2224 { 2225 /* 2226 * Now we also need to check the mapping array in a couple of ways. 2227 * 1) Did we move the cum-ack point? 2228 * 2229 * When you first glance at this you might think that all entries that 2230 * make up the position of the cum-ack would be in the nr-mapping 2231 * array only.. i.e. things up to the cum-ack are always 2232 * deliverable. Thats true with one exception, when its a fragmented 2233 * message we may not deliver the data until some threshold (or all 2234 * of it) is in place. So we must OR the nr_mapping_array and 2235 * mapping_array to get a true picture of the cum-ack. 
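*/

/*
 * Illustrative sketch only (#if 0, hypothetical "ex_" name):
 * sctp_map_lookup_tab above is a precomputed form of this count of
 * consecutive 1-bits starting at bit 0, i.e. how many TSNs in a row
 * (from the array base) one byte of the OR'd maps accounts for. The
 * scan below special-cases 0xff and keeps walking.
 */
#if 0
static int
ex_count_trailing_ones(uint8_t val)
{
	int n = 0;

	while ((n < 8) && (val & (1U << n)))
		n++;
	return (n);
}
#endif
/*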
2236 */ 2237 struct sctp_association *asoc; 2238 int at; 2239 uint8_t val; 2240 int slide_from, slide_end, lgap, distance; 2241 uint32_t old_cumack, old_base, old_highest, highest_tsn; 2242 2243 asoc = &stcb->asoc; 2244 2245 old_cumack = asoc->cumulative_tsn; 2246 old_base = asoc->mapping_array_base_tsn; 2247 old_highest = asoc->highest_tsn_inside_map; 2248 /* 2249 * We could probably improve this a small bit by calculating the 2250 * offset of the current cum-ack as the starting point. 2251 */ 2252 at = 0; 2253 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) { 2254 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from]; 2255 if (val == 0xff) { 2256 at += 8; 2257 } else { 2258 /* there is a 0 bit */ 2259 at += sctp_map_lookup_tab[val]; 2260 break; 2261 } 2262 } 2263 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1); 2264 2265 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) && 2266 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) { 2267 #ifdef INVARIANTS 2268 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map", 2269 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2270 #else 2271 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n", 2272 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2273 sctp_print_mapping_array(asoc); 2274 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2275 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 2276 } 2277 asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2278 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn; 2279 #endif 2280 } 2281 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2282 highest_tsn = asoc->highest_tsn_inside_nr_map; 2283 } else { 2284 highest_tsn = asoc->highest_tsn_inside_map; 2285 } 2286 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) { 2287 /* The complete array was completed by a single FR */ 2288 /* highest becomes the cum-ack */ 2289 int clr; 2290 2291 #ifdef INVARIANTS 2292 unsigned int i; 2293 2294 #endif 2295 2296 /* clear the array */ 2297 clr = ((at + 7) >> 3); 2298 if (clr > asoc->mapping_array_size) { 2299 clr = asoc->mapping_array_size; 2300 } 2301 memset(asoc->mapping_array, 0, clr); 2302 memset(asoc->nr_mapping_array, 0, clr); 2303 #ifdef INVARIANTS 2304 for (i = 0; i < asoc->mapping_array_size; i++) { 2305 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) { 2306 SCTP_PRINTF("Error Mapping array's not clean at clear\n"); 2307 sctp_print_mapping_array(asoc); 2308 } 2309 } 2310 #endif 2311 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; 2312 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2313 } else if (at >= 8) { 2314 /* we can slide the mapping array down */ 2315 /* slide_from holds where we hit the first NON 0xff byte */ 2316 2317 /* 2318 * now calculate the ceiling of the move using our highest 2319 * TSN value 2320 */ 2321 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn); 2322 slide_end = (lgap >> 3); 2323 if (slide_end < slide_from) { 2324 sctp_print_mapping_array(asoc); 2325 #ifdef INVARIANTS 2326 panic("impossible slide"); 2327 #else 2328 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? 
at: %d\n", 2329 lgap, slide_end, slide_from, at); 2330 return; 2331 #endif 2332 } 2333 if (slide_end > asoc->mapping_array_size) { 2334 #ifdef INVARIANTS 2335 panic("would overrun buffer"); 2336 #else 2337 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n", 2338 asoc->mapping_array_size, slide_end); 2339 slide_end = asoc->mapping_array_size; 2340 #endif 2341 } 2342 distance = (slide_end - slide_from) + 1; 2343 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2344 sctp_log_map(old_base, old_cumack, old_highest, 2345 SCTP_MAP_PREPARE_SLIDE); 2346 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end, 2347 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM); 2348 } 2349 if (distance + slide_from > asoc->mapping_array_size || 2350 distance < 0) { 2351 /* 2352 * Here we do NOT slide forward the array so that 2353 * hopefully when more data comes in to fill it up 2354 * we will be able to slide it forward. Really I 2355 * don't think this should happen :-0 2356 */ 2357 2358 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2359 sctp_log_map((uint32_t) distance, (uint32_t) slide_from, 2360 (uint32_t) asoc->mapping_array_size, 2361 SCTP_MAP_SLIDE_NONE); 2362 } 2363 } else { 2364 int ii; 2365 2366 for (ii = 0; ii < distance; ii++) { 2367 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii]; 2368 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii]; 2369 2370 } 2371 for (ii = distance; ii < asoc->mapping_array_size; ii++) { 2372 asoc->mapping_array[ii] = 0; 2373 asoc->nr_mapping_array[ii] = 0; 2374 } 2375 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) { 2376 asoc->highest_tsn_inside_map += (slide_from << 3); 2377 } 2378 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) { 2379 asoc->highest_tsn_inside_nr_map += (slide_from << 3); 2380 } 2381 asoc->mapping_array_base_tsn += (slide_from << 3); 2382 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2383 sctp_log_map(asoc->mapping_array_base_tsn, 2384 asoc->cumulative_tsn, asoc->highest_tsn_inside_map, 2385 SCTP_MAP_SLIDE_RESULT); 2386 } 2387 } 2388 } 2389 } 2390 2391 void 2392 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap) 2393 { 2394 struct sctp_association *asoc; 2395 uint32_t highest_tsn; 2396 2397 asoc = &stcb->asoc; 2398 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2399 highest_tsn = asoc->highest_tsn_inside_nr_map; 2400 } else { 2401 highest_tsn = asoc->highest_tsn_inside_map; 2402 } 2403 2404 /* 2405 * Now we need to see if we need to queue a sack or just start the 2406 * timer (if allowed). 2407 */ 2408 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2409 /* 2410 * Ok special case, in SHUTDOWN-SENT case. here we maker 2411 * sure SACK timer is off and instead send a SHUTDOWN and a 2412 * SACK 2413 */ 2414 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2415 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 2416 stcb->sctp_ep, stcb, NULL, 2417 SCTP_FROM_SCTP_INDATA + SCTP_LOC_17); 2418 } 2419 sctp_send_shutdown(stcb, 2420 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination)); 2421 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 2422 } else { 2423 int is_a_gap; 2424 2425 /* is there a gap now ? 
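*/

/*
 * Illustrative sketch only (#if 0, hypothetical "ex_" name): the chain
 * of tests below decides between sending a SACK right away and
 * (re)arming the delayed-ack timer; as a single predicate it reads
 * roughly as follows (the CMT DAC sub-case below can still delay).
 */
#if 0
static int
ex_must_sack_now(struct sctp_association *asoc, int was_a_gap, int is_a_gap)
{
	return ((asoc->send_sack == 1) ||	/* a SACK was requested */
	    (was_a_gap && (is_a_gap == 0)) ||	/* a gap just closed */
	    (asoc->numduptsns != 0) ||		/* duplicates to report */
	    is_a_gap ||				/* a gap is still open */
	    (asoc->delayed_ack == 0) ||		/* delayed SACK disabled */
	    (asoc->data_pkts_seen >= asoc->sack_freq)); /* pkt budget hit */
}
#endif
/*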
*/ 2426 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 2427 2428 /* 2429 * CMT DAC algorithm: increase number of packets received 2430 * since last ack 2431 */ 2432 stcb->asoc.cmt_dac_pkts_rcvd++; 2433 2434 if ((stcb->asoc.send_sack == 1) || /* We need to send a 2435 * SACK */ 2436 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no 2437 * longer is one */ 2438 (stcb->asoc.numduptsns) || /* we have dup's */ 2439 (is_a_gap) || /* is still a gap */ 2440 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */ 2441 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */ 2442 ) { 2443 2444 if ((stcb->asoc.sctp_cmt_on_off > 0) && 2445 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) && 2446 (stcb->asoc.send_sack == 0) && 2447 (stcb->asoc.numduptsns == 0) && 2448 (stcb->asoc.delayed_ack) && 2449 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) { 2450 2451 /* 2452 * CMT DAC algorithm: With CMT, delay acks 2453 * even in the face of 2454 * 2455 * reordering. Therefore, if acks that do not 2456 * have to be sent because of the above 2457 * reasons, will be delayed. That is, acks 2458 * that would have been sent due to gap 2459 * reports will be delayed with DAC. Start 2460 * the delayed ack timer. 2461 */ 2462 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2463 stcb->sctp_ep, stcb, NULL); 2464 } else { 2465 /* 2466 * Ok we must build a SACK since the timer 2467 * is pending, we got our first packet OR 2468 * there are gaps or duplicates. 2469 */ 2470 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 2471 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 2472 } 2473 } else { 2474 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2475 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2476 stcb->sctp_ep, stcb, NULL); 2477 } 2478 } 2479 } 2480 } 2481 2482 int 2483 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, 2484 struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2485 struct sctp_nets *net, uint32_t * high_tsn) 2486 { 2487 struct sctp_chunkhdr *ch, chunk_buf; 2488 struct sctp_association *asoc; 2489 int num_chunks = 0; /* number of control chunks processed */ 2490 int stop_proc = 0; 2491 int chk_length, break_flag, last_chunk; 2492 int abort_flag = 0, was_a_gap; 2493 struct mbuf *m; 2494 uint32_t highest_tsn; 2495 2496 /* set the rwnd */ 2497 sctp_set_rwnd(stcb, &stcb->asoc); 2498 2499 m = *mm; 2500 SCTP_TCB_LOCK_ASSERT(stcb); 2501 asoc = &stcb->asoc; 2502 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2503 highest_tsn = asoc->highest_tsn_inside_nr_map; 2504 } else { 2505 highest_tsn = asoc->highest_tsn_inside_map; 2506 } 2507 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 2508 /* 2509 * setup where we got the last DATA packet from for any SACK that 2510 * may need to go out. Don't bump the net. This is done ONLY when a 2511 * chunk is assigned. 2512 */ 2513 asoc->last_data_chunk_from = net; 2514 2515 /*- 2516 * Now before we proceed we must figure out if this is a wasted 2517 * cluster... i.e. it is a small packet sent in and yet the driver 2518 * underneath allocated a full cluster for it. If so we must copy it 2519 * to a smaller mbuf and free up the cluster mbuf. This will help 2520 * with cluster starvation. Note for __Panda__ we don't do this 2521 * since it has clusters all the way down to 64 bytes. 2522 */ 2523 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) { 2524 /* we only handle mbufs that are singletons.. 
not chains */ 2525 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA); 2526 if (m) { 2527 /* ok, let's see if we can copy the data up */ 2528 caddr_t *from, *to; 2529 2530 /* get the pointers and copy */ 2531 to = mtod(m, caddr_t *); 2532 from = mtod((*mm), caddr_t *); 2533 memcpy(to, from, SCTP_BUF_LEN((*mm))); 2534 /* copy the length and free up the old */ 2535 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm)); 2536 sctp_m_freem(*mm); 2537 /* success, back copy */ 2538 *mm = m; 2539 } else { 2540 /* We are in trouble in the mbuf world .. yikes */ 2541 m = *mm; 2542 } 2543 } 2544 /* get pointer to the first chunk header */ 2545 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 2546 sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf); 2547 if (ch == NULL) { 2548 return (1); 2549 } 2550 /* 2551 * process all DATA chunks... 2552 */ 2553 *high_tsn = asoc->cumulative_tsn; 2554 break_flag = 0; 2555 asoc->data_pkts_seen++; 2556 while (stop_proc == 0) { 2557 /* validate chunk length */ 2558 chk_length = ntohs(ch->chunk_length); 2559 if (length - *offset < chk_length) { 2560 /* all done, mutilated chunk */ 2561 stop_proc = 1; 2562 continue; 2563 } 2564 if ((asoc->idata_supported == 1) && 2565 (ch->chunk_type == SCTP_DATA)) { 2566 struct mbuf *op_err; 2567 char msg[SCTP_DIAG_INFO_LEN]; 2568 2569 snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated"); 2570 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2571 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18; 2572 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2573 return (2); 2574 } 2575 if ((asoc->idata_supported == 0) && 2576 (ch->chunk_type == SCTP_IDATA)) { 2577 struct mbuf *op_err; 2578 char msg[SCTP_DIAG_INFO_LEN]; 2579 2580 snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated"); 2581 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2582 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19; 2583 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2584 return (2); 2585 } 2586 if ((ch->chunk_type == SCTP_DATA) || 2587 (ch->chunk_type == SCTP_IDATA)) { 2588 int clen; 2589 2590 if (ch->chunk_type == SCTP_DATA) { 2591 clen = sizeof(struct sctp_data_chunk); 2592 } else { 2593 clen = sizeof(struct sctp_idata_chunk); 2594 } 2595 if (chk_length < clen) { 2596 /* 2597 * Need to send an abort since we had an 2598 * invalid data chunk. 2599 */ 2600 struct mbuf *op_err; 2601 char msg[SCTP_DIAG_INFO_LEN]; 2602 2603 snprintf(msg, sizeof(msg), "DATA chunk of length %d", 2604 chk_length); 2605 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2606 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20; 2607 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2608 return (2); 2609 } 2610 #ifdef SCTP_AUDITING_ENABLED 2611 sctp_audit_log(0xB1, 0); 2612 #endif 2613 if (SCTP_SIZE32(chk_length) == (length - *offset)) { 2614 last_chunk = 1; 2615 } else { 2616 last_chunk = 0; 2617 } 2618 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, 2619 chk_length, net, high_tsn, &abort_flag, &break_flag, 2620 last_chunk, ch->chunk_type)) { 2621 num_chunks++; 2622 } 2623 if (abort_flag) 2624 return (2); 2625 2626 if (break_flag) { 2627 /* 2628 * Set because we ran out of rwnd space and have no 2629 * drop report space left. 
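*/

/*
 * Illustrative sketch only (#if 0, hypothetical "ex_" name): chunks are
 * laid out back to back on 4-byte boundaries, which is why the walk
 * advances by SCTP_SIZE32(). Assuming SIZE32 is the usual round-up,
 * the last-chunk test above amounts to this.
 */
#if 0
static int
ex_chunk_is_last(int chk_length, int offset, int length)
{
	return (((chk_length + 3) & ~3) == (length - offset));
}
#endif
/*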
2630 */ 2631 stop_proc = 1; 2632 continue; 2633 } 2634 } else { 2635 /* not a data chunk in the data region */ 2636 switch (ch->chunk_type) { 2637 case SCTP_INITIATION: 2638 case SCTP_INITIATION_ACK: 2639 case SCTP_SELECTIVE_ACK: 2640 case SCTP_NR_SELECTIVE_ACK: 2641 case SCTP_HEARTBEAT_REQUEST: 2642 case SCTP_HEARTBEAT_ACK: 2643 case SCTP_ABORT_ASSOCIATION: 2644 case SCTP_SHUTDOWN: 2645 case SCTP_SHUTDOWN_ACK: 2646 case SCTP_OPERATION_ERROR: 2647 case SCTP_COOKIE_ECHO: 2648 case SCTP_COOKIE_ACK: 2649 case SCTP_ECN_ECHO: 2650 case SCTP_ECN_CWR: 2651 case SCTP_SHUTDOWN_COMPLETE: 2652 case SCTP_AUTHENTICATION: 2653 case SCTP_ASCONF_ACK: 2654 case SCTP_PACKET_DROPPED: 2655 case SCTP_STREAM_RESET: 2656 case SCTP_FORWARD_CUM_TSN: 2657 case SCTP_ASCONF: 2658 { 2659 /* 2660 * Now, what do we do with KNOWN 2661 * chunks that are NOT in the right 2662 * place? 2663 * 2664 * For now, I do nothing but ignore 2665 * them. We may later want to add 2666 * sysctl stuff to switch out and do 2667 * either an ABORT() or possibly 2668 * process them. 2669 */ 2670 struct mbuf *op_err; 2671 char msg[SCTP_DIAG_INFO_LEN]; 2672 2673 snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x", 2674 ch->chunk_type); 2675 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2676 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2677 return (2); 2678 } 2679 default: 2680 /* unknown chunk type, use bit rules */ 2681 if (ch->chunk_type & 0x40) { 2682 /* Add a error report to the queue */ 2683 struct mbuf *op_err; 2684 struct sctp_gen_error_cause *cause; 2685 2686 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause), 2687 0, M_NOWAIT, 1, MT_DATA); 2688 if (op_err != NULL) { 2689 cause = mtod(op_err, struct sctp_gen_error_cause *); 2690 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK); 2691 cause->length = htons((uint16_t) (chk_length + sizeof(struct sctp_gen_error_cause))); 2692 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause); 2693 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT); 2694 if (SCTP_BUF_NEXT(op_err) != NULL) { 2695 sctp_queue_op_err(stcb, op_err); 2696 } else { 2697 sctp_m_freem(op_err); 2698 } 2699 } 2700 } 2701 if ((ch->chunk_type & 0x80) == 0) { 2702 /* discard the rest of this packet */ 2703 stop_proc = 1; 2704 } /* else skip this bad chunk and 2705 * continue... */ 2706 break; 2707 } /* switch of chunk type */ 2708 } 2709 *offset += SCTP_SIZE32(chk_length); 2710 if ((*offset >= length) || stop_proc) { 2711 /* no more data left in the mbuf chain */ 2712 stop_proc = 1; 2713 continue; 2714 } 2715 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 2716 sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf); 2717 if (ch == NULL) { 2718 *offset = length; 2719 stop_proc = 1; 2720 continue; 2721 } 2722 } 2723 if (break_flag) { 2724 /* 2725 * we need to report rwnd overrun drops. 2726 */ 2727 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0); 2728 } 2729 if (num_chunks) { 2730 /* 2731 * Did we get data, if so update the time for auto-close and 2732 * give peer credit for being alive. 
2733 */ 2734 SCTP_STAT_INCR(sctps_recvpktwithdata); 2735 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 2736 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 2737 stcb->asoc.overall_error_count, 2738 0, 2739 SCTP_FROM_SCTP_INDATA, 2740 __LINE__); 2741 } 2742 stcb->asoc.overall_error_count = 0; 2743 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd); 2744 } 2745 /* now service all of the reassm queue if needed */ 2746 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2747 /* Assure that we ack right away */ 2748 stcb->asoc.send_sack = 1; 2749 } 2750 /* Start a sack timer or QUEUE a SACK for sending */ 2751 sctp_sack_check(stcb, was_a_gap); 2752 return (0); 2753 } 2754 2755 static int 2756 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn, 2757 uint16_t frag_strt, uint16_t frag_end, int nr_sacking, 2758 int *num_frs, 2759 uint32_t * biggest_newly_acked_tsn, 2760 uint32_t * this_sack_lowest_newack, 2761 int *rto_ok) 2762 { 2763 struct sctp_tmit_chunk *tp1; 2764 unsigned int theTSN; 2765 int j, wake_him = 0, circled = 0; 2766 2767 /* Recover the tp1 we last saw */ 2768 tp1 = *p_tp1; 2769 if (tp1 == NULL) { 2770 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 2771 } 2772 for (j = frag_strt; j <= frag_end; j++) { 2773 theTSN = j + last_tsn; 2774 while (tp1) { 2775 if (tp1->rec.data.doing_fast_retransmit) 2776 (*num_frs) += 1; 2777 2778 /*- 2779 * CMT: CUCv2 algorithm. For each TSN being 2780 * processed from the sent queue, track the 2781 * next expected pseudo-cumack, or 2782 * rtx_pseudo_cumack, if required. Separate 2783 * cumack trackers for first transmissions, 2784 * and retransmissions. 2785 */ 2786 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 2787 (tp1->whoTo->find_pseudo_cumack == 1) && 2788 (tp1->snd_count == 1)) { 2789 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq; 2790 tp1->whoTo->find_pseudo_cumack = 0; 2791 } 2792 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 2793 (tp1->whoTo->find_rtx_pseudo_cumack == 1) && 2794 (tp1->snd_count > 1)) { 2795 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq; 2796 tp1->whoTo->find_rtx_pseudo_cumack = 0; 2797 } 2798 if (tp1->rec.data.TSN_seq == theTSN) { 2799 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 2800 /*- 2801 * must be held until 2802 * cum-ack passes 2803 */ 2804 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 2805 /*- 2806 * If it is less than RESEND, it is 2807 * now no-longer in flight. 2808 * Higher values may already be set 2809 * via previous Gap Ack Blocks... 2810 * i.e. ACKED or RESEND. 2811 */ 2812 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, 2813 *biggest_newly_acked_tsn)) { 2814 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq; 2815 } 2816 /*- 2817 * CMT: SFR algo (and HTNA) - set 2818 * saw_newack to 1 for dest being 2819 * newly acked. update 2820 * this_sack_highest_newack if 2821 * appropriate. 2822 */ 2823 if (tp1->rec.data.chunk_was_revoked == 0) 2824 tp1->whoTo->saw_newack = 1; 2825 2826 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, 2827 tp1->whoTo->this_sack_highest_newack)) { 2828 tp1->whoTo->this_sack_highest_newack = 2829 tp1->rec.data.TSN_seq; 2830 } 2831 /*- 2832 * CMT DAC algo: also update 2833 * this_sack_lowest_newack 2834 */ 2835 if (*this_sack_lowest_newack == 0) { 2836 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 2837 sctp_log_sack(*this_sack_lowest_newack, 2838 last_tsn, 2839 tp1->rec.data.TSN_seq, 2840 0, 2841 0, 2842 SCTP_LOG_TSN_ACKED); 2843 } 2844 *this_sack_lowest_newack = tp1->rec.data.TSN_seq; 2845 } 2846 /*- 2847 * CMT: CUCv2 algorithm. 
If the (rtx-)pseudo-cumack for the corresponding 2848 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set 2849 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be 2850 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack. 2851 * Separate pseudo_cumack trackers for first transmissions and 2852 * retransmissions. 2853 */ 2854 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) { 2855 if (tp1->rec.data.chunk_was_revoked == 0) { 2856 tp1->whoTo->new_pseudo_cumack = 1; 2857 } 2858 tp1->whoTo->find_pseudo_cumack = 1; 2859 } 2860 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 2861 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 2862 } 2863 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) { 2864 if (tp1->rec.data.chunk_was_revoked == 0) { 2865 tp1->whoTo->new_pseudo_cumack = 1; 2866 } 2867 tp1->whoTo->find_rtx_pseudo_cumack = 1; 2868 } 2869 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 2870 sctp_log_sack(*biggest_newly_acked_tsn, 2871 last_tsn, 2872 tp1->rec.data.TSN_seq, 2873 frag_strt, 2874 frag_end, 2875 SCTP_LOG_TSN_ACKED); 2876 } 2877 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 2878 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP, 2879 tp1->whoTo->flight_size, 2880 tp1->book_size, 2881 (uint32_t) (uintptr_t) tp1->whoTo, 2882 tp1->rec.data.TSN_seq); 2883 } 2884 sctp_flight_size_decrease(tp1); 2885 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 2886 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 2887 tp1); 2888 } 2889 sctp_total_flight_decrease(stcb, tp1); 2890 2891 tp1->whoTo->net_ack += tp1->send_size; 2892 if (tp1->snd_count < 2) { 2893 /*- 2894 * True non-retransmitted chunk 2895 */ 2896 tp1->whoTo->net_ack2 += tp1->send_size; 2897 2898 /*- 2899 * update the RTO too? 
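*/

/*
 * Illustrative sketch only (#if 0, hypothetical "ex_" name): the
 * snd_count < 2 test above is Karn's rule (sample only first
 * transmissions) and *rto_ok limits us to one sample per SACK. The
 * stack's sctp_calculate_rto() does the smoothing with its own
 * clamping; the textbook RFC 4960 (section 6.3.1) update it follows
 * looks like this, with alpha = 1/8 and beta = 1/4.
 */
#if 0
static uint32_t
ex_rto_update(uint32_t *srtt, uint32_t *rttvar, uint32_t rtt)
{
	uint32_t delta;

	if (*srtt == 0) {
		/* first measurement */
		*srtt = rtt;
		*rttvar = rtt / 2;
	} else {
		delta = (*srtt > rtt) ? (*srtt - rtt) : (rtt - *srtt);
		*rttvar = *rttvar - (*rttvar / 4) + (delta / 4);
		*srtt = *srtt - (*srtt / 8) + (rtt / 8);
	}
	return (*srtt + 4 * *rttvar);
}
#endif
/*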
2900 */ 2901 if (tp1->do_rtt) { 2902 if (*rto_ok) { 2903 tp1->whoTo->RTO = 2904 sctp_calculate_rto(stcb, 2905 &stcb->asoc, 2906 tp1->whoTo, 2907 &tp1->sent_rcv_time, 2908 sctp_align_safe_nocopy, 2909 SCTP_RTT_FROM_DATA); 2910 *rto_ok = 0; 2911 } 2912 if (tp1->whoTo->rto_needed == 0) { 2913 tp1->whoTo->rto_needed = 1; 2914 } 2915 tp1->do_rtt = 0; 2916 } 2917 } 2918 } 2919 if (tp1->sent <= SCTP_DATAGRAM_RESEND) { 2920 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, 2921 stcb->asoc.this_sack_highest_gap)) { 2922 stcb->asoc.this_sack_highest_gap = 2923 tp1->rec.data.TSN_seq; 2924 } 2925 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 2926 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt); 2927 #ifdef SCTP_AUDITING_ENABLED 2928 sctp_audit_log(0xB2, 2929 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff)); 2930 #endif 2931 } 2932 } 2933 /*- 2934 * All chunks NOT UNSENT fall through here and are marked 2935 * (leave PR-SCTP ones that are to skip alone though) 2936 */ 2937 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) && 2938 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { 2939 tp1->sent = SCTP_DATAGRAM_MARKED; 2940 } 2941 if (tp1->rec.data.chunk_was_revoked) { 2942 /* deflate the cwnd */ 2943 tp1->whoTo->cwnd -= tp1->book_size; 2944 tp1->rec.data.chunk_was_revoked = 0; 2945 } 2946 /* NR Sack code here */ 2947 if (nr_sacking && 2948 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { 2949 if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) { 2950 stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--; 2951 #ifdef INVARIANTS 2952 } else { 2953 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number); 2954 #endif 2955 } 2956 if ((stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) && 2957 (stcb->asoc.strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) && 2958 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.stream_number].outqueue)) { 2959 stcb->asoc.trigger_reset = 1; 2960 } 2961 tp1->sent = SCTP_DATAGRAM_NR_ACKED; 2962 if (tp1->data) { 2963 /* 2964 * sa_ignore 2965 * NO_NULL_CHK 2966 */ 2967 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 2968 sctp_m_freem(tp1->data); 2969 tp1->data = NULL; 2970 } 2971 wake_him++; 2972 } 2973 } 2974 break; 2975 } /* if (tp1->TSN_seq == theTSN) */ 2976 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) { 2977 break; 2978 } 2979 tp1 = TAILQ_NEXT(tp1, sctp_next); 2980 if ((tp1 == NULL) && (circled == 0)) { 2981 circled++; 2982 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 2983 } 2984 } /* end while (tp1) */ 2985 if (tp1 == NULL) { 2986 circled = 0; 2987 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 2988 } 2989 /* In case the fragments were not in order we must reset */ 2990 } /* end for (j = fragStart */ 2991 *p_tp1 = tp1; 2992 return (wake_him); /* Return value only used for nr-sack */ 2993 } 2994 2995 2996 static int 2997 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc, 2998 uint32_t last_tsn, uint32_t * biggest_tsn_acked, 2999 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack, 3000 int num_seg, int num_nr_seg, int *rto_ok) 3001 { 3002 struct sctp_gap_ack_block *frag, block; 3003 struct sctp_tmit_chunk *tp1; 3004 int i; 3005 int num_frs = 0; 3006 int chunk_freed; 3007 int non_revocable; 3008 uint16_t frag_strt, frag_end, prev_frag_end; 3009 3010 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3011 prev_frag_end = 0; 3012 chunk_freed = 0; 3013 3014 for (i = 0; i < (num_seg + num_nr_seg); i++) { 3015 if (i == num_seg) { 3016 prev_frag_end = 0; 3017 tp1 = 
TAILQ_FIRST(&asoc->sent_queue); 3018 } 3019 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset, 3020 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block); 3021 *offset += sizeof(block); 3022 if (frag == NULL) { 3023 return (chunk_freed); 3024 } 3025 frag_strt = ntohs(frag->start); 3026 frag_end = ntohs(frag->end); 3027 3028 if (frag_strt > frag_end) { 3029 /* This gap report is malformed, skip it. */ 3030 continue; 3031 } 3032 if (frag_strt <= prev_frag_end) { 3033 /* This gap report is not in order, so restart. */ 3034 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3035 } 3036 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) { 3037 *biggest_tsn_acked = last_tsn + frag_end; 3038 } 3039 if (i < num_seg) { 3040 non_revocable = 0; 3041 } else { 3042 non_revocable = 1; 3043 } 3044 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end, 3045 non_revocable, &num_frs, biggest_newly_acked_tsn, 3046 this_sack_lowest_newack, rto_ok)) { 3047 chunk_freed = 1; 3048 } 3049 prev_frag_end = frag_end; 3050 } 3051 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3052 if (num_frs) 3053 sctp_log_fr(*biggest_tsn_acked, 3054 *biggest_newly_acked_tsn, 3055 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS); 3056 } 3057 return (chunk_freed); 3058 } 3059 3060 static void 3061 sctp_check_for_revoked(struct sctp_tcb *stcb, 3062 struct sctp_association *asoc, uint32_t cumack, 3063 uint32_t biggest_tsn_acked) 3064 { 3065 struct sctp_tmit_chunk *tp1; 3066 3067 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3068 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) { 3069 /* 3070 * ok this guy is either ACK or MARKED. If it is 3071 * ACKED it has been previously acked but not this 3072 * time i.e. revoked. If it is MARKED it was ACK'ed 3073 * again. 3074 */ 3075 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) { 3076 break; 3077 } 3078 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 3079 /* it has been revoked */ 3080 tp1->sent = SCTP_DATAGRAM_SENT; 3081 tp1->rec.data.chunk_was_revoked = 1; 3082 /* 3083 * We must add this stuff back in to assure 3084 * timers and such get started. 3085 */ 3086 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3087 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 3088 tp1->whoTo->flight_size, 3089 tp1->book_size, 3090 (uint32_t) (uintptr_t) tp1->whoTo, 3091 tp1->rec.data.TSN_seq); 3092 } 3093 sctp_flight_size_increase(tp1); 3094 sctp_total_flight_increase(stcb, tp1); 3095 /* 3096 * We inflate the cwnd to compensate for our 3097 * artificial inflation of the flight_size. 
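*/

/*
 * Illustrative sketch only (#if 0, hypothetical "ex_" name): per TSN
 * above the cum-ack, sctp_check_for_revoked() walks this small state
 * machine over the "sent" field.
 */
#if 0
static int
ex_next_sent_state(int sent)
{
	switch (sent) {
	case SCTP_DATAGRAM_ACKED:
		/* gap-acked before but missing now: revoked, back in flight */
		return (SCTP_DATAGRAM_SENT);
	case SCTP_DATAGRAM_MARKED:
		/* hit by a gap block in this SACK: confirmed again */
		return (SCTP_DATAGRAM_ACKED);
	default:
		return (sent);
	}
}
#endif
/*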
3098 */ 3099 tp1->whoTo->cwnd += tp1->book_size; 3100 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3101 sctp_log_sack(asoc->last_acked_seq, 3102 cumack, 3103 tp1->rec.data.TSN_seq, 3104 0, 3105 0, 3106 SCTP_LOG_TSN_REVOKED); 3107 } 3108 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) { 3109 /* it has been re-acked in this SACK */ 3110 tp1->sent = SCTP_DATAGRAM_ACKED; 3111 } 3112 } 3113 if (tp1->sent == SCTP_DATAGRAM_UNSENT) 3114 break; 3115 } 3116 } 3117 3118 3119 static void 3120 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, 3121 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved) 3122 { 3123 struct sctp_tmit_chunk *tp1; 3124 int strike_flag = 0; 3125 struct timeval now; 3126 int tot_retrans = 0; 3127 uint32_t sending_seq; 3128 struct sctp_nets *net; 3129 int num_dests_sacked = 0; 3130 3131 /* 3132 * select the sending_seq; this is either the next thing ready to be 3133 * sent but not yet transmitted, OR, the next seq we will assign. 3134 */ 3135 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 3136 if (tp1 == NULL) { 3137 sending_seq = asoc->sending_seq; 3138 } else { 3139 sending_seq = tp1->rec.data.TSN_seq; 3140 } 3141 3142 /* CMT DAC algo: finding out if the SACK is a mixed SACK */ 3143 if ((asoc->sctp_cmt_on_off > 0) && 3144 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3145 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3146 if (net->saw_newack) 3147 num_dests_sacked++; 3148 } 3149 } 3150 if (stcb->asoc.prsctp_supported) { 3151 (void)SCTP_GETTIME_TIMEVAL(&now); 3152 } 3153 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3154 strike_flag = 0; 3155 if (tp1->no_fr_allowed) { 3156 /* this one had a timeout or something */ 3157 continue; 3158 } 3159 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3160 if (tp1->sent < SCTP_DATAGRAM_RESEND) 3161 sctp_log_fr(biggest_tsn_newly_acked, 3162 tp1->rec.data.TSN_seq, 3163 tp1->sent, 3164 SCTP_FR_LOG_CHECK_STRIKE); 3165 } 3166 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) || 3167 tp1->sent == SCTP_DATAGRAM_UNSENT) { 3168 /* done */ 3169 break; 3170 } 3171 if (stcb->asoc.prsctp_supported) { 3172 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3173 /* Is it expired? */ 3174 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3175 /* Yes so drop it */ 3176 if (tp1->data != NULL) { 3177 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, 3178 SCTP_SO_NOT_LOCKED); 3179 } 3180 continue; 3181 } 3182 } 3183 } 3184 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) { 3185 /* we are beyond the tsn in the sack */ 3186 break; 3187 } 3188 if (tp1->sent >= SCTP_DATAGRAM_RESEND) { 3189 /* either a RESEND, ACKED, or MARKED */ 3190 /* skip */ 3191 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) { 3192 /* Continue striking FWD-TSN chunks */ 3193 tp1->rec.data.fwd_tsn_cnt++; 3194 } 3195 continue; 3196 } 3197 /* 3198 * CMT: SFR algo (covers part of DAC and HTNA as well) 3199 */ 3200 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) { 3201 /* 3202 * No new acks were received for data sent to this 3203 * dest. Therefore, according to the SFR algo for 3204 * CMT, no data sent to this dest can be marked for 3205 * FR using this SACK. 3206 */ 3207 continue; 3208 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq, 3209 tp1->whoTo->this_sack_highest_newack)) { 3210 /* 3211 * CMT: New acks were received for data sent to 3212 * this dest. But no new acks were seen for data 3213 * sent after tp1. 
Therefore, according to the SFR 3214 * algo for CMT, tp1 cannot be marked for FR using 3215 * this SACK. This step covers part of the DAC algo 3216 * and the HTNA algo as well. 3217 */ 3218 continue; 3219 } 3220 /* 3221 * Here we check to see if we were have already done a FR 3222 * and if so we see if the biggest TSN we saw in the sack is 3223 * smaller than the recovery point. If so we don't strike 3224 * the tsn... otherwise we CAN strike the TSN. 3225 */ 3226 /* 3227 * @@@ JRI: Check for CMT if (accum_moved && 3228 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 3229 * 0)) { 3230 */ 3231 if (accum_moved && asoc->fast_retran_loss_recovery) { 3232 /* 3233 * Strike the TSN if in fast-recovery and cum-ack 3234 * moved. 3235 */ 3236 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3237 sctp_log_fr(biggest_tsn_newly_acked, 3238 tp1->rec.data.TSN_seq, 3239 tp1->sent, 3240 SCTP_FR_LOG_STRIKE_CHUNK); 3241 } 3242 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3243 tp1->sent++; 3244 } 3245 if ((asoc->sctp_cmt_on_off > 0) && 3246 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3247 /* 3248 * CMT DAC algorithm: If SACK flag is set to 3249 * 0, then lowest_newack test will not pass 3250 * because it would have been set to the 3251 * cumack earlier. If not already to be 3252 * rtx'd, If not a mixed sack and if tp1 is 3253 * not between two sacked TSNs, then mark by 3254 * one more. NOTE that we are marking by one 3255 * additional time since the SACK DAC flag 3256 * indicates that two packets have been 3257 * received after this missing TSN. 3258 */ 3259 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3260 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) { 3261 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3262 sctp_log_fr(16 + num_dests_sacked, 3263 tp1->rec.data.TSN_seq, 3264 tp1->sent, 3265 SCTP_FR_LOG_STRIKE_CHUNK); 3266 } 3267 tp1->sent++; 3268 } 3269 } 3270 } else if ((tp1->rec.data.doing_fast_retransmit) && 3271 (asoc->sctp_cmt_on_off == 0)) { 3272 /* 3273 * For those that have done a FR we must take 3274 * special consideration if we strike. I.e the 3275 * biggest_newly_acked must be higher than the 3276 * sending_seq at the time we did the FR. 3277 */ 3278 if ( 3279 #ifdef SCTP_FR_TO_ALTERNATE 3280 /* 3281 * If FR's go to new networks, then we must only do 3282 * this for singly homed asoc's. However if the FR's 3283 * go to the same network (Armando's work) then its 3284 * ok to FR multiple times. 3285 */ 3286 (asoc->numnets < 2) 3287 #else 3288 (1) 3289 #endif 3290 ) { 3291 3292 if (SCTP_TSN_GE(biggest_tsn_newly_acked, 3293 tp1->rec.data.fast_retran_tsn)) { 3294 /* 3295 * Strike the TSN, since this ack is 3296 * beyond where things were when we 3297 * did a FR. 3298 */ 3299 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3300 sctp_log_fr(biggest_tsn_newly_acked, 3301 tp1->rec.data.TSN_seq, 3302 tp1->sent, 3303 SCTP_FR_LOG_STRIKE_CHUNK); 3304 } 3305 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3306 tp1->sent++; 3307 } 3308 strike_flag = 1; 3309 if ((asoc->sctp_cmt_on_off > 0) && 3310 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3311 /* 3312 * CMT DAC algorithm: If 3313 * SACK flag is set to 0, 3314 * then lowest_newack test 3315 * will not pass because it 3316 * would have been set to 3317 * the cumack earlier. If 3318 * not already to be rtx'd, 3319 * If not a mixed sack and 3320 * if tp1 is not between two 3321 * sacked TSNs, then mark by 3322 * one more. 
NOTE that we 3323 * are marking by one 3324 * additional time since the 3325 * SACK DAC flag indicates 3326 * that two packets have 3327 * been received after this 3328 * missing TSN. 3329 */ 3330 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 3331 (num_dests_sacked == 1) && 3332 SCTP_TSN_GT(this_sack_lowest_newack, 3333 tp1->rec.data.TSN_seq)) { 3334 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3335 sctp_log_fr(32 + num_dests_sacked, 3336 tp1->rec.data.TSN_seq, 3337 tp1->sent, 3338 SCTP_FR_LOG_STRIKE_CHUNK); 3339 } 3340 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3341 tp1->sent++; 3342 } 3343 } 3344 } 3345 } 3346 } 3347 /* 3348 * JRI: TODO: remove code for HTNA algo. CMT's SFR 3349 * algo covers HTNA. 3350 */ 3351 } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, 3352 biggest_tsn_newly_acked)) { 3353 /* 3354 * We don't strike these: This is the HTNA 3355 * algorithm i.e. we don't strike If our TSN is 3356 * larger than the Highest TSN Newly Acked. 3357 */ 3358 ; 3359 } else { 3360 /* Strike the TSN */ 3361 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3362 sctp_log_fr(biggest_tsn_newly_acked, 3363 tp1->rec.data.TSN_seq, 3364 tp1->sent, 3365 SCTP_FR_LOG_STRIKE_CHUNK); 3366 } 3367 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3368 tp1->sent++; 3369 } 3370 if ((asoc->sctp_cmt_on_off > 0) && 3371 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3372 /* 3373 * CMT DAC algorithm: If SACK flag is set to 3374 * 0, then lowest_newack test will not pass 3375 * because it would have been set to the 3376 * cumack earlier. If not already to be 3377 * rtx'd, If not a mixed sack and if tp1 is 3378 * not between two sacked TSNs, then mark by 3379 * one more. NOTE that we are marking by one 3380 * additional time since the SACK DAC flag 3381 * indicates that two packets have been 3382 * received after this missing TSN. 3383 */ 3384 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3385 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) { 3386 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3387 sctp_log_fr(48 + num_dests_sacked, 3388 tp1->rec.data.TSN_seq, 3389 tp1->sent, 3390 SCTP_FR_LOG_STRIKE_CHUNK); 3391 } 3392 tp1->sent++; 3393 } 3394 } 3395 } 3396 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3397 struct sctp_nets *alt; 3398 3399 /* fix counts and things */ 3400 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3401 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND, 3402 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0), 3403 tp1->book_size, 3404 (uint32_t) (uintptr_t) tp1->whoTo, 3405 tp1->rec.data.TSN_seq); 3406 } 3407 if (tp1->whoTo) { 3408 tp1->whoTo->net_ack++; 3409 sctp_flight_size_decrease(tp1); 3410 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3411 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3412 tp1); 3413 } 3414 } 3415 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 3416 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, 3417 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3418 } 3419 /* add back to the rwnd */ 3420 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3421 3422 /* remove from the total flight */ 3423 sctp_total_flight_decrease(stcb, tp1); 3424 3425 if ((stcb->asoc.prsctp_supported) && 3426 (PR_SCTP_RTX_ENABLED(tp1->flags))) { 3427 /* 3428 * Has it been retransmitted tv_sec times? - 3429 * we store the retran count there. 
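*/

/*
 * Illustrative sketch only (#if 0, hypothetical "ex_" name): throughout
 * the strike logic above, the "sent" field doubles as the miss counter
 * while it is below SCTP_DATAGRAM_RESEND; reaching RESEND is what
 * queues a chunk for fast retransmit.
 */
#if 0
static int
ex_strike(struct sctp_tmit_chunk *tp1)
{
	if (tp1->sent < SCTP_DATAGRAM_RESEND)
		tp1->sent++;
	return (tp1->sent == SCTP_DATAGRAM_RESEND);
}
#endif
/*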
3430 */ 3431 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) { 3432 /* Yes, so drop it */ 3433 if (tp1->data != NULL) { 3434 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, 3435 SCTP_SO_NOT_LOCKED); 3436 } 3437 /* Make sure to flag we had a FR */ 3438 tp1->whoTo->net_ack++; 3439 continue; 3440 } 3441 } 3442 /* 3443 * SCTP_PRINTF("OK, we are now ready to FR this 3444 * guy\n"); 3445 */ 3446 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3447 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count, 3448 0, SCTP_FR_MARKED); 3449 } 3450 if (strike_flag) { 3451 /* This is a subsequent FR */ 3452 SCTP_STAT_INCR(sctps_sendmultfastretrans); 3453 } 3454 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3455 if (asoc->sctp_cmt_on_off > 0) { 3456 /* 3457 * CMT: Using RTX_SSTHRESH policy for CMT. 3458 * If CMT is being used, then pick dest with 3459 * largest ssthresh for any retransmission. 3460 */ 3461 tp1->no_fr_allowed = 1; 3462 alt = tp1->whoTo; 3463 /* sa_ignore NO_NULL_CHK */ 3464 if (asoc->sctp_cmt_pf > 0) { 3465 /* 3466 * JRS 5/18/07 - If CMT PF is on, 3467 * use the PF version of 3468 * find_alt_net() 3469 */ 3470 alt = sctp_find_alternate_net(stcb, alt, 2); 3471 } else { 3472 /* 3473 * JRS 5/18/07 - If only CMT is on, 3474 * use the CMT version of 3475 * find_alt_net() 3476 */ 3477 /* sa_ignore NO_NULL_CHK */ 3478 alt = sctp_find_alternate_net(stcb, alt, 1); 3479 } 3480 if (alt == NULL) { 3481 alt = tp1->whoTo; 3482 } 3483 /* 3484 * CUCv2: If a different dest is picked for 3485 * the retransmission, then new 3486 * (rtx-)pseudo_cumack needs to be tracked 3487 * for orig dest. Let CUCv2 track new (rtx-) 3488 * pseudo-cumack always. 3489 */ 3490 if (tp1->whoTo) { 3491 tp1->whoTo->find_pseudo_cumack = 1; 3492 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3493 } 3494 } else {/* CMT is OFF */ 3495 3496 #ifdef SCTP_FR_TO_ALTERNATE 3497 /* Can we find an alternate? */ 3498 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); 3499 #else 3500 /* 3501 * default behavior is to NOT retransmit 3502 * FR's to an alternate. Armando Caro's 3503 * paper details why. 3504 */ 3505 alt = tp1->whoTo; 3506 #endif 3507 } 3508 3509 tp1->rec.data.doing_fast_retransmit = 1; 3510 tot_retrans++; 3511 /* mark the sending seq for possible subsequent FR's */ 3512 /* 3513 * SCTP_PRINTF("Marking TSN for FR new value %x\n", 3514 * (uint32_t)tpi->rec.data.TSN_seq); 3515 */ 3516 if (TAILQ_EMPTY(&asoc->send_queue)) { 3517 /* 3518 * If the queue of send is empty then its 3519 * the next sequence number that will be 3520 * assigned so we subtract one from this to 3521 * get the one we last sent. 3522 */ 3523 tp1->rec.data.fast_retran_tsn = sending_seq; 3524 } else { 3525 /* 3526 * If there are chunks on the send queue 3527 * (unsent data that has made it from the 3528 * stream queues but not out the door, we 3529 * take the first one (which will have the 3530 * lowest TSN) and subtract one to get the 3531 * one we last sent. 3532 */ 3533 struct sctp_tmit_chunk *ttt; 3534 3535 ttt = TAILQ_FIRST(&asoc->send_queue); 3536 tp1->rec.data.fast_retran_tsn = 3537 ttt->rec.data.TSN_seq; 3538 } 3539 3540 if (tp1->do_rtt) { 3541 /* 3542 * this guy had a RTO calculation pending on 3543 * it, cancel it 3544 */ 3545 if ((tp1->whoTo != NULL) && 3546 (tp1->whoTo->rto_needed == 0)) { 3547 tp1->whoTo->rto_needed = 1; 3548 } 3549 tp1->do_rtt = 0; 3550 } 3551 if (alt != tp1->whoTo) { 3552 /* yes, there is an alternate. 
*/ 3553 sctp_free_remote_addr(tp1->whoTo); 3554 /* sa_ignore FREED_MEMORY */ 3555 tp1->whoTo = alt; 3556 atomic_add_int(&alt->ref_count, 1); 3557 } 3558 } 3559 } 3560 } 3561 3562 struct sctp_tmit_chunk * 3563 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb, 3564 struct sctp_association *asoc) 3565 { 3566 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL; 3567 struct timeval now; 3568 int now_filled = 0; 3569 3570 if (asoc->prsctp_supported == 0) { 3571 return (NULL); 3572 } 3573 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 3574 if (tp1->sent != SCTP_FORWARD_TSN_SKIP && 3575 tp1->sent != SCTP_DATAGRAM_RESEND && 3576 tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 3577 /* no chance to advance, out of here */ 3578 break; 3579 } 3580 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 3581 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || 3582 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { 3583 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 3584 asoc->advanced_peer_ack_point, 3585 tp1->rec.data.TSN_seq, 0, 0); 3586 } 3587 } 3588 if (!PR_SCTP_ENABLED(tp1->flags)) { 3589 /* 3590 * We can't fwd-tsn past any that are reliable aka 3591 * retransmitted until the asoc fails. 3592 */ 3593 break; 3594 } 3595 if (!now_filled) { 3596 (void)SCTP_GETTIME_TIMEVAL(&now); 3597 now_filled = 1; 3598 } 3599 /* 3600 * Now we have a chunk which is marked for another 3601 * retransmission to a PR-stream but may have run out its 3602 * chances already, OR has been marked to skip now. Can we 3603 * skip it if it's a resend? 3604 */ 3605 if (tp1->sent == SCTP_DATAGRAM_RESEND && 3606 (PR_SCTP_TTL_ENABLED(tp1->flags))) { 3607 /* 3608 * Now is this one marked for resend and its time is 3609 * now up? 3610 */ 3611 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3612 /* Yes so drop it */ 3613 if (tp1->data) { 3614 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 3615 1, SCTP_SO_NOT_LOCKED); 3616 } 3617 } else { 3618 /* 3619 * No, we are done when we hit one marked 3620 * for resend whose time has not expired. 3621 */ 3622 break; 3623 } 3624 } 3625 /* 3626 * Ok, now if this chunk is marked to drop we can clean up 3627 * the chunk, advance our peer ack point and check 3628 * the next chunk.
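 * The chunk we return (the last one skipped) lets the caller
 * restart a T3 timer on its destination and helps decide when
 * a lost FORWARD-TSN (RFC 3758) needs to be re-sent.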
3629 */ 3630 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || 3631 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { 3632 /* advance PeerAckPoint goes forward */ 3633 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) { 3634 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq; 3635 a_adv = tp1; 3636 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) { 3637 /* No update but we do save the chk */ 3638 a_adv = tp1; 3639 } 3640 } else { 3641 /* 3642 * If it is still in RESEND we can advance no 3643 * further 3644 */ 3645 break; 3646 } 3647 } 3648 return (a_adv); 3649 } 3650 3651 static int 3652 sctp_fs_audit(struct sctp_association *asoc) 3653 { 3654 struct sctp_tmit_chunk *chk; 3655 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0; 3656 int ret; 3657 3658 #ifndef INVARIANTS 3659 int entry_flight, entry_cnt; 3660 3661 #endif 3662 3663 ret = 0; 3664 #ifndef INVARIANTS 3665 entry_flight = asoc->total_flight; 3666 entry_cnt = asoc->total_flight_count; 3667 #endif 3668 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt) 3669 return (0); 3670 3671 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 3672 if (chk->sent < SCTP_DATAGRAM_RESEND) { 3673 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n", 3674 chk->rec.data.TSN_seq, 3675 chk->send_size, 3676 chk->snd_count); 3677 inflight++; 3678 } else if (chk->sent == SCTP_DATAGRAM_RESEND) { 3679 resend++; 3680 } else if (chk->sent < SCTP_DATAGRAM_ACKED) { 3681 inbetween++; 3682 } else if (chk->sent > SCTP_DATAGRAM_ACKED) { 3683 above++; 3684 } else { 3685 acked++; 3686 } 3687 } 3688 3689 if ((inflight > 0) || (inbetween > 0)) { 3690 #ifdef INVARIANTS 3691 panic("Flight size-express incorrect? \n"); 3692 #else 3693 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n", 3694 entry_flight, entry_cnt); 3695 3696 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n", 3697 inflight, inbetween, resend, above, acked); 3698 ret = 1; 3699 #endif 3700 } 3701 return (ret); 3702 } 3703 3704 3705 static void 3706 sctp_window_probe_recovery(struct sctp_tcb *stcb, 3707 struct sctp_association *asoc, 3708 struct sctp_tmit_chunk *tp1) 3709 { 3710 tp1->window_probe = 0; 3711 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) { 3712 /* TSN's skipped we do NOT move back. */ 3713 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD, 3714 tp1->whoTo ? 
tp1->whoTo->flight_size : 0, 3715 tp1->book_size, 3716 (uint32_t) (uintptr_t) tp1->whoTo, 3717 tp1->rec.data.TSN_seq); 3718 return; 3719 } 3720 /* First setup this by shrinking flight */ 3721 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3722 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3723 tp1); 3724 } 3725 sctp_flight_size_decrease(tp1); 3726 sctp_total_flight_decrease(stcb, tp1); 3727 /* Now mark for resend */ 3728 tp1->sent = SCTP_DATAGRAM_RESEND; 3729 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 3730 3731 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3732 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP, 3733 tp1->whoTo->flight_size, 3734 tp1->book_size, 3735 (uint32_t) (uintptr_t) tp1->whoTo, 3736 tp1->rec.data.TSN_seq); 3737 } 3738 } 3739 3740 void 3741 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, 3742 uint32_t rwnd, int *abort_now, int ecne_seen) 3743 { 3744 struct sctp_nets *net; 3745 struct sctp_association *asoc; 3746 struct sctp_tmit_chunk *tp1, *tp2; 3747 uint32_t old_rwnd; 3748 int win_probe_recovery = 0; 3749 int win_probe_recovered = 0; 3750 int j, done_once = 0; 3751 int rto_ok = 1; 3752 uint32_t send_s; 3753 3754 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 3755 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack, 3756 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 3757 } 3758 SCTP_TCB_LOCK_ASSERT(stcb); 3759 #ifdef SCTP_ASOCLOG_OF_TSNS 3760 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack; 3761 stcb->asoc.cumack_log_at++; 3762 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 3763 stcb->asoc.cumack_log_at = 0; 3764 } 3765 #endif 3766 asoc = &stcb->asoc; 3767 old_rwnd = asoc->peers_rwnd; 3768 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) { 3769 /* old ack */ 3770 return; 3771 } else if (asoc->last_acked_seq == cumack) { 3772 /* Window update sack */ 3773 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 3774 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 3775 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 3776 /* SWS sender side engages */ 3777 asoc->peers_rwnd = 0; 3778 } 3779 if (asoc->peers_rwnd > old_rwnd) { 3780 goto again; 3781 } 3782 return; 3783 } 3784 /* First setup for CC stuff */ 3785 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3786 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) { 3787 /* Drag along the window_tsn for cwr's */ 3788 net->cwr_window_tsn = cumack; 3789 } 3790 net->prev_cwnd = net->cwnd; 3791 net->net_ack = 0; 3792 net->net_ack2 = 0; 3793 3794 /* 3795 * CMT: Reset CUC and Fast recovery algo variables before 3796 * SACK processing 3797 */ 3798 net->new_pseudo_cumack = 0; 3799 net->will_exit_fast_recovery = 0; 3800 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 3801 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 3802 } 3803 } 3804 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 3805 tp1 = TAILQ_LAST(&asoc->sent_queue, 3806 sctpchunk_listhead); 3807 send_s = tp1->rec.data.TSN_seq + 1; 3808 } else { 3809 send_s = asoc->sending_seq; 3810 } 3811 if (SCTP_TSN_GE(cumack, send_s)) { 3812 struct mbuf *op_err; 3813 char msg[SCTP_DIAG_INFO_LEN]; 3814 3815 *abort_now = 1; 3816 /* XXX */ 3817 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x", 3818 cumack, send_s); 3819 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 3820 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21; 3821 
sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 3822 return; 3823 } 3824 asoc->this_sack_highest_gap = cumack; 3825 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 3826 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 3827 stcb->asoc.overall_error_count, 3828 0, 3829 SCTP_FROM_SCTP_INDATA, 3830 __LINE__); 3831 } 3832 stcb->asoc.overall_error_count = 0; 3833 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) { 3834 /* process the new consecutive TSN first */ 3835 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 3836 if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) { 3837 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 3838 SCTP_PRINTF("Warning, an unsent is now acked?\n"); 3839 } 3840 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 3841 /* 3842 * If it is less than ACKED, it is 3843 * now no longer in flight. Higher 3844 * values may occur during marking 3845 */ 3846 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3847 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3848 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 3849 tp1->whoTo->flight_size, 3850 tp1->book_size, 3851 (uint32_t) (uintptr_t) tp1->whoTo, 3852 tp1->rec.data.TSN_seq); 3853 } 3854 sctp_flight_size_decrease(tp1); 3855 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3856 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3857 tp1); 3858 } 3859 /* sa_ignore NO_NULL_CHK */ 3860 sctp_total_flight_decrease(stcb, tp1); 3861 } 3862 tp1->whoTo->net_ack += tp1->send_size; 3863 if (tp1->snd_count < 2) { 3864 /* 3865 * True non-retransmitted 3866 * chunk 3867 */ 3868 tp1->whoTo->net_ack2 += 3869 tp1->send_size; 3870 3871 /* update RTO too? */ 3872 if (tp1->do_rtt) { 3873 if (rto_ok) { 3874 tp1->whoTo->RTO = 3875 /* 3876 * sa_ignore 3877 * NO_NULL_CHK 3878 * 3879 */ 3880 sctp_calculate_rto(stcb, 3881 asoc, tp1->whoTo, 3882 &tp1->sent_rcv_time, 3883 sctp_align_safe_nocopy, 3884 SCTP_RTT_FROM_DATA); 3885 rto_ok = 0; 3886 } 3887 if (tp1->whoTo->rto_needed == 0) { 3888 tp1->whoTo->rto_needed = 1; 3889 } 3890 tp1->do_rtt = 0; 3891 } 3892 } 3893 /* 3894 * CMT: CUCv2 algorithm. From the 3895 * cumack'd TSNs, for each TSN being 3896 * acked for the first time, set the 3897 * following variables for the 3898 * corresp destination. 3899 * new_pseudo_cumack will trigger a 3900 * cwnd update. 3901 * find_(rtx_)pseudo_cumack will 3902 * trigger search for the next 3903 * expected (rtx-)pseudo-cumack.
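 * (Roughly: a pseudo-cumack is the per-destination analogue
 * of the association-wide cumulative ack, which lets CMT grow
 * each path's cwnd independently.)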
3904 */ 3905 tp1->whoTo->new_pseudo_cumack = 1; 3906 tp1->whoTo->find_pseudo_cumack = 1; 3907 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3908 3909 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 3910 /* sa_ignore NO_NULL_CHK */ 3911 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 3912 } 3913 } 3914 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3915 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 3916 } 3917 if (tp1->rec.data.chunk_was_revoked) { 3918 /* deflate the cwnd */ 3919 tp1->whoTo->cwnd -= tp1->book_size; 3920 tp1->rec.data.chunk_was_revoked = 0; 3921 } 3922 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 3923 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) { 3924 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--; 3925 #ifdef INVARIANTS 3926 } else { 3927 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number); 3928 #endif 3929 } 3930 } 3931 if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) && 3932 (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) && 3933 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) { 3934 asoc->trigger_reset = 1; 3935 } 3936 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 3937 if (tp1->data) { 3938 /* sa_ignore NO_NULL_CHK */ 3939 sctp_free_bufspace(stcb, asoc, tp1, 1); 3940 sctp_m_freem(tp1->data); 3941 tp1->data = NULL; 3942 } 3943 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3944 sctp_log_sack(asoc->last_acked_seq, 3945 cumack, 3946 tp1->rec.data.TSN_seq, 3947 0, 3948 0, 3949 SCTP_LOG_FREE_SENT); 3950 } 3951 asoc->sent_queue_cnt--; 3952 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 3953 } else { 3954 break; 3955 } 3956 } 3957 3958 } 3959 /* sa_ignore NO_NULL_CHK */ 3960 if (stcb->sctp_socket) { 3961 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3962 struct socket *so; 3963 3964 #endif 3965 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 3966 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 3967 /* sa_ignore NO_NULL_CHK */ 3968 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK); 3969 } 3970 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3971 so = SCTP_INP_SO(stcb->sctp_ep); 3972 atomic_add_int(&stcb->asoc.refcnt, 1); 3973 SCTP_TCB_UNLOCK(stcb); 3974 SCTP_SOCKET_LOCK(so, 1); 3975 SCTP_TCB_LOCK(stcb); 3976 atomic_subtract_int(&stcb->asoc.refcnt, 1); 3977 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 3978 /* assoc was freed while we were unlocked */ 3979 SCTP_SOCKET_UNLOCK(so, 1); 3980 return; 3981 } 3982 #endif 3983 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 3984 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3985 SCTP_SOCKET_UNLOCK(so, 1); 3986 #endif 3987 } else { 3988 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 3989 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK); 3990 } 3991 } 3992 3993 /* JRS - Use the congestion control given in the CC module */ 3994 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) { 3995 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3996 if (net->net_ack2 > 0) { 3997 /* 3998 * Karn's rule applies to clearing error 3999 * count, this is optional. 
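 * net_ack2 only counts bytes from chunks that were never
 * retransmitted (snd_count < 2), so the ack is unambiguous in
 * Karn's sense and it is safe to treat the address as alive.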
4000 */ 4001 net->error_count = 0; 4002 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { 4003 /* addr came good */ 4004 net->dest_state |= SCTP_ADDR_REACHABLE; 4005 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 4006 0, (void *)net, SCTP_SO_NOT_LOCKED); 4007 } 4008 if (net == stcb->asoc.primary_destination) { 4009 if (stcb->asoc.alternate) { 4010 /* 4011 * release the alternate, 4012 * primary is good 4013 */ 4014 sctp_free_remote_addr(stcb->asoc.alternate); 4015 stcb->asoc.alternate = NULL; 4016 } 4017 } 4018 if (net->dest_state & SCTP_ADDR_PF) { 4019 net->dest_state &= ~SCTP_ADDR_PF; 4020 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, 4021 stcb->sctp_ep, stcb, net, 4022 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22); 4023 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 4024 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 4025 /* Done with this net */ 4026 net->net_ack = 0; 4027 } 4028 /* restore any doubled timers */ 4029 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 4030 if (net->RTO < stcb->asoc.minrto) { 4031 net->RTO = stcb->asoc.minrto; 4032 } 4033 if (net->RTO > stcb->asoc.maxrto) { 4034 net->RTO = stcb->asoc.maxrto; 4035 } 4036 } 4037 } 4038 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0); 4039 } 4040 asoc->last_acked_seq = cumack; 4041 4042 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4043 /* nothing left in-flight */ 4044 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4045 net->flight_size = 0; 4046 net->partial_bytes_acked = 0; 4047 } 4048 asoc->total_flight = 0; 4049 asoc->total_flight_count = 0; 4050 } 4051 /* RWND update */ 4052 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 4053 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 4054 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4055 /* SWS sender side engages */ 4056 asoc->peers_rwnd = 0; 4057 } 4058 if (asoc->peers_rwnd > old_rwnd) { 4059 win_probe_recovery = 1; 4060 } 4061 /* Now assure a timer where data is queued at */ 4062 again: 4063 j = 0; 4064 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4065 int to_ticks; 4066 4067 if (win_probe_recovery && (net->window_probe)) { 4068 win_probe_recovered = 1; 4069 /* 4070 * Find first chunk that was used with window probe 4071 * and clear the sent 4072 */ 4073 /* sa_ignore FREED_MEMORY */ 4074 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4075 if (tp1->window_probe) { 4076 /* move back to data send queue */ 4077 sctp_window_probe_recovery(stcb, asoc, tp1); 4078 break; 4079 } 4080 } 4081 } 4082 if (net->RTO == 0) { 4083 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 4084 } else { 4085 to_ticks = MSEC_TO_TICKS(net->RTO); 4086 } 4087 if (net->flight_size) { 4088 j++; 4089 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks, 4090 sctp_timeout_handler, &net->rxt_timer); 4091 if (net->window_probe) { 4092 net->window_probe = 0; 4093 } 4094 } else { 4095 if (net->window_probe) { 4096 /* 4097 * In window probes we must assure a timer 4098 * is still running there 4099 */ 4100 net->window_probe = 0; 4101 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4102 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks, 4103 sctp_timeout_handler, &net->rxt_timer); 4104 } 4105 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4106 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4107 stcb, net, 4108 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23); 4109 } 4110 } 4111 } 4112 if ((j == 0) && 4113 (!TAILQ_EMPTY(&asoc->sent_queue)) && 4114 (asoc->sent_queue_retran_cnt == 0) && 4115 
(win_probe_recovered == 0) && 4116 (done_once == 0)) { 4117 /* 4118 * huh, this should not happen unless all packets are 4119 * PR-SCTP and marked to skip of course. 4120 */ 4121 if (sctp_fs_audit(asoc)) { 4122 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4123 net->flight_size = 0; 4124 } 4125 asoc->total_flight = 0; 4126 asoc->total_flight_count = 0; 4127 asoc->sent_queue_retran_cnt = 0; 4128 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4129 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4130 sctp_flight_size_increase(tp1); 4131 sctp_total_flight_increase(stcb, tp1); 4132 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4133 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 4134 } 4135 } 4136 } 4137 done_once = 1; 4138 goto again; 4139 } 4140 /**********************************/ 4141 /* Now what about shutdown issues */ 4142 /**********************************/ 4143 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4144 /* nothing left on sendqueue.. consider done */ 4145 /* clean up */ 4146 if ((asoc->stream_queue_cnt == 1) && 4147 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4148 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 4149 (asoc->locked_on_sending) 4150 ) { 4151 struct sctp_stream_queue_pending *sp; 4152 4153 /* 4154 * I may be in a state where we got all across.. but 4155 * cannot write more due to a shutdown... we abort 4156 * since the user did not indicate EOR in this case. 4157 * The sp will be cleaned during free of the asoc. 4158 */ 4159 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), 4160 sctp_streamhead); 4161 if ((sp) && (sp->length == 0)) { 4162 /* Let cleanup code purge it */ 4163 if (sp->msg_is_complete) { 4164 asoc->stream_queue_cnt--; 4165 } else { 4166 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4167 asoc->locked_on_sending = NULL; 4168 asoc->stream_queue_cnt--; 4169 } 4170 } 4171 } 4172 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4173 (asoc->stream_queue_cnt == 0)) { 4174 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4175 /* Need to abort here */ 4176 struct mbuf *op_err; 4177 4178 abort_out_now: 4179 *abort_now = 1; 4180 /* XXX */ 4181 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 4182 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; 4183 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4184 return; 4185 } else { 4186 struct sctp_nets *netp; 4187 4188 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 4189 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4190 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4191 } 4192 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 4193 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4194 sctp_stop_timers_for_shutdown(stcb); 4195 if (asoc->alternate) { 4196 netp = asoc->alternate; 4197 } else { 4198 netp = asoc->primary_destination; 4199 } 4200 sctp_send_shutdown(stcb, netp); 4201 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4202 stcb->sctp_ep, stcb, netp); 4203 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4204 stcb->sctp_ep, stcb, netp); 4205 } 4206 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4207 (asoc->stream_queue_cnt == 0)) { 4208 struct sctp_nets *netp; 4209 4210 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4211 goto abort_out_now; 4212 } 4213 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4214 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); 4215 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4216 sctp_stop_timers_for_shutdown(stcb); 4217 if (asoc->alternate) { 4218 netp = 
asoc->alternate; 4219 } else { 4220 netp = asoc->primary_destination; 4221 } 4222 sctp_send_shutdown_ack(stcb, netp); 4223 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4224 stcb->sctp_ep, stcb, netp); 4225 } 4226 } 4227 /*********************************************/ 4228 /* Here we perform PR-SCTP procedures */ 4229 /* (section 4.2) */ 4230 /*********************************************/ 4231 /* C1. update advancedPeerAckPoint */ 4232 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) { 4233 asoc->advanced_peer_ack_point = cumack; 4234 } 4235 /* PR-SCTP issues need to be addressed too */ 4236 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { 4237 struct sctp_tmit_chunk *lchk; 4238 uint32_t old_adv_peer_ack_point; 4239 4240 old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 4241 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 4242 /* C3. See if we need to send a Fwd-TSN */ 4243 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) { 4244 /* 4245 * ISSUE with ECN, see FWD-TSN processing. 4246 */ 4247 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 4248 send_forward_tsn(stcb, asoc); 4249 } else if (lchk) { 4250 /* try to FR fwd-tsn's that get lost too */ 4251 if (lchk->rec.data.fwd_tsn_cnt >= 3) { 4252 send_forward_tsn(stcb, asoc); 4253 } 4254 } 4255 } 4256 if (lchk) { 4257 /* Assure a timer is up */ 4258 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4259 stcb->sctp_ep, stcb, lchk->whoTo); 4260 } 4261 } 4262 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 4263 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 4264 rwnd, 4265 stcb->asoc.peers_rwnd, 4266 stcb->asoc.total_flight, 4267 stcb->asoc.total_output_queue_size); 4268 } 4269 } 4270 4271 void 4272 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, 4273 struct sctp_tcb *stcb, 4274 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup, 4275 int *abort_now, uint8_t flags, 4276 uint32_t cum_ack, uint32_t rwnd, int ecne_seen) 4277 { 4278 struct sctp_association *asoc; 4279 struct sctp_tmit_chunk *tp1, *tp2; 4280 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack; 4281 uint16_t wake_him = 0; 4282 uint32_t send_s = 0; 4283 long j; 4284 int accum_moved = 0; 4285 int will_exit_fast_recovery = 0; 4286 uint32_t a_rwnd, old_rwnd; 4287 int win_probe_recovery = 0; 4288 int win_probe_recovered = 0; 4289 struct sctp_nets *net = NULL; 4290 int done_once; 4291 int rto_ok = 1; 4292 uint8_t reneged_all = 0; 4293 uint8_t cmt_dac_flag; 4294 4295 /* 4296 * we take any chance we can to service our queues since we cannot 4297 * get awoken when the socket is read from :< 4298 */ 4299 /* 4300 * Now perform the actual SACK handling: 1) Verify that it is not an 4301 * old sack, if so discard. 2) If there is nothing left in the send 4302 * queue (cum-ack is equal to last acked) then you have a duplicate 4303 * too, update any rwnd change and verify no timers are running. 4304 * Then return. 3) Process any new consecutive data, i.e. cum-ack 4305 * has moved; process these first and note that it moved. 4) Process any 4306 * sack blocks. 5) Drop any acked from the queue. 6) Check for any 4307 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left, 4308 * sync up flightsizes and things, stop all timers and also check 4309 * for shutdown_pending state. If so then go ahead and send off the 4310 * shutdown. If in shutdown recv, send off the shutdown-ack and 4311 * start that timer, Ret.
9) Strike any non-acked things and do FR 4312 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp 4313 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK 4314 * if in shutdown_recv state. 4315 */ 4316 SCTP_TCB_LOCK_ASSERT(stcb); 4317 /* CMT DAC algo */ 4318 this_sack_lowest_newack = 0; 4319 SCTP_STAT_INCR(sctps_slowpath_sack); 4320 last_tsn = cum_ack; 4321 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC; 4322 #ifdef SCTP_ASOCLOG_OF_TSNS 4323 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack; 4324 stcb->asoc.cumack_log_at++; 4325 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 4326 stcb->asoc.cumack_log_at = 0; 4327 } 4328 #endif 4329 a_rwnd = rwnd; 4330 4331 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 4332 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack, 4333 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 4334 } 4335 old_rwnd = stcb->asoc.peers_rwnd; 4336 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4337 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4338 stcb->asoc.overall_error_count, 4339 0, 4340 SCTP_FROM_SCTP_INDATA, 4341 __LINE__); 4342 } 4343 stcb->asoc.overall_error_count = 0; 4344 asoc = &stcb->asoc; 4345 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4346 sctp_log_sack(asoc->last_acked_seq, 4347 cum_ack, 4348 0, 4349 num_seg, 4350 num_dup, 4351 SCTP_LOG_NEW_SACK); 4352 } 4353 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) { 4354 uint16_t i; 4355 uint32_t *dupdata, dblock; 4356 4357 for (i = 0; i < num_dup; i++) { 4358 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t), 4359 sizeof(uint32_t), (uint8_t *) & dblock); 4360 if (dupdata == NULL) { 4361 break; 4362 } 4363 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); 4364 } 4365 } 4366 /* reality check */ 4367 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 4368 tp1 = TAILQ_LAST(&asoc->sent_queue, 4369 sctpchunk_listhead); 4370 send_s = tp1->rec.data.TSN_seq + 1; 4371 } else { 4372 tp1 = NULL; 4373 send_s = asoc->sending_seq; 4374 } 4375 if (SCTP_TSN_GE(cum_ack, send_s)) { 4376 struct mbuf *op_err; 4377 char msg[SCTP_DIAG_INFO_LEN]; 4378 4379 /* 4380 * no way, we have not even sent this TSN out yet. Peer is 4381 * hopelessly messed up with us. 
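 * (A cumulative ack at or beyond send_s would acknowledge a
 * TSN that was never transmitted, so it is treated as a
 * protocol violation and the association is aborted.)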
4382 */ 4383 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n", 4384 cum_ack, send_s); 4385 if (tp1) { 4386 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n", 4387 tp1->rec.data.TSN_seq, (void *)tp1); 4388 } 4389 hopeless_peer: 4390 *abort_now = 1; 4391 /* XXX */ 4392 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x", 4393 cum_ack, send_s); 4394 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 4395 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 4396 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4397 return; 4398 } 4399 /**********************/ 4400 /* 1) check the range */ 4401 /**********************/ 4402 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) { 4403 /* acking something behind */ 4404 return; 4405 } 4406 /* update the Rwnd of the peer */ 4407 if (TAILQ_EMPTY(&asoc->sent_queue) && 4408 TAILQ_EMPTY(&asoc->send_queue) && 4409 (asoc->stream_queue_cnt == 0)) { 4410 /* nothing left on send/sent and strmq */ 4411 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4412 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4413 asoc->peers_rwnd, 0, 0, a_rwnd); 4414 } 4415 asoc->peers_rwnd = a_rwnd; 4416 if (asoc->sent_queue_retran_cnt) { 4417 asoc->sent_queue_retran_cnt = 0; 4418 } 4419 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4420 /* SWS sender side engages */ 4421 asoc->peers_rwnd = 0; 4422 } 4423 /* stop any timers */ 4424 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4425 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4426 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); 4427 net->partial_bytes_acked = 0; 4428 net->flight_size = 0; 4429 } 4430 asoc->total_flight = 0; 4431 asoc->total_flight_count = 0; 4432 return; 4433 } 4434 /* 4435 * We init net_ack and net_ack2 to 0. These are used to track 2 4436 * things. The total byte count acked is tracked in net_ack AND 4437 * net_ack2 is used to track the total bytes acked that are un- 4438 * ambiguous and were never retransmitted. We track these on a per 4439 * destination address basis. 4440 */ 4441 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4442 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) { 4443 /* Drag along the window_tsn for cwr's */ 4444 net->cwr_window_tsn = cum_ack; 4445 } 4446 net->prev_cwnd = net->cwnd; 4447 net->net_ack = 0; 4448 net->net_ack2 = 0; 4449 4450 /* 4451 * CMT: Reset CUC and Fast recovery algo variables before 4452 * SACK processing 4453 */ 4454 net->new_pseudo_cumack = 0; 4455 net->will_exit_fast_recovery = 0; 4456 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 4457 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 4458 } 4459 } 4460 /* process the new consecutive TSN first */ 4461 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4462 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) { 4463 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 4464 accum_moved = 1; 4465 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4466 /* 4467 * If it is less than ACKED, it is 4468 * now no longer in flight. Higher 4469 * values may occur during marking 4470 */ 4471 if ((tp1->whoTo->dest_state & 4472 SCTP_ADDR_UNCONFIRMED) && 4473 (tp1->snd_count < 2)) { 4474 /* 4475 * If there was no retransmission 4476 * and the address is 4477 * unconfirmed and we sent 4478 * there and are now 4479 * sacked... it's confirmed, 4480 * mark it so.
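 * This is the implicit path-confirmation case: a SACK that
 * can only cover data sent to this address serves the same
 * purpose as a HEARTBEAT-ACK on it (cf. RFC 4960, section 5.4).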
4481 */ 4482 tp1->whoTo->dest_state &= 4483 ~SCTP_ADDR_UNCONFIRMED; 4484 } 4485 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4486 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4487 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4488 tp1->whoTo->flight_size, 4489 tp1->book_size, 4490 (uint32_t) (uintptr_t) tp1->whoTo, 4491 tp1->rec.data.TSN_seq); 4492 } 4493 sctp_flight_size_decrease(tp1); 4494 sctp_total_flight_decrease(stcb, tp1); 4495 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 4496 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 4497 tp1); 4498 } 4499 } 4500 tp1->whoTo->net_ack += tp1->send_size; 4501 4502 /* CMT SFR and DAC algos */ 4503 this_sack_lowest_newack = tp1->rec.data.TSN_seq; 4504 tp1->whoTo->saw_newack = 1; 4505 4506 if (tp1->snd_count < 2) { 4507 /* 4508 * True non-retransmited 4509 * chunk 4510 */ 4511 tp1->whoTo->net_ack2 += 4512 tp1->send_size; 4513 4514 /* update RTO too? */ 4515 if (tp1->do_rtt) { 4516 if (rto_ok) { 4517 tp1->whoTo->RTO = 4518 sctp_calculate_rto(stcb, 4519 asoc, tp1->whoTo, 4520 &tp1->sent_rcv_time, 4521 sctp_align_safe_nocopy, 4522 SCTP_RTT_FROM_DATA); 4523 rto_ok = 0; 4524 } 4525 if (tp1->whoTo->rto_needed == 0) { 4526 tp1->whoTo->rto_needed = 1; 4527 } 4528 tp1->do_rtt = 0; 4529 } 4530 } 4531 /* 4532 * CMT: CUCv2 algorithm. From the 4533 * cumack'd TSNs, for each TSN being 4534 * acked for the first time, set the 4535 * following variables for the 4536 * corresp destination. 4537 * new_pseudo_cumack will trigger a 4538 * cwnd update. 4539 * find_(rtx_)pseudo_cumack will 4540 * trigger search for the next 4541 * expected (rtx-)pseudo-cumack. 4542 */ 4543 tp1->whoTo->new_pseudo_cumack = 1; 4544 tp1->whoTo->find_pseudo_cumack = 1; 4545 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4546 4547 4548 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4549 sctp_log_sack(asoc->last_acked_seq, 4550 cum_ack, 4551 tp1->rec.data.TSN_seq, 4552 0, 4553 0, 4554 SCTP_LOG_TSN_ACKED); 4555 } 4556 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 4557 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 4558 } 4559 } 4560 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4561 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4562 #ifdef SCTP_AUDITING_ENABLED 4563 sctp_audit_log(0xB3, 4564 (asoc->sent_queue_retran_cnt & 0x000000ff)); 4565 #endif 4566 } 4567 if (tp1->rec.data.chunk_was_revoked) { 4568 /* deflate the cwnd */ 4569 tp1->whoTo->cwnd -= tp1->book_size; 4570 tp1->rec.data.chunk_was_revoked = 0; 4571 } 4572 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4573 tp1->sent = SCTP_DATAGRAM_ACKED; 4574 } 4575 } 4576 } else { 4577 break; 4578 } 4579 } 4580 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; 4581 /* always set this up to cum-ack */ 4582 asoc->this_sack_highest_gap = last_tsn; 4583 4584 if ((num_seg > 0) || (num_nr_seg > 0)) { 4585 4586 /* 4587 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has 4588 * to be greater than the cumack. Also reset saw_newack to 0 4589 * for all dests. 4590 */ 4591 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4592 net->saw_newack = 0; 4593 net->this_sack_highest_newack = last_tsn; 4594 } 4595 4596 /* 4597 * thisSackHighestGap will increase while handling NEW 4598 * segments this_sack_highest_newack will increase while 4599 * handling NEWLY ACKED chunks. this_sack_lowest_newack is 4600 * used for CMT DAC algo. saw_newack will also change. 
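 * In short, the gap-ack walk records per destination whether
 * this SACK newly acked anything there and the highest such
 * TSN, and the strike logic below consults these so that SFR
 * only penalizes destinations the SACK actually covered.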
4601 */ 4602 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked, 4603 &biggest_tsn_newly_acked, &this_sack_lowest_newack, 4604 num_seg, num_nr_seg, &rto_ok)) { 4605 wake_him++; 4606 } 4607 /* 4608 * validate the biggest_tsn_acked in the gap acks if strict 4609 * adherence is wanted. 4610 */ 4611 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) { 4612 /* 4613 * peer is either confused or we are under attack. 4614 * We must abort. 4615 */ 4616 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n", 4617 biggest_tsn_acked, send_s); 4618 goto hopeless_peer; 4619 } 4620 } 4621 /*******************************************/ 4622 /* cancel ALL T3-send timer if accum moved */ 4623 /*******************************************/ 4624 if (asoc->sctp_cmt_on_off > 0) { 4625 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4626 if (net->new_pseudo_cumack) 4627 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4628 stcb, net, 4629 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27); 4630 4631 } 4632 } else { 4633 if (accum_moved) { 4634 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4635 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4636 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28); 4637 } 4638 } 4639 } 4640 /********************************************/ 4641 /* drop the acked chunks from the sentqueue */ 4642 /********************************************/ 4643 asoc->last_acked_seq = cum_ack; 4644 4645 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 4646 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) { 4647 break; 4648 } 4649 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4650 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) { 4651 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--; 4652 #ifdef INVARIANTS 4653 } else { 4654 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number); 4655 #endif 4656 } 4657 } 4658 if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) && 4659 (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) && 4660 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) { 4661 asoc->trigger_reset = 1; 4662 } 4663 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4664 if (PR_SCTP_ENABLED(tp1->flags)) { 4665 if (asoc->pr_sctp_cnt != 0) 4666 asoc->pr_sctp_cnt--; 4667 } 4668 asoc->sent_queue_cnt--; 4669 if (tp1->data) { 4670 /* sa_ignore NO_NULL_CHK */ 4671 sctp_free_bufspace(stcb, asoc, tp1, 1); 4672 sctp_m_freem(tp1->data); 4673 tp1->data = NULL; 4674 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) { 4675 asoc->sent_queue_cnt_removeable--; 4676 } 4677 } 4678 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4679 sctp_log_sack(asoc->last_acked_seq, 4680 cum_ack, 4681 tp1->rec.data.TSN_seq, 4682 0, 4683 0, 4684 SCTP_LOG_FREE_SENT); 4685 } 4686 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 4687 wake_him++; 4688 } 4689 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) { 4690 #ifdef INVARIANTS 4691 panic("Warning flight size is positive and should be 0"); 4692 #else 4693 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n", 4694 asoc->total_flight); 4695 #endif 4696 asoc->total_flight = 0; 4697 } 4698 /* sa_ignore NO_NULL_CHK */ 4699 if ((wake_him) && (stcb->sctp_socket)) { 4700 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4701 struct socket *so; 4702 4703 #endif 4704 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4705 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) 
{ 4706 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK); 4707 } 4708 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4709 so = SCTP_INP_SO(stcb->sctp_ep); 4710 atomic_add_int(&stcb->asoc.refcnt, 1); 4711 SCTP_TCB_UNLOCK(stcb); 4712 SCTP_SOCKET_LOCK(so, 1); 4713 SCTP_TCB_LOCK(stcb); 4714 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4715 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4716 /* assoc was freed while we were unlocked */ 4717 SCTP_SOCKET_UNLOCK(so, 1); 4718 return; 4719 } 4720 #endif 4721 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4722 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4723 SCTP_SOCKET_UNLOCK(so, 1); 4724 #endif 4725 } else { 4726 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4727 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK); 4728 } 4729 } 4730 4731 if (asoc->fast_retran_loss_recovery && accum_moved) { 4732 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) { 4733 /* Setup so we will exit RFC2582 fast recovery */ 4734 will_exit_fast_recovery = 1; 4735 } 4736 } 4737 /* 4738 * Check for revoked fragments: 4739 * 4740 * if Previous sack - Had no frags then we can't have any revoked if 4741 * Previous sack - Had frag's then - If we now have frags aka 4742 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked 4743 * some of them. else - The peer revoked all ACKED fragments, since 4744 * we had some before and now we have NONE. 4745 */ 4746 4747 if (num_seg) { 4748 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked); 4749 asoc->saw_sack_with_frags = 1; 4750 } else if (asoc->saw_sack_with_frags) { 4751 int cnt_revoked = 0; 4752 4753 /* Peer revoked all dg's marked or acked */ 4754 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4755 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 4756 tp1->sent = SCTP_DATAGRAM_SENT; 4757 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4758 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 4759 tp1->whoTo->flight_size, 4760 tp1->book_size, 4761 (uint32_t) (uintptr_t) tp1->whoTo, 4762 tp1->rec.data.TSN_seq); 4763 } 4764 sctp_flight_size_increase(tp1); 4765 sctp_total_flight_increase(stcb, tp1); 4766 tp1->rec.data.chunk_was_revoked = 1; 4767 /* 4768 * To ensure that this increase in 4769 * flightsize, which is artificial, does not 4770 * throttle the sender, we also increase the 4771 * cwnd artificially. 4772 */ 4773 tp1->whoTo->cwnd += tp1->book_size; 4774 cnt_revoked++; 4775 } 4776 } 4777 if (cnt_revoked) { 4778 reneged_all = 1; 4779 } 4780 asoc->saw_sack_with_frags = 0; 4781 } 4782 if (num_nr_seg > 0) 4783 asoc->saw_sack_with_nr_frags = 1; 4784 else 4785 asoc->saw_sack_with_nr_frags = 0; 4786 4787 /* JRS - Use the congestion control given in the CC module */ 4788 if (ecne_seen == 0) { 4789 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4790 if (net->net_ack2 > 0) { 4791 /* 4792 * Karn's rule applies to clearing error 4793 * count, this is optional. 
4794 */ 4795 net->error_count = 0; 4796 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { 4797 /* addr came good */ 4798 net->dest_state |= SCTP_ADDR_REACHABLE; 4799 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 4800 0, (void *)net, SCTP_SO_NOT_LOCKED); 4801 } 4802 if (net == stcb->asoc.primary_destination) { 4803 if (stcb->asoc.alternate) { 4804 /* 4805 * release the alternate, 4806 * primary is good 4807 */ 4808 sctp_free_remote_addr(stcb->asoc.alternate); 4809 stcb->asoc.alternate = NULL; 4810 } 4811 } 4812 if (net->dest_state & SCTP_ADDR_PF) { 4813 net->dest_state &= ~SCTP_ADDR_PF; 4814 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, 4815 stcb->sctp_ep, stcb, net, 4816 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29); 4817 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 4818 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 4819 /* Done with this net */ 4820 net->net_ack = 0; 4821 } 4822 /* restore any doubled timers */ 4823 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 4824 if (net->RTO < stcb->asoc.minrto) { 4825 net->RTO = stcb->asoc.minrto; 4826 } 4827 if (net->RTO > stcb->asoc.maxrto) { 4828 net->RTO = stcb->asoc.maxrto; 4829 } 4830 } 4831 } 4832 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery); 4833 } 4834 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4835 /* nothing left in-flight */ 4836 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4837 /* stop all timers */ 4838 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4839 stcb, net, 4840 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30); 4841 net->flight_size = 0; 4842 net->partial_bytes_acked = 0; 4843 } 4844 asoc->total_flight = 0; 4845 asoc->total_flight_count = 0; 4846 } 4847 /**********************************/ 4848 /* Now what about shutdown issues */ 4849 /**********************************/ 4850 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4851 /* nothing left on sendqueue.. consider done */ 4852 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4853 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4854 asoc->peers_rwnd, 0, 0, a_rwnd); 4855 } 4856 asoc->peers_rwnd = a_rwnd; 4857 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4858 /* SWS sender side engages */ 4859 asoc->peers_rwnd = 0; 4860 } 4861 /* clean up */ 4862 if ((asoc->stream_queue_cnt == 1) && 4863 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4864 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 4865 (asoc->locked_on_sending) 4866 ) { 4867 struct sctp_stream_queue_pending *sp; 4868 4869 /* 4870 * I may be in a state where we got all across.. but 4871 * cannot write more due to a shutdown... we abort 4872 * since the user did not indicate EOR in this case. 
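 * That is, the sender is locked on a message whose final part
 * never arrived from the application; once we are shutting
 * down it can never be completed, so PARTIAL_MSG_LEFT is set
 * and the association is aborted below.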
4873 */ 4874 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), 4875 sctp_streamhead); 4876 if ((sp) && (sp->length == 0)) { 4877 asoc->locked_on_sending = NULL; 4878 if (sp->msg_is_complete) { 4879 asoc->stream_queue_cnt--; 4880 } else { 4881 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4882 asoc->stream_queue_cnt--; 4883 } 4884 } 4885 } 4886 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4887 (asoc->stream_queue_cnt == 0)) { 4888 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4889 /* Need to abort here */ 4890 struct mbuf *op_err; 4891 4892 abort_out_now: 4893 *abort_now = 1; 4894 /* XXX */ 4895 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 4896 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31; 4897 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4898 return; 4899 } else { 4900 struct sctp_nets *netp; 4901 4902 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 4903 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4904 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4905 } 4906 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 4907 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4908 sctp_stop_timers_for_shutdown(stcb); 4909 if (asoc->alternate) { 4910 netp = asoc->alternate; 4911 } else { 4912 netp = asoc->primary_destination; 4913 } 4914 sctp_send_shutdown(stcb, netp); 4915 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4916 stcb->sctp_ep, stcb, netp); 4917 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4918 stcb->sctp_ep, stcb, netp); 4919 } 4920 return; 4921 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4922 (asoc->stream_queue_cnt == 0)) { 4923 struct sctp_nets *netp; 4924 4925 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4926 goto abort_out_now; 4927 } 4928 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4929 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); 4930 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4931 sctp_stop_timers_for_shutdown(stcb); 4932 if (asoc->alternate) { 4933 netp = asoc->alternate; 4934 } else { 4935 netp = asoc->primary_destination; 4936 } 4937 sctp_send_shutdown_ack(stcb, netp); 4938 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4939 stcb->sctp_ep, stcb, netp); 4940 return; 4941 } 4942 } 4943 /* 4944 * Now here we are going to recycle net_ack for a different use... 4945 * HEADS UP. 4946 */ 4947 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4948 net->net_ack = 0; 4949 } 4950 4951 /* 4952 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking 4953 * to be done. Setting this_sack_lowest_newack to the cum_ack will 4954 * automatically ensure that. 4955 */ 4956 if ((asoc->sctp_cmt_on_off > 0) && 4957 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && 4958 (cmt_dac_flag == 0)) { 4959 this_sack_lowest_newack = cum_ack; 4960 } 4961 if ((num_seg > 0) || (num_nr_seg > 0)) { 4962 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, 4963 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved); 4964 } 4965 /* JRS - Use the congestion control given in the CC module */ 4966 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc); 4967 4968 /* Now are we exiting loss recovery ? 
*/ 4969 if (will_exit_fast_recovery) { 4970 /* Ok, we must exit fast recovery */ 4971 asoc->fast_retran_loss_recovery = 0; 4972 } 4973 if ((asoc->sat_t3_loss_recovery) && 4974 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) { 4975 /* end satellite t3 loss recovery */ 4976 asoc->sat_t3_loss_recovery = 0; 4977 } 4978 /* 4979 * CMT Fast recovery 4980 */ 4981 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4982 if (net->will_exit_fast_recovery) { 4983 /* Ok, we must exit fast recovery */ 4984 net->fast_retran_loss_recovery = 0; 4985 } 4986 } 4987 4988 /* Adjust and set the new rwnd value */ 4989 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4990 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4991 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd); 4992 } 4993 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd, 4994 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 4995 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4996 /* SWS sender side engages */ 4997 asoc->peers_rwnd = 0; 4998 } 4999 if (asoc->peers_rwnd > old_rwnd) { 5000 win_probe_recovery = 1; 5001 } 5002 /* 5003 * Now we must set up a timer for anyone with 5004 * outstanding data. 5005 */ 5006 done_once = 0; 5007 again: 5008 j = 0; 5009 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5010 if (win_probe_recovery && (net->window_probe)) { 5011 win_probe_recovered = 1; 5012 /*- 5013 * Find first chunk that was used with 5014 * window probe and clear the event. Put 5015 * it back into the send queue as if it has 5016 * not been sent. 5017 */ 5018 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5019 if (tp1->window_probe) { 5020 sctp_window_probe_recovery(stcb, asoc, tp1); 5021 break; 5022 } 5023 } 5024 } 5025 if (net->flight_size) { 5026 j++; 5027 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5028 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5029 stcb->sctp_ep, stcb, net); 5030 } 5031 if (net->window_probe) { 5032 net->window_probe = 0; 5033 } 5034 } else { 5035 if (net->window_probe) { 5036 /* 5037 * In window probes we must assure a timer 5038 * is still running there 5039 */ 5040 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5041 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5042 stcb->sctp_ep, stcb, net); 5043 5044 } 5045 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5046 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 5047 stcb, net, 5048 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32); 5049 } 5050 } 5051 } 5052 if ((j == 0) && 5053 (!TAILQ_EMPTY(&asoc->sent_queue)) && 5054 (asoc->sent_queue_retran_cnt == 0) && 5055 (win_probe_recovered == 0) && 5056 (done_once == 0)) { 5057 /* 5058 * huh, this should not happen unless all packets are 5059 * PR-SCTP and marked to skip of course.
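 * If the audit below finds the accounting inconsistent, the
 * flight size is rebuilt from the sent_queue: every net's
 * flight_size is zeroed and each chunk not yet marked for
 * resend is re-added, after which the timer pass is retried
 * once (done_once guards against looping).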
5060 */ 5061 if (sctp_fs_audit(asoc)) { 5062 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5063 net->flight_size = 0; 5064 } 5065 asoc->total_flight = 0; 5066 asoc->total_flight_count = 0; 5067 asoc->sent_queue_retran_cnt = 0; 5068 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5069 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5070 sctp_flight_size_increase(tp1); 5071 sctp_total_flight_increase(stcb, tp1); 5072 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 5073 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 5074 } 5075 } 5076 } 5077 done_once = 1; 5078 goto again; 5079 } 5080 /*********************************************/ 5081 /* Here we perform PR-SCTP procedures */ 5082 /* (section 4.2) */ 5083 /*********************************************/ 5084 /* C1. update advancedPeerAckPoint */ 5085 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) { 5086 asoc->advanced_peer_ack_point = cum_ack; 5087 } 5088 /* C2. try to further move advancedPeerAckPoint ahead */ 5089 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { 5090 struct sctp_tmit_chunk *lchk; 5091 uint32_t old_adv_peer_ack_point; 5092 5093 old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 5094 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 5095 /* C3. See if we need to send a Fwd-TSN */ 5096 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) { 5097 /* 5098 * ISSUE with ECN, see FWD-TSN processing. 5099 */ 5100 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 5101 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 5102 0xee, cum_ack, asoc->advanced_peer_ack_point, 5103 old_adv_peer_ack_point); 5104 } 5105 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 5106 send_forward_tsn(stcb, asoc); 5107 } else if (lchk) { 5108 /* try to FR fwd-tsn's that get lost too */ 5109 if (lchk->rec.data.fwd_tsn_cnt >= 3) { 5110 send_forward_tsn(stcb, asoc); 5111 } 5112 } 5113 } 5114 if (lchk) { 5115 /* Assure a timer is up */ 5116 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5117 stcb->sctp_ep, stcb, lchk->whoTo); 5118 } 5119 } 5120 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 5121 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 5122 a_rwnd, 5123 stcb->asoc.peers_rwnd, 5124 stcb->asoc.total_flight, 5125 stcb->asoc.total_output_queue_size); 5126 } 5127 } 5128 5129 void 5130 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag) 5131 { 5132 /* Copy cum-ack */ 5133 uint32_t cum_ack, a_rwnd; 5134 5135 cum_ack = ntohl(cp->cumulative_tsn_ack); 5136 /* Arrange so a_rwnd does NOT change */ 5137 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight; 5138 5139 /* Now call the express sack handling */ 5140 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0); 5141 } 5142 5143 static void 5144 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, 5145 struct sctp_stream_in *strmin) 5146 { 5147 struct sctp_queued_to_read *ctl, *nctl; 5148 struct sctp_association *asoc; 5149 uint32_t tt; 5150 int need_reasm_check = 0, old; 5151 5152 asoc = &stcb->asoc; 5153 tt = strmin->last_sequence_delivered; 5154 if (asoc->idata_supported) { 5155 old = 0; 5156 } else { 5157 old = 1; 5158 } 5159 /* 5160 * First deliver anything prior to and including the stream no that 5161 * came in. 
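 * ("old" selects the sequence-number width: plain DATA uses
 * 16-bit SSNs while I-DATA (RFC 8260) uses 32-bit message ids,
 * and SCTP_MSGID_GE/GT take that flag so wrap-around compares
 * correctly.)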
5162 */ 5163 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) { 5164 if (SCTP_MSGID_GE(old, tt, ctl->sinfo_ssn)) { 5165 /* this is deliverable now */ 5166 if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 5167 if (ctl->on_strm_q) { 5168 if (ctl->on_strm_q == SCTP_ON_ORDERED) { 5169 TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm); 5170 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) { 5171 TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm); 5172 #ifdef INVARIANTS 5173 } else { 5174 panic("strmin: %p ctl: %p unknown %d", 5175 strmin, ctl, ctl->on_strm_q); 5176 #endif 5177 } 5178 ctl->on_strm_q = 0; 5179 } 5180 /* subtract pending on streams */ 5181 asoc->size_on_all_streams -= ctl->length; 5182 sctp_ucount_decr(asoc->cnt_on_all_streams); 5183 /* deliver it to at least the delivery-q */ 5184 if (stcb->sctp_socket) { 5185 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn); 5186 sctp_add_to_readq(stcb->sctp_ep, stcb, 5187 ctl, 5188 &stcb->sctp_socket->so_rcv, 5189 1, SCTP_READ_LOCK_HELD, 5190 SCTP_SO_NOT_LOCKED); 5191 } 5192 } else { 5193 /* Its a fragmented message */ 5194 if (ctl->first_frag_seen) { 5195 /* 5196 * Make it so this is next to 5197 * deliver, we restore later 5198 */ 5199 strmin->last_sequence_delivered = ctl->sinfo_ssn - 1; 5200 need_reasm_check = 1; 5201 break; 5202 } 5203 } 5204 } else { 5205 /* no more delivery now. */ 5206 break; 5207 } 5208 } 5209 if (need_reasm_check) { 5210 int ret; 5211 5212 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin); 5213 if (SCTP_MSGID_GT(old, tt, strmin->last_sequence_delivered)) { 5214 /* Restore the next to deliver unless we are ahead */ 5215 strmin->last_sequence_delivered = tt; 5216 } 5217 if (ret == 0) { 5218 /* Left the front Partial one on */ 5219 return; 5220 } 5221 need_reasm_check = 0; 5222 } 5223 /* 5224 * now we must deliver things in queue the normal way if any are 5225 * now ready. 
5226 */ 5227 tt = strmin->last_sequence_delivered + 1; 5228 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) { 5229 if (tt == ctl->sinfo_ssn) { 5230 if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 5231 /* this is deliverable now */ 5232 if (ctl->on_strm_q) { 5233 if (ctl->on_strm_q == SCTP_ON_ORDERED) { 5234 TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm); 5235 } else if (ctl->on_strm_q == SCTP_ON_UNORDERED) { 5236 TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm); 5237 #ifdef INVARIANTS 5238 } else { 5239 panic("strmin: %p ctl: %p unknown %d", 5240 strmin, ctl, ctl->on_strm_q); 5241 #endif 5242 } 5243 ctl->on_strm_q = 0; 5244 } 5245 /* subtract pending on streams */ 5246 asoc->size_on_all_streams -= ctl->length; 5247 sctp_ucount_decr(asoc->cnt_on_all_streams); 5248 /* deliver it to at least the delivery-q */ 5249 strmin->last_sequence_delivered = ctl->sinfo_ssn; 5250 if (stcb->sctp_socket) { 5251 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn); 5252 sctp_add_to_readq(stcb->sctp_ep, stcb, 5253 ctl, 5254 &stcb->sctp_socket->so_rcv, 1, 5255 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED); 5256 5257 } 5258 tt = strmin->last_sequence_delivered + 1; 5259 } else { 5260 /* Its a fragmented message */ 5261 if (ctl->first_frag_seen) { 5262 /* 5263 * Make it so this is next to 5264 * deliver 5265 */ 5266 strmin->last_sequence_delivered = ctl->sinfo_ssn - 1; 5267 need_reasm_check = 1; 5268 break; 5269 } 5270 } 5271 } else { 5272 break; 5273 } 5274 } 5275 if (need_reasm_check) { 5276 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin); 5277 } 5278 } 5279 5280 static void 5281 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb, 5282 struct sctp_association *asoc, 5283 uint16_t stream, uint32_t seq) 5284 { 5285 struct sctp_queued_to_read *control; 5286 struct sctp_stream_in *strm; 5287 struct sctp_tmit_chunk *chk, *nchk; 5288 5289 /* 5290 * For now large messages held on the stream reasm that are complete 5291 * will be tossed too. We could in theory do more work to spin 5292 * through and stop after dumping one msg aka seeing the start of a 5293 * new msg at the head, and call the delivery function... to see if 5294 * it can be delivered... But for now we just dump everything on the 5295 * queue. 
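 * Note that the readq entry itself is only freed below when
 * it has not already been pushed to the read queue
 * (on_read_q == 0); otherwise the reader still owns it.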
5296 */ 5297 strm = &asoc->strmin[stream]; 5298 control = find_reasm_entry(strm, (uint32_t) seq, 0, 0); 5299 if (control == NULL) { 5300 /* Not found */ 5301 return; 5302 } 5303 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) { 5304 /* Purge hanging chunks */ 5305 TAILQ_REMOVE(&control->reasm, chk, sctp_next); 5306 asoc->size_on_reasm_queue -= chk->send_size; 5307 sctp_ucount_decr(asoc->cnt_on_reasm_queue); 5308 if (chk->data) { 5309 sctp_m_freem(chk->data); 5310 chk->data = NULL; 5311 } 5312 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 5313 } 5314 TAILQ_REMOVE(&strm->inqueue, control, next_instrm); 5315 if (control->on_read_q == 0) { 5316 sctp_free_remote_addr(control->whoFrom); 5317 if (control->data) { 5318 sctp_m_freem(control->data); 5319 control->data = NULL; 5320 } 5321 sctp_free_a_readq(stcb, control); 5322 } 5323 } 5324 5325 5326 void 5327 sctp_handle_forward_tsn(struct sctp_tcb *stcb, 5328 struct sctp_forward_tsn_chunk *fwd, 5329 int *abort_flag, struct mbuf *m, int offset) 5330 { 5331 /* The pr-sctp fwd tsn */ 5332 /* 5333 * here we will perform all the data receiver side steps for 5334 * processing FwdTSN, as required in by pr-sctp draft: 5335 * 5336 * Assume we get FwdTSN(x): 5337 * 5338 * 1) update local cumTSN to x 2) try to further advance cumTSN to x + 5339 * others we have 3) examine and update re-ordering queue on 5340 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to 5341 * report where we are. 5342 */ 5343 struct sctp_association *asoc; 5344 uint32_t new_cum_tsn, gap; 5345 unsigned int i, fwd_sz, m_size; 5346 uint32_t str_seq; 5347 struct sctp_stream_in *strm; 5348 struct sctp_queued_to_read *ctl, *sv; 5349 5350 asoc = &stcb->asoc; 5351 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) { 5352 SCTPDBG(SCTP_DEBUG_INDATA1, 5353 "Bad size too small/big fwd-tsn\n"); 5354 return; 5355 } 5356 m_size = (stcb->asoc.mapping_array_size << 3); 5357 /*************************************************************/ 5358 /* 1. Here we update local cumTSN and shift the bitmap array */ 5359 /*************************************************************/ 5360 new_cum_tsn = ntohl(fwd->new_cumulative_tsn); 5361 5362 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) { 5363 /* Already got there ... */ 5364 return; 5365 } 5366 /* 5367 * now we know the new TSN is more advanced, let's find the actual 5368 * gap 5369 */ 5370 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn); 5371 asoc->cumulative_tsn = new_cum_tsn; 5372 if (gap >= m_size) { 5373 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) { 5374 struct mbuf *op_err; 5375 char msg[SCTP_DIAG_INFO_LEN]; 5376 5377 /* 5378 * out of range (of single byte chunks in the rwnd I 5379 * give out). This must be an attacker. 
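 * (The gap may legitimately exceed the mapping array, in
 * which case the map is reset below, but it can never exceed
 * the receive window we advertised, even if every chunk were
 * a single byte.)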
void
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
    struct sctp_forward_tsn_chunk *fwd,
    int *abort_flag, struct mbuf *m, int offset)
{
	/* The pr-sctp fwd tsn */
	/*
	 * Here we will perform all the data receiver side steps for
	 * processing FwdTSN, as required by the PR-SCTP draft:
	 *
	 * Assume we get FwdTSN(x):
	 *
	 * 1) update local cumTSN to x
	 * 2) try to further advance cumTSN to x + others we have
	 * 3) examine and update re-ordering queue on pr-in-streams
	 * 4) clean up re-assembly queue
	 * 5) Send a sack to report where we are.
	 */
	struct sctp_association *asoc;
	uint32_t new_cum_tsn, gap;
	unsigned int i, fwd_sz, m_size;
	uint32_t str_seq;
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *ctl, *sv;

	asoc = &stcb->asoc;
	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
		SCTPDBG(SCTP_DEBUG_INDATA1,
		    "Bad size too small/big fwd-tsn\n");
		return;
	}
	m_size = (stcb->asoc.mapping_array_size << 3);
	/*************************************************************/
	/* 1. Here we update local cumTSN and shift the bitmap array */
	/*************************************************************/
	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);

	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
		/* Already got there ... */
		return;
	}
	/*
	 * Now we know the new TSN is more advanced, let's find the actual
	 * gap.
	 */
	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
	asoc->cumulative_tsn = new_cum_tsn;
	if (gap >= m_size) {
		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
			struct mbuf *op_err;
			char msg[SCTP_DIAG_INFO_LEN];

			/*
			 * Out of range (of the single-byte chunks in the
			 * rwnd I give out). This must be an attacker.
			 */
			*abort_flag = 1;
			snprintf(msg, sizeof(msg),
			    "New cum ack %8.8x too high, highest TSN %8.8x",
			    new_cum_tsn, asoc->highest_tsn_inside_map);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			return;
		}
		SCTP_STAT_INCR(sctps_fwdtsn_map_over);

		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
		asoc->highest_tsn_inside_map = new_cum_tsn;

		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->highest_tsn_inside_nr_map = new_cum_tsn;

		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
	} else {
		SCTP_TCB_LOCK_ASSERT(stcb);
		for (i = 0; i <= gap; i++) {
			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
				}
			}
		}
	}
	/*************************************************************/
	/* 2. Clear up re-assembly queue                             */
	/*************************************************************/

	/* This is now done as part of clearing up the stream/seq */

	/*******************************************************/
	/* 3. Update the PR-stream re-ordering queues and fix  */
	/*    delivery issues as needed.                       */
	/*******************************************************/
	fwd_sz -= sizeof(*fwd);
	if (m && fwd_sz) {
		/* New method. */
		unsigned int num_str;
		uint32_t sequence;
		uint16_t stream;
		int old;
		struct sctp_strseq *stseq, strseqbuf;
		struct sctp_strseq_mid *stseq_m, strseqbuf_m;

		offset += sizeof(*fwd);

		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		if (asoc->idata_supported) {
			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
			old = 0;
		} else {
			num_str = fwd_sz / sizeof(struct sctp_strseq);
			old = 1;
		}
		for (i = 0; i < num_str; i++) {
			if (asoc->idata_supported) {
				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
				    sizeof(struct sctp_strseq_mid),
				    (uint8_t *)&strseqbuf_m);
				offset += sizeof(struct sctp_strseq_mid);
				if (stseq_m == NULL) {
					break;
				}
				stream = ntohs(stseq_m->stream);
				sequence = ntohl(stseq_m->msg_id);
			} else {
				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
				    sizeof(struct sctp_strseq),
				    (uint8_t *)&strseqbuf);
				offset += sizeof(struct sctp_strseq);
				if (stseq == NULL) {
					break;
				}
				stream = ntohs(stseq->stream);
				sequence = (uint32_t) ntohs(stseq->sequence);
			}
			/* Convert */

			/* Now process */

			/*
			 * Ok, we now look for the stream/seq on the read
			 * queue where it's not all delivered. If we find
			 * it, we transmute the read entry into a
			 * PDI_ABORTED.
			 */
			if (stream >= asoc->streamincnt) {
				/* screwed up streams, stop! */
				break;
			}
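			/*
			 * From here to the end of the loop body we retire
			 * this one (stream, sequence) pair: cancel any
			 * partial delivery in progress for it, flush its
			 * reassembly fragments, and flag a matching
			 * read-queue entry as PDI_ABORTED so the user gets
			 * an abort notification instead of a silently
			 * truncated message.
			 */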
			if ((asoc->str_of_pdapi == stream) &&
			    (asoc->ssn_of_pdapi == sequence)) {
				/*
				 * If this is the one we were partially
				 * delivering now, then we no longer are.
				 * Note this will change with the
				 * reassembly re-write.
				 */
				asoc->fragmented_delivery_inprogress = 0;
			}
			strm = &asoc->strmin[stream];
			sctp_flush_reassm_for_str_seq(stcb, asoc, stream, sequence);
			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
				if ((ctl->sinfo_stream == stream) &&
				    (ctl->sinfo_ssn == sequence)) {
					str_seq = (stream << 16) | (0x0000ffff & sequence);
					ctl->pdapi_aborted = 1;
					sv = stcb->asoc.control_pdapi;
					ctl->end_added = 1;
					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strm->inqueue, ctl, next_instrm);
					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strm->uno_inqueue, ctl, next_instrm);
#ifdef INVARIANTS
					} else if (ctl->on_strm_q) {
						panic("strm: %p ctl: %p unknown %d",
						    strm, ctl, ctl->on_strm_q);
#endif
					}
					ctl->on_strm_q = 0;
					stcb->asoc.control_pdapi = ctl;
					/* Note: constant name below is historically misspelled. */
					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
					    stcb,
					    SCTP_PARTIAL_DELIVERY_ABORTED,
					    (void *)&str_seq,
					    SCTP_SO_NOT_LOCKED);
					stcb->asoc.control_pdapi = sv;
					break;
				} else if ((ctl->sinfo_stream == stream) &&
				    SCTP_MSGID_GT(old, ctl->sinfo_ssn, sequence)) {
					/* We are past our victim SSN */
					break;
				}
			}
			if (SCTP_MSGID_GT(old, sequence, strm->last_sequence_delivered)) {
				/* Update the sequence number */
				strm->last_sequence_delivered = sequence;
			}
			/* Now kick the stream the new way */
			/* sa_ignore NO_NULL_CHK */
			sctp_kick_prsctp_reorder_queue(stcb, strm);
		}
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	/*
	 * Now slide things forward.
	 */
	sctp_slide_mapping_arrays(stcb);
}
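
/*
 * Illustrative sketch, not from the original sources: the gap computed by
 * SCTP_CALC_TSN_TO_GAP() above is the bit offset of a TSN within the
 * mapping array, measured from mapping_array_base_tsn with 32-bit
 * wraparound. Unsigned modular subtraction gives the same result; e.g.
 * tsn 0x00000002 with base 0xfffffffe yields a gap of 4.
 */
static inline uint32_t
sctp_example_tsn_to_gap(uint32_t tsn, uint32_t base_tsn)
{
	/* Modular arithmetic handles the 0xffffffff -> 0 wrap for free. */
	return (tsn - base_tsn);
}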