/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>


/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it, for bundling).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}
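
/*
 * Rough sketch of the computation below: start from the free space in
 * so_rcv, subtract the data we still hold for the application on the
 * reassembly and stream queues (plus MSIZE of mbuf overhead per queued
 * chunk), and finally subtract the control overhead already charged
 * against the window.  If control overhead would eat the whole window,
 * we advertise 1 byte instead of 0 so the sender's silly window
 * syndrome avoidance stays engaged.
 */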

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    uint32_t calc = 0;

    /*
     * This is really set wrong with respect to a 1-to-many socket,
     * since sb_cc is the count that everyone has put up.  When we
     * rewrite sctp_soreceive we will fix this so that ONLY this
     * association's data is taken into account.
     */
    if (stcb->sctp_socket == NULL)
        return (calc);

    if (stcb->asoc.sb_cc == 0 &&
        asoc->size_on_reasm_queue == 0 &&
        asoc->size_on_all_streams == 0) {
        /* Full rwnd granted */
        calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
        return (calc);
    }
    /* get actual space */
    calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

    /*
     * take out what has NOT been put on the socket queue and that we
     * still hold for putting up.
     */
    calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
        asoc->cnt_on_reasm_queue * MSIZE));
    calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
        asoc->cnt_on_all_streams * MSIZE));

    if (calc == 0) {
        /* out of space */
        return (calc);
    }
    /* what is the overhead of all these rwnd's */
    calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
    /*
     * If the window gets too small due to control overhead, reduce it
     * to 1, even if it would otherwise be 0, to keep SWS avoidance
     * engaged.
     */
    if (calc < stcb->asoc.my_rwnd_control_len) {
        calc = 1;
    }
    return (calc);
}


/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
    struct sctp_queued_to_read *read_queue_e = NULL;

    sctp_alloc_a_readq(stcb, read_queue_e);
    if (read_queue_e == NULL) {
        goto failed_build;
    }
    read_queue_e->sinfo_stream = stream_no;
    read_queue_e->sinfo_ssn = stream_seq;
    read_queue_e->sinfo_flags = (flags << 8);
    read_queue_e->sinfo_ppid = ppid;
    read_queue_e->sinfo_context = context;
    read_queue_e->sinfo_timetolive = 0;
    read_queue_e->sinfo_tsn = tsn;
    read_queue_e->sinfo_cumtsn = tsn;
    read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
    read_queue_e->whoFrom = net;
    read_queue_e->length = 0;
    atomic_add_int(&net->ref_count, 1);
    read_queue_e->data = dm;
    read_queue_e->spec_flags = 0;
    read_queue_e->tail_mbuf = NULL;
    read_queue_e->aux_data = NULL;
    read_queue_e->stcb = stcb;
    read_queue_e->port_from = stcb->rport;
    read_queue_e->do_not_ref_stcb = 0;
    read_queue_e->end_added = 0;
    read_queue_e->some_taken = 0;
    read_queue_e->pdapi_aborted = 0;
failed_build:
    return (read_queue_e);
}
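
/*
 * Note for both readq builders: the wire-level chunk flags are stored
 * shifted into the high byte of sinfo_flags.  Assuming the stock
 * FreeBSD definitions, that lines SCTP_DATA_UNORDERED (0x04) up with
 * the sockets-API flag SCTP_UNORDERED (0x0400).
 */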

/*
 * Build out our readq entry based on the incoming chunk.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
    struct sctp_queued_to_read *read_queue_e = NULL;

    sctp_alloc_a_readq(stcb, read_queue_e);
    if (read_queue_e == NULL) {
        goto failed_build;
    }
    read_queue_e->sinfo_stream = chk->rec.data.stream_number;
    read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
    read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
    read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
    read_queue_e->sinfo_context = stcb->asoc.context;
    read_queue_e->sinfo_timetolive = 0;
    read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
    read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
    read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
    read_queue_e->whoFrom = chk->whoTo;
    read_queue_e->aux_data = NULL;
    read_queue_e->length = 0;
    atomic_add_int(&chk->whoTo->ref_count, 1);
    read_queue_e->data = chk->data;
    read_queue_e->tail_mbuf = NULL;
    read_queue_e->stcb = stcb;
    read_queue_e->port_from = stcb->rport;
    read_queue_e->spec_flags = 0;
    read_queue_e->do_not_ref_stcb = 0;
    read_queue_e->end_added = 0;
    read_queue_e->some_taken = 0;
    read_queue_e->pdapi_aborted = 0;
failed_build:
    return (read_queue_e);
}
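
/*
 * Sketch of the ancillary data built below when the corresponding
 * socket options are enabled: the returned mbuf carries back-to-back
 * cmsghdr blocks,
 *
 *   IPPROTO_SCTP / SCTP_RCVINFO                struct sctp_rcvinfo
 *   IPPROTO_SCTP / SCTP_NXTINFO                struct sctp_nxtinfo (if a next msg is known)
 *   IPPROTO_SCTP / SCTP_SNDRCV or SCTP_EXTRCV  struct sctp_sndrcvinfo or sctp_extrcvinfo
 *
 * each padded to CMSG_SPACE() so a receiver can walk them with
 * CMSG_FIRSTHDR()/CMSG_NXTHDR().
 */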

struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
    struct sctp_extrcvinfo *seinfo;
    struct sctp_sndrcvinfo *outinfo;
    struct sctp_rcvinfo *rcvinfo;
    struct sctp_nxtinfo *nxtinfo;
    struct cmsghdr *cmh;
    struct mbuf *ret;
    int len;
    int use_extended;
    int provide_nxt;

    if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
        sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
        sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
        /* user does not want any ancillary data */
        return (NULL);
    }
    len = 0;
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
        len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
    }
    seinfo = (struct sctp_extrcvinfo *)sinfo;
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
        (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
        provide_nxt = 1;
        len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
    } else {
        provide_nxt = 0;
    }
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
        if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
            use_extended = 1;
            len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
        } else {
            use_extended = 0;
            len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
        }
    } else {
        use_extended = 0;
    }

    ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
    if (ret == NULL) {
        /* No space */
        return (ret);
    }
    SCTP_BUF_LEN(ret) = 0;

    /* We need a CMSG header followed by the struct */
    cmh = mtod(ret, struct cmsghdr *);
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
        cmh->cmsg_level = IPPROTO_SCTP;
        cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
        cmh->cmsg_type = SCTP_RCVINFO;
        rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
        rcvinfo->rcv_sid = sinfo->sinfo_stream;
        rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
        rcvinfo->rcv_flags = sinfo->sinfo_flags;
        rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
        rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
        rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
        rcvinfo->rcv_context = sinfo->sinfo_context;
        rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
        cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
        SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
    }
    if (provide_nxt) {
        cmh->cmsg_level = IPPROTO_SCTP;
        cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
        cmh->cmsg_type = SCTP_NXTINFO;
        nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
        nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
        nxtinfo->nxt_flags = 0;
        if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
            nxtinfo->nxt_flags |= SCTP_UNORDERED;
        }
        if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
            nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
        }
        if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
            nxtinfo->nxt_flags |= SCTP_COMPLETE;
        }
        nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
        nxtinfo->nxt_length = seinfo->sreinfo_next_length;
        nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
        cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
        SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
    }
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
        cmh->cmsg_level = IPPROTO_SCTP;
        outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
        if (use_extended) {
            cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
            cmh->cmsg_type = SCTP_EXTRCV;
            memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
            SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
        } else {
            cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
            cmh->cmsg_type = SCTP_SNDRCV;
            *outinfo = *sinfo;
            SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
        }
    }
    return (ret);
}


static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
    uint32_t gap, i, cumackp1;
    int fnd = 0;

    if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
        return;
    }
    cumackp1 = asoc->cumulative_tsn + 1;
    if (SCTP_TSN_GT(cumackp1, tsn)) {
        /*
         * this tsn is behind the cum ack and thus we don't need to
         * worry about it being moved from one to the other.
         */
        return;
    }
    SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
    if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
        SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
        sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
        panic("Things are really messed up now!!");
#endif
    }
    SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
    SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
    if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
        asoc->highest_tsn_inside_nr_map = tsn;
    }
    if (tsn == asoc->highest_tsn_inside_map) {
        /* We must back down to see what the new highest is */
        for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
            SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
            if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
                asoc->highest_tsn_inside_map = i;
                fnd = 1;
                break;
            }
        }
        if (!fnd) {
            asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
        }
    }
}
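
/*
 * A note on the two maps used above: mapping_array tracks TSNs we have
 * received but could still renege on (only possible when sctp_do_drain
 * is enabled), while nr_mapping_array tracks TSNs that have been handed
 * to the socket and are therefore non-revokable.  "Marking
 * non-revokable" is exactly moving a TSN's bit from the first map to
 * the second.
 */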

/*
 * We are delivering currently from the reassembly queue. We must continue
 * to deliver until we either: 1) run out of space, 2) run out of
 * sequential TSN's, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    struct sctp_tmit_chunk *chk, *nchk;
    uint16_t nxt_todel;
    uint16_t stream_no;
    int end = 0;
    int cntDel;
    struct sctp_queued_to_read *control, *ctl, *nctl;

    if (stcb == NULL)
        return;

    cntDel = stream_no = 0;
    if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
        (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
        (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
        /* socket above is long gone or going... */
abandon:
        asoc->fragmented_delivery_inprogress = 0;
        TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
            TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
            asoc->size_on_reasm_queue -= chk->send_size;
            sctp_ucount_decr(asoc->cnt_on_reasm_queue);
            /*
             * Lose the data pointer, since it's in the socket
             * buffer
             */
            if (chk->data) {
                sctp_m_freem(chk->data);
                chk->data = NULL;
            }
            /* Now free the address and data */
            sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
            /* sa_ignore FREED_MEMORY */
        }
        return;
    }
    SCTP_TCB_LOCK_ASSERT(stcb);
    TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
        if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
            /* Can't deliver more :< */
            return;
        }
        stream_no = chk->rec.data.stream_number;
        nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
        if (nxt_todel != chk->rec.data.stream_seq &&
            (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
            /*
             * Not the next ordered sequence to deliver in its
             * stream, and not unordered
             */
            return;
        }
        if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

            control = sctp_build_readq_entry_chk(stcb, chk);
            if (control == NULL) {
                /* out of memory? */
                return;
            }
            /* save it off for our future deliveries */
            stcb->asoc.control_pdapi = control;
            if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
                end = 1;
            else
                end = 0;
            sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
            sctp_add_to_readq(stcb->sctp_ep,
                stcb, control, &stcb->sctp_socket->so_rcv, end,
                SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
            cntDel++;
        } else {
            if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
                end = 1;
            else
                end = 0;
            sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
            if (sctp_append_to_readq(stcb->sctp_ep, stcb,
                stcb->asoc.control_pdapi,
                chk->data, end, chk->rec.data.TSN_seq,
                &stcb->sctp_socket->so_rcv)) {
                /*
                 * something is very wrong: either
                 * control_pdapi is NULL, or the tail_mbuf
                 * is corrupt, or there is an EOM already on
                 * the mbuf chain.
                 */
                if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
                    goto abandon;
                } else {
#ifdef INVARIANTS
                    if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
                        panic("This should not happen control_pdapi NULL?");
                    }
                    /* if we did not panic, it was an EOM */
                    panic("Bad chunking ??");
#else
                    if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
                        SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
                    }
                    SCTP_PRINTF("Bad chunking ??\n");
                    SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");

#endif
                    goto abandon;
                }
            }
            cntDel++;
        }
        /* pull it, we did it */
        TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
        if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
            asoc->fragmented_delivery_inprogress = 0;
            if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
                asoc->strmin[stream_no].last_sequence_delivered++;
            }
            if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
                SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
            }
        } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
            /*
             * turn the flag back on since we just delivered
             * yet another one.
             */
            asoc->fragmented_delivery_inprogress = 1;
        }
        asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
        asoc->last_flags_delivered = chk->rec.data.rcv_flags;
        asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
        asoc->last_strm_no_delivered = chk->rec.data.stream_number;

        asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
        asoc->size_on_reasm_queue -= chk->send_size;
        sctp_ucount_decr(asoc->cnt_on_reasm_queue);
        /* free up the chk */
        chk->data = NULL;
        sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);

        if (asoc->fragmented_delivery_inprogress == 0) {
            /*
             * Now let's see if we can deliver the next one on
             * the stream
             */
            struct sctp_stream_in *strm;

            strm = &asoc->strmin[stream_no];
            nxt_todel = strm->last_sequence_delivered + 1;
            TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
                /* Deliver more if we can. */
                if (nxt_todel == ctl->sinfo_ssn) {
                    TAILQ_REMOVE(&strm->inqueue, ctl, next);
                    asoc->size_on_all_streams -= ctl->length;
                    sctp_ucount_decr(asoc->cnt_on_all_streams);
                    strm->last_sequence_delivered++;
                    sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
                    sctp_add_to_readq(stcb->sctp_ep, stcb,
                        ctl,
                        &stcb->sctp_socket->so_rcv, 1,
                        SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
                } else {
                    break;
                }
                nxt_todel = strm->last_sequence_delivered + 1;
            }
            break;
        }
    }
}
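
/*
 * Flow summary for sctp_service_reassembly() above: a FIRST fragment
 * creates a fresh readq entry and parks it in control_pdapi; MIDDLE and
 * LAST fragments are appended to that entry with sctp_append_to_readq();
 * a LAST fragment ends the partial delivery and lets normal in-order
 * delivery on the stream resume.
 */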

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order.  One big
 * question still remains: what do we do when the socket buffer is FULL?
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
    /*
     * FIX-ME maybe?  What happens when the ssn wraps?  If we are
     * getting all the data in one stream this could happen quite
     * rapidly.  One could use the TSN to keep track of things, but
     * this scheme breaks down in the other type of stream usage that
     * could occur.  Send a single msg to stream 0, send 4 billion
     * messages to stream 1, now send a message to stream 0.  You have
     * a situation where the TSN has wrapped but not in the stream.
     * Is this worth worrying about, or should we just change our
     * queue sort at the bottom to be by TSN?
     *
     * Could it also be legal for a peer to send ssn 1 with TSN 2 and
     * ssn 2 with TSN 1?  If the peer is doing some sort of funky
     * TSN/SSN assignment this could happen... and I don't see how
     * this would be a violation.  So for now I am undecided and will
     * leave the sort by SSN alone.  Maybe a hybrid approach is the
     * answer.
     */
    struct sctp_stream_in *strm;
    struct sctp_queued_to_read *at;
    int queue_needed;
    uint16_t nxt_todel;
    struct mbuf *oper;

    queue_needed = 1;
    asoc->size_on_all_streams += control->length;
    sctp_ucount_incr(asoc->cnt_on_all_streams);
    strm = &asoc->strmin[control->sinfo_stream];
    nxt_todel = strm->last_sequence_delivered + 1;
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
        sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
    }
    SCTPDBG(SCTP_DEBUG_INDATA1,
        "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
        (uint32_t) control->sinfo_stream,
        (uint32_t) strm->last_sequence_delivered,
        (uint32_t) nxt_todel);
    if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
        /* The incoming sseq is behind where we last delivered? */
        SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
            control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
        /*
         * throw it in the stream so it gets cleaned up in
         * association destruction
         */
        TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
        oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
            0, M_NOWAIT, 1, MT_DATA);
        if (oper) {
            struct sctp_paramhdr *ph;
            uint32_t *ippp;

            SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
                (3 * sizeof(uint32_t));
            ph = mtod(oper, struct sctp_paramhdr *);
            ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
            ph->param_length = htons(SCTP_BUF_LEN(oper));
            ippp = (uint32_t *)(ph + 1);
            *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
            ippp++;
            *ippp = control->sinfo_tsn;
            ippp++;
            *ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
        }
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
        sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
        *abort_flag = 1;
        return;
    }
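
    /*
     * The error cause built above (and by the similar abort paths in
     * this file) is a protocol-violation cause followed by three
     * 32-bit debug words: an SCTP_FROM_SCTP_INDATA + SCTP_LOC_x
     * location code, the TSN in question, and
     * (stream number << 16 | stream sequence).
     */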
    if (nxt_todel == control->sinfo_ssn) {
        /* can be delivered right away? */
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
            sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
        }
        /* EY: it won't be queued if it can be delivered directly */
        queue_needed = 0;
        asoc->size_on_all_streams -= control->length;
        sctp_ucount_decr(asoc->cnt_on_all_streams);
        strm->last_sequence_delivered++;

        sctp_mark_non_revokable(asoc, control->sinfo_tsn);
        sctp_add_to_readq(stcb->sctp_ep, stcb,
            control,
            &stcb->sctp_socket->so_rcv, 1,
            SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
        TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
            /* all delivered */
            nxt_todel = strm->last_sequence_delivered + 1;
            if (nxt_todel == control->sinfo_ssn) {
                TAILQ_REMOVE(&strm->inqueue, control, next);
                asoc->size_on_all_streams -= control->length;
                sctp_ucount_decr(asoc->cnt_on_all_streams);
                strm->last_sequence_delivered++;
                /*
                 * We ignore the return of deliver_data here
                 * since we always can hold the chunk on the
                 * d-queue.  And we have a finite number that
                 * can be delivered from the strq.
                 */
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
                    sctp_log_strm_del(control, NULL,
                        SCTP_STR_LOG_FROM_IMMED_DEL);
                }
                sctp_mark_non_revokable(asoc, control->sinfo_tsn);
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, 1,
                    SCTP_READ_LOCK_NOT_HELD,
                    SCTP_SO_NOT_LOCKED);
                continue;
            }
            break;
        }
    }
    if (queue_needed) {
        /*
         * Ok, we did not deliver this guy; find the correct place
         * to put it on the queue.
         */
        if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
            goto protocol_error;
        }
        if (TAILQ_EMPTY(&strm->inqueue)) {
            /* Empty queue */
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
                sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
            }
            TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
        } else {
            TAILQ_FOREACH(at, &strm->inqueue, next) {
                if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
                    /*
                     * one in queue is bigger than the
                     * new one, insert before this one
                     */
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
                        sctp_log_strm_del(control, at,
                            SCTP_STR_LOG_FROM_INSERT_MD);
                    }
                    TAILQ_INSERT_BEFORE(at, control, next);
                    break;
                } else if (at->sinfo_ssn == control->sinfo_ssn) {
                    /*
                     * Gak, he sent me a duplicate stream
                     * sequence number.  I guess I will
                     * just free this new guy; should we
                     * abort too?  FIX ME MAYBE?  Or it
                     * COULD be that the SSN's have
                     * wrapped.  Maybe I should compare
                     * to TSN somehow... sigh, for now
                     * just blow away the chunk!
                     */
                    if (control->data)
                        sctp_m_freem(control->data);
                    control->data = NULL;
                    asoc->size_on_all_streams -= control->length;
                    sctp_ucount_decr(asoc->cnt_on_all_streams);
                    if (control->whoFrom) {
                        sctp_free_remote_addr(control->whoFrom);
                        control->whoFrom = NULL;
                    }
                    sctp_free_a_readq(stcb, control);
                    return;
                } else {
                    if (TAILQ_NEXT(at, next) == NULL) {
                        /*
                         * We are at the end, insert
                         * it after this one
                         */
                        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
                            sctp_log_strm_del(control, at,
                                SCTP_STR_LOG_FROM_INSERT_TL);
                        }
                        TAILQ_INSERT_AFTER(&strm->inqueue,
                            at, control, next);
                        break;
                    }
                }
            }
        }
    }
}

/*
 * Returns two things: you get the total size of the deliverable parts of
 * the first fragmented message on the reassembly queue, and you get a 1
 * back if all of the message is ready or a 0 back if the message is still
 * incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t *t_size)
{
    struct sctp_tmit_chunk *chk;
    uint32_t tsn;

    *t_size = 0;
    chk = TAILQ_FIRST(&asoc->reasmqueue);
    if (chk == NULL) {
        /* nothing on the queue */
        return (0);
    }
    if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
        /* Not a first on the queue */
        return (0);
    }
    tsn = chk->rec.data.TSN_seq;
    TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
        if (tsn != chk->rec.data.TSN_seq) {
            return (0);
        }
        *t_size += chk->send_size;
        if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
            return (1);
        }
        tsn++;
    }
    return (0);
}
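
/*
 * Delivery policy used by the check below: a fragmented message is
 * started up to the socket either when it is complete, or when the
 * deliverable prefix reaches the partial delivery point (the smaller of
 * the socket buffer limit and the endpoint's configured
 * partial_delivery_point).
 */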

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    struct sctp_tmit_chunk *chk;
    uint16_t nxt_todel;
    uint32_t tsize, pd_point;

doit_again:
    chk = TAILQ_FIRST(&asoc->reasmqueue);
    if (chk == NULL) {
        /* Huh? */
        asoc->size_on_reasm_queue = 0;
        asoc->cnt_on_reasm_queue = 0;
        return;
    }
    if (asoc->fragmented_delivery_inprogress == 0) {
        nxt_todel =
            asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
        if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
            (nxt_todel == chk->rec.data.stream_seq ||
            (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
            /*
             * Yep, the first one is here and it's ok to
             * deliver, but should we?
             */
            if (stcb->sctp_socket) {
                pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
                    stcb->sctp_ep->partial_delivery_point);
            } else {
                pd_point = stcb->sctp_ep->partial_delivery_point;
            }
            if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
                /*
                 * Yes, we set up to start reception by
                 * backing down the TSN just in case we
                 * can't deliver.
                 */
                asoc->fragmented_delivery_inprogress = 1;
                asoc->tsn_last_delivered =
                    chk->rec.data.TSN_seq - 1;
                asoc->str_of_pdapi =
                    chk->rec.data.stream_number;
                asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
                asoc->pdapi_ppid = chk->rec.data.payloadtype;
                asoc->fragment_flags = chk->rec.data.rcv_flags;
                sctp_service_reassembly(stcb, asoc);
            }
        }
    } else {
        /*
         * Service re-assembly will deliver stream data queued at
         * the end of fragmented delivery... but it won't know to
         * go back and call itself again... we do that here with
         * the goto doit_again.
         */
        sctp_service_reassembly(stcb, asoc);
        if (asoc->fragmented_delivery_inprogress == 0) {
            /*
             * finished our fragmented delivery; could be more
             * waiting?
             */
            goto doit_again;
        }
    }
}

/*
 * Dump onto the re-assembly queue, in its proper place.  After dumping on
 * the queue, see if anything can be delivered.  If so, pull it off (or as
 * much as we can).  If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
    struct mbuf *oper;
    uint32_t cum_ackp1, prev_tsn, post_tsn;
    struct sctp_tmit_chunk *at, *prev, *next;

    prev = next = NULL;
    cum_ackp1 = asoc->tsn_last_delivered + 1;
    if (TAILQ_EMPTY(&asoc->reasmqueue)) {
        /* This is the first one on the queue */
        TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
        /*
         * we do not check for delivery of anything when only one
         * fragment is here
         */
        asoc->size_on_reasm_queue = chk->send_size;
        sctp_ucount_incr(asoc->cnt_on_reasm_queue);
        if (chk->rec.data.TSN_seq == cum_ackp1) {
            if (asoc->fragmented_delivery_inprogress == 0 &&
                (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
                SCTP_DATA_FIRST_FRAG) {
                /*
                 * An empty queue, no delivery in progress;
                 * we hit the next one and it does NOT have
                 * a FIRST fragment mark.
                 */
                SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's not first, no fragmented delivery in progress\n");
                oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                    0, M_NOWAIT, 1, MT_DATA);

                if (oper) {
                    struct sctp_paramhdr *ph;
                    uint32_t *ippp;

                    SCTP_BUF_LEN(oper) =
                        sizeof(struct sctp_paramhdr) +
                        (3 * sizeof(uint32_t));
                    ph = mtod(oper, struct sctp_paramhdr *);
                    ph->param_type =
                        htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                    ph->param_length = htons(SCTP_BUF_LEN(oper));
                    ippp = (uint32_t *)(ph + 1);
                    *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
                    ippp++;
                    *ippp = chk->rec.data.TSN_seq;
                    ippp++;
                    *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                }
                stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
                sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
                *abort_flag = 1;
            } else if (asoc->fragmented_delivery_inprogress &&
                (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
                /*
                 * We are doing a partial delivery and the
                 * NEXT chunk MUST be either the LAST or
                 * MIDDLE fragment NOT a FIRST
                 */
                SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
                oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                    0, M_NOWAIT, 1, MT_DATA);
                if (oper) {
                    struct sctp_paramhdr *ph;
                    uint32_t *ippp;

                    SCTP_BUF_LEN(oper) =
                        sizeof(struct sctp_paramhdr) +
                        (3 * sizeof(uint32_t));
                    ph = mtod(oper, struct sctp_paramhdr *);
                    ph->param_type =
                        htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                    ph->param_length = htons(SCTP_BUF_LEN(oper));
                    ippp = (uint32_t *)(ph + 1);
                    *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
                    ippp++;
                    *ippp = chk->rec.data.TSN_seq;
                    ippp++;
                    *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                }
                stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
                sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
                *abort_flag = 1;
            } else if (asoc->fragmented_delivery_inprogress) {
                /*
                 * Here we are ok with a MIDDLE or LAST
                 * piece
                 */
                if (chk->rec.data.stream_number !=
                    asoc->str_of_pdapi) {
                    /* Got to be the right STR No */
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
                        chk->rec.data.stream_number,
                        asoc->str_of_pdapi);
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_NOWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *)(ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
                    sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
                    *abort_flag = 1;
                } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
                    SCTP_DATA_UNORDERED &&
                    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
                    /* Got to be the right STR Seq */
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
                        chk->rec.data.stream_seq,
                        asoc->ssn_of_pdapi);
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_NOWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *)(ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
                    sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
                    *abort_flag = 1;
                }
            }
        }
        return;
    }
    /* Find its place */
    TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
        if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
            /*
             * one in queue is bigger than the new one, insert
             * before this one
             */
            /* A check */
            asoc->size_on_reasm_queue += chk->send_size;
            sctp_ucount_incr(asoc->cnt_on_reasm_queue);
            next = at;
            TAILQ_INSERT_BEFORE(at, chk, sctp_next);
            break;
        } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
            /*
             * Gak, he sent me a duplicate TSN.  I guess I will
             * just free this new guy; should we abort too?
             * FIX ME MAYBE?  Or it COULD be that the SSN's have
             * wrapped.  Maybe I should compare to TSN
             * somehow... sigh, for now just blow away the
             * chunk!
             */
            if (chk->data) {
                sctp_m_freem(chk->data);
                chk->data = NULL;
            }
            sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
            return;
        } else {
            prev = at;
            if (TAILQ_NEXT(at, sctp_next) == NULL) {
                /*
                 * We are at the end, insert it after this
                 * one
                 */
                /* check it first */
                asoc->size_on_reasm_queue += chk->send_size;
                sctp_ucount_incr(asoc->cnt_on_reasm_queue);
                TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
                break;
            }
        }
    }
    /* Now the audits */
    if (prev) {
        prev_tsn = chk->rec.data.TSN_seq - 1;
        if (prev_tsn == prev->rec.data.TSN_seq) {
            /*
             * Ok, the one I am dropping onto the end is the
             * NEXT.  A bit of validation here.
             */
            if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_FIRST_FRAG ||
                (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_MIDDLE_FRAG) {
                /*
                 * Insert chk MUST be a MIDDLE or LAST
                 * fragment
                 */
                if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                    SCTP_DATA_FIRST_FRAG) {
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_NOWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *)(ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
                    sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
                    *abort_flag = 1;
                    return;
                }
                if (chk->rec.data.stream_number !=
                    prev->rec.data.stream_number) {
                    /*
                     * Huh, need the correct STR here;
                     * they must be the same.
                     */
                    SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
                        chk->rec.data.stream_number,
                        prev->rec.data.stream_number);
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_NOWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *)(ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
                    sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
                    *abort_flag = 1;
                    return;
                }
                if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
                    chk->rec.data.stream_seq !=
                    prev->rec.data.stream_seq) {
                    /*
                     * Huh, need the correct STR here;
                     * they must be the same.
                     */
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
                        chk->rec.data.stream_seq,
                        prev->rec.data.stream_seq);
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_NOWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *)(ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
                    sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
                    *abort_flag = 1;
                    return;
                }
            } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_LAST_FRAG) {
                /* Insert chk MUST be a FIRST */
                if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
                    SCTP_DATA_FIRST_FRAG) {
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, it's not FIRST and it must be!\n");
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_NOWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *)(ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
                    sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
                    *abort_flag = 1;
                    return;
                }
            }
        }
    }
    if (next) {
        post_tsn = chk->rec.data.TSN_seq + 1;
        if (post_tsn == next->rec.data.TSN_seq) {
            /*
             * Ok, the one I am inserting ahead of is my NEXT
             * one.  A bit of validation here.
             */
            if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
                /* Insert chk MUST be a last fragment */
                if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
                    != SCTP_DATA_LAST_FRAG) {
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's not a last!\n");
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_NOWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *)(ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
                    sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
                    *abort_flag = 1;
                    return;
                }
            } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_MIDDLE_FRAG ||
                (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_LAST_FRAG) {
                /*
                 * Insert chk CAN be MIDDLE or FIRST NOT
                 * LAST
                 */
                if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                    SCTP_DATA_LAST_FRAG) {
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_NOWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *)(ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
                    sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
                    *abort_flag = 1;
                    return;
                }
                if (chk->rec.data.stream_number !=
                    next->rec.data.stream_number) {
                    /*
                     * Huh, need the correct STR here;
                     * they must be the same.
                     */
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
                        chk->rec.data.stream_number,
                        next->rec.data.stream_number);
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_NOWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *)(ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
                    sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
                    *abort_flag = 1;
                    return;
                }
                if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
                    chk->rec.data.stream_seq !=
                    next->rec.data.stream_seq) {
                    /*
                     * Huh, need the correct STR here;
                     * they must be the same.
                     */
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
                        chk->rec.data.stream_seq,
                        next->rec.data.stream_seq);
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_NOWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *)(ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
                    sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
                    *abort_flag = 1;
                    return;
                }
            }
        }
    }
    /* Do we need to do some delivery?  Check. */
    sctp_deliver_reasm_check(stcb, asoc);
}
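
/*
 * The audits above and sctp_does_tsn_belong_to_reasm() below enforce
 * the same adjacency rules: for consecutive TSNs t and t+1 within one
 * message, t must be a FIRST or MIDDLE fragment and t+1 a MIDDLE or
 * LAST; a LAST at t can only be followed by a FIRST at t+1, and a
 * FIRST at t+1 can only be preceded by a LAST at t.
 */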

/*
 * This is an unfortunate routine.  It checks to make sure that an evil
 * guy is not stuffing us full of bad packet fragments.  A broken peer
 * could also do this, but that is doubtful.  It is too bad that I must
 * worry about evil crackers... sigh :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
    struct sctp_tmit_chunk *at;
    uint32_t tsn_est;

    TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
        if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
            /* is it one bigger? */
            tsn_est = at->rec.data.TSN_seq + 1;
            if (tsn_est == TSN_seq) {
                /* yep. It better be a last then */
                if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
                    SCTP_DATA_LAST_FRAG) {
                    /*
                     * Ok, this guy belongs next to a guy
                     * that is NOT last; it should be a
                     * middle/last, not a complete chunk.
                     */
                    return (1);
                } else {
                    /*
                     * This guy is ok since it's a LAST
                     * and the new chunk is a fully
                     * self-contained one.
                     */
                    return (0);
                }
            }
        } else if (TSN_seq == at->rec.data.TSN_seq) {
            /* Software error since I have a dup? */
            return (1);
        } else {
            /*
             * Ok, 'at' is larger than the new chunk, but does
             * it need to be right before it?
             */
            tsn_est = TSN_seq + 1;
            if (tsn_est == at->rec.data.TSN_seq) {
                /* Yep, it better be a first */
                if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
                    SCTP_DATA_FIRST_FRAG) {
                    return (1);
                } else {
                    return (0);
                }
            }
        }
    }
    return (0);
}


static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
    /* Process a data chunk */
    struct sctp_tmit_chunk *chk;
    uint32_t tsn, gap;
    struct mbuf *dmbuf;
    int the_len;
    int need_reasm_check = 0;
    uint16_t strmno, strmseq;
    struct mbuf *oper;
    struct sctp_queued_to_read *control;
    int ordered;
    uint32_t protocol_id;
    uint8_t chunk_flags;
    struct sctp_stream_reset_list *liste;

    chk = NULL;
    tsn = ntohl(ch->dp.tsn);
    chunk_flags = ch->ch.chunk_flags;
    if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
        asoc->send_sack = 1;
    }
    protocol_id = ch->dp.protocol_id;
    ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
        sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
    }
    if (stcb == NULL) {
        return (0);
    }
    SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
    if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
        /* It is a duplicate */
        SCTP_STAT_INCR(sctps_recvdupdata);
        if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
            /* Record a dup for the next outbound sack */
            asoc->dup_tsns[asoc->numduptsns] = tsn;
            asoc->numduptsns++;
        }
        asoc->send_sack = 1;
        return (0);
    }
    /* Calculate the number of TSN's between the base and this TSN */
    SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
    if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
        /* Can't hold the bit in the mapping at max array, toss it */
        return (0);
    }
    if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
        SCTP_TCB_LOCK_ASSERT(stcb);
        if (sctp_expand_mapping_array(asoc, gap)) {
            /* Can't expand, drop it */
            return (0);
        }
    }
    if (SCTP_TSN_GT(tsn, *high_tsn)) {
        *high_tsn = tsn;
    }
    /* See if we have received this one already */
    if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
        SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
        SCTP_STAT_INCR(sctps_recvdupdata);
        if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
            /* Record a dup for the next outbound sack */
            asoc->dup_tsns[asoc->numduptsns] = tsn;
            asoc->numduptsns++;
        }
        asoc->send_sack = 1;
        return (0);
    }
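
    /*
     * At this point gap is this TSN's bit position relative to
     * mapping_array_base_tsn in the (expandable) receive maps;
     * anything at or below the cumulative ack, or already set in
     * either map, was rejected above as a duplicate and only earns a
     * dup-TSN report in the next SACK.
     */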
    /*
     * Check to see about the GONE flag; duplicates would cause a sack
     * to be sent up above
     */
    if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
        (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
        (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
        /*
         * wait a minute, this guy is gone, there is no longer a
         * receiver. Send peer an ABORT!
         */
        struct mbuf *op_err;

        op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
        sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
        *abort_flag = 1;
        return (0);
    }
    /*
     * Now before going further we see if there is room. If NOT then
     * we MAY let one through only IF this TSN is the one we are
     * waiting for on a partial delivery API.
     */

    /* now do the tests */
    if (((asoc->cnt_on_all_streams +
        asoc->cnt_on_reasm_queue +
        asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
        (((int)asoc->my_rwnd) <= 0)) {
        /*
         * When we have NO room in the rwnd we check to make sure
         * the reader is doing its job...
         */
        if (stcb->sctp_socket->so_rcv.sb_cc) {
            /* some to read, wake-up */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
            struct socket *so;

            so = SCTP_INP_SO(stcb->sctp_ep);
            atomic_add_int(&stcb->asoc.refcnt, 1);
            SCTP_TCB_UNLOCK(stcb);
            SCTP_SOCKET_LOCK(so, 1);
            SCTP_TCB_LOCK(stcb);
            atomic_subtract_int(&stcb->asoc.refcnt, 1);
            if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
                /* assoc was freed while we were unlocked */
                SCTP_SOCKET_UNLOCK(so, 1);
                return (0);
            }
#endif
            sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
            SCTP_SOCKET_UNLOCK(so, 1);
#endif
        }
        /* now is it in the mapping array of what we have accepted? */
        if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
            SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
            /* Nope, not in the valid range, dump it */
            sctp_set_rwnd(stcb, asoc);
            if ((asoc->cnt_on_all_streams +
                asoc->cnt_on_reasm_queue +
                asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
                SCTP_STAT_INCR(sctps_datadropchklmt);
            } else {
                SCTP_STAT_INCR(sctps_datadroprwnd);
            }
            *break_flag = 1;
            return (0);
        }
    }
    strmno = ntohs(ch->dp.stream_id);
    if (strmno >= asoc->streamincnt) {
        struct sctp_paramhdr *phdr;
        struct mbuf *mb;

        mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
            0, M_NOWAIT, 1, MT_DATA);
        if (mb != NULL) {
            /* add some space up front so prepend will work well */
            SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
            phdr = mtod(mb, struct sctp_paramhdr *);
            /*
             * Error causes are just parameters, and this one
             * has two back-to-back phdrs: one with the error
             * type and size, the other with the stream id and
             * a rsvd word.
             */
            SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
            phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
            phdr->param_length =
                htons(sizeof(struct sctp_paramhdr) * 2);
            phdr++;
            /* We insert the stream in the type field */
            phdr->param_type = ch->dp.stream_id;
            /* And set the length to 0 for the rsvd field */
            phdr->param_length = 0;
            sctp_queue_op_err(stcb, mb);
        }
        SCTP_STAT_INCR(sctps_badsid);
        SCTP_TCB_LOCK_ASSERT(stcb);
        SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
        if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
            asoc->highest_tsn_inside_nr_map = tsn;
        }
        if (tsn == (asoc->cumulative_tsn + 1)) {
            /* Update cum-ack */
            asoc->cumulative_tsn = tsn;
        }
        return (0);
    }
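
    /*
     * Layout of the invalid-stream error cause queued above: two
     * back-to-back parameter headers, the first carrying
     * SCTP_CAUSE_INVALID_STREAM and the total length (8), the second
     * reusing its type field for the offending stream id (already in
     * network byte order) and its length field as the zeroed reserved
     * word.
     */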
    /*
     * Before we continue, let's validate that we are not being fooled
     * by an evil attacker.  We can only have 4k chunks based on our
     * TSN spread allowed by the mapping array (512 * 8 bits), so
     * there is no way our stream sequence numbers could have wrapped.
     * We of course only validate the FIRST fragment, so the bit must
     * be set.
     */
    strmseq = ntohs(ch->dp.stream_sequence);
#ifdef SCTP_ASOCLOG_OF_TSNS
    SCTP_TCB_LOCK_ASSERT(stcb);
    if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
        asoc->tsn_in_at = 0;
        asoc->tsn_in_wrapped = 1;
    }
    asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
    asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
    asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
    asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
    asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
    asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
    asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
    asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
    asoc->tsn_in_at++;
#endif
    if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
        (TAILQ_EMPTY(&asoc->resetHead)) &&
        (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
        SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
        /* The incoming sseq is behind where we last delivered? */
        SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
            strmseq, asoc->strmin[strmno].last_sequence_delivered);
        oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
            0, M_NOWAIT, 1, MT_DATA);
        if (oper) {
            struct sctp_paramhdr *ph;
            uint32_t *ippp;

            SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
                (3 * sizeof(uint32_t));
            ph = mtod(oper, struct sctp_paramhdr *);
            ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
            ph->param_length = htons(SCTP_BUF_LEN(oper));
            ippp = (uint32_t *)(ph + 1);
            *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
            ippp++;
            *ippp = tsn;
            ippp++;
            *ippp = ((strmno << 16) | strmseq);
        }
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
        sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
        *abort_flag = 1;
        return (0);
    }
    /************************************
     * From here down we may find ch-> invalid
     * so it's a good idea NOT to use it.
     *************************************/
1665 *************************************/ 1666 1667 the_len = (chk_length - sizeof(struct sctp_data_chunk)); 1668 if (last_chunk == 0) { 1669 dmbuf = SCTP_M_COPYM(*m, 1670 (offset + sizeof(struct sctp_data_chunk)), 1671 the_len, M_NOWAIT); 1672 #ifdef SCTP_MBUF_LOGGING 1673 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 1674 struct mbuf *mat; 1675 1676 for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) { 1677 if (SCTP_BUF_IS_EXTENDED(mat)) { 1678 sctp_log_mb(mat, SCTP_MBUF_ICOPY); 1679 } 1680 } 1681 } 1682 #endif 1683 } else { 1684 /* We can steal the last chunk */ 1685 int l_len; 1686 1687 dmbuf = *m; 1688 /* lop off the top part */ 1689 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk))); 1690 if (SCTP_BUF_NEXT(dmbuf) == NULL) { 1691 l_len = SCTP_BUF_LEN(dmbuf); 1692 } else { 1693 /* 1694 * need to count up the size hopefully does not hit 1695 * this to often :-0 1696 */ 1697 struct mbuf *lat; 1698 1699 l_len = 0; 1700 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) { 1701 l_len += SCTP_BUF_LEN(lat); 1702 } 1703 } 1704 if (l_len > the_len) { 1705 /* Trim the end round bytes off too */ 1706 m_adj(dmbuf, -(l_len - the_len)); 1707 } 1708 } 1709 if (dmbuf == NULL) { 1710 SCTP_STAT_INCR(sctps_nomem); 1711 return (0); 1712 } 1713 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG && 1714 asoc->fragmented_delivery_inprogress == 0 && 1715 TAILQ_EMPTY(&asoc->resetHead) && 1716 ((ordered == 0) || 1717 ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq && 1718 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) { 1719 /* Candidate for express delivery */ 1720 /* 1721 * Its not fragmented, No PD-API is up, Nothing in the 1722 * delivery queue, Its un-ordered OR ordered and the next to 1723 * deliver AND nothing else is stuck on the stream queue, 1724 * And there is room for it in the socket buffer. Lets just 1725 * stuff it up the buffer.... 1726 */ 1727 1728 /* It would be nice to avoid this copy if we could :< */ 1729 sctp_alloc_a_readq(stcb, control); 1730 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 1731 protocol_id, 1732 stcb->asoc.context, 1733 strmno, strmseq, 1734 chunk_flags, 1735 dmbuf); 1736 if (control == NULL) { 1737 goto failed_express_del; 1738 } 1739 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 1740 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1741 asoc->highest_tsn_inside_nr_map = tsn; 1742 } 1743 sctp_add_to_readq(stcb->sctp_ep, stcb, 1744 control, &stcb->sctp_socket->so_rcv, 1745 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 1746 1747 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) { 1748 /* for ordered, bump what we delivered */ 1749 asoc->strmin[strmno].last_sequence_delivered++; 1750 } 1751 SCTP_STAT_INCR(sctps_recvexpress); 1752 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 1753 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, 1754 SCTP_STR_LOG_FROM_EXPRS_DEL); 1755 } 1756 control = NULL; 1757 1758 goto finish_express_del; 1759 } 1760 failed_express_del: 1761 /* If we reach here this is a new chunk */ 1762 chk = NULL; 1763 control = NULL; 1764 /* Express for fragmented delivery? */ 1765 if ((asoc->fragmented_delivery_inprogress) && 1766 (stcb->asoc.control_pdapi) && 1767 (asoc->str_of_pdapi == strmno) && 1768 (asoc->ssn_of_pdapi == strmseq) 1769 ) { 1770 control = stcb->asoc.control_pdapi; 1771 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) { 1772 /* Can't be another first? 
*/ 1773 goto failed_pdapi_express_del; 1774 } 1775 if (tsn == (control->sinfo_tsn + 1)) { 1776 /* Yep, we can add it on */ 1777 int end = 0; 1778 1779 if (chunk_flags & SCTP_DATA_LAST_FRAG) { 1780 end = 1; 1781 } 1782 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end, 1783 tsn, 1784 &stcb->sctp_socket->so_rcv)) { 1785 SCTP_PRINTF("Append fails end:%d\n", end); 1786 goto failed_pdapi_express_del; 1787 } 1788 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 1789 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1790 asoc->highest_tsn_inside_nr_map = tsn; 1791 } 1792 SCTP_STAT_INCR(sctps_recvexpressm); 1793 control->sinfo_tsn = tsn; 1794 asoc->tsn_last_delivered = tsn; 1795 asoc->fragment_flags = chunk_flags; 1796 asoc->tsn_of_pdapi_last_delivered = tsn; 1797 asoc->last_flags_delivered = chunk_flags; 1798 asoc->last_strm_seq_delivered = strmseq; 1799 asoc->last_strm_no_delivered = strmno; 1800 if (end) { 1801 /* clean up the flags and such */ 1802 asoc->fragmented_delivery_inprogress = 0; 1803 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) { 1804 asoc->strmin[strmno].last_sequence_delivered++; 1805 } 1806 stcb->asoc.control_pdapi = NULL; 1807 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) { 1808 /* 1809 * There could be another message 1810 * ready 1811 */ 1812 need_reasm_check = 1; 1813 } 1814 } 1815 control = NULL; 1816 goto finish_express_del; 1817 } 1818 } 1819 failed_pdapi_express_del: 1820 control = NULL; 1821 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { 1822 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 1823 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1824 asoc->highest_tsn_inside_nr_map = tsn; 1825 } 1826 } else { 1827 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); 1828 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) { 1829 asoc->highest_tsn_inside_map = tsn; 1830 } 1831 } 1832 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 1833 sctp_alloc_a_chunk(stcb, chk); 1834 if (chk == NULL) { 1835 /* No memory so we drop the chunk */ 1836 SCTP_STAT_INCR(sctps_nomem); 1837 if (last_chunk == 0) { 1838 /* we copied it, free the copy */ 1839 sctp_m_freem(dmbuf); 1840 } 1841 return (0); 1842 } 1843 chk->rec.data.TSN_seq = tsn; 1844 chk->no_fr_allowed = 0; 1845 chk->rec.data.stream_seq = strmseq; 1846 chk->rec.data.stream_number = strmno; 1847 chk->rec.data.payloadtype = protocol_id; 1848 chk->rec.data.context = stcb->asoc.context; 1849 chk->rec.data.doing_fast_retransmit = 0; 1850 chk->rec.data.rcv_flags = chunk_flags; 1851 chk->asoc = asoc; 1852 chk->send_size = the_len; 1853 chk->whoTo = net; 1854 atomic_add_int(&net->ref_count, 1); 1855 chk->data = dmbuf; 1856 } else { 1857 sctp_alloc_a_readq(stcb, control); 1858 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 1859 protocol_id, 1860 stcb->asoc.context, 1861 strmno, strmseq, 1862 chunk_flags, 1863 dmbuf); 1864 if (control == NULL) { 1865 /* No memory so we drop the chunk */ 1866 SCTP_STAT_INCR(sctps_nomem); 1867 if (last_chunk == 0) { 1868 /* we copied it, free the copy */ 1869 sctp_m_freem(dmbuf); 1870 } 1871 return (0); 1872 } 1873 control->length = the_len; 1874 } 1875 1876 /* Mark it as received */ 1877 /* Now queue it where it belongs */ 1878 if (control != NULL) { 1879 /* First a sanity check */ 1880 if (asoc->fragmented_delivery_inprogress) { 1881 /* 1882 * Ok, we have a fragmented delivery in progress if 1883 * this chunk is next to deliver OR belongs in our 1884 * view to the reassembly, the peer is evil or 1885 * broken. 
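 *
 * A worked example with made-up numbers: a PD-API is pushing up a
 * fragmented message and tsn_last_delivered is 500. With an empty
 * reassembly queue, TSN 501 can only be the next fragment of that
 * message, so a complete (non-fragmented) chunk arriving as TSN 501
 * is a protocol violation and we abort below.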
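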
1886 */ 1887 uint32_t estimate_tsn; 1888 1889 estimate_tsn = asoc->tsn_last_delivered + 1; 1890 if (TAILQ_EMPTY(&asoc->reasmqueue) && 1891 (estimate_tsn == control->sinfo_tsn)) { 1892 /* Evil/Broke peer */ 1893 sctp_m_freem(control->data); 1894 control->data = NULL; 1895 if (control->whoFrom) { 1896 sctp_free_remote_addr(control->whoFrom); 1897 control->whoFrom = NULL; 1898 } 1899 sctp_free_a_readq(stcb, control); 1900 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1901 0, M_NOWAIT, 1, MT_DATA); 1902 if (oper) { 1903 struct sctp_paramhdr *ph; 1904 uint32_t *ippp; 1905 1906 SCTP_BUF_LEN(oper) = 1907 sizeof(struct sctp_paramhdr) + 1908 (3 * sizeof(uint32_t)); 1909 ph = mtod(oper, struct sctp_paramhdr *); 1910 ph->param_type = 1911 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1912 ph->param_length = htons(SCTP_BUF_LEN(oper)); 1913 ippp = (uint32_t *) (ph + 1); 1914 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15); 1915 ippp++; 1916 *ippp = tsn; 1917 ippp++; 1918 *ippp = ((strmno << 16) | strmseq); 1919 } 1920 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15; 1921 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); 1922 *abort_flag = 1; 1923 return (0); 1924 } else { 1925 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) { 1926 sctp_m_freem(control->data); 1927 control->data = NULL; 1928 if (control->whoFrom) { 1929 sctp_free_remote_addr(control->whoFrom); 1930 control->whoFrom = NULL; 1931 } 1932 sctp_free_a_readq(stcb, control); 1933 1934 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1935 0, M_NOWAIT, 1, MT_DATA); 1936 if (oper) { 1937 struct sctp_paramhdr *ph; 1938 uint32_t *ippp; 1939 1940 SCTP_BUF_LEN(oper) = 1941 sizeof(struct sctp_paramhdr) + 1942 (3 * sizeof(uint32_t)); 1943 ph = mtod(oper, 1944 struct sctp_paramhdr *); 1945 ph->param_type = 1946 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1947 ph->param_length = 1948 htons(SCTP_BUF_LEN(oper)); 1949 ippp = (uint32_t *) (ph + 1); 1950 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16); 1951 ippp++; 1952 *ippp = tsn; 1953 ippp++; 1954 *ippp = ((strmno << 16) | strmseq); 1955 } 1956 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16; 1957 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); 1958 *abort_flag = 1; 1959 return (0); 1960 } 1961 } 1962 } else { 1963 /* No PDAPI running */ 1964 if (!TAILQ_EMPTY(&asoc->reasmqueue)) { 1965 /* 1966 * Reassembly queue is NOT empty validate 1967 * that this tsn does not need to be in 1968 * reasembly queue. If it does then our peer 1969 * is broken or evil. 
1970 */ 1971 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) { 1972 sctp_m_freem(control->data); 1973 control->data = NULL; 1974 if (control->whoFrom) { 1975 sctp_free_remote_addr(control->whoFrom); 1976 control->whoFrom = NULL; 1977 } 1978 sctp_free_a_readq(stcb, control); 1979 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1980 0, M_NOWAIT, 1, MT_DATA); 1981 if (oper) { 1982 struct sctp_paramhdr *ph; 1983 uint32_t *ippp; 1984 1985 SCTP_BUF_LEN(oper) = 1986 sizeof(struct sctp_paramhdr) + 1987 (3 * sizeof(uint32_t)); 1988 ph = mtod(oper, 1989 struct sctp_paramhdr *); 1990 ph->param_type = 1991 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1992 ph->param_length = 1993 htons(SCTP_BUF_LEN(oper)); 1994 ippp = (uint32_t *) (ph + 1); 1995 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17); 1996 ippp++; 1997 *ippp = tsn; 1998 ippp++; 1999 *ippp = ((strmno << 16) | strmseq); 2000 } 2001 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17; 2002 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); 2003 *abort_flag = 1; 2004 return (0); 2005 } 2006 } 2007 } 2008 /* ok, if we reach here we have passed the sanity checks */ 2009 if (chunk_flags & SCTP_DATA_UNORDERED) { 2010 /* queue directly into socket buffer */ 2011 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 2012 sctp_add_to_readq(stcb->sctp_ep, stcb, 2013 control, 2014 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 2015 } else { 2016 /* 2017 * Special check for when streams are resetting. We 2018 * could be more smart about this and check the 2019 * actual stream to see if it is not being reset.. 2020 * that way we would not create a HOLB when amongst 2021 * streams being reset and those not being reset. 2022 * 2023 * We take complete messages that have a stream reset 2024 * intervening (aka the TSN is after where our 2025 * cum-ack needs to be) off and put them on a 2026 * pending_reply_queue. The reassembly ones we do 2027 * not have to worry about since they are all sorted 2028 * and proceessed by TSN order. It is only the 2029 * singletons I must worry about. 2030 */ 2031 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2032 SCTP_TSN_GT(tsn, liste->tsn)) { 2033 /* 2034 * yep its past where we need to reset... go 2035 * ahead and queue it. 2036 */ 2037 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) { 2038 /* first one on */ 2039 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2040 } else { 2041 struct sctp_queued_to_read *ctlOn, 2042 *nctlOn; 2043 unsigned char inserted = 0; 2044 2045 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) { 2046 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) { 2047 continue; 2048 } else { 2049 /* found it */ 2050 TAILQ_INSERT_BEFORE(ctlOn, control, next); 2051 inserted = 1; 2052 break; 2053 } 2054 } 2055 if (inserted == 0) { 2056 /* 2057 * must be put at end, use 2058 * prevP (all setup from 2059 * loop) to setup nextP. 2060 */ 2061 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2062 } 2063 } 2064 } else { 2065 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag); 2066 if (*abort_flag) { 2067 return (0); 2068 } 2069 } 2070 } 2071 } else { 2072 /* Into the re-assembly queue */ 2073 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag); 2074 if (*abort_flag) { 2075 /* 2076 * the assoc is now gone and chk was put onto the 2077 * reasm queue, which has all been freed. 
2078 */ 2079 *m = NULL; 2080 return (0); 2081 } 2082 } 2083 finish_express_del: 2084 if (tsn == (asoc->cumulative_tsn + 1)) { 2085 /* Update cum-ack */ 2086 asoc->cumulative_tsn = tsn; 2087 } 2088 if (last_chunk) { 2089 *m = NULL; 2090 } 2091 if (ordered) { 2092 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); 2093 } else { 2094 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); 2095 } 2096 SCTP_STAT_INCR(sctps_recvdata); 2097 /* Set it present please */ 2098 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 2099 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN); 2100 } 2101 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2102 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2103 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); 2104 } 2105 /* check the special flag for stream resets */ 2106 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2107 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) { 2108 /* 2109 * we have finished working through the backlogged TSN's now 2110 * time to reset streams. 1: call reset function. 2: free 2111 * pending_reply space 3: distribute any chunks in 2112 * pending_reply_queue. 2113 */ 2114 struct sctp_queued_to_read *ctl, *nctl; 2115 2116 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams); 2117 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); 2118 SCTP_FREE(liste, SCTP_M_STRESET); 2119 /* sa_ignore FREED_MEMORY */ 2120 liste = TAILQ_FIRST(&asoc->resetHead); 2121 if (TAILQ_EMPTY(&asoc->resetHead)) { 2122 /* All can be removed */ 2123 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) { 2124 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 2125 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); 2126 if (*abort_flag) { 2127 return (0); 2128 } 2129 } 2130 } else { 2131 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) { 2132 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) { 2133 break; 2134 } 2135 /* 2136 * if ctl->sinfo_tsn is <= liste->tsn we can 2137 * process it which is the NOT of 2138 * ctl->sinfo_tsn > liste->tsn 2139 */ 2140 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 2141 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); 2142 if (*abort_flag) { 2143 return (0); 2144 } 2145 } 2146 } 2147 /* 2148 * Now service re-assembly to pick up anything that has been 2149 * held on reassembly queue? 2150 */ 2151 sctp_deliver_reasm_check(stcb, asoc); 2152 need_reasm_check = 0; 2153 } 2154 if (need_reasm_check) { 2155 /* Another one waits ? 
*/ 2156 sctp_deliver_reasm_check(stcb, asoc); 2157 } 2158 return (1); 2159 } 2160 2161 int8_t sctp_map_lookup_tab[256] = { 2162 0, 1, 0, 2, 0, 1, 0, 3, 2163 0, 1, 0, 2, 0, 1, 0, 4, 2164 0, 1, 0, 2, 0, 1, 0, 3, 2165 0, 1, 0, 2, 0, 1, 0, 5, 2166 0, 1, 0, 2, 0, 1, 0, 3, 2167 0, 1, 0, 2, 0, 1, 0, 4, 2168 0, 1, 0, 2, 0, 1, 0, 3, 2169 0, 1, 0, 2, 0, 1, 0, 6, 2170 0, 1, 0, 2, 0, 1, 0, 3, 2171 0, 1, 0, 2, 0, 1, 0, 4, 2172 0, 1, 0, 2, 0, 1, 0, 3, 2173 0, 1, 0, 2, 0, 1, 0, 5, 2174 0, 1, 0, 2, 0, 1, 0, 3, 2175 0, 1, 0, 2, 0, 1, 0, 4, 2176 0, 1, 0, 2, 0, 1, 0, 3, 2177 0, 1, 0, 2, 0, 1, 0, 7, 2178 0, 1, 0, 2, 0, 1, 0, 3, 2179 0, 1, 0, 2, 0, 1, 0, 4, 2180 0, 1, 0, 2, 0, 1, 0, 3, 2181 0, 1, 0, 2, 0, 1, 0, 5, 2182 0, 1, 0, 2, 0, 1, 0, 3, 2183 0, 1, 0, 2, 0, 1, 0, 4, 2184 0, 1, 0, 2, 0, 1, 0, 3, 2185 0, 1, 0, 2, 0, 1, 0, 6, 2186 0, 1, 0, 2, 0, 1, 0, 3, 2187 0, 1, 0, 2, 0, 1, 0, 4, 2188 0, 1, 0, 2, 0, 1, 0, 3, 2189 0, 1, 0, 2, 0, 1, 0, 5, 2190 0, 1, 0, 2, 0, 1, 0, 3, 2191 0, 1, 0, 2, 0, 1, 0, 4, 2192 0, 1, 0, 2, 0, 1, 0, 3, 2193 0, 1, 0, 2, 0, 1, 0, 8 2194 }; 2195 2196 2197 void 2198 sctp_slide_mapping_arrays(struct sctp_tcb *stcb) 2199 { 2200 /* 2201 * Now we also need to check the mapping array in a couple of ways. 2202 * 1) Did we move the cum-ack point? 2203 * 2204 * When you first glance at this you might think that all entries that 2205 * make up the position of the cum-ack would be in the nr-mapping 2206 * array only... i.e. things up to the cum-ack are always 2207 * deliverable. That's true with one exception: when it's a fragmented 2208 * message, we may not deliver the data until some threshold (or all 2209 * of it) is in place. So we must OR the nr_mapping_array and 2210 * mapping_array to get a true picture of the cum-ack. 2211 */ 2212 struct sctp_association *asoc; 2213 int at; 2214 uint8_t val; 2215 int slide_from, slide_end, lgap, distance; 2216 uint32_t old_cumack, old_base, old_highest, highest_tsn; 2217 2218 asoc = &stcb->asoc; 2219 2220 old_cumack = asoc->cumulative_tsn; 2221 old_base = asoc->mapping_array_base_tsn; 2222 old_highest = asoc->highest_tsn_inside_map; 2223 /* 2224 * We could probably improve this a small bit by calculating the 2225 * offset of the current cum-ack as the starting point. 
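 *
 * For reference, sctp_map_lookup_tab[] above holds, for each byte
 * value, the count of consecutive 1 bits starting at bit 0, so the
 * loop below can count contiguous received TSNs a byte at a time.
 * A small sketch with made-up map contents: if the OR'd bytes read
 * { 0xff, 0x1f, ... }, then at = 8 + 5 = 13 and the cum-ack becomes
 * mapping_array_base_tsn + 12.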
2226 */ 2227 at = 0; 2228 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) { 2229 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from]; 2230 if (val == 0xff) { 2231 at += 8; 2232 } else { 2233 /* there is a 0 bit */ 2234 at += sctp_map_lookup_tab[val]; 2235 break; 2236 } 2237 } 2238 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1); 2239 2240 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) && 2241 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) { 2242 #ifdef INVARIANTS 2243 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map", 2244 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2245 #else 2246 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n", 2247 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2248 sctp_print_mapping_array(asoc); 2249 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2250 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 2251 } 2252 asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2253 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn; 2254 #endif 2255 } 2256 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2257 highest_tsn = asoc->highest_tsn_inside_nr_map; 2258 } else { 2259 highest_tsn = asoc->highest_tsn_inside_map; 2260 } 2261 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) { 2262 /* The complete array was completed by a single FR */ 2263 /* highest becomes the cum-ack */ 2264 int clr; 2265 2266 #ifdef INVARIANTS 2267 unsigned int i; 2268 2269 #endif 2270 2271 /* clear the array */ 2272 clr = ((at + 7) >> 3); 2273 if (clr > asoc->mapping_array_size) { 2274 clr = asoc->mapping_array_size; 2275 } 2276 memset(asoc->mapping_array, 0, clr); 2277 memset(asoc->nr_mapping_array, 0, clr); 2278 #ifdef INVARIANTS 2279 for (i = 0; i < asoc->mapping_array_size; i++) { 2280 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) { 2281 SCTP_PRINTF("Error Mapping array's not clean at clear\n"); 2282 sctp_print_mapping_array(asoc); 2283 } 2284 } 2285 #endif 2286 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; 2287 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2288 } else if (at >= 8) { 2289 /* we can slide the mapping array down */ 2290 /* slide_from holds where we hit the first NON 0xff byte */ 2291 2292 /* 2293 * now calculate the ceiling of the move using our highest 2294 * TSN value 2295 */ 2296 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn); 2297 slide_end = (lgap >> 3); 2298 if (slide_end < slide_from) { 2299 sctp_print_mapping_array(asoc); 2300 #ifdef INVARIANTS 2301 panic("impossible slide"); 2302 #else 2303 SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? 
at:%d\n", 2304 lgap, slide_end, slide_from, at); 2305 return; 2306 #endif 2307 } 2308 if (slide_end > asoc->mapping_array_size) { 2309 #ifdef INVARIANTS 2310 panic("would overrun buffer"); 2311 #else 2312 SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n", 2313 asoc->mapping_array_size, slide_end); 2314 slide_end = asoc->mapping_array_size; 2315 #endif 2316 } 2317 distance = (slide_end - slide_from) + 1; 2318 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2319 sctp_log_map(old_base, old_cumack, old_highest, 2320 SCTP_MAP_PREPARE_SLIDE); 2321 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end, 2322 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM); 2323 } 2324 if (distance + slide_from > asoc->mapping_array_size || 2325 distance < 0) { 2326 /* 2327 * Here we do NOT slide forward the array so that 2328 * hopefully when more data comes in to fill it up 2329 * we will be able to slide it forward. Really I 2330 * don't think this should happen :-0 2331 */ 2332 2333 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2334 sctp_log_map((uint32_t) distance, (uint32_t) slide_from, 2335 (uint32_t) asoc->mapping_array_size, 2336 SCTP_MAP_SLIDE_NONE); 2337 } 2338 } else { 2339 int ii; 2340 2341 for (ii = 0; ii < distance; ii++) { 2342 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii]; 2343 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii]; 2344 2345 } 2346 for (ii = distance; ii < asoc->mapping_array_size; ii++) { 2347 asoc->mapping_array[ii] = 0; 2348 asoc->nr_mapping_array[ii] = 0; 2349 } 2350 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) { 2351 asoc->highest_tsn_inside_map += (slide_from << 3); 2352 } 2353 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) { 2354 asoc->highest_tsn_inside_nr_map += (slide_from << 3); 2355 } 2356 asoc->mapping_array_base_tsn += (slide_from << 3); 2357 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2358 sctp_log_map(asoc->mapping_array_base_tsn, 2359 asoc->cumulative_tsn, asoc->highest_tsn_inside_map, 2360 SCTP_MAP_SLIDE_RESULT); 2361 } 2362 } 2363 } 2364 } 2365 2366 void 2367 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap) 2368 { 2369 struct sctp_association *asoc; 2370 uint32_t highest_tsn; 2371 2372 asoc = &stcb->asoc; 2373 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2374 highest_tsn = asoc->highest_tsn_inside_nr_map; 2375 } else { 2376 highest_tsn = asoc->highest_tsn_inside_map; 2377 } 2378 2379 /* 2380 * Now we need to see if we need to queue a sack or just start the 2381 * timer (if allowed). 2382 */ 2383 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2384 /* 2385 * Ok, special case: in the SHUTDOWN-SENT state we make 2386 * sure the SACK timer is off and instead send a SHUTDOWN and a 2387 * SACK. 2388 */ 2389 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2390 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 2391 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18); 2392 } 2393 sctp_send_shutdown(stcb, 2394 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination)); 2395 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 2396 } else { 2397 int is_a_gap; 2398 2399 /* is there a gap now ? 
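 * (i.e. has some TSN above the cum-ack been received while
 * cum-ack + 1 is still missing?)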
*/ 2400 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 2401 2402 /* 2403 * CMT DAC algorithm: increase number of packets received 2404 * since last ack 2405 */ 2406 stcb->asoc.cmt_dac_pkts_rcvd++; 2407 2408 if ((stcb->asoc.send_sack == 1) || /* We need to send a 2409 * SACK */ 2410 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no 2411 * longer is one */ 2412 (stcb->asoc.numduptsns) || /* we have dup's */ 2413 (is_a_gap) || /* is still a gap */ 2414 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */ 2415 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */ 2416 ) { 2417 2418 if ((stcb->asoc.sctp_cmt_on_off > 0) && 2419 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) && 2420 (stcb->asoc.send_sack == 0) && 2421 (stcb->asoc.numduptsns == 0) && 2422 (stcb->asoc.delayed_ack) && 2423 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) { 2424 2425 /* 2426 * CMT DAC algorithm: With CMT, delay acks 2427 * even in the face of reordering. That is, 2428 * acks that do not have to be sent because 2429 * of the above reasons will be delayed; in 2430 * particular, acks that would have been 2431 * sent due to gap 2432 * reports will be delayed with DAC. Start 2433 * the delayed ack timer. 2434 * 2435 */ 2436 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2437 stcb->sctp_ep, stcb, NULL); 2438 } else { 2439 /* 2440 * Ok, we must build a SACK: the timer 2441 * is pending, we got our first packet, OR 2442 * there are gaps or duplicates. 2443 */ 2444 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 2445 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 2446 } 2447 } else { 2448 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2449 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2450 stcb->sctp_ep, stcb, NULL); 2451 } 2452 } 2453 } 2454 } 2455 2456 void 2457 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc) 2458 { 2459 struct sctp_tmit_chunk *chk; 2460 uint32_t tsize, pd_point; 2461 uint16_t nxt_todel; 2462 2463 if (asoc->fragmented_delivery_inprogress) { 2464 sctp_service_reassembly(stcb, asoc); 2465 } 2466 /* Can we proceed further, i.e. is the PD-API complete? */ 2467 if (asoc->fragmented_delivery_inprogress) { 2468 /* no */ 2469 return; 2470 } 2471 /* 2472 * Now, is there some other chunk I can deliver from the reassembly 2473 * queue? 2474 */ 2475 doit_again: 2476 chk = TAILQ_FIRST(&asoc->reasmqueue); 2477 if (chk == NULL) { 2478 asoc->size_on_reasm_queue = 0; 2479 asoc->cnt_on_reasm_queue = 0; 2480 return; 2481 } 2482 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1; 2483 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) && 2484 ((nxt_todel == chk->rec.data.stream_seq) || 2485 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) { 2486 /* 2487 * Yep, the first one is here. We setup to start reception, 2488 * by backing down the TSN just in case we can't deliver. 2489 */ 2490 2491 /* 2492 * Before we start though either all of the message should 2493 * be here or the socket buffer max or nothing on the 2494 * delivery queue and something can be delivered. 
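 *
 * A sketch with assumed numbers: if SCTP_SB_LIMIT_RCV() yields 64kB
 * and the endpoint's partial_delivery_point is 4kB, then pd_point
 * below is 4kB, so delivery of a partially reassembled message can
 * begin once at least 4kB of it is queued contiguously, before its
 * LAST fragment has arrived.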
2495 */ 2496 if (stcb->sctp_socket) { 2497 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), 2498 stcb->sctp_ep->partial_delivery_point); 2499 } else { 2500 pd_point = stcb->sctp_ep->partial_delivery_point; 2501 } 2502 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) { 2503 asoc->fragmented_delivery_inprogress = 1; 2504 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1; 2505 asoc->str_of_pdapi = chk->rec.data.stream_number; 2506 asoc->ssn_of_pdapi = chk->rec.data.stream_seq; 2507 asoc->pdapi_ppid = chk->rec.data.payloadtype; 2508 asoc->fragment_flags = chk->rec.data.rcv_flags; 2509 sctp_service_reassembly(stcb, asoc); 2510 if (asoc->fragmented_delivery_inprogress == 0) { 2511 goto doit_again; 2512 } 2513 } 2514 } 2515 } 2516 2517 int 2518 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, 2519 struct sockaddr *src, struct sockaddr *dst, 2520 struct sctphdr *sh, struct sctp_inpcb *inp, 2521 struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t * high_tsn, 2522 uint8_t use_mflowid, uint32_t mflowid, 2523 uint32_t vrf_id, uint16_t port) 2524 { 2525 struct sctp_data_chunk *ch, chunk_buf; 2526 struct sctp_association *asoc; 2527 int num_chunks = 0; /* number of control chunks processed */ 2528 int stop_proc = 0; 2529 int chk_length, break_flag, last_chunk; 2530 int abort_flag = 0, was_a_gap; 2531 struct mbuf *m; 2532 uint32_t highest_tsn; 2533 2534 /* set the rwnd */ 2535 sctp_set_rwnd(stcb, &stcb->asoc); 2536 2537 m = *mm; 2538 SCTP_TCB_LOCK_ASSERT(stcb); 2539 asoc = &stcb->asoc; 2540 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2541 highest_tsn = asoc->highest_tsn_inside_nr_map; 2542 } else { 2543 highest_tsn = asoc->highest_tsn_inside_map; 2544 } 2545 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 2546 /* 2547 * setup where we got the last DATA packet from for any SACK that 2548 * may need to go out. Don't bump the net. This is done ONLY when a 2549 * chunk is assigned. 2550 */ 2551 asoc->last_data_chunk_from = net; 2552 2553 /*- 2554 * Now before we proceed we must figure out if this is a wasted 2555 * cluster... i.e. it is a small packet sent in and yet the driver 2556 * underneath allocated a full cluster for it. If so we must copy it 2557 * to a smaller mbuf and free up the cluster mbuf. This will help 2558 * with cluster starvation. Note for __Panda__ we don't do this 2559 * since it has clusters all the way down to 64 bytes. 2560 */ 2561 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) { 2562 /* we only handle mbufs that are singletons, not chains */ 2563 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA); 2564 if (m) { 2565 /* ok, let's see if we can copy the data up */ 2566 caddr_t *from, *to; 2567 2568 /* get the pointers and copy */ 2569 to = mtod(m, caddr_t *); 2570 from = mtod((*mm), caddr_t *); 2571 memcpy(to, from, SCTP_BUF_LEN((*mm))); 2572 /* copy the length and free up the old */ 2573 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm)); 2574 sctp_m_freem(*mm); 2575 /* success, back copy */ 2576 *mm = m; 2577 } else { 2578 /* We are in trouble in the mbuf world ... yikes */ 2579 m = *mm; 2580 } 2581 } 2582 /* get pointer to the first chunk header */ 2583 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset, 2584 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf); 2585 if (ch == NULL) { 2586 return (1); 2587 } 2588 /* 2589 * process all DATA chunks... 
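 * each pass validates the chunk header, hands SCTP_DATA chunks to
 * sctp_process_a_data_chunk(), ignores (or, for unknown types,
 * applies the upper-two-bit rules to) anything else, and advances
 * *offset by the 32-bit padded chunk length.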
2590 */ 2591 *high_tsn = asoc->cumulative_tsn; 2592 break_flag = 0; 2593 asoc->data_pkts_seen++; 2594 while (stop_proc == 0) { 2595 /* validate chunk length */ 2596 chk_length = ntohs(ch->ch.chunk_length); 2597 if (length - *offset < chk_length) { 2598 /* all done, mutilated chunk */ 2599 stop_proc = 1; 2600 continue; 2601 } 2602 if (ch->ch.chunk_type == SCTP_DATA) { 2603 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) { 2604 /* 2605 * Need to send an abort since we had an 2606 * invalid data chunk. 2607 */ 2608 struct mbuf *op_err; 2609 2610 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)), 2611 0, M_NOWAIT, 1, MT_DATA); 2612 2613 if (op_err) { 2614 struct sctp_paramhdr *ph; 2615 uint32_t *ippp; 2616 2617 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) + 2618 (2 * sizeof(uint32_t)); 2619 ph = mtod(op_err, struct sctp_paramhdr *); 2620 ph->param_type = 2621 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 2622 ph->param_length = htons(SCTP_BUF_LEN(op_err)); 2623 ippp = (uint32_t *) (ph + 1); 2624 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19); 2625 ippp++; 2626 *ippp = asoc->cumulative_tsn; 2627 2628 } 2629 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19; 2630 sctp_abort_association(inp, stcb, m, iphlen, 2631 src, dst, sh, op_err, 2632 use_mflowid, mflowid, 2633 vrf_id, port); 2634 return (2); 2635 } 2636 #ifdef SCTP_AUDITING_ENABLED 2637 sctp_audit_log(0xB1, 0); 2638 #endif 2639 if (SCTP_SIZE32(chk_length) == (length - *offset)) { 2640 last_chunk = 1; 2641 } else { 2642 last_chunk = 0; 2643 } 2644 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch, 2645 chk_length, net, high_tsn, &abort_flag, &break_flag, 2646 last_chunk)) { 2647 num_chunks++; 2648 } 2649 if (abort_flag) 2650 return (2); 2651 2652 if (break_flag) { 2653 /* 2654 * Set because we are out of rwnd space and 2655 * have no drop report space left. 2656 */ 2657 stop_proc = 1; 2658 continue; 2659 } 2660 } else { 2661 /* not a data chunk in the data region */ 2662 switch (ch->ch.chunk_type) { 2663 case SCTP_INITIATION: 2664 case SCTP_INITIATION_ACK: 2665 case SCTP_SELECTIVE_ACK: 2666 case SCTP_NR_SELECTIVE_ACK: 2667 case SCTP_HEARTBEAT_REQUEST: 2668 case SCTP_HEARTBEAT_ACK: 2669 case SCTP_ABORT_ASSOCIATION: 2670 case SCTP_SHUTDOWN: 2671 case SCTP_SHUTDOWN_ACK: 2672 case SCTP_OPERATION_ERROR: 2673 case SCTP_COOKIE_ECHO: 2674 case SCTP_COOKIE_ACK: 2675 case SCTP_ECN_ECHO: 2676 case SCTP_ECN_CWR: 2677 case SCTP_SHUTDOWN_COMPLETE: 2678 case SCTP_AUTHENTICATION: 2679 case SCTP_ASCONF_ACK: 2680 case SCTP_PACKET_DROPPED: 2681 case SCTP_STREAM_RESET: 2682 case SCTP_FORWARD_CUM_TSN: 2683 case SCTP_ASCONF: 2684 /* 2685 * Now, what do we do with KNOWN chunks that 2686 * are NOT in the right place? 2687 * 2688 * For now, I do nothing but ignore them. We 2689 * may later want to add sysctl stuff to 2690 * switch out and do either an ABORT() or 2691 * possibly process them. 
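 * (sctp_strict_data_order below is such a sysctl: when set, a
 * known control chunk found after DATA aborts the association
 * instead of being ignored.)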
2692 */ 2693 if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) { 2694 struct mbuf *op_err; 2695 2696 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION); 2697 sctp_abort_association(inp, stcb, 2698 m, iphlen, 2699 src, dst, 2700 sh, op_err, 2701 use_mflowid, mflowid, 2702 vrf_id, port); 2703 return (2); 2704 } 2705 break; 2706 default: 2707 /* unknown chunk type, use bit rules */ 2708 if (ch->ch.chunk_type & 0x40) { 2709 /* Add a error report to the queue */ 2710 struct mbuf *merr; 2711 struct sctp_paramhdr *phd; 2712 2713 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_NOWAIT, 1, MT_DATA); 2714 if (merr) { 2715 phd = mtod(merr, struct sctp_paramhdr *); 2716 /* 2717 * We cheat and use param 2718 * type since we did not 2719 * bother to define a error 2720 * cause struct. They are 2721 * the same basic format 2722 * with different names. 2723 */ 2724 phd->param_type = 2725 htons(SCTP_CAUSE_UNRECOG_CHUNK); 2726 phd->param_length = 2727 htons(chk_length + sizeof(*phd)); 2728 SCTP_BUF_LEN(merr) = sizeof(*phd); 2729 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT); 2730 if (SCTP_BUF_NEXT(merr)) { 2731 if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL)) { 2732 sctp_m_freem(merr); 2733 } else { 2734 sctp_queue_op_err(stcb, merr); 2735 } 2736 } else { 2737 sctp_m_freem(merr); 2738 } 2739 } 2740 } 2741 if ((ch->ch.chunk_type & 0x80) == 0) { 2742 /* discard the rest of this packet */ 2743 stop_proc = 1; 2744 } /* else skip this bad chunk and 2745 * continue... */ 2746 break; 2747 } /* switch of chunk type */ 2748 } 2749 *offset += SCTP_SIZE32(chk_length); 2750 if ((*offset >= length) || stop_proc) { 2751 /* no more data left in the mbuf chain */ 2752 stop_proc = 1; 2753 continue; 2754 } 2755 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset, 2756 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf); 2757 if (ch == NULL) { 2758 *offset = length; 2759 stop_proc = 1; 2760 continue; 2761 } 2762 } 2763 if (break_flag) { 2764 /* 2765 * we need to report rwnd overrun drops. 2766 */ 2767 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0); 2768 } 2769 if (num_chunks) { 2770 /* 2771 * Did we get data, if so update the time for auto-close and 2772 * give peer credit for being alive. 
2773 */ 2774 SCTP_STAT_INCR(sctps_recvpktwithdata); 2775 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 2776 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 2777 stcb->asoc.overall_error_count, 2778 0, 2779 SCTP_FROM_SCTP_INDATA, 2780 __LINE__); 2781 } 2782 stcb->asoc.overall_error_count = 0; 2783 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd); 2784 } 2785 /* now service all of the reassm queue if needed */ 2786 if (!(TAILQ_EMPTY(&asoc->reasmqueue))) 2787 sctp_service_queues(stcb, asoc); 2788 2789 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2790 /* Assure that we ack right away */ 2791 stcb->asoc.send_sack = 1; 2792 } 2793 /* Start a sack timer or QUEUE a SACK for sending */ 2794 sctp_sack_check(stcb, was_a_gap); 2795 return (0); 2796 } 2797 2798 static int 2799 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn, 2800 uint16_t frag_strt, uint16_t frag_end, int nr_sacking, 2801 int *num_frs, 2802 uint32_t * biggest_newly_acked_tsn, 2803 uint32_t * this_sack_lowest_newack, 2804 int *rto_ok) 2805 { 2806 struct sctp_tmit_chunk *tp1; 2807 unsigned int theTSN; 2808 int j, wake_him = 0, circled = 0; 2809 2810 /* Recover the tp1 we last saw */ 2811 tp1 = *p_tp1; 2812 if (tp1 == NULL) { 2813 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 2814 } 2815 for (j = frag_strt; j <= frag_end; j++) { 2816 theTSN = j + last_tsn; 2817 while (tp1) { 2818 if (tp1->rec.data.doing_fast_retransmit) 2819 (*num_frs) += 1; 2820 2821 /*- 2822 * CMT: CUCv2 algorithm. For each TSN being 2823 * processed from the sent queue, track the 2824 * next expected pseudo-cumack, or 2825 * rtx_pseudo_cumack, if required. Separate 2826 * cumack trackers for first transmissions, 2827 * and retransmissions. 2828 */ 2829 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) && 2830 (tp1->snd_count == 1)) { 2831 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq; 2832 tp1->whoTo->find_pseudo_cumack = 0; 2833 } 2834 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) && 2835 (tp1->snd_count > 1)) { 2836 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq; 2837 tp1->whoTo->find_rtx_pseudo_cumack = 0; 2838 } 2839 if (tp1->rec.data.TSN_seq == theTSN) { 2840 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 2841 /*- 2842 * must be held until 2843 * cum-ack passes 2844 */ 2845 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 2846 /*- 2847 * If it is less than RESEND, it is 2848 * now no-longer in flight. 2849 * Higher values may already be set 2850 * via previous Gap Ack Blocks... 2851 * i.e. ACKED or RESEND. 2852 */ 2853 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, 2854 *biggest_newly_acked_tsn)) { 2855 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq; 2856 } 2857 /*- 2858 * CMT: SFR algo (and HTNA) - set 2859 * saw_newack to 1 for dest being 2860 * newly acked. update 2861 * this_sack_highest_newack if 2862 * appropriate. 
2863 */ 2864 if (tp1->rec.data.chunk_was_revoked == 0) 2865 tp1->whoTo->saw_newack = 1; 2866 2867 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, 2868 tp1->whoTo->this_sack_highest_newack)) { 2869 tp1->whoTo->this_sack_highest_newack = 2870 tp1->rec.data.TSN_seq; 2871 } 2872 /*- 2873 * CMT DAC algo: also update 2874 * this_sack_lowest_newack 2875 */ 2876 if (*this_sack_lowest_newack == 0) { 2877 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 2878 sctp_log_sack(*this_sack_lowest_newack, 2879 last_tsn, 2880 tp1->rec.data.TSN_seq, 2881 0, 2882 0, 2883 SCTP_LOG_TSN_ACKED); 2884 } 2885 *this_sack_lowest_newack = tp1->rec.data.TSN_seq; 2886 } 2887 /*- 2888 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp 2889 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set 2890 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be 2891 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack. 2892 * Separate pseudo_cumack trackers for first transmissions and 2893 * retransmissions. 2894 */ 2895 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) { 2896 if (tp1->rec.data.chunk_was_revoked == 0) { 2897 tp1->whoTo->new_pseudo_cumack = 1; 2898 } 2899 tp1->whoTo->find_pseudo_cumack = 1; 2900 } 2901 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 2902 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 2903 } 2904 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) { 2905 if (tp1->rec.data.chunk_was_revoked == 0) { 2906 tp1->whoTo->new_pseudo_cumack = 1; 2907 } 2908 tp1->whoTo->find_rtx_pseudo_cumack = 1; 2909 } 2910 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 2911 sctp_log_sack(*biggest_newly_acked_tsn, 2912 last_tsn, 2913 tp1->rec.data.TSN_seq, 2914 frag_strt, 2915 frag_end, 2916 SCTP_LOG_TSN_ACKED); 2917 } 2918 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 2919 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP, 2920 tp1->whoTo->flight_size, 2921 tp1->book_size, 2922 (uintptr_t) tp1->whoTo, 2923 tp1->rec.data.TSN_seq); 2924 } 2925 sctp_flight_size_decrease(tp1); 2926 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 2927 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 2928 tp1); 2929 } 2930 sctp_total_flight_decrease(stcb, tp1); 2931 2932 tp1->whoTo->net_ack += tp1->send_size; 2933 if (tp1->snd_count < 2) { 2934 /*- 2935 * True non-retransmited chunk 2936 */ 2937 tp1->whoTo->net_ack2 += tp1->send_size; 2938 2939 /*- 2940 * update RTO too ? 
2941 */ 2942 if (tp1->do_rtt) { 2943 if (*rto_ok) { 2944 tp1->whoTo->RTO = 2945 sctp_calculate_rto(stcb, 2946 &stcb->asoc, 2947 tp1->whoTo, 2948 &tp1->sent_rcv_time, 2949 sctp_align_safe_nocopy, 2950 SCTP_RTT_FROM_DATA); 2951 *rto_ok = 0; 2952 } 2953 if (tp1->whoTo->rto_needed == 0) { 2954 tp1->whoTo->rto_needed = 1; 2955 } 2956 tp1->do_rtt = 0; 2957 } 2958 } 2959 } 2960 if (tp1->sent <= SCTP_DATAGRAM_RESEND) { 2961 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, 2962 stcb->asoc.this_sack_highest_gap)) { 2963 stcb->asoc.this_sack_highest_gap = 2964 tp1->rec.data.TSN_seq; 2965 } 2966 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 2967 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt); 2968 #ifdef SCTP_AUDITING_ENABLED 2969 sctp_audit_log(0xB2, 2970 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff)); 2971 #endif 2972 } 2973 } 2974 /*- 2975 * All chunks NOT UNSENT fall through here and are marked 2976 * (leave PR-SCTP ones that are to skip alone though) 2977 */ 2978 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) && 2979 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { 2980 tp1->sent = SCTP_DATAGRAM_MARKED; 2981 } 2982 if (tp1->rec.data.chunk_was_revoked) { 2983 /* deflate the cwnd */ 2984 tp1->whoTo->cwnd -= tp1->book_size; 2985 tp1->rec.data.chunk_was_revoked = 0; 2986 } 2987 /* NR Sack code here */ 2988 if (nr_sacking && 2989 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { 2990 if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) { 2991 stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--; 2992 #ifdef INVARIANTS 2993 } else { 2994 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number); 2995 #endif 2996 } 2997 tp1->sent = SCTP_DATAGRAM_NR_ACKED; 2998 if (tp1->data) { 2999 /* 3000 * sa_ignore 3001 * NO_NULL_CHK 3002 */ 3003 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 3004 sctp_m_freem(tp1->data); 3005 tp1->data = NULL; 3006 } 3007 wake_him++; 3008 } 3009 } 3010 break; 3011 } /* if (tp1->TSN_seq == theTSN) */ 3012 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) { 3013 break; 3014 } 3015 tp1 = TAILQ_NEXT(tp1, sctp_next); 3016 if ((tp1 == NULL) && (circled == 0)) { 3017 circled++; 3018 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 3019 } 3020 } /* end while (tp1) */ 3021 if (tp1 == NULL) { 3022 circled = 0; 3023 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 3024 } 3025 /* In case the fragments were not in order we must reset */ 3026 } /* end for (j = fragStart */ 3027 *p_tp1 = tp1; 3028 return (wake_him); /* Return value only used for nr-sack */ 3029 } 3030 3031 3032 static int 3033 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc, 3034 uint32_t last_tsn, uint32_t * biggest_tsn_acked, 3035 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack, 3036 int num_seg, int num_nr_seg, int *rto_ok) 3037 { 3038 struct sctp_gap_ack_block *frag, block; 3039 struct sctp_tmit_chunk *tp1; 3040 int i; 3041 int num_frs = 0; 3042 int chunk_freed; 3043 int non_revocable; 3044 uint16_t frag_strt, frag_end, prev_frag_end; 3045 3046 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3047 prev_frag_end = 0; 3048 chunk_freed = 0; 3049 3050 for (i = 0; i < (num_seg + num_nr_seg); i++) { 3051 if (i == num_seg) { 3052 prev_frag_end = 0; 3053 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3054 } 3055 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset, 3056 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block); 3057 *offset += sizeof(block); 3058 if (frag == NULL) { 3059 return (chunk_freed); 3060 } 3061 frag_strt = ntohs(frag->start); 3062 frag_end 
= ntohs(frag->end); 3063 3064 if (frag_strt > frag_end) { 3065 /* This gap report is malformed, skip it. */ 3066 continue; 3067 } 3068 if (frag_strt <= prev_frag_end) { 3069 /* This gap report is not in order, so restart. */ 3070 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3071 } 3072 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) { 3073 *biggest_tsn_acked = last_tsn + frag_end; 3074 } 3075 if (i < num_seg) { 3076 non_revocable = 0; 3077 } else { 3078 non_revocable = 1; 3079 } 3080 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end, 3081 non_revocable, &num_frs, biggest_newly_acked_tsn, 3082 this_sack_lowest_newack, rto_ok)) { 3083 chunk_freed = 1; 3084 } 3085 prev_frag_end = frag_end; 3086 } 3087 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3088 if (num_frs) 3089 sctp_log_fr(*biggest_tsn_acked, 3090 *biggest_newly_acked_tsn, 3091 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS); 3092 } 3093 return (chunk_freed); 3094 } 3095 3096 static void 3097 sctp_check_for_revoked(struct sctp_tcb *stcb, 3098 struct sctp_association *asoc, uint32_t cumack, 3099 uint32_t biggest_tsn_acked) 3100 { 3101 struct sctp_tmit_chunk *tp1; 3102 3103 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3104 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) { 3105 /* 3106 * ok this guy is either ACK or MARKED. If it is 3107 * ACKED it has been previously acked but not this 3108 * time i.e. revoked. If it is MARKED it was ACK'ed 3109 * again. 3110 */ 3111 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) { 3112 break; 3113 } 3114 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 3115 /* it has been revoked */ 3116 tp1->sent = SCTP_DATAGRAM_SENT; 3117 tp1->rec.data.chunk_was_revoked = 1; 3118 /* 3119 * We must add this stuff back in to assure 3120 * timers and such get started. 3121 */ 3122 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3123 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 3124 tp1->whoTo->flight_size, 3125 tp1->book_size, 3126 (uintptr_t) tp1->whoTo, 3127 tp1->rec.data.TSN_seq); 3128 } 3129 sctp_flight_size_increase(tp1); 3130 sctp_total_flight_increase(stcb, tp1); 3131 /* 3132 * We inflate the cwnd to compensate for our 3133 * artificial inflation of the flight_size. 3134 */ 3135 tp1->whoTo->cwnd += tp1->book_size; 3136 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3137 sctp_log_sack(asoc->last_acked_seq, 3138 cumack, 3139 tp1->rec.data.TSN_seq, 3140 0, 3141 0, 3142 SCTP_LOG_TSN_REVOKED); 3143 } 3144 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) { 3145 /* it has been re-acked in this SACK */ 3146 tp1->sent = SCTP_DATAGRAM_ACKED; 3147 } 3148 } 3149 if (tp1->sent == SCTP_DATAGRAM_UNSENT) 3150 break; 3151 } 3152 } 3153 3154 3155 static void 3156 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, 3157 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved) 3158 { 3159 struct sctp_tmit_chunk *tp1; 3160 int strike_flag = 0; 3161 struct timeval now; 3162 int tot_retrans = 0; 3163 uint32_t sending_seq; 3164 struct sctp_nets *net; 3165 int num_dests_sacked = 0; 3166 3167 /* 3168 * select the sending_seq, this is either the next thing ready to be 3169 * sent but not transmitted, OR, the next seq we assign. 
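 *
 * Example with assumed values: if an unsent chunk with TSN 1000 is
 * sitting on the send queue, sending_seq is 1000; if the queue is
 * empty and asoc->sending_seq (the next TSN we would hand out) is
 * 1005, we use 1005.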
3170 */ 3171 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 3172 if (tp1 == NULL) { 3173 sending_seq = asoc->sending_seq; 3174 } else { 3175 sending_seq = tp1->rec.data.TSN_seq; 3176 } 3177 3178 /* CMT DAC algo: finding out if SACK is a mixed SACK */ 3179 if ((asoc->sctp_cmt_on_off > 0) && 3180 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3181 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3182 if (net->saw_newack) 3183 num_dests_sacked++; 3184 } 3185 } 3186 if (stcb->asoc.peer_supports_prsctp) { 3187 (void)SCTP_GETTIME_TIMEVAL(&now); 3188 } 3189 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3190 strike_flag = 0; 3191 if (tp1->no_fr_allowed) { 3192 /* this one had a timeout or something */ 3193 continue; 3194 } 3195 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3196 if (tp1->sent < SCTP_DATAGRAM_RESEND) 3197 sctp_log_fr(biggest_tsn_newly_acked, 3198 tp1->rec.data.TSN_seq, 3199 tp1->sent, 3200 SCTP_FR_LOG_CHECK_STRIKE); 3201 } 3202 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) || 3203 tp1->sent == SCTP_DATAGRAM_UNSENT) { 3204 /* done */ 3205 break; 3206 } 3207 if (stcb->asoc.peer_supports_prsctp) { 3208 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3209 /* Is it expired? */ 3210 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3211 /* Yes so drop it */ 3212 if (tp1->data != NULL) { 3213 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, 3214 SCTP_SO_NOT_LOCKED); 3215 } 3216 continue; 3217 } 3218 } 3219 } 3220 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) { 3221 /* we are beyond the tsn in the sack */ 3222 break; 3223 } 3224 if (tp1->sent >= SCTP_DATAGRAM_RESEND) { 3225 /* either a RESEND, ACKED, or MARKED */ 3226 /* skip */ 3227 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) { 3228 /* Continue striking FWD-TSN chunks */ 3229 tp1->rec.data.fwd_tsn_cnt++; 3230 } 3231 continue; 3232 } 3233 /* 3234 * CMT : SFR algo (covers part of DAC and HTNA as well) 3235 */ 3236 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) { 3237 /* 3238 * No new acks were received for data sent to this 3239 * dest. Therefore, according to the SFR algo for 3240 * CMT, no data sent to this dest can be marked for 3241 * FR using this SACK. 3242 */ 3243 continue; 3244 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq, 3245 tp1->whoTo->this_sack_highest_newack)) { 3246 /* 3247 * CMT: New acks were received for data sent to 3248 * this dest. But no new acks were seen for data 3249 * sent after tp1. Therefore, according to the SFR 3250 * algo for CMT, tp1 cannot be marked for FR using 3251 * this SACK. This step covers part of the DAC algo 3252 * and the HTNA algo as well. 3253 */ 3254 continue; 3255 } 3256 /* 3257 * Here we check to see if we have already done an FR 3258 * and if so we see if the biggest TSN we saw in the sack is 3259 * smaller than the recovery point. If so we don't strike 3260 * the tsn... otherwise we CAN strike the TSN. 3261 */ 3262 /* 3263 * @@@ JRI: Check for CMT if (accum_moved && 3264 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 3265 * 0)) { 3266 */ 3267 if (accum_moved && asoc->fast_retran_loss_recovery) { 3268 /* 3269 * Strike the TSN if in fast-recovery and cum-ack 3270 * moved. 
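 * (Each strike bumps tp1->sent one step; once it reaches
 * SCTP_DATAGRAM_RESEND the chunk is picked up further down in
 * this function and marked for fast retransmission.)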
3271 */ 3272 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3273 sctp_log_fr(biggest_tsn_newly_acked, 3274 tp1->rec.data.TSN_seq, 3275 tp1->sent, 3276 SCTP_FR_LOG_STRIKE_CHUNK); 3277 } 3278 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3279 tp1->sent++; 3280 } 3281 if ((asoc->sctp_cmt_on_off > 0) && 3282 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3283 /* 3284 * CMT DAC algorithm: If SACK flag is set to 3285 * 0, then lowest_newack test will not pass 3286 * because it would have been set to the 3287 * cumack earlier. If not already to be 3288 * rtx'd, If not a mixed sack and if tp1 is 3289 * not between two sacked TSNs, then mark by 3290 * one more. NOTE that we are marking by one 3291 * additional time since the SACK DAC flag 3292 * indicates that two packets have been 3293 * received after this missing TSN. 3294 */ 3295 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3296 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) { 3297 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3298 sctp_log_fr(16 + num_dests_sacked, 3299 tp1->rec.data.TSN_seq, 3300 tp1->sent, 3301 SCTP_FR_LOG_STRIKE_CHUNK); 3302 } 3303 tp1->sent++; 3304 } 3305 } 3306 } else if ((tp1->rec.data.doing_fast_retransmit) && 3307 (asoc->sctp_cmt_on_off == 0)) { 3308 /* 3309 * For those that have done a FR we must take 3310 * special consideration if we strike. I.e the 3311 * biggest_newly_acked must be higher than the 3312 * sending_seq at the time we did the FR. 3313 */ 3314 if ( 3315 #ifdef SCTP_FR_TO_ALTERNATE 3316 /* 3317 * If FR's go to new networks, then we must only do 3318 * this for singly homed asoc's. However if the FR's 3319 * go to the same network (Armando's work) then its 3320 * ok to FR multiple times. 3321 */ 3322 (asoc->numnets < 2) 3323 #else 3324 (1) 3325 #endif 3326 ) { 3327 3328 if (SCTP_TSN_GE(biggest_tsn_newly_acked, 3329 tp1->rec.data.fast_retran_tsn)) { 3330 /* 3331 * Strike the TSN, since this ack is 3332 * beyond where things were when we 3333 * did a FR. 3334 */ 3335 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3336 sctp_log_fr(biggest_tsn_newly_acked, 3337 tp1->rec.data.TSN_seq, 3338 tp1->sent, 3339 SCTP_FR_LOG_STRIKE_CHUNK); 3340 } 3341 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3342 tp1->sent++; 3343 } 3344 strike_flag = 1; 3345 if ((asoc->sctp_cmt_on_off > 0) && 3346 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3347 /* 3348 * CMT DAC algorithm: If 3349 * SACK flag is set to 0, 3350 * then lowest_newack test 3351 * will not pass because it 3352 * would have been set to 3353 * the cumack earlier. If 3354 * not already to be rtx'd, 3355 * If not a mixed sack and 3356 * if tp1 is not between two 3357 * sacked TSNs, then mark by 3358 * one more. NOTE that we 3359 * are marking by one 3360 * additional time since the 3361 * SACK DAC flag indicates 3362 * that two packets have 3363 * been received after this 3364 * missing TSN. 3365 */ 3366 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 3367 (num_dests_sacked == 1) && 3368 SCTP_TSN_GT(this_sack_lowest_newack, 3369 tp1->rec.data.TSN_seq)) { 3370 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3371 sctp_log_fr(32 + num_dests_sacked, 3372 tp1->rec.data.TSN_seq, 3373 tp1->sent, 3374 SCTP_FR_LOG_STRIKE_CHUNK); 3375 } 3376 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3377 tp1->sent++; 3378 } 3379 } 3380 } 3381 } 3382 } 3383 /* 3384 * JRI: TODO: remove code for HTNA algo. CMT's SFR 3385 * algo covers HTNA. 
3386 */ 3387 } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, 3388 biggest_tsn_newly_acked)) { 3389 /* 3390 * We don't strike these: This is the HTNA 3391 * algorithm i.e. we don't strike If our TSN is 3392 * larger than the Highest TSN Newly Acked. 3393 */ 3394 ; 3395 } else { 3396 /* Strike the TSN */ 3397 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3398 sctp_log_fr(biggest_tsn_newly_acked, 3399 tp1->rec.data.TSN_seq, 3400 tp1->sent, 3401 SCTP_FR_LOG_STRIKE_CHUNK); 3402 } 3403 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3404 tp1->sent++; 3405 } 3406 if ((asoc->sctp_cmt_on_off > 0) && 3407 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3408 /* 3409 * CMT DAC algorithm: If SACK flag is set to 3410 * 0, then lowest_newack test will not pass 3411 * because it would have been set to the 3412 * cumack earlier. If not already to be 3413 * rtx'd, If not a mixed sack and if tp1 is 3414 * not between two sacked TSNs, then mark by 3415 * one more. NOTE that we are marking by one 3416 * additional time since the SACK DAC flag 3417 * indicates that two packets have been 3418 * received after this missing TSN. 3419 */ 3420 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3421 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) { 3422 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3423 sctp_log_fr(48 + num_dests_sacked, 3424 tp1->rec.data.TSN_seq, 3425 tp1->sent, 3426 SCTP_FR_LOG_STRIKE_CHUNK); 3427 } 3428 tp1->sent++; 3429 } 3430 } 3431 } 3432 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3433 struct sctp_nets *alt; 3434 3435 /* fix counts and things */ 3436 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3437 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND, 3438 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0), 3439 tp1->book_size, 3440 (uintptr_t) tp1->whoTo, 3441 tp1->rec.data.TSN_seq); 3442 } 3443 if (tp1->whoTo) { 3444 tp1->whoTo->net_ack++; 3445 sctp_flight_size_decrease(tp1); 3446 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3447 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3448 tp1); 3449 } 3450 } 3451 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 3452 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, 3453 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3454 } 3455 /* add back to the rwnd */ 3456 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3457 3458 /* remove from the total flight */ 3459 sctp_total_flight_decrease(stcb, tp1); 3460 3461 if ((stcb->asoc.peer_supports_prsctp) && 3462 (PR_SCTP_RTX_ENABLED(tp1->flags))) { 3463 /* 3464 * Has it been retransmitted tv_sec times? - 3465 * we store the retran count there. 
3466 */ 3467 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) { 3468 /* Yes, so drop it */ 3469 if (tp1->data != NULL) { 3470 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, 3471 SCTP_SO_NOT_LOCKED); 3472 } 3473 /* Make sure to flag we had a FR */ 3474 tp1->whoTo->net_ack++; 3475 continue; 3476 } 3477 } 3478 /* 3479 * SCTP_PRINTF("OK, we are now ready to FR this 3480 * guy\n"); 3481 */ 3482 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3483 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count, 3484 0, SCTP_FR_MARKED); 3485 } 3486 if (strike_flag) { 3487 /* This is a subsequent FR */ 3488 SCTP_STAT_INCR(sctps_sendmultfastretrans); 3489 } 3490 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3491 if (asoc->sctp_cmt_on_off > 0) { 3492 /* 3493 * CMT: Using RTX_SSTHRESH policy for CMT. 3494 * If CMT is being used, then pick dest with 3495 * largest ssthresh for any retransmission. 3496 */ 3497 tp1->no_fr_allowed = 1; 3498 alt = tp1->whoTo; 3499 /* sa_ignore NO_NULL_CHK */ 3500 if (asoc->sctp_cmt_pf > 0) { 3501 /* 3502 * JRS 5/18/07 - If CMT PF is on, 3503 * use the PF version of 3504 * find_alt_net() 3505 */ 3506 alt = sctp_find_alternate_net(stcb, alt, 2); 3507 } else { 3508 /* 3509 * JRS 5/18/07 - If only CMT is on, 3510 * use the CMT version of 3511 * find_alt_net() 3512 */ 3513 /* sa_ignore NO_NULL_CHK */ 3514 alt = sctp_find_alternate_net(stcb, alt, 1); 3515 } 3516 if (alt == NULL) { 3517 alt = tp1->whoTo; 3518 } 3519 /* 3520 * CUCv2: If a different dest is picked for 3521 * the retransmission, then new 3522 * (rtx-)pseudo_cumack needs to be tracked 3523 * for orig dest. Let CUCv2 track new (rtx-) 3524 * pseudo-cumack always. 3525 */ 3526 if (tp1->whoTo) { 3527 tp1->whoTo->find_pseudo_cumack = 1; 3528 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3529 } 3530 } else {/* CMT is OFF */ 3531 3532 #ifdef SCTP_FR_TO_ALTERNATE 3533 /* Can we find an alternate? */ 3534 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); 3535 #else 3536 /* 3537 * default behavior is to NOT retransmit 3538 * FR's to an alternate. Armando Caro's 3539 * paper details why. 3540 */ 3541 alt = tp1->whoTo; 3542 #endif 3543 } 3544 3545 tp1->rec.data.doing_fast_retransmit = 1; 3546 tot_retrans++; 3547 /* mark the sending seq for possible subsequent FR's */ 3548 /* 3549 * SCTP_PRINTF("Marking TSN for FR new value %x\n", 3550 * (uint32_t)tpi->rec.data.TSN_seq); 3551 */ 3552 if (TAILQ_EMPTY(&asoc->send_queue)) { 3553 /* 3554 * If the queue of send is empty then its 3555 * the next sequence number that will be 3556 * assigned so we subtract one from this to 3557 * get the one we last sent. 3558 */ 3559 tp1->rec.data.fast_retran_tsn = sending_seq; 3560 } else { 3561 /* 3562 * If there are chunks on the send queue 3563 * (unsent data that has made it from the 3564 * stream queues but not out the door, we 3565 * take the first one (which will have the 3566 * lowest TSN) and subtract one to get the 3567 * one we last sent. 3568 */ 3569 struct sctp_tmit_chunk *ttt; 3570 3571 ttt = TAILQ_FIRST(&asoc->send_queue); 3572 tp1->rec.data.fast_retran_tsn = 3573 ttt->rec.data.TSN_seq; 3574 } 3575 3576 if (tp1->do_rtt) { 3577 /* 3578 * this guy had a RTO calculation pending on 3579 * it, cancel it 3580 */ 3581 if ((tp1->whoTo != NULL) && 3582 (tp1->whoTo->rto_needed == 0)) { 3583 tp1->whoTo->rto_needed = 1; 3584 } 3585 tp1->do_rtt = 0; 3586 } 3587 if (alt != tp1->whoTo) { 3588 /* yes, there is an alternate. 
*/ 3589 sctp_free_remote_addr(tp1->whoTo); 3590 /* sa_ignore FREED_MEMORY */ 3591 tp1->whoTo = alt; 3592 atomic_add_int(&alt->ref_count, 1); 3593 } 3594 } 3595 } 3596 } 3597 3598 struct sctp_tmit_chunk * 3599 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb, 3600 struct sctp_association *asoc) 3601 { 3602 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL; 3603 struct timeval now; 3604 int now_filled = 0; 3605 3606 if (asoc->peer_supports_prsctp == 0) { 3607 return (NULL); 3608 } 3609 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 3610 if (tp1->sent != SCTP_FORWARD_TSN_SKIP && 3611 tp1->sent != SCTP_DATAGRAM_RESEND && 3612 tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 3613 /* no chance to advance, out of here */ 3614 break; 3615 } 3616 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 3617 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || 3618 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { 3619 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 3620 asoc->advanced_peer_ack_point, 3621 tp1->rec.data.TSN_seq, 0, 0); 3622 } 3623 } 3624 if (!PR_SCTP_ENABLED(tp1->flags)) { 3625 /* 3626 * We can't fwd-tsn past any that are reliable, i.e. not 3627 * PR-SCTP; those are retransmitted until the asoc fails. 3628 */ 3629 break; 3630 } 3631 if (!now_filled) { 3632 (void)SCTP_GETTIME_TIMEVAL(&now); 3633 now_filled = 1; 3634 } 3635 /* 3636 * Now we have a chunk on a PR stream that is either marked 3637 * to be skipped or marked for another retransmission, 3638 * possibly having run out of chances already. Can we skip 3639 * it if it's a resend? 3640 */ 3641 if (tp1->sent == SCTP_DATAGRAM_RESEND && 3642 (PR_SCTP_TTL_ENABLED(tp1->flags))) { 3643 /* 3644 * Is this one marked for resend and is its time 3645 * now up? 3646 */ 3647 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3648 /* Yes so drop it */ 3649 if (tp1->data) { 3650 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 3651 1, SCTP_SO_NOT_LOCKED); 3652 } 3653 } else { 3654 /* 3655 * No, we are done once we hit one marked for 3656 * resend whose time has not expired. 3657 */ 3658 break; 3659 } 3660 } 3661 /* 3662 * OK, if this chunk is marked to be dropped we can clean it 3663 * up, advance our peer ack point, and check 3664 * the next chunk.
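 * (advanced_peer_ack_point is what a later FORWARD-TSN will carry,
 * allowing the peer to move its cumulative ack past abandoned TSNs.)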
3665 */ 3666 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || 3667 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { 3668 /* advance PeerAckPoint goes forward */ 3669 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) { 3670 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq; 3671 a_adv = tp1; 3672 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) { 3673 /* No update but we do save the chk */ 3674 a_adv = tp1; 3675 } 3676 } else { 3677 /* 3678 * If it is still in RESEND we can advance no 3679 * further 3680 */ 3681 break; 3682 } 3683 } 3684 return (a_adv); 3685 } 3686 3687 static int 3688 sctp_fs_audit(struct sctp_association *asoc) 3689 { 3690 struct sctp_tmit_chunk *chk; 3691 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0; 3692 int entry_flight, entry_cnt, ret; 3693 3694 entry_flight = asoc->total_flight; 3695 entry_cnt = asoc->total_flight_count; 3696 ret = 0; 3697 3698 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt) 3699 return (0); 3700 3701 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 3702 if (chk->sent < SCTP_DATAGRAM_RESEND) { 3703 SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n", 3704 chk->rec.data.TSN_seq, 3705 chk->send_size, 3706 chk->snd_count); 3707 inflight++; 3708 } else if (chk->sent == SCTP_DATAGRAM_RESEND) { 3709 resend++; 3710 } else if (chk->sent < SCTP_DATAGRAM_ACKED) { 3711 inbetween++; 3712 } else if (chk->sent > SCTP_DATAGRAM_ACKED) { 3713 above++; 3714 } else { 3715 acked++; 3716 } 3717 } 3718 3719 if ((inflight > 0) || (inbetween > 0)) { 3720 #ifdef INVARIANTS 3721 panic("Flight size-express incorrect? \n"); 3722 #else 3723 SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n", 3724 entry_flight, entry_cnt); 3725 3726 SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n", 3727 inflight, inbetween, resend, above, acked); 3728 ret = 1; 3729 #endif 3730 } 3731 return (ret); 3732 } 3733 3734 3735 static void 3736 sctp_window_probe_recovery(struct sctp_tcb *stcb, 3737 struct sctp_association *asoc, 3738 struct sctp_tmit_chunk *tp1) 3739 { 3740 tp1->window_probe = 0; 3741 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) { 3742 /* TSN's skipped we do NOT move back. 
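 * Chunks already acked, or whose data is gone, keep their state;
 * we only log the event and leave the flight size untouched.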
*/ 3743 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD, 3744 tp1->whoTo->flight_size, 3745 tp1->book_size, 3746 (uintptr_t) tp1->whoTo, 3747 tp1->rec.data.TSN_seq); 3748 return; 3749 } 3750 /* First setup this by shrinking flight */ 3751 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3752 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3753 tp1); 3754 } 3755 sctp_flight_size_decrease(tp1); 3756 sctp_total_flight_decrease(stcb, tp1); 3757 /* Now mark for resend */ 3758 tp1->sent = SCTP_DATAGRAM_RESEND; 3759 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 3760 3761 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3762 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP, 3763 tp1->whoTo->flight_size, 3764 tp1->book_size, 3765 (uintptr_t) tp1->whoTo, 3766 tp1->rec.data.TSN_seq); 3767 } 3768 } 3769 3770 void 3771 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, 3772 uint32_t rwnd, int *abort_now, int ecne_seen) 3773 { 3774 struct sctp_nets *net; 3775 struct sctp_association *asoc; 3776 struct sctp_tmit_chunk *tp1, *tp2; 3777 uint32_t old_rwnd; 3778 int win_probe_recovery = 0; 3779 int win_probe_recovered = 0; 3780 int j, done_once = 0; 3781 int rto_ok = 1; 3782 3783 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 3784 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack, 3785 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 3786 } 3787 SCTP_TCB_LOCK_ASSERT(stcb); 3788 #ifdef SCTP_ASOCLOG_OF_TSNS 3789 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack; 3790 stcb->asoc.cumack_log_at++; 3791 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 3792 stcb->asoc.cumack_log_at = 0; 3793 } 3794 #endif 3795 asoc = &stcb->asoc; 3796 old_rwnd = asoc->peers_rwnd; 3797 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) { 3798 /* old ack */ 3799 return; 3800 } else if (asoc->last_acked_seq == cumack) { 3801 /* Window update sack */ 3802 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 3803 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 3804 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 3805 /* SWS sender side engages */ 3806 asoc->peers_rwnd = 0; 3807 } 3808 if (asoc->peers_rwnd > old_rwnd) { 3809 goto again; 3810 } 3811 return; 3812 } 3813 /* First setup for CC stuff */ 3814 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3815 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) { 3816 /* Drag along the window_tsn for cwr's */ 3817 net->cwr_window_tsn = cumack; 3818 } 3819 net->prev_cwnd = net->cwnd; 3820 net->net_ack = 0; 3821 net->net_ack2 = 0; 3822 3823 /* 3824 * CMT: Reset CUC and Fast recovery algo variables before 3825 * SACK processing 3826 */ 3827 net->new_pseudo_cumack = 0; 3828 net->will_exit_fast_recovery = 0; 3829 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 3830 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 3831 } 3832 } 3833 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) { 3834 uint32_t send_s; 3835 3836 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 3837 tp1 = TAILQ_LAST(&asoc->sent_queue, 3838 sctpchunk_listhead); 3839 send_s = tp1->rec.data.TSN_seq + 1; 3840 } else { 3841 send_s = asoc->sending_seq; 3842 } 3843 if (SCTP_TSN_GE(cumack, send_s)) { 3844 #ifndef INVARIANTS 3845 struct mbuf *oper; 3846 3847 #endif 3848 #ifdef INVARIANTS 3849 panic("Impossible sack 1"); 3850 #else 3851 3852 *abort_now = 1; 3853 /* XXX */ 3854 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 3855 0, 
M_NOWAIT, 1, MT_DATA); 3856 if (oper) { 3857 struct sctp_paramhdr *ph; 3858 uint32_t *ippp; 3859 3860 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 3861 sizeof(uint32_t); 3862 ph = mtod(oper, struct sctp_paramhdr *); 3863 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 3864 ph->param_length = htons(SCTP_BUF_LEN(oper)); 3865 ippp = (uint32_t *) (ph + 1); 3866 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25); 3867 } 3868 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 3869 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); 3870 return; 3871 #endif 3872 } 3873 } 3874 asoc->this_sack_highest_gap = cumack; 3875 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 3876 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 3877 stcb->asoc.overall_error_count, 3878 0, 3879 SCTP_FROM_SCTP_INDATA, 3880 __LINE__); 3881 } 3882 stcb->asoc.overall_error_count = 0; 3883 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) { 3884 /* process the new consecutive TSN first */ 3885 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 3886 if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) { 3887 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 3888 SCTP_PRINTF("Warning, an unsent is now acked?\n"); 3889 } 3890 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 3891 /* 3892 * If it is less than ACKED, it is 3893 * now no longer in flight. Higher 3894 * values may occur during marking 3895 */ 3896 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3897 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3898 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 3899 tp1->whoTo->flight_size, 3900 tp1->book_size, 3901 (uintptr_t) tp1->whoTo, 3902 tp1->rec.data.TSN_seq); 3903 } 3904 sctp_flight_size_decrease(tp1); 3905 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3906 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3907 tp1); 3908 } 3909 /* sa_ignore NO_NULL_CHK */ 3910 sctp_total_flight_decrease(stcb, tp1); 3911 } 3912 tp1->whoTo->net_ack += tp1->send_size; 3913 if (tp1->snd_count < 2) { 3914 /* 3915 * True non-retransmitted 3916 * chunk 3917 */ 3918 tp1->whoTo->net_ack2 += 3919 tp1->send_size; 3920 3921 /* update RTO too? */ 3922 if (tp1->do_rtt) { 3923 if (rto_ok) { 3924 tp1->whoTo->RTO = 3925 /* sa_ignore NO_NULL_CHK */ 3930 sctp_calculate_rto(stcb, 3931 asoc, tp1->whoTo, 3932 &tp1->sent_rcv_time, 3933 sctp_align_safe_nocopy, 3934 SCTP_RTT_FROM_DATA); 3935 rto_ok = 0; 3936 } 3937 if (tp1->whoTo->rto_needed == 0) { 3938 tp1->whoTo->rto_needed = 1; 3939 } 3940 tp1->do_rtt = 0; 3941 } 3942 } 3943 /* 3944 * CMT: CUCv2 algorithm. From the 3945 * cumack'd TSNs, for each TSN being 3946 * acked for the first time, set the 3947 * following variables for the 3948 * corresponding destination. 3949 * new_pseudo_cumack will trigger a 3950 * cwnd update. 3951 * find_(rtx_)pseudo_cumack will 3952 * trigger search for the next 3953 * expected (rtx-)pseudo-cumack.
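 * For example, with TSNs 1, 3 and 5 sent to destination A and 2 and
 * 4 sent to B, A's pseudo-cumack advances 1 -> 3 -> 5 as those TSNs
 * are newly acked, so A's cwnd can grow even while the
 * association-wide cumack is held back by a loss on B.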
3954 */ 3955 tp1->whoTo->new_pseudo_cumack = 1; 3956 tp1->whoTo->find_pseudo_cumack = 1; 3957 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3958 3959 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 3960 /* sa_ignore NO_NULL_CHK */ 3961 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 3962 } 3963 } 3964 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3965 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 3966 } 3967 if (tp1->rec.data.chunk_was_revoked) { 3968 /* deflate the cwnd */ 3969 tp1->whoTo->cwnd -= tp1->book_size; 3970 tp1->rec.data.chunk_was_revoked = 0; 3971 } 3972 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 3973 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) { 3974 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--; 3975 #ifdef INVARIANTS 3976 } else { 3977 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number); 3978 #endif 3979 } 3980 } 3981 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 3982 if (tp1->data) { 3983 /* sa_ignore NO_NULL_CHK */ 3984 sctp_free_bufspace(stcb, asoc, tp1, 1); 3985 sctp_m_freem(tp1->data); 3986 tp1->data = NULL; 3987 } 3988 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3989 sctp_log_sack(asoc->last_acked_seq, 3990 cumack, 3991 tp1->rec.data.TSN_seq, 3992 0, 3993 0, 3994 SCTP_LOG_FREE_SENT); 3995 } 3996 asoc->sent_queue_cnt--; 3997 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 3998 } else { 3999 break; 4000 } 4001 } 4002 4003 } 4004 /* sa_ignore NO_NULL_CHK */ 4005 if (stcb->sctp_socket) { 4006 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4007 struct socket *so; 4008 4009 #endif 4010 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4011 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4012 /* sa_ignore NO_NULL_CHK */ 4013 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK); 4014 } 4015 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4016 so = SCTP_INP_SO(stcb->sctp_ep); 4017 atomic_add_int(&stcb->asoc.refcnt, 1); 4018 SCTP_TCB_UNLOCK(stcb); 4019 SCTP_SOCKET_LOCK(so, 1); 4020 SCTP_TCB_LOCK(stcb); 4021 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4022 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4023 /* assoc was freed while we were unlocked */ 4024 SCTP_SOCKET_UNLOCK(so, 1); 4025 return; 4026 } 4027 #endif 4028 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4029 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4030 SCTP_SOCKET_UNLOCK(so, 1); 4031 #endif 4032 } else { 4033 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4034 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK); 4035 } 4036 } 4037 4038 /* JRS - Use the congestion control given in the CC module */ 4039 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) { 4040 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4041 if (net->net_ack2 > 0) { 4042 /* 4043 * Karn's rule applies to clearing error 4044 * count, this is optional. 
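 * (net_ack2 counts only bytes that were never retransmitted, so an
 * ack covering them is unambiguous evidence that the path delivered
 * new data; per Karn's rule only that is trusted to clear the
 * error count.)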
4045 */ 4046 net->error_count = 0; 4047 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { 4048 /* addr came good */ 4049 net->dest_state |= SCTP_ADDR_REACHABLE; 4050 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 4051 0, (void *)net, SCTP_SO_NOT_LOCKED); 4052 } 4053 if (net == stcb->asoc.primary_destination) { 4054 if (stcb->asoc.alternate) { 4055 /* 4056 * release the alternate, 4057 * primary is good 4058 */ 4059 sctp_free_remote_addr(stcb->asoc.alternate); 4060 stcb->asoc.alternate = NULL; 4061 } 4062 } 4063 if (net->dest_state & SCTP_ADDR_PF) { 4064 net->dest_state &= ~SCTP_ADDR_PF; 4065 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3); 4066 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 4067 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 4068 /* Done with this net */ 4069 net->net_ack = 0; 4070 } 4071 /* restore any doubled timers */ 4072 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 4073 if (net->RTO < stcb->asoc.minrto) { 4074 net->RTO = stcb->asoc.minrto; 4075 } 4076 if (net->RTO > stcb->asoc.maxrto) { 4077 net->RTO = stcb->asoc.maxrto; 4078 } 4079 } 4080 } 4081 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0); 4082 } 4083 asoc->last_acked_seq = cumack; 4084 4085 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4086 /* nothing left in-flight */ 4087 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4088 net->flight_size = 0; 4089 net->partial_bytes_acked = 0; 4090 } 4091 asoc->total_flight = 0; 4092 asoc->total_flight_count = 0; 4093 } 4094 /* RWND update */ 4095 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 4096 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 4097 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4098 /* SWS sender side engages */ 4099 asoc->peers_rwnd = 0; 4100 } 4101 if (asoc->peers_rwnd > old_rwnd) { 4102 win_probe_recovery = 1; 4103 } 4104 /* Now assure a timer where data is queued at */ 4105 again: 4106 j = 0; 4107 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4108 int to_ticks; 4109 4110 if (win_probe_recovery && (net->window_probe)) { 4111 win_probe_recovered = 1; 4112 /* 4113 * Find first chunk that was used with window probe 4114 * and clear the sent 4115 */ 4116 /* sa_ignore FREED_MEMORY */ 4117 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4118 if (tp1->window_probe) { 4119 /* move back to data send queue */ 4120 sctp_window_probe_recovery(stcb, asoc, tp1); 4121 break; 4122 } 4123 } 4124 } 4125 if (net->RTO == 0) { 4126 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 4127 } else { 4128 to_ticks = MSEC_TO_TICKS(net->RTO); 4129 } 4130 if (net->flight_size) { 4131 j++; 4132 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks, 4133 sctp_timeout_handler, &net->rxt_timer); 4134 if (net->window_probe) { 4135 net->window_probe = 0; 4136 } 4137 } else { 4138 if (net->window_probe) { 4139 /* 4140 * In window probes we must assure a timer 4141 * is still running there 4142 */ 4143 net->window_probe = 0; 4144 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4145 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks, 4146 sctp_timeout_handler, &net->rxt_timer); 4147 } 4148 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4149 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4150 stcb, net, 4151 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22); 4152 } 4153 } 4154 } 4155 if ((j == 0) && 4156 (!TAILQ_EMPTY(&asoc->sent_queue)) && 4157 (asoc->sent_queue_retran_cnt == 0) && 4158 
(win_probe_recovered == 0) && 4159 (done_once == 0)) { 4160 /* 4161 * huh, this should not happen unless all packets are 4162 * PR-SCTP and marked to skip of course. 4163 */ 4164 if (sctp_fs_audit(asoc)) { 4165 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4166 net->flight_size = 0; 4167 } 4168 asoc->total_flight = 0; 4169 asoc->total_flight_count = 0; 4170 asoc->sent_queue_retran_cnt = 0; 4171 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4172 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4173 sctp_flight_size_increase(tp1); 4174 sctp_total_flight_increase(stcb, tp1); 4175 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4176 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 4177 } 4178 } 4179 } 4180 done_once = 1; 4181 goto again; 4182 } 4183 /**********************************/ 4184 /* Now what about shutdown issues */ 4185 /**********************************/ 4186 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4187 /* nothing left on sendqueue.. consider done */ 4188 /* clean up */ 4189 if ((asoc->stream_queue_cnt == 1) && 4190 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4191 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 4192 (asoc->locked_on_sending) 4193 ) { 4194 struct sctp_stream_queue_pending *sp; 4195 4196 /* 4197 * I may be in a state where we got all across.. but 4198 * cannot write more due to a shutdown... we abort 4199 * since the user did not indicate EOR in this case. 4200 * The sp will be cleaned during free of the asoc. 4201 */ 4202 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), 4203 sctp_streamhead); 4204 if ((sp) && (sp->length == 0)) { 4205 /* Let cleanup code purge it */ 4206 if (sp->msg_is_complete) { 4207 asoc->stream_queue_cnt--; 4208 } else { 4209 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4210 asoc->locked_on_sending = NULL; 4211 asoc->stream_queue_cnt--; 4212 } 4213 } 4214 } 4215 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4216 (asoc->stream_queue_cnt == 0)) { 4217 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4218 /* Need to abort here */ 4219 struct mbuf *oper; 4220 4221 abort_out_now: 4222 *abort_now = 1; 4223 /* XXX */ 4224 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4225 0, M_NOWAIT, 1, MT_DATA); 4226 if (oper) { 4227 struct sctp_paramhdr *ph; 4228 uint32_t *ippp; 4229 4230 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 4231 sizeof(uint32_t); 4232 ph = mtod(oper, struct sctp_paramhdr *); 4233 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 4234 ph->param_length = htons(SCTP_BUF_LEN(oper)); 4235 ippp = (uint32_t *) (ph + 1); 4236 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24); 4237 } 4238 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; 4239 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); 4240 } else { 4241 struct sctp_nets *netp; 4242 4243 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 4244 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4245 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4246 } 4247 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 4248 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4249 sctp_stop_timers_for_shutdown(stcb); 4250 if (asoc->alternate) { 4251 netp = asoc->alternate; 4252 } else { 4253 netp = asoc->primary_destination; 4254 } 4255 sctp_send_shutdown(stcb, netp); 4256 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4257 stcb->sctp_ep, stcb, netp); 4258 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4259 stcb->sctp_ep, stcb, netp); 4260 } 4261 } else if ((SCTP_GET_STATE(asoc) 
== SCTP_STATE_SHUTDOWN_RECEIVED) && 4262 (asoc->stream_queue_cnt == 0)) { 4263 struct sctp_nets *netp; 4264 4265 if (asoc->alternate) { 4266 netp = asoc->alternate; 4267 } else { 4268 netp = asoc->primary_destination; 4269 } 4270 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4271 goto abort_out_now; 4272 } 4273 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4274 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); 4275 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4276 sctp_send_shutdown_ack(stcb, netp); 4277 sctp_stop_timers_for_shutdown(stcb); 4278 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4279 stcb->sctp_ep, stcb, netp); 4280 } 4281 } 4282 /*********************************************/ 4283 /* Here we perform PR-SCTP procedures */ 4284 /* (section 4.2) */ 4285 /*********************************************/ 4286 /* C1. update advancedPeerAckPoint */ 4287 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) { 4288 asoc->advanced_peer_ack_point = cumack; 4289 } 4290 /* PR-Sctp issues need to be addressed too */ 4291 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) { 4292 struct sctp_tmit_chunk *lchk; 4293 uint32_t old_adv_peer_ack_point; 4294 4295 old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 4296 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 4297 /* C3. See if we need to send a Fwd-TSN */ 4298 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) { 4299 /* 4300 * ISSUE with ECN, see FWD-TSN processing. 4301 */ 4302 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 4303 send_forward_tsn(stcb, asoc); 4304 } else if (lchk) { 4305 /* try to FR fwd-tsn's that get lost too */ 4306 if (lchk->rec.data.fwd_tsn_cnt >= 3) { 4307 send_forward_tsn(stcb, asoc); 4308 } 4309 } 4310 } 4311 if (lchk) { 4312 /* Assure a timer is up */ 4313 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4314 stcb->sctp_ep, stcb, lchk->whoTo); 4315 } 4316 } 4317 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 4318 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 4319 rwnd, 4320 stcb->asoc.peers_rwnd, 4321 stcb->asoc.total_flight, 4322 stcb->asoc.total_output_queue_size); 4323 } 4324 } 4325 4326 void 4327 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, 4328 struct sctp_tcb *stcb, 4329 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup, 4330 int *abort_now, uint8_t flags, 4331 uint32_t cum_ack, uint32_t rwnd, int ecne_seen) 4332 { 4333 struct sctp_association *asoc; 4334 struct sctp_tmit_chunk *tp1, *tp2; 4335 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack; 4336 uint16_t wake_him = 0; 4337 uint32_t send_s = 0; 4338 long j; 4339 int accum_moved = 0; 4340 int will_exit_fast_recovery = 0; 4341 uint32_t a_rwnd, old_rwnd; 4342 int win_probe_recovery = 0; 4343 int win_probe_recovered = 0; 4344 struct sctp_nets *net = NULL; 4345 int done_once; 4346 int rto_ok = 1; 4347 uint8_t reneged_all = 0; 4348 uint8_t cmt_dac_flag; 4349 4350 /* 4351 * we take any chance we can to service our queues since we cannot 4352 * get awoken when the socket is read from :< 4353 */ 4354 /* 4355 * Now perform the actual SACK handling: 1) Verify that it is not an 4356 * old sack, if so discard. 2) If there is nothing left in the send 4357 * queue (cum-ack is equal to last acked) then you have a duplicate 4358 * too, update any rwnd change and verify no timers are running. 4359 * then return. 3) Process any new consequtive data i.e. cum-ack 4360 * moved process these first and note that it moved. 
4) Process any 4361 * sack blocks. 5) Drop any acked from the queue. 6) Check for any 4362 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left, 4363 * sync up flightsizes and things, stop all timers and also check 4364 * for shutdown_pending state. If so then go ahead and send off the 4365 * shutdown. If in shutdown recv, send off the shutdown-ack and 4366 * start that timer, Ret. 9) Strike any non-acked things and do FR 4367 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp 4368 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK 4369 * if in shutdown_recv state. 4370 */ 4371 SCTP_TCB_LOCK_ASSERT(stcb); 4372 /* CMT DAC algo */ 4373 this_sack_lowest_newack = 0; 4374 SCTP_STAT_INCR(sctps_slowpath_sack); 4375 last_tsn = cum_ack; 4376 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC; 4377 #ifdef SCTP_ASOCLOG_OF_TSNS 4378 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack; 4379 stcb->asoc.cumack_log_at++; 4380 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 4381 stcb->asoc.cumack_log_at = 0; 4382 } 4383 #endif 4384 a_rwnd = rwnd; 4385 4386 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 4387 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack, 4388 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 4389 } 4390 old_rwnd = stcb->asoc.peers_rwnd; 4391 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4392 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4393 stcb->asoc.overall_error_count, 4394 0, 4395 SCTP_FROM_SCTP_INDATA, 4396 __LINE__); 4397 } 4398 stcb->asoc.overall_error_count = 0; 4399 asoc = &stcb->asoc; 4400 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4401 sctp_log_sack(asoc->last_acked_seq, 4402 cum_ack, 4403 0, 4404 num_seg, 4405 num_dup, 4406 SCTP_LOG_NEW_SACK); 4407 } 4408 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) { 4409 uint16_t i; 4410 uint32_t *dupdata, dblock; 4411 4412 for (i = 0; i < num_dup; i++) { 4413 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t), 4414 sizeof(uint32_t), (uint8_t *) & dblock); 4415 if (dupdata == NULL) { 4416 break; 4417 } 4418 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); 4419 } 4420 } 4421 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) { 4422 /* reality check */ 4423 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 4424 tp1 = TAILQ_LAST(&asoc->sent_queue, 4425 sctpchunk_listhead); 4426 send_s = tp1->rec.data.TSN_seq + 1; 4427 } else { 4428 tp1 = NULL; 4429 send_s = asoc->sending_seq; 4430 } 4431 if (SCTP_TSN_GE(cum_ack, send_s)) { 4432 struct mbuf *oper; 4433 4434 /* 4435 * no way, we have not even sent this TSN out yet. 4436 * Peer is hopelessly messed up with us. 
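 * (send_s is one past the highest TSN we have ever queued for
 * sending, so cum_ack >= send_s acknowledges data that was never
 * sent: a protocol violation, hence the ABORT below.)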
4437 */ 4438 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n", 4439 cum_ack, send_s); 4440 if (tp1) { 4441 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n", 4442 tp1->rec.data.TSN_seq, (void *)tp1); 4443 } 4444 hopeless_peer: 4445 *abort_now = 1; 4446 /* XXX */ 4447 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4448 0, M_NOWAIT, 1, MT_DATA); 4449 if (oper) { 4450 struct sctp_paramhdr *ph; 4451 uint32_t *ippp; 4452 4453 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 4454 sizeof(uint32_t); 4455 ph = mtod(oper, struct sctp_paramhdr *); 4456 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 4457 ph->param_length = htons(SCTP_BUF_LEN(oper)); 4458 ippp = (uint32_t *) (ph + 1); 4459 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25); 4460 } 4461 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 4462 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); 4463 return; 4464 } 4465 } 4466 /**********************/ 4467 /* 1) check the range */ 4468 /**********************/ 4469 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) { 4470 /* acking something behind */ 4471 return; 4472 } 4473 /* update the Rwnd of the peer */ 4474 if (TAILQ_EMPTY(&asoc->sent_queue) && 4475 TAILQ_EMPTY(&asoc->send_queue) && 4476 (asoc->stream_queue_cnt == 0)) { 4477 /* nothing left on send/sent and strmq */ 4478 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4479 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4480 asoc->peers_rwnd, 0, 0, a_rwnd); 4481 } 4482 asoc->peers_rwnd = a_rwnd; 4483 if (asoc->sent_queue_retran_cnt) { 4484 asoc->sent_queue_retran_cnt = 0; 4485 } 4486 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4487 /* SWS sender side engages */ 4488 asoc->peers_rwnd = 0; 4489 } 4490 /* stop any timers */ 4491 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4492 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4493 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); 4494 net->partial_bytes_acked = 0; 4495 net->flight_size = 0; 4496 } 4497 asoc->total_flight = 0; 4498 asoc->total_flight_count = 0; 4499 return; 4500 } 4501 /* 4502 * We init netAckSz and netAckSz2 to 0. These are used to track 2 4503 * things. The total byte count acked is tracked in netAckSz AND 4504 * netAck2 is used to track the total bytes acked that are un- 4505 * amibguious and were never retransmitted. We track these on a per 4506 * destination address basis. 4507 */ 4508 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4509 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) { 4510 /* Drag along the window_tsn for cwr's */ 4511 net->cwr_window_tsn = cum_ack; 4512 } 4513 net->prev_cwnd = net->cwnd; 4514 net->net_ack = 0; 4515 net->net_ack2 = 0; 4516 4517 /* 4518 * CMT: Reset CUC and Fast recovery algo variables before 4519 * SACK processing 4520 */ 4521 net->new_pseudo_cumack = 0; 4522 net->will_exit_fast_recovery = 0; 4523 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 4524 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 4525 } 4526 } 4527 /* process the new consecutive TSN first */ 4528 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4529 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) { 4530 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 4531 accum_moved = 1; 4532 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4533 /* 4534 * If it is less than ACKED, it is 4535 * now no-longer in flight. 
Higher 4536 * values may occur during marking 4537 */ 4538 if ((tp1->whoTo->dest_state & 4539 SCTP_ADDR_UNCONFIRMED) && 4540 (tp1->snd_count < 2)) { 4541 /* 4542 * If there was no retran 4543 * and the address is 4544 * un-confirmed and we sent 4545 * there and are now 4546 * sacked.. its confirmed, 4547 * mark it so. 4548 */ 4549 tp1->whoTo->dest_state &= 4550 ~SCTP_ADDR_UNCONFIRMED; 4551 } 4552 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4553 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4554 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4555 tp1->whoTo->flight_size, 4556 tp1->book_size, 4557 (uintptr_t) tp1->whoTo, 4558 tp1->rec.data.TSN_seq); 4559 } 4560 sctp_flight_size_decrease(tp1); 4561 sctp_total_flight_decrease(stcb, tp1); 4562 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 4563 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 4564 tp1); 4565 } 4566 } 4567 tp1->whoTo->net_ack += tp1->send_size; 4568 4569 /* CMT SFR and DAC algos */ 4570 this_sack_lowest_newack = tp1->rec.data.TSN_seq; 4571 tp1->whoTo->saw_newack = 1; 4572 4573 if (tp1->snd_count < 2) { 4574 /* 4575 * True non-retransmited 4576 * chunk 4577 */ 4578 tp1->whoTo->net_ack2 += 4579 tp1->send_size; 4580 4581 /* update RTO too? */ 4582 if (tp1->do_rtt) { 4583 if (rto_ok) { 4584 tp1->whoTo->RTO = 4585 sctp_calculate_rto(stcb, 4586 asoc, tp1->whoTo, 4587 &tp1->sent_rcv_time, 4588 sctp_align_safe_nocopy, 4589 SCTP_RTT_FROM_DATA); 4590 rto_ok = 0; 4591 } 4592 if (tp1->whoTo->rto_needed == 0) { 4593 tp1->whoTo->rto_needed = 1; 4594 } 4595 tp1->do_rtt = 0; 4596 } 4597 } 4598 /* 4599 * CMT: CUCv2 algorithm. From the 4600 * cumack'd TSNs, for each TSN being 4601 * acked for the first time, set the 4602 * following variables for the 4603 * corresp destination. 4604 * new_pseudo_cumack will trigger a 4605 * cwnd update. 4606 * find_(rtx_)pseudo_cumack will 4607 * trigger search for the next 4608 * expected (rtx-)pseudo-cumack. 4609 */ 4610 tp1->whoTo->new_pseudo_cumack = 1; 4611 tp1->whoTo->find_pseudo_cumack = 1; 4612 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4613 4614 4615 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4616 sctp_log_sack(asoc->last_acked_seq, 4617 cum_ack, 4618 tp1->rec.data.TSN_seq, 4619 0, 4620 0, 4621 SCTP_LOG_TSN_ACKED); 4622 } 4623 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 4624 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 4625 } 4626 } 4627 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4628 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4629 #ifdef SCTP_AUDITING_ENABLED 4630 sctp_audit_log(0xB3, 4631 (asoc->sent_queue_retran_cnt & 0x000000ff)); 4632 #endif 4633 } 4634 if (tp1->rec.data.chunk_was_revoked) { 4635 /* deflate the cwnd */ 4636 tp1->whoTo->cwnd -= tp1->book_size; 4637 tp1->rec.data.chunk_was_revoked = 0; 4638 } 4639 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4640 tp1->sent = SCTP_DATAGRAM_ACKED; 4641 } 4642 } 4643 } else { 4644 break; 4645 } 4646 } 4647 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; 4648 /* always set this up to cum-ack */ 4649 asoc->this_sack_highest_gap = last_tsn; 4650 4651 if ((num_seg > 0) || (num_nr_seg > 0)) { 4652 4653 /* 4654 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has 4655 * to be greater than the cumack. Also reset saw_newack to 0 4656 * for all dests. 
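 * Seeding this_sack_highest_newack with last_tsn (the cumack) gives
 * every destination a floor; sctp_handle_segments() then raises it
 * per destination as the gap blocks newly ack TSNs sent there.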
4657 */ 4658 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4659 net->saw_newack = 0; 4660 net->this_sack_highest_newack = last_tsn; 4661 } 4662 4663 /* 4664 * thisSackHighestGap will increase while handling NEW 4665 * segments this_sack_highest_newack will increase while 4666 * handling NEWLY ACKED chunks. this_sack_lowest_newack is 4667 * used for CMT DAC algo. saw_newack will also change. 4668 */ 4669 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked, 4670 &biggest_tsn_newly_acked, &this_sack_lowest_newack, 4671 num_seg, num_nr_seg, &rto_ok)) { 4672 wake_him++; 4673 } 4674 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) { 4675 /* 4676 * validate the biggest_tsn_acked in the gap acks if 4677 * strict adherence is wanted. 4678 */ 4679 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) { 4680 /* 4681 * peer is either confused or we are under 4682 * attack. We must abort. 4683 */ 4684 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n", 4685 biggest_tsn_acked, send_s); 4686 goto hopeless_peer; 4687 } 4688 } 4689 } 4690 /*******************************************/ 4691 /* cancel ALL T3-send timer if accum moved */ 4692 /*******************************************/ 4693 if (asoc->sctp_cmt_on_off > 0) { 4694 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4695 if (net->new_pseudo_cumack) 4696 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4697 stcb, net, 4698 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27); 4699 4700 } 4701 } else { 4702 if (accum_moved) { 4703 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4704 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4705 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28); 4706 } 4707 } 4708 } 4709 /********************************************/ 4710 /* drop the acked chunks from the sentqueue */ 4711 /********************************************/ 4712 asoc->last_acked_seq = cum_ack; 4713 4714 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 4715 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) { 4716 break; 4717 } 4718 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4719 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) { 4720 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--; 4721 #ifdef INVARIANTS 4722 } else { 4723 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number); 4724 #endif 4725 } 4726 } 4727 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4728 if (tp1->pr_sctp_on) { 4729 if (asoc->pr_sctp_cnt != 0) 4730 asoc->pr_sctp_cnt--; 4731 } 4732 asoc->sent_queue_cnt--; 4733 if (tp1->data) { 4734 /* sa_ignore NO_NULL_CHK */ 4735 sctp_free_bufspace(stcb, asoc, tp1, 1); 4736 sctp_m_freem(tp1->data); 4737 tp1->data = NULL; 4738 if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) { 4739 asoc->sent_queue_cnt_removeable--; 4740 } 4741 } 4742 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4743 sctp_log_sack(asoc->last_acked_seq, 4744 cum_ack, 4745 tp1->rec.data.TSN_seq, 4746 0, 4747 0, 4748 SCTP_LOG_FREE_SENT); 4749 } 4750 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 4751 wake_him++; 4752 } 4753 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) { 4754 #ifdef INVARIANTS 4755 panic("Warning flight size is postive and should be 0"); 4756 #else 4757 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n", 4758 asoc->total_flight); 4759 #endif 4760 asoc->total_flight = 0; 4761 } 4762 /* sa_ignore NO_NULL_CHK */ 4763 if ((wake_him) && (stcb->sctp_socket)) { 4764 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 
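/*
 * The wakeup below needs the socket lock, so: hold a refcount,
 * drop the TCB lock, take the socket lock, re-take the TCB lock,
 * and re-check that the association was not freed while we were
 * unlocked.
 */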
4765 struct socket *so; 4766 4767 #endif 4768 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4769 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4770 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK); 4771 } 4772 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4773 so = SCTP_INP_SO(stcb->sctp_ep); 4774 atomic_add_int(&stcb->asoc.refcnt, 1); 4775 SCTP_TCB_UNLOCK(stcb); 4776 SCTP_SOCKET_LOCK(so, 1); 4777 SCTP_TCB_LOCK(stcb); 4778 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4779 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4780 /* assoc was freed while we were unlocked */ 4781 SCTP_SOCKET_UNLOCK(so, 1); 4782 return; 4783 } 4784 #endif 4785 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4786 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4787 SCTP_SOCKET_UNLOCK(so, 1); 4788 #endif 4789 } else { 4790 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4791 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK); 4792 } 4793 } 4794 4795 if (asoc->fast_retran_loss_recovery && accum_moved) { 4796 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) { 4797 /* Setup so we will exit RFC2582 fast recovery */ 4798 will_exit_fast_recovery = 1; 4799 } 4800 } 4801 /* 4802 * Check for revoked fragments: 4803 * 4804 * if Previous sack - Had no frags then we can't have any revoked if 4805 * Previous sack - Had frag's then - If we now have frags aka 4806 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked 4807 * some of them. else - The peer revoked all ACKED fragments, since 4808 * we had some before and now we have NONE. 4809 */ 4810 4811 if (num_seg) { 4812 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked); 4813 asoc->saw_sack_with_frags = 1; 4814 } else if (asoc->saw_sack_with_frags) { 4815 int cnt_revoked = 0; 4816 4817 /* Peer revoked all dg's marked or acked */ 4818 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4819 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 4820 tp1->sent = SCTP_DATAGRAM_SENT; 4821 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4822 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 4823 tp1->whoTo->flight_size, 4824 tp1->book_size, 4825 (uintptr_t) tp1->whoTo, 4826 tp1->rec.data.TSN_seq); 4827 } 4828 sctp_flight_size_increase(tp1); 4829 sctp_total_flight_increase(stcb, tp1); 4830 tp1->rec.data.chunk_was_revoked = 1; 4831 /* 4832 * To ensure that this increase in 4833 * flightsize, which is artificial, does not 4834 * throttle the sender, we also increase the 4835 * cwnd artificially. 4836 */ 4837 tp1->whoTo->cwnd += tp1->book_size; 4838 cnt_revoked++; 4839 } 4840 } 4841 if (cnt_revoked) { 4842 reneged_all = 1; 4843 } 4844 asoc->saw_sack_with_frags = 0; 4845 } 4846 if (num_nr_seg > 0) 4847 asoc->saw_sack_with_nr_frags = 1; 4848 else 4849 asoc->saw_sack_with_nr_frags = 0; 4850 4851 /* JRS - Use the congestion control given in the CC module */ 4852 if (ecne_seen == 0) { 4853 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4854 if (net->net_ack2 > 0) { 4855 /* 4856 * Karn's rule applies to clearing error 4857 * count, this is optional. 
4858 */ 4859 net->error_count = 0; 4860 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { 4861 /* addr came good */ 4862 net->dest_state |= SCTP_ADDR_REACHABLE; 4863 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 4864 0, (void *)net, SCTP_SO_NOT_LOCKED); 4865 } 4866 if (net == stcb->asoc.primary_destination) { 4867 if (stcb->asoc.alternate) { 4868 /* 4869 * release the alternate, 4870 * primary is good 4871 */ 4872 sctp_free_remote_addr(stcb->asoc.alternate); 4873 stcb->asoc.alternate = NULL; 4874 } 4875 } 4876 if (net->dest_state & SCTP_ADDR_PF) { 4877 net->dest_state &= ~SCTP_ADDR_PF; 4878 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3); 4879 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 4880 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 4881 /* Done with this net */ 4882 net->net_ack = 0; 4883 } 4884 /* restore any doubled timers */ 4885 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 4886 if (net->RTO < stcb->asoc.minrto) { 4887 net->RTO = stcb->asoc.minrto; 4888 } 4889 if (net->RTO > stcb->asoc.maxrto) { 4890 net->RTO = stcb->asoc.maxrto; 4891 } 4892 } 4893 } 4894 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery); 4895 } 4896 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4897 /* nothing left in-flight */ 4898 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4899 /* stop all timers */ 4900 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4901 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30); 4902 net->flight_size = 0; 4903 net->partial_bytes_acked = 0; 4904 } 4905 asoc->total_flight = 0; 4906 asoc->total_flight_count = 0; 4907 } 4908 /**********************************/ 4909 /* Now what about shutdown issues */ 4910 /**********************************/ 4911 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4912 /* nothing left on sendqueue.. consider done */ 4913 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4914 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4915 asoc->peers_rwnd, 0, 0, a_rwnd); 4916 } 4917 asoc->peers_rwnd = a_rwnd; 4918 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4919 /* SWS sender side engages */ 4920 asoc->peers_rwnd = 0; 4921 } 4922 /* clean up */ 4923 if ((asoc->stream_queue_cnt == 1) && 4924 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4925 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 4926 (asoc->locked_on_sending) 4927 ) { 4928 struct sctp_stream_queue_pending *sp; 4929 4930 /* 4931 * I may be in a state where we got all across.. but 4932 * cannot write more due to a shutdown... we abort 4933 * since the user did not indicate EOR in this case. 
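 * (locked_on_sending points at the stream holding the partially
 * sent message; an empty trailing sp without msg_is_complete means
 * the sender never supplied the end of the record, so
 * SCTP_STATE_PARTIAL_MSG_LEFT is set and the abort path is taken.)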
4934 */ 4935 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), 4936 sctp_streamhead); 4937 if ((sp) && (sp->length == 0)) { 4938 asoc->locked_on_sending = NULL; 4939 if (sp->msg_is_complete) { 4940 asoc->stream_queue_cnt--; 4941 } else { 4942 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4943 asoc->stream_queue_cnt--; 4944 } 4945 } 4946 } 4947 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4948 (asoc->stream_queue_cnt == 0)) { 4949 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 4950 /* Need to abort here */ 4951 struct mbuf *oper; 4952 4953 abort_out_now: 4954 *abort_now = 1; 4955 /* XXX */ 4956 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 4957 0, M_NOWAIT, 1, MT_DATA); 4958 if (oper) { 4959 struct sctp_paramhdr *ph; 4960 uint32_t *ippp; 4961 4962 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 4963 sizeof(uint32_t); 4964 ph = mtod(oper, struct sctp_paramhdr *); 4965 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 4966 ph->param_length = htons(SCTP_BUF_LEN(oper)); 4967 ippp = (uint32_t *) (ph + 1); 4968 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31); 4969 } 4970 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31; 4971 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); 4972 return; 4973 } else { 4974 struct sctp_nets *netp; 4975 4976 if (asoc->alternate) { 4977 netp = asoc->alternate; 4978 } else { 4979 netp = asoc->primary_destination; 4980 } 4981 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 4982 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4983 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4984 } 4985 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 4986 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4987 sctp_stop_timers_for_shutdown(stcb); 4988 sctp_send_shutdown(stcb, netp); 4989 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4990 stcb->sctp_ep, stcb, netp); 4991 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4992 stcb->sctp_ep, stcb, netp); 4993 } 4994 return; 4995 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4996 (asoc->stream_queue_cnt == 0)) { 4997 struct sctp_nets *netp; 4998 4999 if (asoc->alternate) { 5000 netp = asoc->alternate; 5001 } else { 5002 netp = asoc->primary_destination; 5003 } 5004 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 5005 goto abort_out_now; 5006 } 5007 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 5008 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); 5009 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 5010 sctp_send_shutdown_ack(stcb, netp); 5011 sctp_stop_timers_for_shutdown(stcb); 5012 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 5013 stcb->sctp_ep, stcb, netp); 5014 return; 5015 } 5016 } 5017 /* 5018 * Now here we are going to recycle net_ack for a different use... 5019 * HEADS UP. 5020 */ 5021 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5022 net->net_ack = 0; 5023 } 5024 5025 /* 5026 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking 5027 * to be done. Setting this_sack_lowest_newack to the cum_ack will 5028 * automatically ensure that. 
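 * (the strike tests use SCTP_TSN_GT(this_sack_lowest_newack, TSN);
 * every TSN still on the sent queue is above cum_ack, so pinning
 * the value at cum_ack means no extra strike can happen.)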
5029 */ 5030 if ((asoc->sctp_cmt_on_off > 0) && 5031 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && 5032 (cmt_dac_flag == 0)) { 5033 this_sack_lowest_newack = cum_ack; 5034 } 5035 if ((num_seg > 0) || (num_nr_seg > 0)) { 5036 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, 5037 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved); 5038 } 5039 /* JRS - Use the congestion control given in the CC module */ 5040 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc); 5041 5042 /* Now are we exiting loss recovery ? */ 5043 if (will_exit_fast_recovery) { 5044 /* Ok, we must exit fast recovery */ 5045 asoc->fast_retran_loss_recovery = 0; 5046 } 5047 if ((asoc->sat_t3_loss_recovery) && 5048 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) { 5049 /* end satellite t3 loss recovery */ 5050 asoc->sat_t3_loss_recovery = 0; 5051 } 5052 /* 5053 * CMT Fast recovery 5054 */ 5055 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5056 if (net->will_exit_fast_recovery) { 5057 /* Ok, we must exit fast recovery */ 5058 net->fast_retran_loss_recovery = 0; 5059 } 5060 } 5061 5062 /* Adjust and set the new rwnd value */ 5063 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 5064 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 5065 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd); 5066 } 5067 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd, 5068 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 5069 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 5070 /* SWS sender side engages */ 5071 asoc->peers_rwnd = 0; 5072 } 5073 if (asoc->peers_rwnd > old_rwnd) { 5074 win_probe_recovery = 1; 5075 } 5076 /* 5077 * Now we must setup so we have a timer up for anyone with 5078 * outstanding data. 5079 */ 5080 done_once = 0; 5081 again: 5082 j = 0; 5083 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5084 if (win_probe_recovery && (net->window_probe)) { 5085 win_probe_recovered = 1; 5086 /*- 5087 * Find first chunk that was used with 5088 * window probe and clear the event. Put 5089 * it back into the send queue as if has 5090 * not been sent. 5091 */ 5092 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5093 if (tp1->window_probe) { 5094 sctp_window_probe_recovery(stcb, asoc, tp1); 5095 break; 5096 } 5097 } 5098 } 5099 if (net->flight_size) { 5100 j++; 5101 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5102 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5103 stcb->sctp_ep, stcb, net); 5104 } 5105 if (net->window_probe) { 5106 net->window_probe = 0; 5107 } 5108 } else { 5109 if (net->window_probe) { 5110 /* 5111 * In window probes we must assure a timer 5112 * is still running there 5113 */ 5114 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5115 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5116 stcb->sctp_ep, stcb, net); 5117 5118 } 5119 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5120 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 5121 stcb, net, 5122 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22); 5123 } 5124 } 5125 } 5126 if ((j == 0) && 5127 (!TAILQ_EMPTY(&asoc->sent_queue)) && 5128 (asoc->sent_queue_retran_cnt == 0) && 5129 (win_probe_recovered == 0) && 5130 (done_once == 0)) { 5131 /* 5132 * huh, this should not happen unless all packets are 5133 * PR-SCTP and marked to skip of course. 
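 * sctp_fs_audit() checks the flight-size bookkeeping against the
 * sent queue; on a mismatch we zero the counters and rebuild the
 * flight and retransmit counts from the queue itself.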
5134 */ 5135 if (sctp_fs_audit(asoc)) { 5136 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5137 net->flight_size = 0; 5138 } 5139 asoc->total_flight = 0; 5140 asoc->total_flight_count = 0; 5141 asoc->sent_queue_retran_cnt = 0; 5142 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5143 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5144 sctp_flight_size_increase(tp1); 5145 sctp_total_flight_increase(stcb, tp1); 5146 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 5147 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 5148 } 5149 } 5150 } 5151 done_once = 1; 5152 goto again; 5153 } 5154 /*********************************************/ 5155 /* Here we perform PR-SCTP procedures */ 5156 /* (section 4.2) */ 5157 /*********************************************/ 5158 /* C1. update advancedPeerAckPoint */ 5159 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) { 5160 asoc->advanced_peer_ack_point = cum_ack; 5161 } 5162 /* C2. try to further move advancedPeerAckPoint ahead */ 5163 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) { 5164 struct sctp_tmit_chunk *lchk; 5165 uint32_t old_adv_peer_ack_point; 5166 5167 old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 5168 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 5169 /* C3. See if we need to send a Fwd-TSN */ 5170 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) { 5171 /* 5172 * ISSUE with ECN, see FWD-TSN processing. 5173 */ 5174 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 5175 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 5176 0xee, cum_ack, asoc->advanced_peer_ack_point, 5177 old_adv_peer_ack_point); 5178 } 5179 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 5180 send_forward_tsn(stcb, asoc); 5181 } else if (lchk) { 5182 /* try to FR fwd-tsn's that get lost too */ 5183 if (lchk->rec.data.fwd_tsn_cnt >= 3) { 5184 send_forward_tsn(stcb, asoc); 5185 } 5186 } 5187 } 5188 if (lchk) { 5189 /* Assure a timer is up */ 5190 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5191 stcb->sctp_ep, stcb, lchk->whoTo); 5192 } 5193 } 5194 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 5195 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 5196 a_rwnd, 5197 stcb->asoc.peers_rwnd, 5198 stcb->asoc.total_flight, 5199 stcb->asoc.total_output_queue_size); 5200 } 5201 } 5202 5203 void 5204 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag) 5205 { 5206 /* Copy cum-ack */ 5207 uint32_t cum_ack, a_rwnd; 5208 5209 cum_ack = ntohl(cp->cumulative_tsn_ack); 5210 /* Arrange so a_rwnd does NOT change */ 5211 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight; 5212 5213 /* Now call the express sack handling */ 5214 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0); 5215 } 5216 5217 static void 5218 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, 5219 struct sctp_stream_in *strmin) 5220 { 5221 struct sctp_queued_to_read *ctl, *nctl; 5222 struct sctp_association *asoc; 5223 uint16_t tt; 5224 5225 asoc = &stcb->asoc; 5226 tt = strmin->last_sequence_delivered; 5227 /* 5228 * First deliver anything prior to and including the stream no that 5229 * came in 5230 */ 5231 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) { 5232 if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) { 5233 /* this is deliverable now */ 5234 TAILQ_REMOVE(&strmin->inqueue, ctl, next); 5235 /* subtract pending on streams */ 5236 asoc->size_on_all_streams -= ctl->length; 5237 sctp_ucount_decr(asoc->cnt_on_all_streams); 5238 /* deliver it to at least the delivery-q */ 5239 if 
static void
sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
    struct sctp_stream_in *strmin)
{
        struct sctp_queued_to_read *ctl, *nctl;
        struct sctp_association *asoc;
        uint16_t tt;

        asoc = &stcb->asoc;
        tt = strmin->last_sequence_delivered;
        /*
         * First deliver anything prior to and including the stream
         * sequence number that came in.
         */
        TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
                if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
                        /* this is deliverable now */
                        TAILQ_REMOVE(&strmin->inqueue, ctl, next);
                        /* subtract pending on streams */
                        asoc->size_on_all_streams -= ctl->length;
                        sctp_ucount_decr(asoc->cnt_on_all_streams);
                        /* deliver it to at least the delivery-q */
                        if (stcb->sctp_socket) {
                                sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
                                sctp_add_to_readq(stcb->sctp_ep, stcb,
                                    ctl,
                                    &stcb->sctp_socket->so_rcv, 1,
                                    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
                        }
                } else {
                        /* no more delivery now. */
                        break;
                }
        }
        /*
         * Now we must deliver things from the queue the normal way,
         * if any have become ready.
         */
        tt = strmin->last_sequence_delivered + 1;
        TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
                if (tt == ctl->sinfo_ssn) {
                        /* this is deliverable now */
                        TAILQ_REMOVE(&strmin->inqueue, ctl, next);
                        /* subtract pending on streams */
                        asoc->size_on_all_streams -= ctl->length;
                        sctp_ucount_decr(asoc->cnt_on_all_streams);
                        /* deliver it to at least the delivery-q */
                        strmin->last_sequence_delivered = ctl->sinfo_ssn;
                        if (stcb->sctp_socket) {
                                sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
                                sctp_add_to_readq(stcb->sctp_ep, stcb,
                                    ctl,
                                    &stcb->sctp_socket->so_rcv, 1,
                                    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
                        }
                        tt = strmin->last_sequence_delivered + 1;
                } else {
                        break;
                }
        }
}
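
/*
 * Illustrative sketch (added by the editor, not part of the original file;
 * compiled out): the two-pass delivery policy above on a toy, sorted list
 * of SSNs.  Pass one drains everything at or before the cut-off that the
 * FWD-TSN moved us to; pass two keeps delivering only while the SSNs stay
 * strictly consecutive.  Serial-number wrap-around is ignored for brevity.
 */
#if 0
static int
example_kick_reorder(const uint16_t *ssn, int n, uint16_t cutoff)
{
        int i = 0;

        /* Pass 1: everything at or before the cut-off is deliverable. */
        while (i < n && ssn[i] <= cutoff)
                i++;
        /* Pass 2: continue only while the sequence has no holes. */
        while (i < n && ssn[i] == (uint16_t)(cutoff + 1)) {
                cutoff = ssn[i];
                i++;
        }
        return (i);     /* number of entries that were delivered */
}
#endif
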
static void
sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    uint16_t stream, uint16_t seq)
{
        struct sctp_tmit_chunk *chk, *nchk;

        /* For each one on here see if we need to toss it */
        /*
         * For now, large messages held on the reasmqueue that are
         * complete will be tossed too.  We could in theory do more
         * work to spin through and stop after dumping one msg, aka
         * seeing the start of a new msg at the head, and call the
         * delivery function... to see if it can be delivered.  But
         * for now we just dump everything on the queue.
         */
        TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
                /*
                 * Do not toss it if it is on a different stream or
                 * marked for unordered delivery, in which case the
                 * stream sequence number has no meaning.
                 */
                if ((chk->rec.data.stream_number != stream) ||
                    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
                        continue;
                }
                if (chk->rec.data.stream_seq == seq) {
                        /* It needs to be tossed */
                        TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
                        if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
                                asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
                                asoc->str_of_pdapi = chk->rec.data.stream_number;
                                asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
                                asoc->fragment_flags = chk->rec.data.rcv_flags;
                        }
                        asoc->size_on_reasm_queue -= chk->send_size;
                        sctp_ucount_decr(asoc->cnt_on_reasm_queue);

                        /* Clear up any stream problem */
                        if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
                            SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
                                /*
                                 * We must advance this stream's
                                 * delivered sequence number if the
                                 * chunk being skipped is not
                                 * unordered.  There is a chance that
                                 * if the peer does not include the
                                 * last fragment in its FWD-TSN we
                                 * WILL have a problem here, since we
                                 * would have a partial chunk in the
                                 * queue that may not be deliverable.
                                 * Also, if a partial delivery API has
                                 * started, the user may get a partial
                                 * chunk with the next read returning
                                 * a new one.  Really ugly, but I see
                                 * no way around it!  Maybe a notify??
                                 */
                                asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
                        }
                        if (chk->data) {
                                sctp_m_freem(chk->data);
                                chk->data = NULL;
                        }
                        sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
                } else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
                        /*
                         * If the stream_seq is greater than the one
                         * being purged, we are done.
                         */
                        break;
                }
        }
}
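
/*
 * Illustrative sketch (added by the editor, not part of the original file;
 * compiled out): the per-entry decision the flush loop above makes while
 * walking the TSN-ordered reassembly queue, on plain integers with
 * wrap-around ignored.  The enum and helper are hypothetical.
 */
#if 0
enum flush_action { FLUSH_SKIP, FLUSH_TOSS, FLUSH_STOP };

static enum flush_action
example_flush_decision(uint16_t chk_stream, int chk_unordered,
    uint16_t chk_seq, uint16_t stream, uint16_t seq)
{
        if (chk_stream != stream || chk_unordered)
                return (FLUSH_SKIP);    /* other stream, or SSN meaningless */
        if (chk_seq == seq)
                return (FLUSH_TOSS);    /* fragment of the purged message */
        if (chk_seq > seq)
                return (FLUSH_STOP);    /* past the victim; nothing further */
        return (FLUSH_SKIP);            /* earlier SSN on the same stream */
}
#endif
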
void
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
    struct sctp_forward_tsn_chunk *fwd,
    int *abort_flag, struct mbuf *m, int offset)
{
        /* The pr-sctp fwd tsn */
        /*
         * Here we will perform all the data receiver side steps for
         * processing FwdTSN, as required by the PR-SCTP draft:
         *
         * Assume we get FwdTSN(x):
         *
         * 1) update local cumTSN to x
         * 2) try to further advance cumTSN to x + others we have
         * 3) examine and update re-ordering queue on pr-in-streams
         * 4) clean up re-assembly queue
         * 5) Send a sack to report where we are.
         */
        struct sctp_association *asoc;
        uint32_t new_cum_tsn, gap;
        unsigned int i, fwd_sz, m_size;
        uint32_t str_seq;
        struct sctp_stream_in *strm;
        struct sctp_tmit_chunk *chk, *nchk;
        struct sctp_queued_to_read *ctl, *sv;

        asoc = &stcb->asoc;
        if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
                SCTPDBG(SCTP_DEBUG_INDATA1,
                    "Bad size too small/big fwd-tsn\n");
                return;
        }
        m_size = (stcb->asoc.mapping_array_size << 3);
        /*************************************************************/
        /* 1. Here we update local cumTSN and shift the bitmap array */
        /*************************************************************/
        new_cum_tsn = ntohl(fwd->new_cumulative_tsn);

        if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
                /* Already got there ... */
                return;
        }
        /*
         * Now we know the new TSN is more advanced, let's find the
         * actual gap.
         */
        SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
        asoc->cumulative_tsn = new_cum_tsn;
        if (gap >= m_size) {
                if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
                        struct mbuf *oper;

                        /*
                         * Out of range (of the single-byte chunks in
                         * the rwnd I give out).  This must be an
                         * attacker.
                         */
                        *abort_flag = 1;
                        oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                            0, M_NOWAIT, 1, MT_DATA);
                        if (oper) {
                                struct sctp_paramhdr *ph;
                                uint32_t *ippp;

                                SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
                                    (sizeof(uint32_t) * 3);
                                ph = mtod(oper, struct sctp_paramhdr *);
                                ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                                ph->param_length = htons(SCTP_BUF_LEN(oper));
                                ippp = (uint32_t *) (ph + 1);
                                *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
                                ippp++;
                                *ippp = asoc->highest_tsn_inside_map;
                                ippp++;
                                *ippp = new_cum_tsn;
                        }
                        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
                        sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
                        return;
                }
                SCTP_STAT_INCR(sctps_fwdtsn_map_over);

                memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
                asoc->mapping_array_base_tsn = new_cum_tsn + 1;
                asoc->highest_tsn_inside_map = new_cum_tsn;

                memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
                asoc->highest_tsn_inside_nr_map = new_cum_tsn;

                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
                        sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
                }
        } else {
                SCTP_TCB_LOCK_ASSERT(stcb);
                for (i = 0; i <= gap; i++) {
                        if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
                            !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
                                SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
                                if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
                                        asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
                                }
                        }
                }
        }
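        /*
         * Worked example (added by the editor, not in the original): with
         * mapping_array_base_tsn = 1000 and new_cum_tsn = 1003,
         * SCTP_CALC_TSN_TO_GAP yields gap = 3; the calculation is done in
         * serial arithmetic, so it also holds across the 2^32 TSN wrap.
         * Since gap < m_size in that case, the else-branch above marks
         * TSNs 1000..1003 present in the nr-mapping array.  Had gap been
         * >= m_size, both maps would instead have been cleared and
         * re-based at new_cum_tsn + 1.
         */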
        /*************************************************************/
        /* 2. Clear up re-assembly queue                             */
        /*************************************************************/
        /*
         * First service it if pd-api is up, just in case we can
         * progress it forward.
         */
        if (asoc->fragmented_delivery_inprogress) {
                sctp_service_reassembly(stcb, asoc);
        }
        /* For each one on here see if we need to toss it */
        /*
         * For now, large messages held on the reasmqueue that are
         * complete will be tossed too.  We could in theory do more
         * work to spin through and stop after dumping one msg, aka
         * seeing the start of a new msg at the head, and call the
         * delivery function... to see if it can be delivered.  But
         * for now we just dump everything on the queue.
         */
        TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
                if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
                        /* It needs to be tossed */
                        TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
                        if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
                                asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
                                asoc->str_of_pdapi = chk->rec.data.stream_number;
                                asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
                                asoc->fragment_flags = chk->rec.data.rcv_flags;
                        }
                        asoc->size_on_reasm_queue -= chk->send_size;
                        sctp_ucount_decr(asoc->cnt_on_reasm_queue);

                        /* Clear up any stream problem */
                        if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
                            SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
                                /*
                                 * We must advance this stream's
                                 * delivered sequence number if the
                                 * chunk being skipped is not
                                 * unordered.  There is a chance that
                                 * if the peer does not include the
                                 * last fragment in its FWD-TSN we
                                 * WILL have a problem here, since we
                                 * would have a partial chunk in the
                                 * queue that may not be deliverable.
                                 * Also, if a partial delivery API has
                                 * started, the user may get a partial
                                 * chunk with the next read returning
                                 * a new one.  Really ugly, but I see
                                 * no way around it!  Maybe a notify??
                                 */
                                asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
                        }
                        if (chk->data) {
                                sctp_m_freem(chk->data);
                                chk->data = NULL;
                        }
                        sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
                } else {
                        /*
                         * Ok, we have gone beyond the end of the
                         * fwd-tsn's mark.
                         */
                        break;
                }
        }
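        /*
         * Wire-format note (added by the editor, not in the original):
         * after the common chunk header and the 32-bit new cumulative
         * TSN, a FORWARD TSN chunk carries zero or more 4-byte
         * (stream number, stream sequence) pairs, which is exactly what
         * the struct sctp_strseq walk below pulls out of the mbuf chain.
         * For example, a chunk of length 16 carries (16 - 8) / 4 = 2
         * such pairs.
         */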
        /*******************************************************/
        /* 3. Update the PR-stream re-ordering queues and fix  */
        /*    delivery issues as needed.                       */
        /*******************************************************/
        fwd_sz -= sizeof(*fwd);
        if (m && fwd_sz) {
                /* New method. */
                unsigned int num_str;
                struct sctp_strseq *stseq, strseqbuf;

                offset += sizeof(*fwd);

                SCTP_INP_READ_LOCK(stcb->sctp_ep);
                num_str = fwd_sz / sizeof(struct sctp_strseq);
                for (i = 0; i < num_str; i++) {
                        uint16_t st;

                        stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
                            sizeof(struct sctp_strseq),
                            (uint8_t *)&strseqbuf);
                        offset += sizeof(struct sctp_strseq);
                        if (stseq == NULL) {
                                break;
                        }
                        /* Convert */
                        st = ntohs(stseq->stream);
                        stseq->stream = st;
                        st = ntohs(stseq->sequence);
                        stseq->sequence = st;

                        /* now process */

                        /*
                         * Ok, we now look for the stream/seq on the
                         * read queue where it is not all delivered.
                         * If we find it, we transmute the read entry
                         * into a PDI_ABORTED.
                         */
                        if (stseq->stream >= asoc->streamincnt) {
                                /* screwed up streams, stop! */
                                break;
                        }
                        if ((asoc->str_of_pdapi == stseq->stream) &&
                            (asoc->ssn_of_pdapi == stseq->sequence)) {
                                /*
                                 * If this is the one we were
                                 * partially delivering now, then we
                                 * no longer are.  Note this will
                                 * change with the reassembly
                                 * re-write.
                                 */
                                asoc->fragmented_delivery_inprogress = 0;
                        }
                        sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
                        TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
                                if ((ctl->sinfo_stream == stseq->stream) &&
                                    (ctl->sinfo_ssn == stseq->sequence)) {
                                        str_seq = (stseq->stream << 16) | stseq->sequence;
                                        ctl->end_added = 1;
                                        ctl->pdapi_aborted = 1;
                                        sv = stcb->asoc.control_pdapi;
                                        stcb->asoc.control_pdapi = ctl;
                                        sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
                                            stcb,
                                            SCTP_PARTIAL_DELIVERY_ABORTED,
                                            (void *)&str_seq,
                                            SCTP_SO_NOT_LOCKED);
                                        stcb->asoc.control_pdapi = sv;
                                        break;
                                } else if ((ctl->sinfo_stream == stseq->stream) &&
                                    SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
                                        /* We are past our victim SSN */
                                        break;
                                }
                        }
                        strm = &asoc->strmin[stseq->stream];
                        if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
                                /* Update the sequence number */
                                strm->last_sequence_delivered = stseq->sequence;
                        }
                        /* now kick the stream the new way */
                        /* sa_ignore NO_NULL_CHK */
                        sctp_kick_prsctp_reorder_queue(stcb, strm);
                }
                SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
        }
        /*
         * Now slide things forward.
         */
        sctp_slide_mapping_arrays(stcb);

        if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
                /* now let's kick out and check for more fragmented delivery */
                /* sa_ignore NO_NULL_CHK */
                sctp_deliver_reasm_check(stcb, &stcb->asoc);
        }
}
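
/*
 * Illustrative sketch (added by the editor, not part of the original file;
 * compiled out): the SCTP_TSN_GT/GE and SCTP_SSN_GT/GE tests used all
 * through this file are serial-number comparisons, so they remain correct
 * when a 32-bit TSN or a 16-bit SSN wraps.  A minimal version of the
 * 32-bit "greater than" test:
 */
#if 0
static int
example_tsn_gt(uint32_t a, uint32_t b)
{
        /*
         * a is "after" b when the forward distance from b to a, taken
         * modulo 2^32, is non-zero and less than half the number space.
         * E.g. example_tsn_gt(5, 0xfffffffeU) is true: TSN 5 lies just
         * past the wrap point.
         */
        return (a != b && (uint32_t)(a - b) < (1U << 31));
}
#endif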