1 /*- 2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * a) Redistributions of source code must retain the above copyright notice, 8 * this list of conditions and the following disclaimer. 9 * 10 * b) Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the distribution. 13 * 14 * c) Neither the name of Cisco Systems, Inc. nor the names of its 15 * contributors may be used to endorse or promote products derived 16 * from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 
29 */ 30 31 /* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $ */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include <netinet/sctp_os.h> 37 #include <netinet/sctp_var.h> 38 #include <netinet/sctp_sysctl.h> 39 #include <netinet/sctp_pcb.h> 40 #include <netinet/sctp_header.h> 41 #include <netinet/sctputil.h> 42 #include <netinet/sctp_output.h> 43 #include <netinet/sctp_input.h> 44 #include <netinet/sctp_auth.h> 45 #include <netinet/sctp_indata.h> 46 #include <netinet/sctp_asconf.h> 47 #include <netinet/sctp_bsd_addr.h> 48 #include <netinet/sctp_timer.h> 49 #include <netinet/sctp_crc32.h> 50 #include <netinet/udp.h> 51 52 53 54 static void 55 sctp_stop_all_cookie_timers(struct sctp_tcb *stcb) 56 { 57 struct sctp_nets *net; 58 59 /* 60 * This now not only stops all cookie timers it also stops any INIT 61 * timers as well. This will make sure that the timers are stopped 62 * in all collision cases. 63 */ 64 SCTP_TCB_LOCK_ASSERT(stcb); 65 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 66 if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) { 67 sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, 68 stcb->sctp_ep, 69 stcb, 70 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1); 71 } else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) { 72 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, 73 stcb->sctp_ep, 74 stcb, 75 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2); 76 } 77 } 78 } 79 80 /* INIT handler */ 81 static void 82 sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh, 83 struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 84 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id, uint16_t port) 85 { 86 struct sctp_init *init; 87 struct mbuf *op_err; 88 uint32_t init_limit; 89 90 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n", 91 stcb); 92 if (stcb == NULL) { 93 SCTP_INP_RLOCK(inp); 94 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 95 goto outnow; 96 } 97 } 98 op_err = NULL; 99 init = &cp->init; 
100 /* First are we accepting? */ 101 if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) { 102 SCTPDBG(SCTP_DEBUG_INPUT2, 103 "sctp_handle_init: Abort, so_qlimit:%d\n", 104 inp->sctp_socket->so_qlimit); 105 /* 106 * FIX ME ?? What about TCP model and we have a 107 * match/restart case? Actually no fix is needed. the lookup 108 * will always find the existing assoc so stcb would not be 109 * NULL. It may be questionable to do this since we COULD 110 * just send back the INIT-ACK and hope that the app did 111 * accept()'s by the time the COOKIE was sent. But there is 112 * a price to pay for COOKIE generation and I don't want to 113 * pay it on the chance that the app will actually do some 114 * accepts(). The App just looses and should NOT be in this 115 * state :-) 116 */ 117 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 118 vrf_id, port); 119 if (stcb) 120 *abort_no_unlock = 1; 121 goto outnow; 122 } 123 if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) { 124 /* Invalid length */ 125 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 126 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 127 vrf_id, port); 128 if (stcb) 129 *abort_no_unlock = 1; 130 goto outnow; 131 } 132 /* validate parameters */ 133 if (init->initiate_tag == 0) { 134 /* protocol error... send abort */ 135 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 136 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 137 vrf_id, port); 138 if (stcb) 139 *abort_no_unlock = 1; 140 goto outnow; 141 } 142 if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) { 143 /* invalid parameter... send abort */ 144 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 145 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 146 vrf_id, port); 147 if (stcb) 148 *abort_no_unlock = 1; 149 goto outnow; 150 } 151 if (init->num_inbound_streams == 0) { 152 /* protocol error... 
send abort */ 153 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 154 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 155 vrf_id, port); 156 if (stcb) 157 *abort_no_unlock = 1; 158 goto outnow; 159 } 160 if (init->num_outbound_streams == 0) { 161 /* protocol error... send abort */ 162 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 163 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 164 vrf_id, port); 165 if (stcb) 166 *abort_no_unlock = 1; 167 goto outnow; 168 } 169 init_limit = offset + ntohs(cp->ch.chunk_length); 170 if (sctp_validate_init_auth_params(m, offset + sizeof(*cp), 171 init_limit)) { 172 /* auth parameter(s) error... send abort */ 173 sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id, port); 174 if (stcb) 175 *abort_no_unlock = 1; 176 goto outnow; 177 } 178 /* send an INIT-ACK w/cookie */ 179 SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n"); 180 sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id, port, 181 ((stcb == NULL) ? SCTP_HOLDS_LOCK : SCTP_NOT_LOCKED)); 182 outnow: 183 if (stcb == NULL) { 184 SCTP_INP_RUNLOCK(inp); 185 } 186 } 187 188 /* 189 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error 190 */ 191 192 int 193 sctp_is_there_unsent_data(struct sctp_tcb *stcb) 194 { 195 int unsent_data = 0; 196 struct sctp_stream_queue_pending *sp; 197 struct sctp_stream_out *strq; 198 struct sctp_association *asoc; 199 200 /* 201 * This function returns the number of streams that have true unsent 202 * data on them. Note that as it looks through it will clean up any 203 * places that have old data that has been sent but left at top of 204 * stream queue. 
205 */ 206 asoc = &stcb->asoc; 207 SCTP_TCB_SEND_LOCK(stcb); 208 if (!TAILQ_EMPTY(&asoc->out_wheel)) { 209 /* Check to see if some data queued */ 210 TAILQ_FOREACH(strq, &asoc->out_wheel, next_spoke) { 211 is_there_another: 212 /* sa_ignore FREED_MEMORY */ 213 sp = TAILQ_FIRST(&strq->outqueue); 214 if (sp == NULL) { 215 continue; 216 } 217 if ((sp->msg_is_complete) && 218 (sp->length == 0) && 219 (sp->sender_all_done)) { 220 /* 221 * We are doing differed cleanup. Last time 222 * through when we took all the data the 223 * sender_all_done was not set. 224 */ 225 if (sp->put_last_out == 0) { 226 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n"); 227 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n", 228 sp->sender_all_done, 229 sp->length, 230 sp->msg_is_complete, 231 sp->put_last_out); 232 } 233 atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1); 234 TAILQ_REMOVE(&strq->outqueue, sp, next); 235 if (sp->net) { 236 sctp_free_remote_addr(sp->net); 237 sp->net = NULL; 238 } 239 if (sp->data) { 240 sctp_m_freem(sp->data); 241 sp->data = NULL; 242 } 243 sctp_free_a_strmoq(stcb, sp); 244 goto is_there_another; 245 } else { 246 unsent_data++; 247 continue; 248 } 249 } 250 } 251 SCTP_TCB_SEND_UNLOCK(stcb); 252 return (unsent_data); 253 } 254 255 static int 256 sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb, 257 struct sctp_nets *net) 258 { 259 struct sctp_init *init; 260 struct sctp_association *asoc; 261 struct sctp_nets *lnet; 262 unsigned int i; 263 264 init = &cp->init; 265 asoc = &stcb->asoc; 266 /* save off parameters */ 267 asoc->peer_vtag = ntohl(init->initiate_tag); 268 asoc->peers_rwnd = ntohl(init->a_rwnd); 269 if (!TAILQ_EMPTY(&asoc->nets)) { 270 /* update any ssthresh's that may have a default */ 271 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) { 272 lnet->ssthresh = asoc->peers_rwnd; 273 274 if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) { 275 sctp_log_cwnd(stcb, 
lnet, 0, SCTP_CWND_INITIALIZATION); 276 } 277 } 278 } 279 SCTP_TCB_SEND_LOCK(stcb); 280 if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) { 281 unsigned int newcnt; 282 struct sctp_stream_out *outs; 283 struct sctp_stream_queue_pending *sp; 284 struct sctp_tmit_chunk *chk, *chk_next; 285 286 /* abandon the upper streams */ 287 newcnt = ntohs(init->num_inbound_streams); 288 if (!TAILQ_EMPTY(&asoc->send_queue)) { 289 chk = TAILQ_FIRST(&asoc->send_queue); 290 while (chk) { 291 chk_next = TAILQ_NEXT(chk, sctp_next); 292 if (chk->rec.data.stream_number >= newcnt) { 293 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 294 asoc->send_queue_cnt--; 295 if (chk->data != NULL) { 296 sctp_free_bufspace(stcb, asoc, chk, 1); 297 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, 298 SCTP_NOTIFY_DATAGRAM_UNSENT, chk, SCTP_SO_NOT_LOCKED); 299 if (chk->data) { 300 sctp_m_freem(chk->data); 301 chk->data = NULL; 302 } 303 } 304 sctp_free_a_chunk(stcb, chk); 305 /* sa_ignore FREED_MEMORY */ 306 } 307 chk = chk_next; 308 } 309 } 310 if (asoc->strmout) { 311 for (i = newcnt; i < asoc->pre_open_streams; i++) { 312 outs = &asoc->strmout[i]; 313 sp = TAILQ_FIRST(&outs->outqueue); 314 while (sp) { 315 TAILQ_REMOVE(&outs->outqueue, sp, next); 316 asoc->stream_queue_cnt--; 317 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, 318 stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, 319 sp, SCTP_SO_NOT_LOCKED); 320 if (sp->data) { 321 sctp_m_freem(sp->data); 322 sp->data = NULL; 323 } 324 if (sp->net) { 325 sctp_free_remote_addr(sp->net); 326 sp->net = NULL; 327 } 328 /* Free the chunk */ 329 sctp_free_a_strmoq(stcb, sp); 330 /* sa_ignore FREED_MEMORY */ 331 sp = TAILQ_FIRST(&outs->outqueue); 332 } 333 } 334 } 335 /* cut back the count */ 336 asoc->pre_open_streams = newcnt; 337 } 338 SCTP_TCB_SEND_UNLOCK(stcb); 339 asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams; 340 /* init tsn's */ 341 asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1; 342 /* EY - nr_sack: 
initialize highest tsn in nr_mapping_array */ 343 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map; 344 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 345 sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 346 } 347 /* This is the next one we expect */ 348 asoc->str_reset_seq_in = asoc->asconf_seq_in + 1; 349 350 asoc->mapping_array_base_tsn = ntohl(init->initial_tsn); 351 asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in; 352 asoc->last_echo_tsn = asoc->asconf_seq_in; 353 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 354 /* open the requested streams */ 355 356 if (asoc->strmin != NULL) { 357 /* Free the old ones */ 358 struct sctp_queued_to_read *ctl; 359 360 for (i = 0; i < asoc->streamincnt; i++) { 361 ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue); 362 while (ctl) { 363 TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next); 364 sctp_free_remote_addr(ctl->whoFrom); 365 ctl->whoFrom = NULL; 366 sctp_m_freem(ctl->data); 367 ctl->data = NULL; 368 sctp_free_a_readq(stcb, ctl); 369 ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue); 370 } 371 } 372 SCTP_FREE(asoc->strmin, SCTP_M_STRMI); 373 } 374 asoc->streamincnt = ntohs(init->num_outbound_streams); 375 if (asoc->streamincnt > MAX_SCTP_STREAMS) { 376 asoc->streamincnt = MAX_SCTP_STREAMS; 377 } 378 SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt * 379 sizeof(struct sctp_stream_in), SCTP_M_STRMI); 380 if (asoc->strmin == NULL) { 381 /* we didn't get memory for the streams! */ 382 SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n"); 383 return (-1); 384 } 385 for (i = 0; i < asoc->streamincnt; i++) { 386 asoc->strmin[i].stream_no = i; 387 asoc->strmin[i].last_sequence_delivered = 0xffff; 388 /* 389 * U-stream ranges will be set when the cookie is unpacked. 390 * Or for the INIT sender they are un set (if pr-sctp not 391 * supported) when the INIT-ACK arrives. 
392 */ 393 TAILQ_INIT(&asoc->strmin[i].inqueue); 394 asoc->strmin[i].delivery_started = 0; 395 } 396 /* 397 * load_address_from_init will put the addresses into the 398 * association when the COOKIE is processed or the INIT-ACK is 399 * processed. Both types of COOKIE's existing and new call this 400 * routine. It will remove addresses that are no longer in the 401 * association (for the restarting case where addresses are 402 * removed). Up front when the INIT arrives we will discard it if it 403 * is a restart and new addresses have been added. 404 */ 405 /* sa_ignore MEMLEAK */ 406 return (0); 407 } 408 409 /* 410 * INIT-ACK message processing/consumption returns value < 0 on error 411 */ 412 static int 413 sctp_process_init_ack(struct mbuf *m, int iphlen, int offset, 414 struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb, 415 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id) 416 { 417 struct sctp_association *asoc; 418 struct mbuf *op_err; 419 int retval, abort_flag; 420 uint32_t initack_limit; 421 int nat_friendly = 0; 422 423 /* First verify that we have no illegal param's */ 424 abort_flag = 0; 425 op_err = NULL; 426 427 op_err = sctp_arethere_unrecognized_parameters(m, 428 (offset + sizeof(struct sctp_init_chunk)), 429 &abort_flag, (struct sctp_chunkhdr *)cp, &nat_friendly); 430 if (abort_flag) { 431 /* Send an abort and notify peer */ 432 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_CAUSE_PROTOCOL_VIOLATION, op_err, SCTP_SO_NOT_LOCKED); 433 *abort_no_unlock = 1; 434 return (-1); 435 } 436 asoc = &stcb->asoc; 437 asoc->peer_supports_nat = (uint8_t) nat_friendly; 438 /* process the peer's parameters in the INIT-ACK */ 439 retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net); 440 if (retval < 0) { 441 return (retval); 442 } 443 initack_limit = offset + ntohs(cp->ch.chunk_length); 444 /* load all addresses */ 445 if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen, 446 (offset + sizeof(struct 
sctp_init_chunk)), initack_limit, sh, 447 NULL))) { 448 /* Huh, we should abort */ 449 SCTPDBG(SCTP_DEBUG_INPUT1, 450 "Load addresses from INIT causes an abort %d\n", 451 retval); 452 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 453 NULL, 0, net->port); 454 *abort_no_unlock = 1; 455 return (-1); 456 } 457 /* if the peer doesn't support asconf, flush the asconf queue */ 458 if (asoc->peer_supports_asconf == 0) { 459 struct sctp_asconf_addr *aparam; 460 461 while (!TAILQ_EMPTY(&asoc->asconf_queue)) { 462 /* sa_ignore FREED_MEMORY */ 463 aparam = TAILQ_FIRST(&asoc->asconf_queue); 464 TAILQ_REMOVE(&asoc->asconf_queue, aparam, next); 465 SCTP_FREE(aparam, SCTP_M_ASC_ADDR); 466 } 467 } 468 stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs, 469 stcb->asoc.local_hmacs); 470 if (op_err) { 471 sctp_queue_op_err(stcb, op_err); 472 /* queuing will steal away the mbuf chain to the out queue */ 473 op_err = NULL; 474 } 475 /* extract the cookie and queue it to "echo" it back... */ 476 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 477 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 478 stcb->asoc.overall_error_count, 479 0, 480 SCTP_FROM_SCTP_INPUT, 481 __LINE__); 482 } 483 stcb->asoc.overall_error_count = 0; 484 net->error_count = 0; 485 486 /* 487 * Cancel the INIT timer, We do this first before queueing the 488 * cookie. We always cancel at the primary to assue that we are 489 * canceling the timer started by the INIT which always goes to the 490 * primary. 491 */ 492 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb, 493 asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4); 494 495 /* calculate the RTO */ 496 net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy); 497 498 retval = sctp_send_cookie_echo(m, offset, stcb, net); 499 if (retval < 0) { 500 /* 501 * No cookie, we probably should send a op error. 
But in any 502 * case if there is no cookie in the INIT-ACK, we can 503 * abandon the peer, its broke. 504 */ 505 if (retval == -3) { 506 /* We abort with an error of missing mandatory param */ 507 op_err = 508 sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM); 509 if (op_err) { 510 /* 511 * Expand beyond to include the mandatory 512 * param cookie 513 */ 514 struct sctp_inv_mandatory_param *mp; 515 516 SCTP_BUF_LEN(op_err) = 517 sizeof(struct sctp_inv_mandatory_param); 518 mp = mtod(op_err, 519 struct sctp_inv_mandatory_param *); 520 /* Subtract the reserved param */ 521 mp->length = 522 htons(sizeof(struct sctp_inv_mandatory_param) - 2); 523 mp->num_param = htonl(1); 524 mp->param = htons(SCTP_STATE_COOKIE); 525 mp->resv = 0; 526 } 527 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 528 sh, op_err, 0, net->port); 529 *abort_no_unlock = 1; 530 } 531 return (retval); 532 } 533 return (0); 534 } 535 536 static void 537 sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp, 538 struct sctp_tcb *stcb, struct sctp_nets *net) 539 { 540 struct sockaddr_storage store; 541 struct sockaddr_in *sin; 542 struct sockaddr_in6 *sin6; 543 struct sctp_nets *r_net, *f_net; 544 struct timeval tv; 545 int req_prim = 0; 546 547 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) { 548 /* Invalid length */ 549 return; 550 } 551 sin = (struct sockaddr_in *)&store; 552 sin6 = (struct sockaddr_in6 *)&store; 553 554 memset(&store, 0, sizeof(store)); 555 if (cp->heartbeat.hb_info.addr_family == AF_INET && 556 cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) { 557 sin->sin_family = cp->heartbeat.hb_info.addr_family; 558 sin->sin_len = cp->heartbeat.hb_info.addr_len; 559 sin->sin_port = stcb->rport; 560 memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address, 561 sizeof(sin->sin_addr)); 562 } else if (cp->heartbeat.hb_info.addr_family == AF_INET6 && 563 cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) { 564 sin6->sin6_family = 
cp->heartbeat.hb_info.addr_family; 565 sin6->sin6_len = cp->heartbeat.hb_info.addr_len; 566 sin6->sin6_port = stcb->rport; 567 memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address, 568 sizeof(sin6->sin6_addr)); 569 } else { 570 return; 571 } 572 r_net = sctp_findnet(stcb, (struct sockaddr *)sin); 573 if (r_net == NULL) { 574 SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n"); 575 return; 576 } 577 if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) && 578 (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) && 579 (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) { 580 /* 581 * If the its a HB and it's random value is correct when can 582 * confirm the destination. 583 */ 584 r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 585 if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) { 586 stcb->asoc.primary_destination = r_net; 587 r_net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY; 588 r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY; 589 f_net = TAILQ_FIRST(&stcb->asoc.nets); 590 if (f_net != r_net) { 591 /* 592 * first one on the list is NOT the primary 593 * sctp_cmpaddr() is much more efficent if 594 * the primary is the first on the list, 595 * make it so. 596 */ 597 TAILQ_REMOVE(&stcb->asoc.nets, r_net, sctp_next); 598 TAILQ_INSERT_HEAD(&stcb->asoc.nets, r_net, sctp_next); 599 } 600 req_prim = 1; 601 } 602 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 603 stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED); 604 } 605 r_net->error_count = 0; 606 r_net->hb_responded = 1; 607 tv.tv_sec = cp->heartbeat.hb_info.time_value_1; 608 tv.tv_usec = cp->heartbeat.hb_info.time_value_2; 609 if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 610 r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE; 611 r_net->dest_state |= SCTP_ADDR_REACHABLE; 612 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 613 SCTP_HEARTBEAT_SUCCESS, (void *)r_net, SCTP_SO_NOT_LOCKED); 614 /* now was it the primary? 
if so restore */ 615 if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) { 616 (void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net); 617 } 618 } 619 /* 620 * JRS 5/14/07 - If CMT PF is on and the destination is in PF state, 621 * set the destination to active state and set the cwnd to one or 622 * two MTU's based on whether PF1 or PF2 is being used. If a T3 623 * timer is running, for the destination, stop the timer because a 624 * PF-heartbeat was received. 625 */ 626 if ((stcb->asoc.sctp_cmt_on_off == 1) && 627 (stcb->asoc.sctp_cmt_pf > 0) && 628 ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) { 629 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 630 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 631 stcb, net, 632 SCTP_FROM_SCTP_INPUT + SCTP_LOC_5); 633 } 634 net->dest_state &= ~SCTP_ADDR_PF; 635 net->cwnd = net->mtu * stcb->asoc.sctp_cmt_pf; 636 SCTPDBG(SCTP_DEBUG_INPUT1, "Destination %p moved from PF to reachable with cwnd %d.\n", 637 net, net->cwnd); 638 } 639 /* Now lets do a RTO with this */ 640 r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy); 641 /* Mobility adaptation */ 642 if (req_prim) { 643 if ((sctp_is_mobility_feature_on(stcb->sctp_ep, 644 SCTP_MOBILITY_BASE) || 645 sctp_is_mobility_feature_on(stcb->sctp_ep, 646 SCTP_MOBILITY_FASTHANDOFF)) && 647 sctp_is_mobility_feature_on(stcb->sctp_ep, 648 SCTP_MOBILITY_PRIM_DELETED)) { 649 650 sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_TIMER + SCTP_LOC_7); 651 if (sctp_is_mobility_feature_on(stcb->sctp_ep, 652 SCTP_MOBILITY_FASTHANDOFF)) { 653 sctp_assoc_immediate_retrans(stcb, 654 stcb->asoc.primary_destination); 655 } 656 if (sctp_is_mobility_feature_on(stcb->sctp_ep, 657 SCTP_MOBILITY_BASE)) { 658 sctp_move_chunks_from_net(stcb, 659 stcb->asoc.deleted_primary); 660 } 661 sctp_delete_prim_timer(stcb->sctp_ep, stcb, 662 stcb->asoc.deleted_primary); 663 } 664 } 665 } 666 667 static int 668 
sctp_handle_nat_colliding_state(struct sctp_tcb *stcb) 669 { 670 /* 671 * return 0 means we want you to proceed with the abort non-zero 672 * means no abort processing 673 */ 674 struct sctpasochead *head; 675 676 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) { 677 /* generate a new vtag and send init */ 678 LIST_REMOVE(stcb, sctp_asocs); 679 stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1); 680 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))]; 681 /* 682 * put it in the bucket in the vtag hash of assoc's for the 683 * system 684 */ 685 LIST_INSERT_HEAD(head, stcb, sctp_asocs); 686 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 687 return (1); 688 } 689 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) { 690 /* 691 * treat like a case where the cookie expired i.e.: - dump 692 * current cookie. - generate a new vtag. - resend init. 693 */ 694 /* generate a new vtag and send init */ 695 LIST_REMOVE(stcb, sctp_asocs); 696 stcb->asoc.state &= ~SCTP_STATE_COOKIE_ECHOED; 697 stcb->asoc.state |= SCTP_STATE_COOKIE_WAIT; 698 sctp_stop_all_cookie_timers(stcb); 699 sctp_toss_old_cookies(stcb, &stcb->asoc); 700 stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1); 701 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))]; 702 /* 703 * put it in the bucket in the vtag hash of assoc's for the 704 * system 705 */ 706 LIST_INSERT_HEAD(head, stcb, sctp_asocs); 707 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 708 return (1); 709 } 710 return (0); 711 } 712 713 static int 714 sctp_handle_nat_missing_state(struct sctp_tcb *stcb, 715 struct sctp_nets *net) 716 { 717 /* 718 * return 0 means we want you to proceed with the abort non-zero 719 * means no abort processing 720 */ 721 if (stcb->asoc.peer_supports_auth == 0) { 722 
SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n"); 723 return (0); 724 } 725 sctp_asconf_send_nat_state_update(stcb, net); 726 return (1); 727 } 728 729 730 static void 731 sctp_handle_abort(struct sctp_abort_chunk *cp, 732 struct sctp_tcb *stcb, struct sctp_nets *net) 733 { 734 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 735 struct socket *so; 736 737 #endif 738 uint16_t len; 739 740 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n"); 741 if (stcb == NULL) 742 return; 743 744 len = ntohs(cp->ch.chunk_length); 745 if (len > sizeof(struct sctp_chunkhdr)) { 746 /* 747 * Need to check the cause codes for our two magic nat 748 * aborts which don't kill the assoc necessarily. 749 */ 750 struct sctp_abort_chunk *cpnext; 751 struct sctp_missing_nat_state *natc; 752 uint16_t cause; 753 754 cpnext = cp; 755 cpnext++; 756 natc = (struct sctp_missing_nat_state *)cpnext; 757 cause = ntohs(natc->cause); 758 if (cause == SCTP_CAUSE_NAT_COLLIDING_STATE) { 759 SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n", 760 cp->ch.chunk_flags); 761 if (sctp_handle_nat_colliding_state(stcb)) { 762 return; 763 } 764 } else if (cause == SCTP_CAUSE_NAT_MISSING_STATE) { 765 SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n", 766 cp->ch.chunk_flags); 767 if (sctp_handle_nat_missing_state(stcb, net)) { 768 return; 769 } 770 } 771 } 772 /* stop any receive timers */ 773 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6); 774 /* notify user of the abort and clean up... 
*/ 775 sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED); 776 /* free the tcb */ 777 #if defined(SCTP_PANIC_ON_ABORT) 778 printf("stcb:%p state:%d rport:%d net:%p\n", 779 stcb, stcb->asoc.state, stcb->rport, net); 780 if (!(stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 781 panic("Received an ABORT"); 782 } else { 783 printf("No panic its in state %x closed\n", stcb->asoc.state); 784 } 785 #endif 786 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 787 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 788 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 789 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 790 } 791 #ifdef SCTP_ASOCLOG_OF_TSNS 792 sctp_print_out_track_log(stcb); 793 #endif 794 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 795 so = SCTP_INP_SO(stcb->sctp_ep); 796 atomic_add_int(&stcb->asoc.refcnt, 1); 797 SCTP_TCB_UNLOCK(stcb); 798 SCTP_SOCKET_LOCK(so, 1); 799 SCTP_TCB_LOCK(stcb); 800 atomic_subtract_int(&stcb->asoc.refcnt, 1); 801 #endif 802 stcb->asoc.state |= SCTP_STATE_WAS_ABORTED; 803 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, 804 SCTP_FROM_SCTP_INPUT + SCTP_LOC_6); 805 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 806 SCTP_SOCKET_UNLOCK(so, 1); 807 #endif 808 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n"); 809 } 810 811 static void 812 sctp_handle_shutdown(struct sctp_shutdown_chunk *cp, 813 struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag) 814 { 815 struct sctp_association *asoc; 816 int some_on_streamwheel; 817 818 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 819 struct socket *so; 820 821 #endif 822 823 SCTPDBG(SCTP_DEBUG_INPUT2, 824 "sctp_handle_shutdown: handling SHUTDOWN\n"); 825 if (stcb == NULL) 826 return; 827 asoc = &stcb->asoc; 828 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 829 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) { 830 return; 831 } 832 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) { 833 /* 
Shutdown NOT the expected size */ 834 return; 835 } else { 836 sctp_update_acked(stcb, cp, net, abort_flag); 837 if (*abort_flag) { 838 return; 839 } 840 } 841 if (asoc->control_pdapi) { 842 /* 843 * With a normal shutdown we assume the end of last record. 844 */ 845 SCTP_INP_READ_LOCK(stcb->sctp_ep); 846 asoc->control_pdapi->end_added = 1; 847 asoc->control_pdapi->pdapi_aborted = 1; 848 asoc->control_pdapi = NULL; 849 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 850 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 851 so = SCTP_INP_SO(stcb->sctp_ep); 852 atomic_add_int(&stcb->asoc.refcnt, 1); 853 SCTP_TCB_UNLOCK(stcb); 854 SCTP_SOCKET_LOCK(so, 1); 855 SCTP_TCB_LOCK(stcb); 856 atomic_subtract_int(&stcb->asoc.refcnt, 1); 857 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 858 /* assoc was freed while we were unlocked */ 859 SCTP_SOCKET_UNLOCK(so, 1); 860 return; 861 } 862 #endif 863 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 864 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 865 SCTP_SOCKET_UNLOCK(so, 1); 866 #endif 867 } 868 /* goto SHUTDOWN_RECEIVED state to block new requests */ 869 if (stcb->sctp_socket) { 870 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 871 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) && 872 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) { 873 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED); 874 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 875 /* 876 * notify upper layer that peer has initiated a 877 * shutdown 878 */ 879 sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 880 881 /* reset time */ 882 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered); 883 } 884 } 885 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 886 /* 887 * stop the shutdown timer, since we WILL move to 888 * SHUTDOWN-ACK-SENT. 
 */
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
	}
	/* Now is there unsent data on a stream somewhere? */
	some_on_streamwheel = sctp_is_there_unsent_data(stcb);

	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    some_on_streamwheel) {
		/* By returning we will push more data out */
		return;
	} else {
		/* no outstanding data to send, so move on... */
		/* send SHUTDOWN-ACK */
		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
		/* move to SHUTDOWN-ACK-SENT state */
		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
		SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
		/* start SHUTDOWN timer */
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
		    stcb, net);
	}
}

/*
 * SHUTDOWN-ACK handler.  Valid only in SHUTDOWN-SENT/SHUTDOWN-ACK-SENT;
 * otherwise the chunk is answered with a SHUTDOWN-COMPLETE (OOTB style) or
 * silently ignored.  On the normal path it terminates any partial-delivery
 * read, sends SHUTDOWN-COMPLETE, notifies the ULP and frees the TCB.
 * NOTE: on the accepted paths the TCB lock is consumed (unlocked or the
 * assoc freed) before returning.
 */
static void
sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_association *asoc;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

	so = SCTP_INP_SO(stcb->sctp_ep);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;
	/* process according to association state */
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
		/* unexpected SHUTDOWN-ACK... do OOTB handling... */
		sctp_send_shutdown_complete(stcb, net, 1);
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
	    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* unexpected SHUTDOWN-ACK... so ignore... */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if (asoc->control_pdapi) {
		/*
		 * With a normal shutdown we assume the end of last record.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Socket lock must be taken before the TCB lock; drop and
		 * re-take the TCB lock, holding a refcount so the assoc
		 * cannot vanish, then re-check it was not closed meanwhile.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	/* are the queues empty? */
	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    !TAILQ_EMPTY(&asoc->out_wheel)) {
		sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
	}
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
	/* send SHUTDOWN-COMPLETE */
	sctp_send_shutdown_complete(stcb, net, 0);
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
			/* Set the connected flag to disconnected */
			stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0;
		}
	}
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB but first save off the ep */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
}

/*
 * Skip past the param header and then we will find the chunk that caused the
 * problem. There are two possibilities ASCONF or FWD-TSN other than that and
 * our peer must be broken.
 */
static void
sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
    struct sctp_nets *net)
{
	struct sctp_chunkhdr *chk;

	/* the offending chunk header sits right after the error-cause header */
	chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
	switch (chk->chunk_type) {
	case SCTP_ASCONF_ACK:
	case SCTP_ASCONF:
		/* peer rejected ASCONF - clean up any pending asconf state */
		sctp_asconf_cleanup(stcb, net);
		break;
	case SCTP_FORWARD_CUM_TSN:
		/* peer rejected FWD-TSN - stop using PR-SCTP with it */
		stcb->asoc.peer_supports_prsctp = 0;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "Peer does not support chunk type %d(%x)??\n",
		    chk->chunk_type, (uint32_t) chk->chunk_type);
		break;
	}
}

/*
 * Skip past the param header and then we will find the param that caused the
 * problem. There are a number of param's in a ASCONF OR the prsctp param
 * these will turn off the specific feature the peer rejected.
 */
static void
sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
{
	struct sctp_paramhdr *pbad;

	/* the offending parameter follows the error-cause header */
	pbad = phdr + 1;
	switch (ntohs(pbad->param_type)) {
		/* pr-sctp draft */
	case SCTP_PRSCTP_SUPPORTED:
		stcb->asoc.peer_supports_prsctp = 0;
		break;
	case SCTP_SUPPORTED_CHUNK_EXT:
		break;
		/* draft-ietf-tsvwg-addip-sctp */
	case SCTP_HAS_NAT_SUPPORT:
		stcb->asoc.peer_supports_nat = 0;
		break;
	case SCTP_ECN_NONCE_SUPPORTED:
		stcb->asoc.peer_supports_ecn_nonce = 0;
		stcb->asoc.ecn_nonce_allowed = 0;
		stcb->asoc.ecn_allowed = 0;
		break;
	case SCTP_ADD_IP_ADDRESS:
	case SCTP_DEL_IP_ADDRESS:
	case SCTP_SET_PRIM_ADDR:
		stcb->asoc.peer_supports_asconf = 0;
		break;
	case SCTP_SUCCESS_REPORT:
	case SCTP_ERROR_CAUSE_IND:
		SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success?
or error cause?\n"); 1075 SCTPDBG(SCTP_DEBUG_INPUT2, 1076 "Turning off ASCONF to this strange peer\n"); 1077 stcb->asoc.peer_supports_asconf = 0; 1078 break; 1079 default: 1080 SCTPDBG(SCTP_DEBUG_INPUT2, 1081 "Peer does not support param type %d(%x)??\n", 1082 pbad->param_type, (uint32_t) pbad->param_type); 1083 break; 1084 } 1085 } 1086 1087 static int 1088 sctp_handle_error(struct sctp_chunkhdr *ch, 1089 struct sctp_tcb *stcb, struct sctp_nets *net) 1090 { 1091 int chklen; 1092 struct sctp_paramhdr *phdr; 1093 uint16_t error_type; 1094 uint16_t error_len; 1095 struct sctp_association *asoc; 1096 int adjust; 1097 1098 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1099 struct socket *so; 1100 1101 #endif 1102 1103 /* parse through all of the errors and process */ 1104 asoc = &stcb->asoc; 1105 phdr = (struct sctp_paramhdr *)((caddr_t)ch + 1106 sizeof(struct sctp_chunkhdr)); 1107 chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr); 1108 while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) { 1109 /* Process an Error Cause */ 1110 error_type = ntohs(phdr->param_type); 1111 error_len = ntohs(phdr->param_length); 1112 if ((error_len > chklen) || (error_len == 0)) { 1113 /* invalid param length for this param */ 1114 SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n", 1115 chklen, error_len); 1116 return (0); 1117 } 1118 switch (error_type) { 1119 case SCTP_CAUSE_INVALID_STREAM: 1120 case SCTP_CAUSE_MISSING_PARAM: 1121 case SCTP_CAUSE_INVALID_PARAM: 1122 case SCTP_CAUSE_NO_USER_DATA: 1123 SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? 
We have a bug :/ (or do they?)\n", 1124 error_type); 1125 break; 1126 case SCTP_CAUSE_NAT_COLLIDING_STATE: 1127 SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n", 1128 ch->chunk_flags); 1129 if (sctp_handle_nat_colliding_state(stcb)) { 1130 return (0); 1131 } 1132 break; 1133 case SCTP_CAUSE_NAT_MISSING_STATE: 1134 SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n", 1135 ch->chunk_flags); 1136 if (sctp_handle_nat_missing_state(stcb, net)) { 1137 return (0); 1138 } 1139 break; 1140 case SCTP_CAUSE_STALE_COOKIE: 1141 /* 1142 * We only act if we have echoed a cookie and are 1143 * waiting. 1144 */ 1145 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) { 1146 int *p; 1147 1148 p = (int *)((caddr_t)phdr + sizeof(*phdr)); 1149 /* Save the time doubled */ 1150 asoc->cookie_preserve_req = ntohl(*p) << 1; 1151 asoc->stale_cookie_count++; 1152 if (asoc->stale_cookie_count > 1153 asoc->max_init_times) { 1154 sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED); 1155 /* now free the asoc */ 1156 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1157 so = SCTP_INP_SO(stcb->sctp_ep); 1158 atomic_add_int(&stcb->asoc.refcnt, 1); 1159 SCTP_TCB_UNLOCK(stcb); 1160 SCTP_SOCKET_LOCK(so, 1); 1161 SCTP_TCB_LOCK(stcb); 1162 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1163 #endif 1164 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, 1165 SCTP_FROM_SCTP_INPUT + SCTP_LOC_11); 1166 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1167 SCTP_SOCKET_UNLOCK(so, 1); 1168 #endif 1169 return (-1); 1170 } 1171 /* blast back to INIT state */ 1172 sctp_toss_old_cookies(stcb, &stcb->asoc); 1173 asoc->state &= ~SCTP_STATE_COOKIE_ECHOED; 1174 asoc->state |= SCTP_STATE_COOKIE_WAIT; 1175 sctp_stop_all_cookie_timers(stcb); 1176 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 1177 } 1178 break; 1179 case SCTP_CAUSE_UNRESOLVABLE_ADDR: 1180 /* 1181 * Nothing we can do here, we don't do hostname 1182 * addresses so if the peer 
does not like my IPv6 1183 * (or IPv4 for that matter) it does not matter. If 1184 * they don't support that type of address, they can 1185 * NOT possibly get that packet type... i.e. with no 1186 * IPv6 you can't recieve a IPv6 packet. so we can 1187 * safely ignore this one. If we ever added support 1188 * for HOSTNAME Addresses, then we would need to do 1189 * something here. 1190 */ 1191 break; 1192 case SCTP_CAUSE_UNRECOG_CHUNK: 1193 sctp_process_unrecog_chunk(stcb, phdr, net); 1194 break; 1195 case SCTP_CAUSE_UNRECOG_PARAM: 1196 sctp_process_unrecog_param(stcb, phdr); 1197 break; 1198 case SCTP_CAUSE_COOKIE_IN_SHUTDOWN: 1199 /* 1200 * We ignore this since the timer will drive out a 1201 * new cookie anyway and there timer will drive us 1202 * to send a SHUTDOWN_COMPLETE. We can't send one 1203 * here since we don't have their tag. 1204 */ 1205 break; 1206 case SCTP_CAUSE_DELETING_LAST_ADDR: 1207 case SCTP_CAUSE_RESOURCE_SHORTAGE: 1208 case SCTP_CAUSE_DELETING_SRC_ADDR: 1209 /* 1210 * We should NOT get these here, but in a 1211 * ASCONF-ACK. 1212 */ 1213 SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n", 1214 error_type); 1215 break; 1216 case SCTP_CAUSE_OUT_OF_RESC: 1217 /* 1218 * And what, pray tell do we do with the fact that 1219 * the peer is out of resources? Not really sure we 1220 * could do anything but abort. I suspect this 1221 * should have came WITH an abort instead of in a 1222 * OP-ERROR. 
1223 */ 1224 break; 1225 default: 1226 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n", 1227 error_type); 1228 break; 1229 } 1230 adjust = SCTP_SIZE32(error_len); 1231 chklen -= adjust; 1232 phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust); 1233 } 1234 return (0); 1235 } 1236 1237 static int 1238 sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset, 1239 struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb, 1240 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id) 1241 { 1242 struct sctp_init_ack *init_ack; 1243 struct mbuf *op_err; 1244 1245 SCTPDBG(SCTP_DEBUG_INPUT2, 1246 "sctp_handle_init_ack: handling INIT-ACK\n"); 1247 1248 if (stcb == NULL) { 1249 SCTPDBG(SCTP_DEBUG_INPUT2, 1250 "sctp_handle_init_ack: TCB is null\n"); 1251 return (-1); 1252 } 1253 if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) { 1254 /* Invalid length */ 1255 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 1256 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 1257 op_err, 0, net->port); 1258 *abort_no_unlock = 1; 1259 return (-1); 1260 } 1261 init_ack = &cp->init; 1262 /* validate parameters */ 1263 if (init_ack->initiate_tag == 0) { 1264 /* protocol error... send an abort */ 1265 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 1266 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 1267 op_err, 0, net->port); 1268 *abort_no_unlock = 1; 1269 return (-1); 1270 } 1271 if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) { 1272 /* protocol error... send an abort */ 1273 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 1274 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 1275 op_err, 0, net->port); 1276 *abort_no_unlock = 1; 1277 return (-1); 1278 } 1279 if (init_ack->num_inbound_streams == 0) { 1280 /* protocol error... 
send an abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    op_err, 0, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	if (init_ack->num_outbound_streams == 0) {
		/* protocol error... send an abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    op_err, 0, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* process according to association state... */
	switch (stcb->asoc.state & SCTP_STATE_MASK) {
	case SCTP_STATE_COOKIE_WAIT:
		/* this is the expected state for this chunk */
		/* process the INIT-ACK parameters */
		if (stcb->asoc.primary_destination->dest_state &
		    SCTP_ADDR_UNCONFIRMED) {
			/*
			 * The primary is where we sent the INIT, we can
			 * always consider it confirmed when the INIT-ACK is
			 * returned. Do this before we load addresses
			 * though.
			 */
			stcb->asoc.primary_destination->dest_state &=
			    ~SCTP_ADDR_UNCONFIRMED;
			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
			    stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
		}
		if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb,
		    net, abort_no_unlock, vrf_id) < 0) {
			/* error in parsing parameters */
			return (-1);
		}
		/* update our state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
		SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED);

		/* reset the RTO calc */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
			    stcb->asoc.overall_error_count,
			    0,
			    SCTP_FROM_SCTP_INPUT,
			    __LINE__);
		}
		stcb->asoc.overall_error_count = 0;
		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
		/*
		 * collapse the init timer back in case of a exponential
		 * backoff
		 */
		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
		    stcb, net);
		/*
		 * the send at the end of the inbound data processing will
		 * cause the cookie to be sent
		 */
		break;
	case SCTP_STATE_SHUTDOWN_SENT:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_COOKIE_ECHOED:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_OPEN:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_EMPTY:
	case SCTP_STATE_INUSE:
	default:
		/* incorrect state... discard */
		return (-1);
		break;
	}
	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
	return (0);
}

/* forward declaration: defined below, needed by the restart/NAT path of
 * sctp_process_cookie_existing() */
static struct sctp_tcb *
sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_nets **netp,
    struct sockaddr *init_src, int *notification,
    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
    uint32_t vrf_id, uint16_t port);


/*
 * handle a state cookie for an existing association m: input packet mbuf
 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a
 * "split" mbuf and the cookie signature does not exist offset: offset into
 * mbuf to the cookie-echo chunk
 */
static struct sctp_tcb *
sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp,
    struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id,
    uint32_t vrf_id, int auth_skipped, uint32_t auth_offset, uint32_t auth_len, uint16_t port)
{
	struct sctp_association *asoc;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	struct sctp_nets *net;
	struct mbuf *op_err;
	struct sctp_paramhdr *ph;
	int chk_length;
	int init_offset, initack_offset, i;
	int retval;
	int spec_flag = 0;
	uint32_t how_indx;

	net = *netp;
	/* I know that the TCB is non-NULL from the caller */
	asoc = &stcb->asoc;
	/* record which collision case we hit in the first free cookie_how slot */
	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
		if (asoc->cookie_how[how_indx] == 0)
			break;
	}
	if (how_indx < sizeof(asoc->cookie_how)) {
		asoc->cookie_how[how_indx] = 1;
	}
	if (SCTP_GET_STATE(asoc) ==
SCTP_STATE_SHUTDOWN_ACK_SENT) {
		/* SHUTDOWN came in after sending INIT-ACK */
		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (op_err == NULL) {
			/* FOOBAR */
			return (NULL);
		}
		/* Set the len */
		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
		ph = mtod(op_err, struct sctp_paramhdr *);
		ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN);
		ph->param_length = htons(sizeof(struct sctp_paramhdr));
		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
		    vrf_id, net->port);
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 2;
		return (NULL);
	}
	/*
	 * find and validate the INIT chunk in the cookie (peer's info) the
	 * INIT should start after the cookie-echo header struct (chunk
	 * header, state cookie header struct)
	 */
	init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);

	init_cp = (struct sctp_init_chunk *)
	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
	    (uint8_t *) & init_buf);
	if (init_cp == NULL) {
		/* could not pull a INIT chunk in cookie */
		return (NULL);
	}
	chk_length = ntohs(init_cp->ch.chunk_length);
	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
		return (NULL);
	}
	/*
	 * find and validate the INIT-ACK chunk in the cookie (my info) the
	 * INIT-ACK follows the INIT chunk
	 */
	initack_offset = init_offset + SCTP_SIZE32(chk_length);
	initack_cp = (struct sctp_init_ack_chunk *)
	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
	    (uint8_t *) & initack_buf);
	if (initack_cp == NULL) {
		/* could not pull INIT-ACK chunk in cookie */
		return (NULL);
	}
	chk_length = ntohs(initack_cp->ch.chunk_length);
	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
		return (NULL);
	}
	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
	    (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
		/*
		 * case D in Section 5.2.4 Table 2: MMAA process accordingly
		 * to get into the OPEN state
		 */
		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
			/*-
			 * Opps, this means that we somehow generated two vtag's
			 * the same. I.e. we did:
			 * Us               Peer
			 *   <---INIT(tag=a)------
			 *   ----INIT-ACK(tag=t)-->
			 *   ----INIT(tag=t)------> *1
			 *   <---INIT-ACK(tag=a)---
			 *   <----CE(tag=t)------------- *2
			 *
			 * At point *1 we should be generating a different
			 * tag t'. Which means we would throw away the CE and send
			 * ours instead. Basically this is case C (throw away side).
			 */
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 17;
			return (NULL);

		}
		switch SCTP_GET_STATE
			(asoc) {
		case SCTP_STATE_COOKIE_WAIT:
		case SCTP_STATE_COOKIE_ECHOED:
			/*
			 * INIT was sent but got a COOKIE_ECHO with the
			 * correct tags... just accept it...but we must
			 * process the init so that we can make sure we have
			 * the right seq no's.
			 */
			/* First we must process the INIT !! */
			retval = sctp_process_init(init_cp, stcb, net);
			if (retval < 0) {
				if (how_indx < sizeof(asoc->cookie_how))
					asoc->cookie_how[how_indx] = 3;
				return (NULL);
			}
			/* we have already processed the INIT so no problem */
			sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb,
			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
			/* update current state */
			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
			else
				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);

			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
			if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
				    stcb->sctp_ep, stcb, asoc->primary_destination);
			}
			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
			sctp_stop_all_cookie_timers(stcb);
			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
			    (inp->sctp_socket->so_qlimit == 0)
			    ) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				struct socket *so;

#endif
				/*
				 * Here is where collision would go if we
				 * did a connect() and instead got a
				 * init/init-ack/cookie done before the
				 * init-ack came back..
				 */
				stcb->sctp_ep->sctp_flags |=
				    SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				/* socket lock before TCB lock; hold a ref across the gap */
				so = SCTP_INP_SO(stcb->sctp_ep);
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_add_int(&stcb->asoc.refcnt, -1);
				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return (NULL);
				}
#endif
				soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				SCTP_SOCKET_UNLOCK(so, 1);
#endif
			}
			/* notify upper layer */
			*notification = SCTP_NOTIFY_ASSOC_UP;
			/*
			 * since we did not send a HB make sure we don't
			 * double things
			 */
			net->hb_responded = 1;
			net->RTO = sctp_calculate_rto(stcb, asoc, net,
			    &cookie->time_entered, sctp_align_unsafe_makecopy);

			if (stcb->asoc.sctp_autoclose_ticks &&
			    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
				sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
				    inp, stcb, NULL);
			}
			break;
		default:
			/*
			 * we're in the OPEN state (or beyond), so peer must
			 * have simply lost the COOKIE-ACK
			 */
			break;
		}		/* end switch */
		sctp_stop_all_cookie_timers(stcb);
		/*
		 * We ignore the return code here.. not sure if we should
		 * somehow abort.. but we do have an existing asoc. This
		 * really should not fail.
 */
		if (sctp_load_addresses_from_init(stcb, m, iphlen,
		    init_offset + sizeof(struct sctp_init_chunk),
		    initack_offset, sh, init_src)) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 4;
			return (NULL);
		}
		/* respond with a COOKIE-ACK */
		sctp_toss_old_cookies(stcb, asoc);
		sctp_send_cookie_ack(stcb);
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 5;
		return (stcb);
	}
	if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
	    ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
	    cookie->tie_tag_my_vtag == 0 &&
	    cookie->tie_tag_peer_vtag == 0) {
		/*
		 * case C in Section 5.2.4 Table 2: XMOO silently discard
		 */
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 6;
		return (NULL);
	}
	/*
	 * If nat support, and the below and stcb is established, send back
	 * a ABORT(colliding state) if we are established.
	 */
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) &&
	    (asoc->peer_supports_nat) &&
	    ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
	    (asoc->peer_vtag == 0)))) {
		/*
		 * Special case - Peer's support nat. We may have two init's
		 * that we gave out the same tag on since one was not
		 * established.. i.e. we get INIT from host-1 behind the nat
		 * and we respond tag-a, we get a INIT from host-2 behind
		 * the nat and we get tag-a again. Then we bring up host-1
		 * (or 2's) assoc, Then comes the cookie from host-2 (or 1).
		 * Now we have colliding state. We must send an abort here
		 * with colliding state indication.
		 */
		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (op_err == NULL) {
			/* FOOBAR */
			return (NULL);
		}
		/* pre-reserve some space */
#ifdef INET6
		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
#else
		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
#endif
		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
		/* Set the len */
		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
		ph = mtod(op_err, struct sctp_paramhdr *);
		ph->param_type = htons(SCTP_CAUSE_NAT_COLLIDING_STATE);
		ph->param_length = htons(sizeof(struct sctp_paramhdr));
		sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
		return (NULL);
	}
	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
	    (asoc->peer_vtag == 0))) {
		/*
		 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
		 * should be ok, re-accept peer info
		 */
		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
			/*
			 * Extension of case C. If we hit this, then the
			 * random number generator returned the same vtag
			 * when we first sent our INIT-ACK and when we later
			 * sent our INIT. The side with the seq numbers that
			 * are different will be the one that normally
			 * would have hit case C. This in effect "extends"
			 * our vtags in this collision case to be 64 bits.
			 * The same collision could occur aka you get both
			 * vtag and seq number the same twice in a row.. but
			 * is much less likely. If it did happen then we
			 * would proceed through and bring up the assoc.. we
			 * may end up with the wrong stream setup however..
			 * which would be bad.. but there is no way to
			 * tell.. until we send on a stream that does not
			 * exist :-)
			 */
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 7;

			return (NULL);
		}
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 8;
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
		sctp_stop_all_cookie_timers(stcb);
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;
		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
			    NULL);
		}
		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
		asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);

		/* Note last_cwr_tsn? where is this used? */
		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
		if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
			/*
			 * Ok the peer probably discarded our data (if we
			 * echoed a cookie+data). So anything on the
			 * sent_queue should be marked for retransmit, we
			 * may not get something to kick us so it COULD
			 * still take a timeout to move these.. but it can't
			 * hurt to mark them.
			 */
			struct sctp_tmit_chunk *chk;

			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if (chk->sent < SCTP_DATAGRAM_RESEND) {
					chk->sent = SCTP_DATAGRAM_RESEND;
					sctp_flight_size_decrease(chk);
					sctp_total_flight_decrease(stcb, chk);
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
					spec_flag++;
				}
			}

		}
		/* process the INIT info (peer's info) */
		retval = sctp_process_init(init_cp, stcb, net);
		if (retval < 0) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 9;
			return (NULL);
		}
		if (sctp_load_addresses_from_init(stcb, m, iphlen,
		    init_offset + sizeof(struct sctp_init_chunk),
		    initack_offset, sh, init_src)) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 10;
			return (NULL);
		}
		if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
		    (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
			*notification = SCTP_NOTIFY_ASSOC_UP;

			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
			    (inp->sctp_socket->so_qlimit == 0)) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				struct socket *so;

#endif
				stcb->sctp_ep->sctp_flags |=
				    SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				/* socket lock before TCB lock; hold a ref across the gap */
				so = SCTP_INP_SO(stcb->sctp_ep);
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_add_int(&stcb->asoc.refcnt, -1);
				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return (NULL);
				}
#endif
				soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				SCTP_SOCKET_UNLOCK(so, 1);
#endif
			}
			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
			else
				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		} else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
			SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
		} else {
			SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
		}
		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);
		}
		sctp_stop_all_cookie_timers(stcb);
		sctp_toss_old_cookies(stcb, asoc);
		sctp_send_cookie_ack(stcb);
		if (spec_flag) {
			/*
			 * only if we have retrans set do we do this. What
			 * this call does is get only the COOKIE-ACK out and
			 * then when we return the normal call to
			 * sctp_chunk_output will get the retrans out behind
			 * this.
			 */
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
		}
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 11;

		return (stcb);
	}
	if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
	    ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
	    cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
	    cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
	    cookie->tie_tag_peer_vtag != 0) {
		struct sctpasochead *head;

		if (asoc->peer_supports_nat) {
			/*
			 * This is a gross gross hack. just call the
			 * cookie_new code since we are allowing a duplicate
			 * association. I hope this works...
			 */
			return (sctp_process_cookie_new(m, iphlen, offset, sh, cookie, cookie_len,
			    inp, netp, init_src, notification,
			    auth_skipped, auth_offset, auth_len,
			    vrf_id, port));
		}
		/*
		 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
		 */
		/* temp code */
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 12;
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);

		*sac_assoc_id = sctp_get_associd(stcb);
		/* notify upper layer */
		*notification = SCTP_NOTIFY_ASSOC_RESTART;
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		}
		if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
			SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
		} else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
			SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
		}
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);

		} else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
			/* move to OPEN state, if not in SHUTDOWN_SENT */
			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
		}
		/* reset our local sequencing state from the cookie's INIT-ACK */
		asoc->pre_open_streams =
		    ntohs(initack_cp->init.num_outbound_streams);
		asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
		asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
		asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;

		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
		asoc->asconf_seq_in = asoc->last_acked_seq =
		    asoc->init_seq_number - 1;

		asoc->str_reset_seq_in = asoc->init_seq_number;

		asoc->advanced_peer_ack_point = asoc->last_acked_seq;
		if (asoc->mapping_array) {
			memset(asoc->mapping_array, 0,
			    asoc->mapping_array_size);
		}
		if (asoc->nr_mapping_array) {
			memset(asoc->nr_mapping_array, 0,
			    asoc->mapping_array_size);
		}
		/*
		 * Re-take the locks in the global ordering (INFO -> INP ->
		 * TCB); the refcnt taken above keeps the assoc alive while
		 * the TCB lock is dropped.
		 */
		SCTP_TCB_UNLOCK(stcb);
		SCTP_INP_INFO_WLOCK();
		SCTP_INP_WLOCK(stcb->sctp_ep);
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		/* send up all the data */
		SCTP_TCB_SEND_LOCK(stcb);

		sctp_report_all_outbound(stcb, 1, SCTP_SO_NOT_LOCKED);
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			stcb->asoc.strmout[i].stream_no = i;
			stcb->asoc.strmout[i].next_sequence_sent = 0;
			stcb->asoc.strmout[i].last_msg_incomplete = 0;
		}
		/* process the INIT-ACK info (my info) */
		asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);

		/* pull from vtag hash */
		LIST_REMOVE(stcb, sctp_asocs);
		/* re-insert to new vtag position */
		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
		    SCTP_BASE_INFO(hashasocmark))];
		/*
		 * put it in the bucket in the vtag hash of assoc's for the
		 * system
		 */
		LIST_INSERT_HEAD(head, stcb, sctp_asocs);

		/* process the INIT info (peer's info) */
		SCTP_TCB_SEND_UNLOCK(stcb);
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
		SCTP_INP_INFO_WUNLOCK();

		retval = sctp_process_init(init_cp, stcb, net);
		if (retval < 0) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 13;

			return (NULL);
		}
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (sctp_load_addresses_from_init(stcb, m, iphlen,
		    init_offset + sizeof(struct sctp_init_chunk),
		    initack_offset,
    sh, init_src)) {
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 14;

		return (NULL);
	}
	/* respond with a COOKIE-ACK */
	sctp_stop_all_cookie_timers(stcb);
	sctp_toss_old_cookies(stcb, asoc);
	sctp_send_cookie_ack(stcb);
	if (how_indx < sizeof(asoc->cookie_how))
		asoc->cookie_how[how_indx] = 15;

	return (stcb);
	}
	if (how_indx < sizeof(asoc->cookie_how))
		asoc->cookie_how[how_indx] = 16;
	/* all other cases... */
	return (NULL);
}


/*
 * Handle a state cookie for a NEW association.
 *
 * m:          input packet mbuf chain -- assumes a pullup of the
 *             IP/SCTP/COOKIE-ECHO chunk.  Note: this is a "split" mbuf
 *             and the cookie signature does not exist in it.
 * offset:     offset into the mbuf of the cookie-echo chunk.
 * cookie_len: length of the cookie chunk.
 * init_src:   the address the INIT came from.
 *
 * Returns the newly allocated TCB (locked, per sctp_aloc_assoc()
 * convention) or NULL on any failure: malformed embedded INIT/INIT-ACK,
 * no resources, endpoint address-family legality changed in flight,
 * bad embedded addresses, or AUTH verification failure.  On the failure
 * paths after allocation the TCB is torn down with sctp_free_assoc().
 */
struct sctp_tcb *
sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_nets **netp,
    struct sockaddr *init_src, int *notification,
    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_tcb *stcb;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	struct sockaddr_storage sa_store;
	struct sockaddr *initack_src = (struct sockaddr *)&sa_store;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;
	struct sctp_association *asoc;
	int chk_length;
	int init_offset, initack_offset, initack_limit;
	int retval;
	/*
	 * NOTE(review): 'error' receives sctp_aloc_assoc()'s failure code
	 * but is never examined afterwards; 'old_tag' is saved and never
	 * read in this function.  Both look vestigial -- confirm before
	 * removing.
	 */
	int error = 0;
	uint32_t old_tag;
	uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

	so = SCTP_INP_SO(inp);
#endif

	/*
	 * find and validate the INIT chunk in the cookie (peer's info) the
	 * INIT should start after the cookie-echo header struct (chunk
	 * header, state cookie header struct)
	 */
	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
	init_cp = (struct sctp_init_chunk *)
	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
	    (uint8_t *) & init_buf);
	if (init_cp == NULL) {
		/* could not pull a INIT chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: could not pull INIT chunk hdr\n");
		return (NULL);
	}
	chk_length = ntohs(init_cp->ch.chunk_length);
	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
		return (NULL);
	}
	initack_offset = init_offset + SCTP_SIZE32(chk_length);
	/*
	 * find and validate the INIT-ACK chunk in the cookie (my info) the
	 * INIT-ACK follows the INIT chunk
	 */
	initack_cp = (struct sctp_init_ack_chunk *)
	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
	    (uint8_t *) & initack_buf);
	if (initack_cp == NULL) {
		/* could not pull INIT-ACK chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
		return (NULL);
	}
	chk_length = ntohs(initack_cp->ch.chunk_length);
	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
		return (NULL);
	}
	/*
	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
	 * "initack_limit" value. This is because the chk_length field
	 * includes the length of the cookie, but the cookie is omitted when
	 * the INIT and INIT_ACK are tacked onto the cookie...
	 */
	initack_limit = offset + cookie_len;

	/*
	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
	 * and populate
	 */

	/*
	 * Here we do a trick, we set in NULL for the proc/thread argument.
	 * We do this since in effect we only use the p argument when the
	 * socket is unbound and we must do an implicit bind. Since we are
	 * getting a cookie, we cannot be unbound.
	 */
	stcb = sctp_aloc_assoc(inp, init_src, &error,
	    ntohl(initack_cp->init.initiate_tag), vrf_id,
	    (struct thread *)NULL
	    );
	if (stcb == NULL) {
		struct mbuf *op_err;

		/* memory problem? */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: no room for another TCB!\n");
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);

		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    sh, op_err, vrf_id, port);
		return (NULL);
	}
	/* get the correct sctp_nets */
	if (netp)
		*netp = sctp_findnet(stcb, init_src);

	asoc = &stcb->asoc;
	/* get scope variables out of cookie */
	asoc->ipv4_local_scope = cookie->ipv4_scope;
	asoc->site_scope = cookie->site_scope;
	asoc->local_scope = cookie->local_scope;
	asoc->loopback_scope = cookie->loopback_scope;

	if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) ||
	    (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) {
		struct mbuf *op_err;

		/*
		 * Houston we have a problem. The EP changed while the
		 * cookie was in flight. Only recourse is to abort the
		 * association.
		 */
		/*
		 * Hold a refcount across the (possible) socket lock/unlock
		 * so the TCB cannot evaporate before sctp_free_assoc().
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    sh, op_err, vrf_id, port);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/* process the INIT-ACK info (my info) */
	old_tag = asoc->my_vtag;
	asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
	asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
	/* seed all outgoing sequence spaces from our initial TSN */
	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->str_reset_seq_in = asoc->init_seq_number;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;

	/* process the INIT info (peer's info) */
	if (netp)
		retval = sctp_process_init(init_cp, stcb, *netp);
	else
		retval = 0;
	if (retval < 0) {
		/* peer's INIT was unusable: tear the new TCB back down */
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/* load all addresses */
	if (sctp_load_addresses_from_init(stcb, m, iphlen,
	    init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh,
	    init_src)) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/*
	 * verify any preceding AUTH chunk that was skipped
	 */
	/* pull the local authentication parameters from the cookie/init-ack */
	sctp_auth_get_cookie_params(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
	if (auth_skipped) {
		struct sctp_auth_chunk *auth;

		auth = (struct sctp_auth_chunk *)
		    sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
			/* auth HMAC failed, dump the assoc and packet */
			SCTPDBG(SCTP_DEBUG_AUTH1,
			    "COOKIE-ECHO: AUTH failed\n");
			atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
#endif
			(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			return (NULL);
		} else {
			/* remaining chunks checked... good to go */
			stcb->asoc.authenticated = 1;
		}
	}
	/* update current state */
	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
	SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
	if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
		    stcb->sctp_ep, stcb, asoc->primary_destination);
	}
	sctp_stop_all_cookie_timers(stcb);
	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
	SCTP_STAT_INCR_GAUGE32(sctps_currestab);

	/*
	 * if we're doing ASCONFs, check to see if we have any new local
	 * addresses that need to get added to the peer (eg. addresses
	 * changed while cookie echo in flight). This needs to be done
	 * after we go to the OPEN state to do the correct asconf
	 * processing. else, make sure we have the correct addresses in our
	 * lists
	 */

	/* warning, we re-use sin, sin6, sa_store here! */
	/* pull in local_address (our "from" address) */
	if (cookie->laddr_type == SCTP_IPV4_ADDRESS) {
		/* source addr is IPv4 */
		sin = (struct sockaddr_in *)initack_src;
		memset(sin, 0, sizeof(*sin));
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(struct sockaddr_in);
		sin->sin_addr.s_addr = cookie->laddress[0];
	} else if (cookie->laddr_type == SCTP_IPV6_ADDRESS) {
		/* source addr is IPv6 */
		sin6 = (struct sockaddr_in6 *)initack_src;
		memset(sin6, 0, sizeof(*sin6));
		sin6->sin6_family = AF_INET6;
		sin6->sin6_len = sizeof(struct sockaddr_in6);
		sin6->sin6_scope_id = cookie->scope_id;
		memcpy(&sin6->sin6_addr, cookie->laddress,
		    sizeof(sin6->sin6_addr));
	} else {
		/* unknown local address type in our own cookie: bail */
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}

	/* set up to notify upper layer */
	*notification = SCTP_NOTIFY_ASSOC_UP;
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    (inp->sctp_socket->so_qlimit == 0)) {
		/*
		 * This is an endpoint that called connect() how it got a
		 * cookie that is NEW is a bit of a mystery. It must be that
		 * the INIT was sent, but before it got there.. a complete
		 * INIT/INIT-ACK/COOKIE arrived. But of course then it
		 * should have went to the other code.. not here.. oh well..
		 * a bit of protection is worth having..
		 */
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return (NULL);
		}
#endif
		soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (inp->sctp_socket->so_qlimit)) {
		/*
		 * We don't want to do anything with this one. Since it is
		 * the listening guy. The timer will get started for
		 * accepted connections in the caller.
		 */
		;
	}
	/* since we did not send a HB make sure we don't double things */
	if ((netp) && (*netp))
		(*netp)->hb_responded = 1;

	if (stcb->asoc.sctp_autoclose_ticks &&
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
		sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
	}
	/* calculate the RTT */
	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
	if ((netp) && (*netp)) {
		(*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
		    &cookie->time_entered, sctp_align_unsafe_makecopy);
	}
	/* respond with a COOKIE-ACK */
	sctp_send_cookie_ack(stcb);

	/*
	 * check the address lists for any ASCONFs that need to be sent
	 * AFTER the cookie-ack is sent
	 */
	sctp_check_address_list(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
	    initack_src, cookie->local_scope, cookie->site_scope,
	    cookie->ipv4_scope, cookie->loopback_scope);


	return (stcb);
}

/*
 * CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension, i.e
 * we NEED to make sure we are not already using the vtag. If so we
 * need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG No middle box bit!
2292 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag, 2293 SCTP_BASE_INFO(hashasocmark))]; 2294 LIST_FOREACH(stcb, head, sctp_asocs) { 2295 if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep)) { 2296 -- SEND ABORT - TRY AGAIN -- 2297 } 2298 } 2299 */ 2300 2301 /* 2302 * handles a COOKIE-ECHO message stcb: modified to either a new or left as 2303 * existing (non-NULL) TCB 2304 */ 2305 static struct mbuf * 2306 sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, 2307 struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp, 2308 struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp, 2309 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 2310 struct sctp_tcb **locked_tcb, uint32_t vrf_id, uint16_t port) 2311 { 2312 struct sctp_state_cookie *cookie; 2313 struct sockaddr_in6 sin6; 2314 struct sockaddr_in sin; 2315 struct sctp_tcb *l_stcb = *stcb; 2316 struct sctp_inpcb *l_inp; 2317 struct sockaddr *to; 2318 sctp_assoc_t sac_restart_id; 2319 struct sctp_pcb *ep; 2320 struct mbuf *m_sig; 2321 uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE]; 2322 uint8_t *sig; 2323 uint8_t cookie_ok = 0; 2324 unsigned int size_of_pkt, sig_offset, cookie_offset; 2325 unsigned int cookie_len; 2326 struct timeval now; 2327 struct timeval time_expires; 2328 struct sockaddr_storage dest_store; 2329 struct sockaddr *localep_sa = (struct sockaddr *)&dest_store; 2330 struct ip *iph; 2331 int notification = 0; 2332 struct sctp_nets *netl; 2333 int had_a_existing_tcb = 0; 2334 2335 SCTPDBG(SCTP_DEBUG_INPUT2, 2336 "sctp_handle_cookie: handling COOKIE-ECHO\n"); 2337 2338 if (inp_p == NULL) { 2339 return (NULL); 2340 } 2341 /* First get the destination address setup too. 
*/ 2342 iph = mtod(m, struct ip *); 2343 switch (iph->ip_v) { 2344 case IPVERSION: 2345 { 2346 /* its IPv4 */ 2347 struct sockaddr_in *lsin; 2348 2349 lsin = (struct sockaddr_in *)(localep_sa); 2350 memset(lsin, 0, sizeof(*lsin)); 2351 lsin->sin_family = AF_INET; 2352 lsin->sin_len = sizeof(*lsin); 2353 lsin->sin_port = sh->dest_port; 2354 lsin->sin_addr.s_addr = iph->ip_dst.s_addr; 2355 size_of_pkt = SCTP_GET_IPV4_LENGTH(iph); 2356 break; 2357 } 2358 #ifdef INET6 2359 case IPV6_VERSION >> 4: 2360 { 2361 /* its IPv6 */ 2362 struct ip6_hdr *ip6; 2363 struct sockaddr_in6 *lsin6; 2364 2365 lsin6 = (struct sockaddr_in6 *)(localep_sa); 2366 memset(lsin6, 0, sizeof(*lsin6)); 2367 lsin6->sin6_family = AF_INET6; 2368 lsin6->sin6_len = sizeof(struct sockaddr_in6); 2369 ip6 = mtod(m, struct ip6_hdr *); 2370 lsin6->sin6_port = sh->dest_port; 2371 lsin6->sin6_addr = ip6->ip6_dst; 2372 size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen; 2373 break; 2374 } 2375 #endif 2376 default: 2377 return (NULL); 2378 } 2379 2380 cookie = &cp->cookie; 2381 cookie_offset = offset + sizeof(struct sctp_chunkhdr); 2382 cookie_len = ntohs(cp->ch.chunk_length); 2383 2384 if ((cookie->peerport != sh->src_port) && 2385 (cookie->myport != sh->dest_port) && 2386 (cookie->my_vtag != sh->v_tag)) { 2387 /* 2388 * invalid ports or bad tag. Note that we always leave the 2389 * v_tag in the header in network order and when we stored 2390 * it in the my_vtag slot we also left it in network order. 2391 * This maintains the match even though it may be in the 2392 * opposite byte order of the machine :-> 2393 */ 2394 return (NULL); 2395 } 2396 if (cookie_len > size_of_pkt || 2397 cookie_len < sizeof(struct sctp_cookie_echo_chunk) + 2398 sizeof(struct sctp_init_chunk) + 2399 sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) { 2400 /* cookie too long! 
or too small */ 2401 return (NULL); 2402 } 2403 /* 2404 * split off the signature into its own mbuf (since it should not be 2405 * calculated in the sctp_hmac_m() call). 2406 */ 2407 sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE; 2408 if (sig_offset > size_of_pkt) { 2409 /* packet not correct size! */ 2410 /* XXX this may already be accounted for earlier... */ 2411 return (NULL); 2412 } 2413 m_sig = m_split(m, sig_offset, M_DONTWAIT); 2414 if (m_sig == NULL) { 2415 /* out of memory or ?? */ 2416 return (NULL); 2417 } 2418 #ifdef SCTP_MBUF_LOGGING 2419 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 2420 struct mbuf *mat; 2421 2422 mat = m_sig; 2423 while (mat) { 2424 if (SCTP_BUF_IS_EXTENDED(mat)) { 2425 sctp_log_mb(mat, SCTP_MBUF_SPLIT); 2426 } 2427 mat = SCTP_BUF_NEXT(mat); 2428 } 2429 } 2430 #endif 2431 2432 /* 2433 * compute the signature/digest for the cookie 2434 */ 2435 ep = &(*inp_p)->sctp_ep; 2436 l_inp = *inp_p; 2437 if (l_stcb) { 2438 SCTP_TCB_UNLOCK(l_stcb); 2439 } 2440 SCTP_INP_RLOCK(l_inp); 2441 if (l_stcb) { 2442 SCTP_TCB_LOCK(l_stcb); 2443 } 2444 /* which cookie is it? 
*/ 2445 if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) && 2446 (ep->current_secret_number != ep->last_secret_number)) { 2447 /* it's the old cookie */ 2448 (void)sctp_hmac_m(SCTP_HMAC, 2449 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 2450 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2451 } else { 2452 /* it's the current cookie */ 2453 (void)sctp_hmac_m(SCTP_HMAC, 2454 (uint8_t *) ep->secret_key[(int)ep->current_secret_number], 2455 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2456 } 2457 /* get the signature */ 2458 SCTP_INP_RUNLOCK(l_inp); 2459 sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig); 2460 if (sig == NULL) { 2461 /* couldn't find signature */ 2462 sctp_m_freem(m_sig); 2463 return (NULL); 2464 } 2465 /* compare the received digest with the computed digest */ 2466 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) { 2467 /* try the old cookie? */ 2468 if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) && 2469 (ep->current_secret_number != ep->last_secret_number)) { 2470 /* compute digest with old */ 2471 (void)sctp_hmac_m(SCTP_HMAC, 2472 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 2473 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2474 /* compare */ 2475 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0) 2476 cookie_ok = 1; 2477 } 2478 } else { 2479 cookie_ok = 1; 2480 } 2481 2482 /* 2483 * Now before we continue we must reconstruct our mbuf so that 2484 * normal processing of any other chunks will work. 
2485 */ 2486 { 2487 struct mbuf *m_at; 2488 2489 m_at = m; 2490 while (SCTP_BUF_NEXT(m_at) != NULL) { 2491 m_at = SCTP_BUF_NEXT(m_at); 2492 } 2493 SCTP_BUF_NEXT(m_at) = m_sig; 2494 } 2495 2496 if (cookie_ok == 0) { 2497 SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n"); 2498 SCTPDBG(SCTP_DEBUG_INPUT2, 2499 "offset = %u, cookie_offset = %u, sig_offset = %u\n", 2500 (uint32_t) offset, cookie_offset, sig_offset); 2501 return (NULL); 2502 } 2503 /* 2504 * check the cookie timestamps to be sure it's not stale 2505 */ 2506 (void)SCTP_GETTIME_TIMEVAL(&now); 2507 /* Expire time is in Ticks, so we convert to seconds */ 2508 time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life); 2509 time_expires.tv_usec = cookie->time_entered.tv_usec; 2510 /* 2511 * TODO sctp_constants.h needs alternative time macros when _KERNEL 2512 * is undefined. 2513 */ 2514 if (timevalcmp(&now, &time_expires, >)) { 2515 /* cookie is stale! */ 2516 struct mbuf *op_err; 2517 struct sctp_stale_cookie_msg *scm; 2518 uint32_t tim; 2519 2520 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg), 2521 0, M_DONTWAIT, 1, MT_DATA); 2522 if (op_err == NULL) { 2523 /* FOOBAR */ 2524 return (NULL); 2525 } 2526 /* Set the len */ 2527 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg); 2528 scm = mtod(op_err, struct sctp_stale_cookie_msg *); 2529 scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE); 2530 scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) + 2531 (sizeof(uint32_t)))); 2532 /* seconds to usec */ 2533 tim = (now.tv_sec - time_expires.tv_sec) * 1000000; 2534 /* add in usec */ 2535 if (tim == 0) 2536 tim = now.tv_usec - cookie->time_entered.tv_usec; 2537 scm->time_usec = htonl(tim); 2538 sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag, 2539 vrf_id, port); 2540 return (NULL); 2541 } 2542 /* 2543 * Now we must see with the lookup address if we have an existing 2544 * asoc. 
This will only happen if we were in the COOKIE-WAIT state 2545 * and a INIT collided with us and somewhere the peer sent the 2546 * cookie on another address besides the single address our assoc 2547 * had for him. In this case we will have one of the tie-tags set at 2548 * least AND the address field in the cookie can be used to look it 2549 * up. 2550 */ 2551 to = NULL; 2552 if (cookie->addr_type == SCTP_IPV6_ADDRESS) { 2553 memset(&sin6, 0, sizeof(sin6)); 2554 sin6.sin6_family = AF_INET6; 2555 sin6.sin6_len = sizeof(sin6); 2556 sin6.sin6_port = sh->src_port; 2557 sin6.sin6_scope_id = cookie->scope_id; 2558 memcpy(&sin6.sin6_addr.s6_addr, cookie->address, 2559 sizeof(sin6.sin6_addr.s6_addr)); 2560 to = (struct sockaddr *)&sin6; 2561 } else if (cookie->addr_type == SCTP_IPV4_ADDRESS) { 2562 memset(&sin, 0, sizeof(sin)); 2563 sin.sin_family = AF_INET; 2564 sin.sin_len = sizeof(sin); 2565 sin.sin_port = sh->src_port; 2566 sin.sin_addr.s_addr = cookie->address[0]; 2567 to = (struct sockaddr *)&sin; 2568 } else { 2569 /* This should not happen */ 2570 return (NULL); 2571 } 2572 if ((*stcb == NULL) && to) { 2573 /* Yep, lets check */ 2574 *stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL); 2575 if (*stcb == NULL) { 2576 /* 2577 * We should have only got back the same inp. If we 2578 * got back a different ep we have a problem. The 2579 * original findep got back l_inp and now 2580 */ 2581 if (l_inp != *inp_p) { 2582 SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n"); 2583 } 2584 } else { 2585 if (*locked_tcb == NULL) { 2586 /* 2587 * In this case we found the assoc only 2588 * after we locked the create lock. This 2589 * means we are in a colliding case and we 2590 * must make sure that we unlock the tcb if 2591 * its one of the cases where we throw away 2592 * the incoming packets. 
2593 */ 2594 *locked_tcb = *stcb; 2595 2596 /* 2597 * We must also increment the inp ref count 2598 * since the ref_count flags was set when we 2599 * did not find the TCB, now we found it 2600 * which reduces the refcount.. we must 2601 * raise it back out to balance it all :-) 2602 */ 2603 SCTP_INP_INCR_REF((*stcb)->sctp_ep); 2604 if ((*stcb)->sctp_ep != l_inp) { 2605 SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n", 2606 (*stcb)->sctp_ep, l_inp); 2607 } 2608 } 2609 } 2610 } 2611 if (to == NULL) { 2612 return (NULL); 2613 } 2614 cookie_len -= SCTP_SIGNATURE_SIZE; 2615 if (*stcb == NULL) { 2616 /* this is the "normal" case... get a new TCB */ 2617 *stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie, 2618 cookie_len, *inp_p, netp, to, ¬ification, 2619 auth_skipped, auth_offset, auth_len, vrf_id, port); 2620 } else { 2621 /* this is abnormal... cookie-echo on existing TCB */ 2622 had_a_existing_tcb = 1; 2623 *stcb = sctp_process_cookie_existing(m, iphlen, offset, sh, 2624 cookie, cookie_len, *inp_p, *stcb, netp, to, 2625 ¬ification, &sac_restart_id, vrf_id, auth_skipped, auth_offset, auth_len, port); 2626 } 2627 2628 if (*stcb == NULL) { 2629 /* still no TCB... must be bad cookie-echo */ 2630 return (NULL); 2631 } 2632 /* 2633 * Ok, we built an association so confirm the address we sent the 2634 * INIT-ACK to. 2635 */ 2636 netl = sctp_findnet(*stcb, to); 2637 /* 2638 * This code should in theory NOT run but 2639 */ 2640 if (netl == NULL) { 2641 /* TSNH! Huh, why do I need to add this address here? 
*/ 2642 int ret; 2643 2644 ret = sctp_add_remote_addr(*stcb, to, SCTP_DONOT_SETSCOPE, 2645 SCTP_IN_COOKIE_PROC); 2646 netl = sctp_findnet(*stcb, to); 2647 } 2648 if (netl) { 2649 if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) { 2650 netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 2651 (void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL, 2652 netl); 2653 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 2654 (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED); 2655 } 2656 } 2657 if (*stcb) { 2658 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p, 2659 *stcb, NULL); 2660 } 2661 if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 2662 if (!had_a_existing_tcb || 2663 (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { 2664 /* 2665 * If we have a NEW cookie or the connect never 2666 * reached the connected state during collision we 2667 * must do the TCP accept thing. 2668 */ 2669 struct socket *so, *oso; 2670 struct sctp_inpcb *inp; 2671 2672 if (notification == SCTP_NOTIFY_ASSOC_RESTART) { 2673 /* 2674 * For a restart we will keep the same 2675 * socket, no need to do anything. I THINK!! 
			 */
			sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id, SCTP_SO_NOT_LOCKED);
			return (m);
		}
		/*
		 * TCP-model (one-to-one) endpoint: clone a new socket off the
		 * listener with sonewconn() and move the association onto it,
		 * so it can be picked up by accept().  The TCB lock is dropped
		 * around sonewconn(); a refcount is held so the assoc cannot
		 * go away while unlocked.
		 */
		oso = (*inp_p)->sctp_socket;
		atomic_add_int(&(*stcb)->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK((*stcb));
		so = sonewconn(oso, 0);
		SCTP_TCB_LOCK((*stcb));
		atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);

		if (so == NULL) {
			struct mbuf *op_err;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *pcb_so;

#endif
			/* Too many sockets: abort the association and free the TCB. */
			SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
			op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
			sctp_abort_association(*inp_p, NULL, m, iphlen,
			    sh, op_err, vrf_id, port);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			pcb_so = SCTP_INP_SO(*inp_p);
			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK((*stcb));
			SCTP_SOCKET_LOCK(pcb_so, 1);
			SCTP_TCB_LOCK((*stcb));
			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
#endif
			(void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(pcb_so, 1);
#endif
			return (NULL);
		}
		inp = (struct sctp_inpcb *)so->so_pcb;
		SCTP_INP_INCR_REF(inp);
		/*
		 * We add the unbound flag here so that if we get an
		 * soabort() before we get the move_pcb done, we will
		 * properly cleanup.
		 */
		inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
		    SCTP_PCB_FLAGS_CONNECTED |
		    SCTP_PCB_FLAGS_IN_TCPPOOL |
		    SCTP_PCB_FLAGS_UNBOUND |
		    (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
		    SCTP_PCB_FLAGS_DONT_WAKE);
		/* Inherit per-endpoint settings from the listening endpoint. */
		inp->sctp_features = (*inp_p)->sctp_features;
		inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features;
		inp->sctp_socket = so;
		inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
		inp->sctp_cmt_on_off = (*inp_p)->sctp_cmt_on_off;
		inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
		inp->sctp_context = (*inp_p)->sctp_context;
		inp->inp_starting_point_for_iterator = NULL;
		/*
		 * copy in the authentication parameters from the original
		 * endpoint
		 */
		if (inp->sctp_ep.local_hmacs)
			sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
		inp->sctp_ep.local_hmacs =
		    sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
		if (inp->sctp_ep.local_auth_chunks)
			sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
		inp->sctp_ep.local_auth_chunks =
		    sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);

		/*
		 * Now we must move it from one hash table to another and get
		 * the tcb in the right place.
		 */

		/*
		 * This is where the one-2-one socket is put into the accept
		 * state waiting for the accept!
		 */
		if (*stcb) {
			(*stcb)->asoc.state |= SCTP_STATE_IN_ACCEPT_QUEUE;
		}
		sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);

		/* Drop the TCB lock (refcnt held) while control data is pulled over. */
		atomic_add_int(&(*stcb)->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK((*stcb));

		sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, 0);
		SCTP_TCB_LOCK((*stcb));
		atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);


		/*
		 * now we must check to see if we were aborted while the move
		 * was going on and the lock/unlock happened.
		 */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			/*
			 * yep it was, we leave the assoc attached to the
			 * socket since the sctp_inpcb_free() call will send
			 * an abort for us.
			 */
			SCTP_INP_DECR_REF(inp);
			return (NULL);
		}
		SCTP_INP_DECR_REF(inp);
		/* Switch over to the new guy */
		*inp_p = inp;
		sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);

		/*
		 * Pull it from the incomplete queue and wake the guy
		 */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		atomic_add_int(&(*stcb)->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK((*stcb));
		SCTP_SOCKET_LOCK(so, 1);
#endif
		soisconnected(so);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_LOCK((*stcb));
		atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		return (m);
	}
	}
	/* UDP-model endpoint: just deliver the pending notification, if any. */
	if ((notification) && ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
		sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
	}
	return (m);
}

/*
 * COOKIE-ACK handler: completes the association setup on the side that sent
 * COOKIE-ECHO.  Stops cookie/INIT timers, moves COOKIE_ECHOED -> OPEN,
 * computes an initial RTO, notifies the ULP, and (re)starts heartbeat,
 * autoclose and data timers as appropriate.
 */
static void
sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* cp must not be used, others call this without a c-ack :-) */
	struct sctp_association *asoc;

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;

	sctp_stop_all_cookie_timers(stcb);
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/* state change only needed when I am in right state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb,
			    asoc->primary_destination);

		}
		/* update RTO */
		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		/*
		 * NOTE(review): net is dereferenced here and below without a
		 * NULL check — presumably callers always supply the net the
		 * COOKIE-ACK arrived on; confirm at the call site.
		 */
		if (asoc->overall_error_count == 0) {
			net->RTO = sctp_calculate_rto(stcb, asoc, net,
			    &asoc->time_entered, sctp_align_safe_nocopy);
		}
		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			/* Socket may have closed while the TCB lock was dropped. */
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
#endif
			soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
		    stcb, net);
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
			    stcb->sctp_ep, stcb, NULL);
		}
		/*
		 * send ASCONF if parameters are pending and ASCONFs are
		 * allowed (eg. addresses changed when init/cookie echo were
		 * in flight)
		 */
		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
		    (stcb->asoc.peer_supports_asconf) &&
		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
#ifdef SCTP_TIMER_BASED_ASCONF
			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
			    stcb->sctp_ep, stcb,
			    stcb->asoc.primary_destination);
#else
			sctp_send_asconf(stcb, stcb->asoc.primary_destination,
			    SCTP_ADDR_NOT_LOCKED);
#endif
		}
	}
	/* Toss the cookie if I can */
	sctp_toss_old_cookies(stcb, asoc);
	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
		/* Restart the timer if we have pending data */
		struct sctp_tmit_chunk *chk;

		chk = TAILQ_FIRST(&asoc->sent_queue);
		if (chk) {
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
			    stcb, chk->whoTo);
		}
	}
}

/*
 * ECN-Echo (ECNE) handler: validates the chunk length, records the TSN for
 * the ECN nonce resync, locates the net the echoed TSN was sent on, invokes
 * the pluggable CC module's ECN reaction (at most once per RTT), and always
 * answers with a CWR.
 */
static void
sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
    struct sctp_tcb *stcb)
{
	struct sctp_nets *net;
	struct sctp_tmit_chunk *lchk;
	uint32_t tsn;

	/* Malformed ECNE (wrong length): silently drop. */
	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_ecne_chunk)) {
		return;
	}
	SCTP_STAT_INCR(sctps_recvecne);
	tsn = ntohl(cp->tsn);
	/* ECN Nonce stuff: need a resync and disable the nonce sum check */
	/* Also we make sure we disable the nonce_wait */
	lchk = TAILQ_FIRST(&stcb->asoc.send_queue);
	if (lchk == NULL) {
		stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
	} else {
		stcb->asoc.nonce_resync_tsn = lchk->rec.data.TSN_seq;
	}
	stcb->asoc.nonce_wait_for_ecne = 0;
	stcb->asoc.nonce_sum_check = 0;

	/* Find where it was sent, if possible */
	net = NULL;
	lchk = TAILQ_FIRST(&stcb->asoc.sent_queue);
	while (lchk) {
		if (lchk->rec.data.TSN_seq == tsn) {
			net = lchk->whoTo;
			break;
		}
		/*
		 * NOTE(review): MAX_SEQ is used here to wrap-compare 32-bit
		 * TSNs, while sibling code uses MAX_TSN for the same purpose
		 * (see process_chunk_drop) — verify against upstream.
		 */
		if (compare_with_wrap(lchk->rec.data.TSN_seq, tsn, MAX_SEQ))
			break;
		lchk = TAILQ_NEXT(lchk, sctp_next);
	}
	if (net == NULL)
		/* default is we use the primary */
		net = stcb->asoc.primary_destination;

	/* Only react to this ECNE if it is newer than the last CWR we sent. */
	if (compare_with_wrap(tsn, stcb->asoc.last_cwr_tsn, MAX_TSN)) {
		/*
		 * JRS - Use the congestion control given in the pluggable
		 * CC module
		 */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net);
		/*
		 * we reduce once every RTT. So we will only lower cwnd at
		 * the next sending seq i.e. the resync_tsn.
		 */
		stcb->asoc.last_cwr_tsn = stcb->asoc.nonce_resync_tsn;
	}
	/*
	 * We always send a CWR this way if our previous one was lost our
	 * peer will get an update, or if it is not time again to reduce we
	 * still get the cwr to the peer.
	 */
	sctp_send_cwr(stcb, net, tsn);
}

/*
 * CWR handler: the peer has acknowledged our ECN-Echo.  Remove the covered
 * ECNE chunk (if any) from the control send queue so it is not retransmitted.
 */
static void
sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb)
{
	/*
	 * Here we get a CWR from the peer. We must look in the outqueue and
	 * make sure that we have a covered ECNE in the control chunk part.
	 * If so remove it.
	 */
	struct sctp_tmit_chunk *chk;
	struct sctp_ecne_chunk *ecne;

	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
			continue;
		}
		/*
		 * Look for and remove if it is the right TSN. Since there
		 * is only ONE ECNE on the control queue at any one time we
		 * don't need to worry about more than one!
		 */
		ecne = mtod(chk->data, struct sctp_ecne_chunk *);
		/*
		 * Covered if the CWR's TSN is at or beyond the queued ECNE's
		 * TSN (the equality test compares the raw network-order
		 * fields, which is equivalent for equality).
		 */
		if (compare_with_wrap(ntohl(cp->tsn), ntohl(ecne->tsn),
		    MAX_TSN) || (cp->tsn == ecne->tsn)) {
			/* this covers this ECNE, we can remove it */
			stcb->asoc.ecn_echo_cnt_onq--;
			TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
			    sctp_next);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			stcb->asoc.ctrl_queue_cnt--;
			sctp_free_a_chunk(stcb, chk);
			break;
		}
	}
}

/*
 * SHUTDOWN-COMPLETE handler: final step of an orderly shutdown.  Only valid
 * in SHUTDOWN-ACK-SENT state; notifies the ULP, stops the shutdown-ack
 * timer and frees the association.  Note: the TCB lock is released on every
 * exit path (directly, or inside sctp_free_assoc()).
 */
static void
sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sctp_association *asoc;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* are the queues empty?
		they should be */
		if (!TAILQ_EMPTY(&asoc->send_queue) ||
		    !TAILQ_EMPTY(&asoc->sent_queue) ||
		    !TAILQ_EMPTY(&asoc->out_wheel)) {
			sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
		}
	}
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB */
	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: calls free-asoc\n");
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	return;
}

/*
 * Process one chunk descriptor from a PACKET-DROPPED report: depending on
 * the dropped chunk's type, mark the matching queued chunk for retransmit
 * or resend the control chunk directly.  Returns 0 on success, -1 when the
 * reflected data bytes do not match what we have queued (report is bogus).
 */
static int
process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
    struct sctp_nets *net, uint8_t flg)
{
	switch (desc->chunk_type) {
	case SCTP_DATA:
		/* find the tsn to resend (possibly */
		{
			uint32_t tsn;
			struct sctp_tmit_chunk *tp1;

			tsn = ntohl(desc->tsn_ifany);
			/* First pass: walk in TSN order and stop early past tsn. */
			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
			while (tp1) {
				if (tp1->rec.data.TSN_seq == tsn) {
					/* found it */
					break;
				}
				if (compare_with_wrap(tp1->rec.data.TSN_seq, tsn,
				    MAX_TSN)) {
					/* not found */
					tp1 = NULL;
					break;
				}
				tp1 = TAILQ_NEXT(tp1, sctp_next);
			}
			if (tp1 == NULL) {
				/*
				 * Do it the other way , aka without paying
				 * attention to queue seq order.
				 */
				SCTP_STAT_INCR(sctps_pdrpdnfnd);
				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
				while (tp1) {
					if (tp1->rec.data.TSN_seq == tsn) {
						/* found it */
						break;
					}
					tp1 = TAILQ_NEXT(tp1, sctp_next);
				}
			}
			if (tp1 == NULL) {
				SCTP_STAT_INCR(sctps_pdrptsnnf);
			}
			if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
				uint8_t *ddp;

				/* Endpoint-originated report with a closed rwnd: don't retransmit. */
				if ((stcb->asoc.peers_rwnd == 0) &&
				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
					SCTP_STAT_INCR(sctps_pdrpdiwnp);
					return (0);
				}
				if (stcb->asoc.peers_rwnd == 0 &&
				    (flg & SCTP_FROM_MIDDLE_BOX)) {
					SCTP_STAT_INCR(sctps_pdrpdizrw);
					return (0);
				}
				/*
				 * Verify the reflected payload bytes match
				 * our queued copy before trusting the report.
				 */
				ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+
				    sizeof(struct sctp_data_chunk));
				{
					unsigned int iii;

					for (iii = 0; iii < sizeof(desc->data_bytes);
					    iii++) {
						if (ddp[iii] != desc->data_bytes[iii]) {
							SCTP_STAT_INCR(sctps_pdrpbadd);
							return (-1);
						}
					}
				}
				/*
				 * We zero out the nonce so resync not
				 * needed
				 */
				tp1->rec.data.ect_nonce = 0;

				if (tp1->do_rtt) {
					/*
					 * this guy had a RTO calculation
					 * pending on it, cancel it
					 */
					tp1->do_rtt = 0;
				}
				SCTP_STAT_INCR(sctps_pdrpmark);
				if (tp1->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				tp1->sent = SCTP_DATAGRAM_RESEND;
				/*
				 * mark it as if we were doing a FR, since
				 * we will be getting gap ack reports behind
				 * the info from the router.
				 */
				tp1->rec.data.doing_fast_retransmit = 1;
				/*
				 * mark the tsn with what sequences can
				 * cause a new FR.
				 */
				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
					tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
				} else {
					tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
				}

				/* restart the timer */
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo);

				/* fix counts and things */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
					    tp1->whoTo->flight_size,
					    tp1->book_size,
					    (uintptr_t) stcb,
					    tp1->rec.data.TSN_seq);
				}
				/*
				 * NOTE(review): tp1->sent was set to
				 * SCTP_DATAGRAM_RESEND just above, so this
				 * condition can never be true here and the
				 * flight-size decrease never runs — verify
				 * intended ordering against upstream.
				 */
				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
					sctp_flight_size_decrease(tp1);
					sctp_total_flight_decrease(stcb, tp1);
				}
			} {
				/* audit code */
				/* Unconditional block: recount RESEND-marked chunks and resync the counter. */
				unsigned int audit;

				audit = 0;
				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
				    sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				if (audit != stcb->asoc.sent_queue_retran_cnt) {
					SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
					    audit, stcb->asoc.sent_queue_retran_cnt);
#ifndef SCTP_AUDITING_ENABLED
					stcb->asoc.sent_queue_retran_cnt = audit;
#endif
				}
			}
		}
		break;
	case SCTP_ASCONF:
		{
			struct sctp_tmit_chunk *asconf;

			/* Find the queued ASCONF and mark it for retransmit. */
			TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
					break;
				}
			}
			if (asconf) {
				if (asconf->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				asconf->sent = SCTP_DATAGRAM_RESEND;
				asconf->snd_count--;
			}
		}
		break;
	case SCTP_INITIATION:
		/* resend the INIT */
		stcb->asoc.dropped_special_cnt++;
		if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
			/*
			 * If we can get it in, in a few attempts we do
			 * this, otherwise we let the timer fire.
			 */
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
			    stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
			sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_SELECTIVE_ACK:
	case SCTP_NR_SELECTIVE_ACK:
		/* resend the sack */
		sctp_send_sack(stcb);
		break;
	case SCTP_HEARTBEAT_REQUEST:
		/* resend a demand HB */
		if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
			/*
			 * Only retransmit if we KNOW we wont destroy the
			 * tcb
			 */
			(void)sctp_send_hb(stcb, 1, net);
		}
		break;
	case SCTP_SHUTDOWN:
		sctp_send_shutdown(stcb, net);
		break;
	case SCTP_SHUTDOWN_ACK:
		sctp_send_shutdown_ack(stcb, net);
		break;
	case SCTP_COOKIE_ECHO:
		{
			struct sctp_tmit_chunk *cookie;

			cookie = NULL;
			TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
					break;
				}
			}
			if (cookie) {
				if (cookie->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				cookie->sent = SCTP_DATAGRAM_RESEND;
				sctp_stop_all_cookie_timers(stcb);
			}
		}
		break;
	case SCTP_COOKIE_ACK:
		sctp_send_cookie_ack(stcb);
		break;
	case SCTP_ASCONF_ACK:
		/* resend last asconf ack */
		sctp_send_asconf_ack(stcb);
		break;
	case SCTP_FORWARD_CUM_TSN:
		send_forward_tsn(stcb, &stcb->asoc);
		break;
		/* can't do anything with these */
	case SCTP_PACKET_DROPPED:
	case SCTP_INITIATION_ACK:	/* this should not happen */
	case SCTP_HEARTBEAT_ACK:
	case SCTP_ABORT_ASSOCIATION:
	case SCTP_OPERATION_ERROR:
	case SCTP_SHUTDOWN_COMPLETE:
	case SCTP_ECN_ECHO:
	case SCTP_ECN_CWR:
	default:
		break;
	}
	return (0);
}

/*
 * Reset the inbound stream state: set last_sequence_delivered for the listed
 * streams (host order expected in ntohs form; entries are converted here) or
 * for all streams when number_entries == 0, then notify the ULP.
 */
void
sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
{
	int i;
	uint16_t temp;

	/*
	 * We set things to 0xffff since this is the last delivered sequence
	 * and we will be sending in 0 after the reset.
	 */

	if (number_entries) {
		for (i = 0; i < number_entries; i++) {
			temp = ntohs(list[i]);
			/* Out-of-range stream numbers are silently skipped. */
			if (temp >= stcb->asoc.streamincnt) {
				continue;
			}
			stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff;
		}
	} else {
		list = NULL;
		for (i = 0; i < stcb->asoc.streamincnt; i++) {
			stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
		}
	}
	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
}

/*
 * Reset outbound stream sequence numbers to 0 — for the listed streams, or
 * all of them when number_entries == 0 — then notify the ULP.
 */
static void
sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
{
	int i;

	if (number_entries == 0) {
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			stcb->asoc.strmout[i].next_sequence_sent = 0;
		}
	} else if (number_entries) {
		/* (the "else if" condition is always true here — plain else) */
		for (i = 0; i < number_entries; i++) {
			uint16_t temp;

			temp = ntohs(list[i]);
			if (temp >= stcb->asoc.streamoutcnt) {
				/* no such stream */
				continue;
			}
			stcb->asoc.strmout[temp].next_sequence_sent = 0;
		}
	}
	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
}


/*
 * Locate our outstanding stream-reset OUT request matching 'seq' inside the
 * queued stream-reset chunk (at most two requests can be packed into it).
 * Optionally hands back the owning tmit chunk via *bchk.  Returns NULL when
 * no queued request matches.
 */
struct sctp_stream_reset_out_request *
sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
{
	struct sctp_association *asoc;
	struct sctp_stream_reset_out_req *req;
	struct sctp_stream_reset_out_request *r;
	struct sctp_tmit_chunk *chk;
	int len, clen;

	asoc = &stcb->asoc;
	if
	(TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
		/* Nothing queued at all — no request can be outstanding. */
		asoc->stream_reset_outstanding = 0;
		return (NULL);
	}
	if (stcb->asoc.str_reset == NULL) {
		asoc->stream_reset_outstanding = 0;
		return (NULL);
	}
	chk = stcb->asoc.str_reset;
	if (chk->data == NULL) {
		return (NULL);
	}
	if (bchk) {
		/* he wants a copy of the chk pointer */
		*bchk = chk;
	}
	clen = chk->send_size;
	req = mtod(chk->data, struct sctp_stream_reset_out_req *);
	r = &req->sr_req;
	if (ntohl(r->request_seq) == seq) {
		/* found it */
		return (r);
	}
	len = SCTP_SIZE32(ntohs(r->ph.param_length));
	if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
		/* move to the next one, there can only be a max of two */
		r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len);
		if (ntohl(r->request_seq) == seq) {
			return (r);
		}
	}
	/* that seq is not here */
	return (NULL);
}

/*
 * Tear down the pending stream-reset request: stop its timer, unlink the
 * chunk from the control send queue, free its data and the chunk itself,
 * and clear asoc.str_reset.
 */
static void
sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk = stcb->asoc.str_reset;

	if (stcb->asoc.str_reset == NULL) {
		return;
	}
	asoc = &stcb->asoc;

	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
	TAILQ_REMOVE(&asoc->control_send_queue,
	    chk,
	    sctp_next);
	if (chk->data) {
		sctp_m_freem(chk->data);
		chk->data = NULL;
	}
	asoc->ctrl_queue_cnt--;
	sctp_free_a_chunk(stcb, chk);
	/* sa_ignore NO_NULL_CHK */
	stcb->asoc.str_reset = NULL;
}


/*
 * Process a stream-reset RESPONSE from the peer for the request we have
 * outstanding ('seq').  Dispatches on the type of the matching queued
 * request (OUT / IN / ADD_STREAMS / TSN) and applies or reports the result.
 * Returns 1 only when forward-TSN processing aborted the association,
 * otherwise 0.
 */
static int
sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
    uint32_t seq, uint32_t action,
    struct sctp_stream_reset_response *respin)
{
	uint16_t type;
	int lparm_len;
	struct sctp_association *asoc = &stcb->asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_stream_reset_out_request *srparam;
	int number_entries;

	if (asoc->stream_reset_outstanding == 0) {
		/* duplicate */
		return (0);
	}
	if (seq == stcb->asoc.str_reset_seq_out) {
		srparam = sctp_find_stream_reset(stcb, seq, &chk);
		if (srparam) {
			stcb->asoc.str_reset_seq_out++;
			type = ntohs(srparam->ph.param_type);
			lparm_len = ntohs(srparam->ph.param_length);
			if (type == SCTP_STR_RESET_OUT_REQUEST) {
				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
				asoc->stream_reset_out_is_outstanding = 0;
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_PERFORMED) {
					/* do it */
					sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams);
				} else {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
				}
			} else if (type == SCTP_STR_RESET_IN_REQUEST) {
				/* Answered my request */
				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action != SCTP_STREAM_RESET_PERFORMED) {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
				}
			} else if (type == SCTP_STR_RESET_ADD_STREAMS) {
				/* Ok we now may have more streams */
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_PERFORMED) {
					/* Put the new streams into effect */
					stcb->asoc.streamoutcnt = stcb->asoc.strm_realoutsize;
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD_OK, stcb,
					    (uint32_t) stcb->asoc.streamoutcnt, NULL, SCTP_SO_NOT_LOCKED);
				} else {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD_FAIL, stcb,
					    (uint32_t) stcb->asoc.streamoutcnt, NULL, SCTP_SO_NOT_LOCKED);
				}
			} else if (type == SCTP_STR_RESET_TSN_REQUEST) {
				/**
				 * a) Adopt the new in tsn.
				 * b) reset the map
				 * c) Adopt the new out-tsn
				 */
				struct sctp_stream_reset_response_tsn *resp;
				struct sctp_forward_tsn_chunk fwdtsn;
				int abort_flag = 0;

				if (respin == NULL) {
					/* huh ? */
					return (0);
				}
				if (action == SCTP_STREAM_RESET_PERFORMED) {
					resp = (struct sctp_stream_reset_response_tsn *)respin;
					asoc->stream_reset_outstanding--;
					/*
					 * NOTE(review): fwdtsn.ch.chunk_flags
					 * is never initialized here, unlike
					 * sctp_handle_str_reset_request_tsn()
					 * which zeroes it — confirm whether
					 * sctp_handle_forward_tsn() reads it.
					 */
					fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
					fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
					fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
					sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
					if (abort_flag) {
						return (1);
					}
					/* Adopt the peer's next TSN as our new inbound base. */
					stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
						sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
					}
					stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
					stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
					memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);

					stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
					memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);

					stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
					stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;

					sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
					sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);

				}
			}
			/* get rid of the request and get the request flags */
			if (asoc->stream_reset_outstanding == 0) {
				sctp_clean_up_stream_reset(stcb);
			}
		}
	}
	return (0);
}

static void
sctp_handle_str_reset_request_in(struct sctp_tcb *stcb, 3544 struct sctp_tmit_chunk *chk, 3545 struct sctp_stream_reset_in_request *req, int trunc) 3546 { 3547 uint32_t seq; 3548 int len, i; 3549 int number_entries; 3550 uint16_t temp; 3551 3552 /* 3553 * peer wants me to send a str-reset to him for my outgoing seq's if 3554 * seq_in is right. 3555 */ 3556 struct sctp_association *asoc = &stcb->asoc; 3557 3558 seq = ntohl(req->request_seq); 3559 if (asoc->str_reset_seq_in == seq) { 3560 if (trunc) { 3561 /* Can't do it, since they exceeded our buffer size */ 3562 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 3563 asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED; 3564 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 3565 } else if (stcb->asoc.stream_reset_out_is_outstanding == 0) { 3566 len = ntohs(req->ph.param_length); 3567 number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t)); 3568 for (i = 0; i < number_entries; i++) { 3569 temp = ntohs(req->list_of_streams[i]); 3570 req->list_of_streams[i] = temp; 3571 } 3572 /* move the reset action back one */ 3573 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 3574 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED; 3575 sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams, 3576 asoc->str_reset_seq_out, 3577 seq, (asoc->sending_seq - 1)); 3578 asoc->stream_reset_out_is_outstanding = 1; 3579 asoc->str_reset = chk; 3580 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo); 3581 stcb->asoc.stream_reset_outstanding++; 3582 } else { 3583 /* Can't do it, since we have sent one out */ 3584 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 3585 asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER; 3586 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 3587 } 3588 asoc->str_reset_seq_in++; 3589 } else if (asoc->str_reset_seq_in - 1 == seq) { 3590 sctp_add_stream_reset_result(chk, 
seq, asoc->last_reset_action[0]); 3591 } else if (asoc->str_reset_seq_in - 2 == seq) { 3592 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]); 3593 } else { 3594 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO); 3595 } 3596 } 3597 3598 static int 3599 sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb, 3600 struct sctp_tmit_chunk *chk, 3601 struct sctp_stream_reset_tsn_request *req) 3602 { 3603 /* reset all in and out and update the tsn */ 3604 /* 3605 * A) reset my str-seq's on in and out. B) Select a receive next, 3606 * and set cum-ack to it. Also process this selected number as a 3607 * fwd-tsn as well. C) set in the response my next sending seq. 3608 */ 3609 struct sctp_forward_tsn_chunk fwdtsn; 3610 struct sctp_association *asoc = &stcb->asoc; 3611 int abort_flag = 0; 3612 uint32_t seq; 3613 3614 seq = ntohl(req->request_seq); 3615 if (asoc->str_reset_seq_in == seq) { 3616 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk)); 3617 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN; 3618 fwdtsn.ch.chunk_flags = 0; 3619 fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1); 3620 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0); 3621 if (abort_flag) { 3622 return (1); 3623 } 3624 stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA; 3625 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 3626 sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 3627 } 3628 stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map; 3629 stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1; 3630 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); 3631 stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map; 3632 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size); 3633 atomic_add_int(&stcb->asoc.sending_seq, 1); 3634 /* save off historical 
	   data for retrans */
	/* (tail of sctp_handle_str_reset_request_tsn) snapshot the previous
	 * sequence/base-TSN pair so a retransmitted request can be answered
	 * with the same result. */
	stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0];
	stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq;
	stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0];
	stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn;

	sctp_add_stream_reset_result_tsn(chk,
	    ntohl(req->request_seq),
	    SCTP_STREAM_RESET_PERFORMED,
	    stcb->asoc.sending_seq,
	    stcb->asoc.mapping_array_base_tsn);
	/* A TSN reset resets every stream in both directions (0/NULL list). */
	sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
	sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
	stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
	stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;

	asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* Retransmission of the previous request: echo the saved result. */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    stcb->asoc.last_sending_seq[0],
		    stcb->asoc.last_base_tsnsent[0]
		    );
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		/* Two back: echo the result saved one slot earlier. */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
		    stcb->asoc.last_sending_seq[1],
		    stcb->asoc.last_base_tsnsent[1]
		    );
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
	}
	return (0);
}

/*
 * Handle a peer's request to reset our incoming (its outgoing) streams.
 * Either perform the reset immediately (once the peer's send_reset_at_tsn
 * has been reached), queue the request on asoc->resetHead until that TSN
 * arrives, or deny it (truncated request, or out of memory).  Requests one
 * or two sequence numbers behind are retransmissions and get the saved
 * action echoed back; anything else is a bad sequence number.
 */
static void
sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_out_request *req, int trunc)
{
	uint32_t seq, tsn;
	int number_entries, len;
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);

	/* now if its not a duplicate we process it */
	if (asoc->str_reset_seq_in == seq) {
		/*
		 * Stream numbers trail the fixed request header; derive the
		 * count from the parameter length.  NOTE(review): 0 entries
		 * presumably means "all streams" — confirm against
		 * sctp_reset_in_stream().
		 */
		len = ntohs(req->ph.param_length);
		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
		/*
		 * the sender is resetting, handle the list issue.. we must
		 * a) verify if we can do the reset, if so no problem b) If
		 * we can't do the reset we must copy the request. c) queue
		 * it, and setup the data in processor to trigger it off
		 * when needed and dequeue all the queued data.
		 */
		tsn = ntohl(req->send_reset_at_tsn);

		/* move the reset action back one */
		asoc->last_reset_action[1] = asoc->last_reset_action[0];
		if (trunc) {
			/* Request was too large to pull contiguously: deny. */
			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
		} else if ((tsn == asoc->cumulative_tsn) ||
		    (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN))) {
			/* we can do it now */
			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
		} else {
			/*
			 * we must queue it up and thus wait for the TSN's
			 * to arrive that are at or before tsn
			 */
			struct sctp_stream_reset_list *liste;
			int siz;

			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
			    siz, SCTP_M_STRESET);
			if (liste == NULL) {
				/* gak out of memory */
				sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
				asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
				return;
			}
			liste->tsn = tsn;
			liste->number_entries = number_entries;
			/* Copy header plus the trailing stream-number list. */
			memcpy(&liste->req, req,
			    (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t))));
			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
		}
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
	}
}

/*
 * Handle a peer request to grow the number of inbound streams.  The request
 * is honored only up to max_inbound_streams; the stream-in array is
 * reallocated, existing per-stream state and queued data are carried over,
 * and the new streams are initialized empty.
 */
static void
sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_add_strm *str_add)
{
	/*
	 * Peer is requesting to add more streams. If its within our
	 * max-streams we will allow it.
	 */
	uint16_t num_stream, i;
	uint32_t seq;
	struct sctp_association *asoc = &stcb->asoc;
	struct sctp_queued_to_read *ctl;

	/* Get the number. */
	seq = ntohl(str_add->request_seq);
	num_stream = ntohs(str_add->number_of_streams);
	/* Now what would be the new total?
	 */
	if (asoc->str_reset_seq_in == seq) {
		/*
		 * Proposed new total = requested additional streams plus the
		 * current count.  NOTE(review): num_stream is uint16_t, so a
		 * very large request could wrap here — confirm the
		 * max_inbound_streams check below is sufficient.
		 */
		num_stream += stcb->asoc.streamincnt;
		if (num_stream > stcb->asoc.max_inbound_streams) {
			/* We must reject it they ask for to many */
	denied:
			/* Also reached via goto on allocation failure below. */
			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
			stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
		} else {
			/* Ok, we can do that :-) */
			struct sctp_stream_in *oldstrm;

			/* save off the old */
			oldstrm = stcb->asoc.strmin;
			SCTP_MALLOC(stcb->asoc.strmin, struct sctp_stream_in *,
			    (num_stream * sizeof(struct sctp_stream_in)),
			    SCTP_M_STRMI);
			if (stcb->asoc.strmin == NULL) {
				/* Restore the old array before denying. */
				stcb->asoc.strmin = oldstrm;
				goto denied;
			}
			/* copy off the old data */
			for (i = 0; i < stcb->asoc.streamincnt; i++) {
				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
				stcb->asoc.strmin[i].stream_no = i;
				stcb->asoc.strmin[i].last_sequence_delivered = oldstrm[i].last_sequence_delivered;
				stcb->asoc.strmin[i].delivery_started = oldstrm[i].delivery_started;
				/* now anything on those queues? Move queued
				 * read entries over to the new array. */
				while (TAILQ_EMPTY(&oldstrm[i].inqueue) == 0) {
					ctl = TAILQ_FIRST(&oldstrm[i].inqueue);
					TAILQ_REMOVE(&oldstrm[i].inqueue, ctl, next);
					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].inqueue, ctl, next);
				}
			}
			/* Init the new streams */
			for (i = stcb->asoc.streamincnt; i < num_stream; i++) {
				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
				stcb->asoc.strmin[i].stream_no = i;
				/* 0xffff: so the first delivered SSN (0) follows it. */
				stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
				stcb->asoc.strmin[i].delivery_started = 0;
			}
			SCTP_FREE(oldstrm, SCTP_M_STRMI);
			/* update the size */
			stcb->asoc.streamincnt = num_stream;
			/* Send the ack */
			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
			stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
			sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK, stcb,
			    (uint32_t) stcb->asoc.streamincnt, NULL, SCTP_SO_NOT_LOCKED);
		}
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);

	}
}

/*
 * Top-level handler for a STREAM-RESET chunk: walks its parameters,
 * dispatches each request/response to the helper above, and builds one
 * response chunk that is queued on the control send queue.  Returns nonzero
 * when the caller must stop processing this packet (TSN reset performed or
 * a reset response consumed the association state).
 */
#ifdef __GNUC__
__attribute__((noinline))
#endif
static int
sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
    struct sctp_stream_reset_out_req *sr_req)
{
	int chk_length, param_len, ptype;
	struct sctp_paramhdr pstore;
	uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];

	uint32_t seq;
	int num_req = 0;
	int trunc = 0;
	struct sctp_tmit_chunk *chk;
	struct sctp_chunkhdr *ch;
	struct sctp_paramhdr *ph;
	int ret_code = 0;
	int num_param = 0;

	/* now it may be a reset or a reset-response */
	chk_length = ntohs(sr_req->ch.chunk_length);

	/* setup for adding the response */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return (ret_code);
	}
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = &stcb->asoc;
	chk->no_fr_allowed = 0;
	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
	chk->book_size_scale = 0;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
		/* Shared error exit: frees the chunk (and mbuf if present). */
strres_nochunk:
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk);
		return (ret_code);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);

	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = stcb->asoc.primary_destination;
	atomic_add_int(&chk->whoTo->ref_count, 1);

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	ch->chunk_type = SCTP_STREAM_RESET;
	ch->chunk_flags = 0;
	ch->chunk_length = htons(chk->send_size);
	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
	offset
	 += sizeof(struct sctp_chunkhdr);
	/*
	 * Walk the chunk's parameters.  NOTE(review): chk_length still
	 * includes the chunk header here; it is only reduced by parameter
	 * sizes below — confirm the loop bound is intentional.
	 */
	while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) {
		/* Peek just the parameter header first. */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *) & pstore);
		if (ph == NULL)
			break;
		param_len = ntohs(ph->param_length);
		if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) {
			/* bad param */
			break;
		}
		/* Pull the parameter body, capped at the local buffer size. */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, (int)sizeof(cstore)),
		    (uint8_t *) & cstore);
		ptype = ntohs(ph->param_type);
		num_param++;
		/* Flag truncation so the handlers can deny oversized requests. */
		if (param_len > (int)sizeof(cstore)) {
			trunc = 1;
		} else {
			trunc = 0;
		}

		if (num_param > SCTP_MAX_RESET_PARAMS) {
			/* hit the max of parameters already sorry.. */
			break;
		}
		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
			struct sctp_stream_reset_out_request *req_out;

			req_out = (struct sctp_stream_reset_out_request *)ph;
			num_req++;
			if (stcb->asoc.stream_reset_outstanding) {
				seq = ntohl(req_out->response_seq);
				if (seq == stcb->asoc.str_reset_seq_out) {
					/* implicit ack */
					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL);
				}
			}
			sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
		} else if (ptype == SCTP_STR_RESET_ADD_STREAMS) {
			struct sctp_stream_reset_add_strm *str_add;

			str_add = (struct sctp_stream_reset_add_strm *)ph;
			num_req++;
			sctp_handle_str_reset_add_strm(stcb, chk, str_add);
		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
			struct sctp_stream_reset_in_request *req_in;

			num_req++;

			req_in = (struct sctp_stream_reset_in_request *)ph;

			sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
			struct sctp_stream_reset_tsn_request *req_tsn;

			num_req++;
			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;

			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
				/* Handler consumed/queued the response itself. */
				ret_code = 1;
				goto strres_nochunk;
			}
			/* no more */
			break;
		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
			struct sctp_stream_reset_response *resp;
			uint32_t result;

			resp = (struct sctp_stream_reset_response *)ph;
			seq = ntohl(resp->response_seq);
			result = ntohl(resp->result);
			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
				ret_code = 1;
				goto strres_nochunk;
			}
		} else {
			/* Unknown parameter type: stop processing. */
			break;
		}
		offset += SCTP_SIZE32(param_len);
		chk_length -= SCTP_SIZE32(param_len);
	}
	if (num_req == 0) {
		/* we have no response free the stuff */
		goto strres_nochunk;
	}
	/* ok we have a chunk to link in */
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
	    chk,
	    sctp_next);
	stcb->asoc.ctrl_queue_cnt++;
	return (ret_code);
}

/*
 * Handle a router or endpoints report of a packet loss, there are two ways
 * to handle this, either we get the whole packet and must disect it
 * ourselves (possibly with truncation and or corruption) or it is a summary
 * from a middle box that did the disectting for us.
 */
static void
sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
{
	uint32_t bottle_bw, on_queue;
	uint16_t trunc_len;
	unsigned int chlen;	/* bytes of reflected packet remaining */
	unsigned int at;	/* length of the chunk being examined */
	struct sctp_chunk_desc desc;
	struct sctp_chunkhdr *ch;

	chlen = ntohs(cp->ch.chunk_length);
	chlen -= sizeof(struct sctp_pktdrop_chunk);
	/* XXX possible chlen underflow */
	if (chlen == 0) {
		ch = NULL;
		/* only a bandwidth report is present, no reflected data */
		if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
			SCTP_STAT_INCR(sctps_pdrpbwrpt);
	} else {
		/* Reflected data starts after the embedded SCTP common header. */
		ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
		chlen -= sizeof(struct sctphdr);
		/* XXX possible chlen underflow */
		memset(&desc, 0, sizeof(desc));
	}
	trunc_len = (uint16_t) ntohs(cp->trunc_len);
	if (trunc_len > limit) {
		trunc_len = limit;
	}
	/* now the chunks themselves */
	while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
		desc.chunk_type = ch->chunk_type;
		/* get amount we need to move */
		at = ntohs(ch->chunk_length);
		if (at < sizeof(struct sctp_chunkhdr)) {
			/* corrupt chunk, maybe at the end? */
			SCTP_STAT_INCR(sctps_pdrpcrupt);
			break;
		}
		if (trunc_len == 0) {
			/* we are supposed to have all of it */
			if (at > chlen) {
				/* corrupt skip it */
				SCTP_STAT_INCR(sctps_pdrpcrupt);
				break;
			}
		} else {
			/* is there enough of it left ? */
			if (desc.chunk_type == SCTP_DATA) {
				if (chlen < (sizeof(struct sctp_data_chunk) +
				    sizeof(desc.data_bytes))) {
					break;
				}
			} else {
				if (chlen < sizeof(struct sctp_chunkhdr)) {
					break;
				}
			}
		}
		if (desc.chunk_type == SCTP_DATA) {
			/* can we get out the tsn? */
			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
				SCTP_STAT_INCR(sctps_pdrpmbda);

			if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
				/* yep */
				struct sctp_data_chunk *dcp;
				uint8_t *ddp;
				unsigned int iii;

				dcp = (struct sctp_data_chunk *)ch;
				ddp = (uint8_t *) (dcp + 1);
				/* Save the first payload bytes to identify the datagram. */
				for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
					desc.data_bytes[iii] = ddp[iii];
				}
				desc.tsn_ifany = dcp->dp.tsn;
			} else {
				/* nope we are done. */
				SCTP_STAT_INCR(sctps_pdrpnedat);
				break;
			}
		} else {
			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
				SCTP_STAT_INCR(sctps_pdrpmbct);
		}

		if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
			SCTP_STAT_INCR(sctps_pdrppdbrk);
			break;
		}
		if (SCTP_SIZE32(at) > chlen) {
			break;
		}
		chlen -= SCTP_SIZE32(at);
		if (chlen < sizeof(struct sctp_chunkhdr)) {
			/* done, none left */
			break;
		}
		ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
	}
	/* Now update any rwnd --- possibly */
	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
		/* From a peer, we get a rwnd report */
		uint32_t a_rwnd;

		SCTP_STAT_INCR(sctps_pdrpfehos);

		bottle_bw = ntohl(cp->bottle_bw);
		on_queue = ntohl(cp->current_onq);
		if (bottle_bw && on_queue) {
			/* a rwnd report is in here */
			if (bottle_bw > on_queue)
				a_rwnd = bottle_bw - on_queue;
			else
				a_rwnd = 0;

			if (a_rwnd == 0)
				stcb->asoc.peers_rwnd = 0;
			else {
				/* Account for data still in flight to this peer. */
				if (a_rwnd > stcb->asoc.total_flight) {
					stcb->asoc.peers_rwnd =
					    a_rwnd - stcb->asoc.total_flight;
				} else {
					stcb->asoc.peers_rwnd = 0;
				}
				if (stcb->asoc.peers_rwnd <
				    stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
					/* SWS sender side engages */
					stcb->asoc.peers_rwnd = 0;
				}
			}
		}
	} else {
		SCTP_STAT_INCR(sctps_pdrpfmbox);
	}

	/* now middle boxes in sat networks get a cwnd bump */
	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
	    (stcb->asoc.sat_t3_loss_recovery == 0) &&
	    (stcb->asoc.sat_network)) {
		/*
		 * This is debateable but for sat networks it makes sense
		 * Note if a T3 timer has went off, we will prohibit any
		 * changes to cwnd until we exit the t3 loss recovery.
		 */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb,
		    net, cp, &bottle_bw, &on_queue);
	}
}

/*
 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to
 * still contain IP/SCTP header - stcb: is the tcb found for this packet -
 * offset: offset into the mbuf chain to first chunkhdr - length: is the
 * length of the complete packet outputs: - length: modified to remaining
 * length after control processing - netp: modified to new sctp_nets after
 * cookie-echo processing - return NULL to discard the packet (ie. no asoc,
 * bad packet,...) otherwise return the tcb for this packet
 */
#ifdef __GNUC__
__attribute__((noinline))
#endif
static struct sctp_tcb *
sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
    struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
    struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_association *asoc;
	uint32_t vtag_in;
	int num_chunks = 0;	/* number of control chunks processed */
	uint32_t chk_length;
	int ret;
	int abort_no_unlock = 0;

	/*
	 * How big should this be, and should it be alloc'd? Lets try the
	 * d-mtu-ceiling for now (2k) and that should hopefully work ...
	 * until we get into jumbo grams and such..
4164 */ 4165 uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE]; 4166 struct sctp_tcb *locked_tcb = stcb; 4167 int got_auth = 0; 4168 uint32_t auth_offset = 0, auth_len = 0; 4169 int auth_skipped = 0; 4170 int asconf_cnt = 0; 4171 4172 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4173 struct socket *so; 4174 4175 #endif 4176 4177 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n", 4178 iphlen, *offset, length, stcb); 4179 4180 /* validate chunk header length... */ 4181 if (ntohs(ch->chunk_length) < sizeof(*ch)) { 4182 SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n", 4183 ntohs(ch->chunk_length)); 4184 if (locked_tcb) { 4185 SCTP_TCB_UNLOCK(locked_tcb); 4186 } 4187 return (NULL); 4188 } 4189 /* 4190 * validate the verification tag 4191 */ 4192 vtag_in = ntohl(sh->v_tag); 4193 4194 if (locked_tcb) { 4195 SCTP_TCB_LOCK_ASSERT(locked_tcb); 4196 } 4197 if (ch->chunk_type == SCTP_INITIATION) { 4198 SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n", 4199 ntohs(ch->chunk_length), vtag_in); 4200 if (vtag_in != 0) { 4201 /* protocol error- silently discard... */ 4202 SCTP_STAT_INCR(sctps_badvtag); 4203 if (locked_tcb) { 4204 SCTP_TCB_UNLOCK(locked_tcb); 4205 } 4206 return (NULL); 4207 } 4208 } else if (ch->chunk_type != SCTP_COOKIE_ECHO) { 4209 /* 4210 * If there is no stcb, skip the AUTH chunk and process 4211 * later after a stcb is found (to validate the lookup was 4212 * valid. 
4213 */ 4214 if ((ch->chunk_type == SCTP_AUTHENTICATION) && 4215 (stcb == NULL) && 4216 !SCTP_BASE_SYSCTL(sctp_auth_disable)) { 4217 /* save this chunk for later processing */ 4218 auth_skipped = 1; 4219 auth_offset = *offset; 4220 auth_len = ntohs(ch->chunk_length); 4221 4222 /* (temporarily) move past this chunk */ 4223 *offset += SCTP_SIZE32(auth_len); 4224 if (*offset >= length) { 4225 /* no more data left in the mbuf chain */ 4226 *offset = length; 4227 if (locked_tcb) { 4228 SCTP_TCB_UNLOCK(locked_tcb); 4229 } 4230 return (NULL); 4231 } 4232 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4233 sizeof(struct sctp_chunkhdr), chunk_buf); 4234 } 4235 if (ch == NULL) { 4236 /* Help */ 4237 *offset = length; 4238 if (locked_tcb) { 4239 SCTP_TCB_UNLOCK(locked_tcb); 4240 } 4241 return (NULL); 4242 } 4243 if (ch->chunk_type == SCTP_COOKIE_ECHO) { 4244 goto process_control_chunks; 4245 } 4246 /* 4247 * first check if it's an ASCONF with an unknown src addr we 4248 * need to look inside to find the association 4249 */ 4250 if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) { 4251 struct sctp_chunkhdr *asconf_ch = ch; 4252 uint32_t asconf_offset = 0, asconf_len = 0; 4253 4254 /* inp's refcount may be reduced */ 4255 SCTP_INP_INCR_REF(inp); 4256 4257 asconf_offset = *offset; 4258 do { 4259 asconf_len = ntohs(asconf_ch->chunk_length); 4260 if (asconf_len < sizeof(struct sctp_asconf_paramhdr)) 4261 break; 4262 stcb = sctp_findassociation_ep_asconf(m, iphlen, 4263 *offset, sh, &inp, netp, vrf_id); 4264 if (stcb != NULL) 4265 break; 4266 asconf_offset += SCTP_SIZE32(asconf_len); 4267 asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset, 4268 sizeof(struct sctp_chunkhdr), chunk_buf); 4269 } while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF); 4270 if (stcb == NULL) { 4271 /* 4272 * reduce inp's refcount if not reduced in 4273 * sctp_findassociation_ep_asconf(). 
4274 */ 4275 SCTP_INP_DECR_REF(inp); 4276 } else { 4277 locked_tcb = stcb; 4278 } 4279 4280 /* now go back and verify any auth chunk to be sure */ 4281 if (auth_skipped && (stcb != NULL)) { 4282 struct sctp_auth_chunk *auth; 4283 4284 auth = (struct sctp_auth_chunk *) 4285 sctp_m_getptr(m, auth_offset, 4286 auth_len, chunk_buf); 4287 got_auth = 1; 4288 auth_skipped = 0; 4289 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, 4290 auth_offset)) { 4291 /* auth HMAC failed so dump it */ 4292 *offset = length; 4293 if (locked_tcb) { 4294 SCTP_TCB_UNLOCK(locked_tcb); 4295 } 4296 return (NULL); 4297 } else { 4298 /* remaining chunks are HMAC checked */ 4299 stcb->asoc.authenticated = 1; 4300 } 4301 } 4302 } 4303 if (stcb == NULL) { 4304 /* no association, so it's out of the blue... */ 4305 sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL, 4306 vrf_id, port); 4307 *offset = length; 4308 if (locked_tcb) { 4309 SCTP_TCB_UNLOCK(locked_tcb); 4310 } 4311 return (NULL); 4312 } 4313 asoc = &stcb->asoc; 4314 /* ABORT and SHUTDOWN can use either v_tag... */ 4315 if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) || 4316 (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) || 4317 (ch->chunk_type == SCTP_PACKET_DROPPED)) { 4318 if ((vtag_in == asoc->my_vtag) || 4319 ((ch->chunk_flags & SCTP_HAD_NO_TCB) && 4320 (vtag_in == asoc->peer_vtag))) { 4321 /* this is valid */ 4322 } else { 4323 /* drop this packet... 
*/ 4324 SCTP_STAT_INCR(sctps_badvtag); 4325 if (locked_tcb) { 4326 SCTP_TCB_UNLOCK(locked_tcb); 4327 } 4328 return (NULL); 4329 } 4330 } else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) { 4331 if (vtag_in != asoc->my_vtag) { 4332 /* 4333 * this could be a stale SHUTDOWN-ACK or the 4334 * peer never got the SHUTDOWN-COMPLETE and 4335 * is still hung; we have started a new asoc 4336 * but it won't complete until the shutdown 4337 * is completed 4338 */ 4339 if (locked_tcb) { 4340 SCTP_TCB_UNLOCK(locked_tcb); 4341 } 4342 sctp_handle_ootb(m, iphlen, *offset, sh, inp, 4343 NULL, vrf_id, port); 4344 return (NULL); 4345 } 4346 } else { 4347 /* for all other chunks, vtag must match */ 4348 if (vtag_in != asoc->my_vtag) { 4349 /* invalid vtag... */ 4350 SCTPDBG(SCTP_DEBUG_INPUT3, 4351 "invalid vtag: %xh, expect %xh\n", 4352 vtag_in, asoc->my_vtag); 4353 SCTP_STAT_INCR(sctps_badvtag); 4354 if (locked_tcb) { 4355 SCTP_TCB_UNLOCK(locked_tcb); 4356 } 4357 *offset = length; 4358 return (NULL); 4359 } 4360 } 4361 } /* end if !SCTP_COOKIE_ECHO */ 4362 /* 4363 * process all control chunks... 4364 */ 4365 if (((ch->chunk_type == SCTP_SELECTIVE_ACK) || 4366 /* EY */ 4367 (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) || 4368 (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) && 4369 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) { 4370 /* implied cookie-ack.. 
we must have lost the ack */ 4371 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4372 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4373 stcb->asoc.overall_error_count, 4374 0, 4375 SCTP_FROM_SCTP_INPUT, 4376 __LINE__); 4377 } 4378 stcb->asoc.overall_error_count = 0; 4379 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, 4380 *netp); 4381 } 4382 process_control_chunks: 4383 while (IS_SCTP_CONTROL(ch)) { 4384 /* validate chunk length */ 4385 chk_length = ntohs(ch->chunk_length); 4386 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n", 4387 ch->chunk_type, chk_length); 4388 SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length); 4389 if (chk_length < sizeof(*ch) || 4390 (*offset + (int)chk_length) > length) { 4391 *offset = length; 4392 if (locked_tcb) { 4393 SCTP_TCB_UNLOCK(locked_tcb); 4394 } 4395 return (NULL); 4396 } 4397 SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks); 4398 /* 4399 * INIT-ACK only gets the init ack "header" portion only 4400 * because we don't have to process the peer's COOKIE. All 4401 * others get a complete chunk. 4402 */ 4403 if ((ch->chunk_type == SCTP_INITIATION_ACK) || 4404 (ch->chunk_type == SCTP_INITIATION)) { 4405 /* get an init-ack chunk */ 4406 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4407 sizeof(struct sctp_init_ack_chunk), chunk_buf); 4408 if (ch == NULL) { 4409 *offset = length; 4410 if (locked_tcb) { 4411 SCTP_TCB_UNLOCK(locked_tcb); 4412 } 4413 return (NULL); 4414 } 4415 } else { 4416 /* For cookies and all other chunks. */ 4417 if (chk_length > sizeof(chunk_buf)) { 4418 /* 4419 * use just the size of the chunk buffer so 4420 * the front part of our chunks fit in 4421 * contiguous space up to the chunk buffer 4422 * size (508 bytes). For chunks that need to 4423 * get more than that they must use the 4424 * sctp_m_getptr() function or other means 4425 * (e.g. know how to parse mbuf chains). 4426 * Cookies do this already. 
4427 */ 4428 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4429 (sizeof(chunk_buf) - 4), 4430 chunk_buf); 4431 if (ch == NULL) { 4432 *offset = length; 4433 if (locked_tcb) { 4434 SCTP_TCB_UNLOCK(locked_tcb); 4435 } 4436 return (NULL); 4437 } 4438 } else { 4439 /* We can fit it all */ 4440 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4441 chk_length, chunk_buf); 4442 if (ch == NULL) { 4443 SCTP_PRINTF("sctp_process_control: Can't get the all data....\n"); 4444 *offset = length; 4445 if (locked_tcb) { 4446 SCTP_TCB_UNLOCK(locked_tcb); 4447 } 4448 return (NULL); 4449 } 4450 } 4451 } 4452 num_chunks++; 4453 /* Save off the last place we got a control from */ 4454 if (stcb != NULL) { 4455 if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) { 4456 /* 4457 * allow last_control to be NULL if 4458 * ASCONF... ASCONF processing will find the 4459 * right net later 4460 */ 4461 if ((netp != NULL) && (*netp != NULL)) 4462 stcb->asoc.last_control_chunk_from = *netp; 4463 } 4464 } 4465 #ifdef SCTP_AUDITING_ENABLED 4466 sctp_audit_log(0xB0, ch->chunk_type); 4467 #endif 4468 4469 /* check to see if this chunk required auth, but isn't */ 4470 if ((stcb != NULL) && 4471 !SCTP_BASE_SYSCTL(sctp_auth_disable) && 4472 sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) && 4473 !stcb->asoc.authenticated) { 4474 /* "silently" ignore */ 4475 SCTP_STAT_INCR(sctps_recvauthmissing); 4476 goto next_chunk; 4477 } 4478 switch (ch->chunk_type) { 4479 case SCTP_INITIATION: 4480 /* must be first and only chunk */ 4481 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n"); 4482 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4483 /* We are not interested anymore? 
*/ 4484 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 4485 /* 4486 * collision case where we are 4487 * sending to them too 4488 */ 4489 ; 4490 } else { 4491 if (locked_tcb) { 4492 SCTP_TCB_UNLOCK(locked_tcb); 4493 } 4494 *offset = length; 4495 return (NULL); 4496 } 4497 } 4498 if ((chk_length > SCTP_LARGEST_INIT_ACCEPTED) || 4499 (num_chunks > 1) || 4500 (SCTP_BASE_SYSCTL(sctp_strict_init) && (length - *offset > (int)SCTP_SIZE32(chk_length)))) { 4501 *offset = length; 4502 if (locked_tcb) { 4503 SCTP_TCB_UNLOCK(locked_tcb); 4504 } 4505 return (NULL); 4506 } 4507 if ((stcb != NULL) && 4508 (SCTP_GET_STATE(&stcb->asoc) == 4509 SCTP_STATE_SHUTDOWN_ACK_SENT)) { 4510 sctp_send_shutdown_ack(stcb, 4511 stcb->asoc.primary_destination); 4512 *offset = length; 4513 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED); 4514 if (locked_tcb) { 4515 SCTP_TCB_UNLOCK(locked_tcb); 4516 } 4517 return (NULL); 4518 } 4519 if (netp) { 4520 sctp_handle_init(m, iphlen, *offset, sh, 4521 (struct sctp_init_chunk *)ch, inp, 4522 stcb, *netp, &abort_no_unlock, vrf_id, port); 4523 } 4524 if (abort_no_unlock) 4525 return (NULL); 4526 4527 *offset = length; 4528 if (locked_tcb) { 4529 SCTP_TCB_UNLOCK(locked_tcb); 4530 } 4531 return (NULL); 4532 break; 4533 case SCTP_PAD_CHUNK: 4534 break; 4535 case SCTP_INITIATION_ACK: 4536 /* must be first and only chunk */ 4537 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n"); 4538 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4539 /* We are not interested anymore */ 4540 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 4541 ; 4542 } else { 4543 if (locked_tcb != stcb) { 4544 /* Very unlikely */ 4545 SCTP_TCB_UNLOCK(locked_tcb); 4546 } 4547 *offset = length; 4548 if (stcb) { 4549 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4550 so = SCTP_INP_SO(inp); 4551 atomic_add_int(&stcb->asoc.refcnt, 1); 4552 SCTP_TCB_UNLOCK(stcb); 4553 SCTP_SOCKET_LOCK(so, 1); 4554 SCTP_TCB_LOCK(stcb); 4555 
atomic_subtract_int(&stcb->asoc.refcnt, 1); 4556 #endif 4557 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27); 4558 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4559 SCTP_SOCKET_UNLOCK(so, 1); 4560 #endif 4561 } 4562 return (NULL); 4563 } 4564 } 4565 if ((num_chunks > 1) || 4566 (SCTP_BASE_SYSCTL(sctp_strict_init) && (length - *offset > (int)SCTP_SIZE32(chk_length)))) { 4567 *offset = length; 4568 if (locked_tcb) { 4569 SCTP_TCB_UNLOCK(locked_tcb); 4570 } 4571 return (NULL); 4572 } 4573 if ((netp) && (*netp)) { 4574 ret = sctp_handle_init_ack(m, iphlen, *offset, sh, 4575 (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, vrf_id); 4576 } else { 4577 ret = -1; 4578 } 4579 /* 4580 * Special case, I must call the output routine to 4581 * get the cookie echoed 4582 */ 4583 if (abort_no_unlock) 4584 return (NULL); 4585 4586 if ((stcb) && ret == 0) 4587 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED); 4588 *offset = length; 4589 if (locked_tcb) { 4590 SCTP_TCB_UNLOCK(locked_tcb); 4591 } 4592 return (NULL); 4593 break; 4594 case SCTP_SELECTIVE_ACK: 4595 { 4596 struct sctp_sack_chunk *sack; 4597 int abort_now = 0; 4598 uint32_t a_rwnd, cum_ack; 4599 uint16_t num_seg, num_dup; 4600 uint8_t flags; 4601 int offset_seg, offset_dup; 4602 int nonce_sum_flag; 4603 4604 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n"); 4605 SCTP_STAT_INCR(sctps_recvsacks); 4606 if (stcb == NULL) { 4607 SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing SACK chunk\n"); 4608 break; 4609 } 4610 if (chk_length < sizeof(struct sctp_sack_chunk)) { 4611 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on SACK chunk, too small\n"); 4612 break; 4613 } 4614 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) { 4615 /*- 4616 * If we have sent a shutdown-ack, we will pay no 4617 * attention to a sack sent in to us since 4618 * we don't care anymore. 
4619 */ 4620 break; 4621 } 4622 sack = (struct sctp_sack_chunk *)ch; 4623 flags = ch->chunk_flags; 4624 nonce_sum_flag = flags & SCTP_SACK_NONCE_SUM; 4625 cum_ack = ntohl(sack->sack.cum_tsn_ack); 4626 num_seg = ntohs(sack->sack.num_gap_ack_blks); 4627 num_dup = ntohs(sack->sack.num_dup_tsns); 4628 a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd); 4629 if (sizeof(struct sctp_sack_chunk) + 4630 num_seg * sizeof(struct sctp_gap_ack_block) + 4631 num_dup * sizeof(uint32_t) != chk_length) { 4632 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of SACK chunk\n"); 4633 break; 4634 } 4635 offset_seg = *offset + sizeof(struct sctp_sack_chunk); 4636 offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block); 4637 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n", 4638 cum_ack, num_seg, a_rwnd); 4639 stcb->asoc.seen_a_sack_this_pkt = 1; 4640 if ((stcb->asoc.pr_sctp_cnt == 0) && 4641 (num_seg == 0) && 4642 ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) || 4643 (cum_ack == stcb->asoc.last_acked_seq)) && 4644 (stcb->asoc.saw_sack_with_frags == 0) && 4645 (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) 4646 ) { 4647 /* 4648 * We have a SIMPLE sack having no 4649 * prior segments and data on sent 4650 * queue to be acked.. Use the 4651 * faster path sack processing. We 4652 * also allow window update sacks 4653 * with no missing segments to go 4654 * this way too. 
4655 */ 4656 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag, 4657 &abort_now); 4658 } else { 4659 if (netp && *netp) 4660 sctp_handle_sack(m, offset_seg, offset_dup, 4661 stcb, *netp, 4662 num_seg, 0, num_dup, &abort_now, flags, 4663 cum_ack, a_rwnd); 4664 } 4665 if (abort_now) { 4666 /* ABORT signal from sack processing */ 4667 *offset = length; 4668 return (NULL); 4669 } 4670 if (TAILQ_EMPTY(&stcb->asoc.send_queue) && 4671 TAILQ_EMPTY(&stcb->asoc.sent_queue) && 4672 (stcb->asoc.stream_queue_cnt == 0)) { 4673 sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 4674 } 4675 } 4676 break; 4677 /* 4678 * EY - nr_sack: If the received chunk is an 4679 * nr_sack chunk 4680 */ 4681 case SCTP_NR_SELECTIVE_ACK: 4682 { 4683 struct sctp_nr_sack_chunk *nr_sack; 4684 int abort_now = 0; 4685 uint32_t a_rwnd, cum_ack; 4686 uint16_t num_seg, num_nr_seg, num_dup; 4687 uint8_t flags; 4688 int offset_seg, offset_dup; 4689 int nonce_sum_flag; 4690 4691 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK\n"); 4692 SCTP_STAT_INCR(sctps_recvsacks); 4693 if (stcb == NULL) { 4694 SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing NR-SACK chunk\n"); 4695 break; 4696 } 4697 if ((stcb->asoc.sctp_nr_sack_on_off == 0) || 4698 (stcb->asoc.peer_supports_nr_sack == 0)) { 4699 goto unknown_chunk; 4700 } 4701 if (chk_length < sizeof(struct sctp_nr_sack_chunk)) { 4702 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on NR-SACK chunk, too small\n"); 4703 break; 4704 } 4705 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) { 4706 /*- 4707 * If we have sent a shutdown-ack, we will pay no 4708 * attention to a sack sent in to us since 4709 * we don't care anymore. 
4710 */ 4711 break; 4712 } 4713 nr_sack = (struct sctp_nr_sack_chunk *)ch; 4714 flags = ch->chunk_flags; 4715 nonce_sum_flag = flags & SCTP_SACK_NONCE_SUM; 4716 4717 cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack); 4718 num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks); 4719 num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks); 4720 num_dup = ntohs(nr_sack->nr_sack.num_dup_tsns); 4721 a_rwnd = (uint32_t) ntohl(nr_sack->nr_sack.a_rwnd); 4722 if (sizeof(struct sctp_nr_sack_chunk) + 4723 (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block) + 4724 num_dup * sizeof(uint32_t) != chk_length) { 4725 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of NR_SACK chunk\n"); 4726 break; 4727 } 4728 offset_seg = *offset + sizeof(struct sctp_nr_sack_chunk); 4729 offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block); 4730 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n", 4731 cum_ack, num_seg, a_rwnd); 4732 stcb->asoc.seen_a_sack_this_pkt = 1; 4733 if ((stcb->asoc.pr_sctp_cnt == 0) && 4734 (num_seg == 0) && (num_nr_seg == 0) && 4735 ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) || 4736 (cum_ack == stcb->asoc.last_acked_seq)) && 4737 (stcb->asoc.saw_sack_with_frags == 0) && 4738 (!TAILQ_EMPTY(&stcb->asoc.sent_queue))) { 4739 /* 4740 * We have a SIMPLE sack having no 4741 * prior segments and data on sent 4742 * queue to be acked. Use the faster 4743 * path sack processing. We also 4744 * allow window update sacks with no 4745 * missing segments to go this way 4746 * too. 
4747 */ 4748 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag, 4749 &abort_now); 4750 } else { 4751 if (netp && *netp) 4752 sctp_handle_sack(m, offset_seg, offset_dup, 4753 stcb, *netp, 4754 num_seg, num_nr_seg, num_dup, &abort_now, flags, 4755 cum_ack, a_rwnd); 4756 } 4757 if (abort_now) { 4758 /* ABORT signal from sack processing */ 4759 *offset = length; 4760 return (NULL); 4761 } 4762 if (TAILQ_EMPTY(&stcb->asoc.send_queue) && 4763 TAILQ_EMPTY(&stcb->asoc.sent_queue) && 4764 (stcb->asoc.stream_queue_cnt == 0)) { 4765 sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 4766 } 4767 } 4768 break; 4769 4770 case SCTP_HEARTBEAT_REQUEST: 4771 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n"); 4772 if ((stcb) && netp && *netp) { 4773 SCTP_STAT_INCR(sctps_recvheartbeat); 4774 sctp_send_heartbeat_ack(stcb, m, *offset, 4775 chk_length, *netp); 4776 4777 /* He's alive so give him credit */ 4778 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4779 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4780 stcb->asoc.overall_error_count, 4781 0, 4782 SCTP_FROM_SCTP_INPUT, 4783 __LINE__); 4784 } 4785 stcb->asoc.overall_error_count = 0; 4786 } 4787 break; 4788 case SCTP_HEARTBEAT_ACK: 4789 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n"); 4790 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) { 4791 /* Its not ours */ 4792 *offset = length; 4793 if (locked_tcb) { 4794 SCTP_TCB_UNLOCK(locked_tcb); 4795 } 4796 return (NULL); 4797 } 4798 /* He's alive so give him credit */ 4799 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4800 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4801 stcb->asoc.overall_error_count, 4802 0, 4803 SCTP_FROM_SCTP_INPUT, 4804 __LINE__); 4805 } 4806 stcb->asoc.overall_error_count = 0; 4807 SCTP_STAT_INCR(sctps_recvheartbeatack); 4808 if (netp && *netp) 4809 sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch, 4810 stcb, *netp); 4811 break; 4812 case 
SCTP_ABORT_ASSOCIATION: 4813 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n", 4814 stcb); 4815 if ((stcb) && netp && *netp) 4816 sctp_handle_abort((struct sctp_abort_chunk *)ch, 4817 stcb, *netp); 4818 *offset = length; 4819 return (NULL); 4820 break; 4821 case SCTP_SHUTDOWN: 4822 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n", 4823 stcb); 4824 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) { 4825 *offset = length; 4826 if (locked_tcb) { 4827 SCTP_TCB_UNLOCK(locked_tcb); 4828 } 4829 return (NULL); 4830 } 4831 if (netp && *netp) { 4832 int abort_flag = 0; 4833 4834 sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch, 4835 stcb, *netp, &abort_flag); 4836 if (abort_flag) { 4837 *offset = length; 4838 return (NULL); 4839 } 4840 } 4841 break; 4842 case SCTP_SHUTDOWN_ACK: 4843 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", stcb); 4844 if ((stcb) && (netp) && (*netp)) 4845 sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp); 4846 *offset = length; 4847 return (NULL); 4848 break; 4849 4850 case SCTP_OPERATION_ERROR: 4851 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n"); 4852 if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) { 4853 4854 *offset = length; 4855 return (NULL); 4856 } 4857 break; 4858 case SCTP_COOKIE_ECHO: 4859 SCTPDBG(SCTP_DEBUG_INPUT3, 4860 "SCTP_COOKIE-ECHO, stcb %p\n", stcb); 4861 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 4862 ; 4863 } else { 4864 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4865 /* We are not interested anymore */ 4866 abend: 4867 if (stcb) { 4868 SCTP_TCB_UNLOCK(stcb); 4869 } 4870 *offset = length; 4871 return (NULL); 4872 } 4873 } 4874 /* 4875 * First are we accepting? We do this again here 4876 * since it is possible that a previous endpoint WAS 4877 * listening responded to a INIT-ACK and then 4878 * closed. We opened and bound.. and are now no 4879 * longer listening. 
4880 */ 4881 4882 if ((stcb == NULL) && (inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit)) { 4883 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4884 (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) { 4885 struct mbuf *oper; 4886 struct sctp_paramhdr *phdr; 4887 4888 oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 4889 0, M_DONTWAIT, 1, MT_DATA); 4890 if (oper) { 4891 SCTP_BUF_LEN(oper) = 4892 sizeof(struct sctp_paramhdr); 4893 phdr = mtod(oper, 4894 struct sctp_paramhdr *); 4895 phdr->param_type = 4896 htons(SCTP_CAUSE_OUT_OF_RESC); 4897 phdr->param_length = 4898 htons(sizeof(struct sctp_paramhdr)); 4899 } 4900 sctp_abort_association(inp, stcb, m, 4901 iphlen, sh, oper, vrf_id, port); 4902 } 4903 *offset = length; 4904 return (NULL); 4905 } else { 4906 struct mbuf *ret_buf; 4907 struct sctp_inpcb *linp; 4908 4909 if (stcb) { 4910 linp = NULL; 4911 } else { 4912 linp = inp; 4913 } 4914 4915 if (linp) { 4916 SCTP_ASOC_CREATE_LOCK(linp); 4917 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 4918 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 4919 SCTP_ASOC_CREATE_UNLOCK(linp); 4920 goto abend; 4921 } 4922 } 4923 if (netp) { 4924 ret_buf = 4925 sctp_handle_cookie_echo(m, iphlen, 4926 *offset, sh, 4927 (struct sctp_cookie_echo_chunk *)ch, 4928 &inp, &stcb, netp, 4929 auth_skipped, 4930 auth_offset, 4931 auth_len, 4932 &locked_tcb, 4933 vrf_id, 4934 port); 4935 } else { 4936 ret_buf = NULL; 4937 } 4938 if (linp) { 4939 SCTP_ASOC_CREATE_UNLOCK(linp); 4940 } 4941 if (ret_buf == NULL) { 4942 if (locked_tcb) { 4943 SCTP_TCB_UNLOCK(locked_tcb); 4944 } 4945 SCTPDBG(SCTP_DEBUG_INPUT3, 4946 "GAK, null buffer\n"); 4947 auth_skipped = 0; 4948 *offset = length; 4949 return (NULL); 4950 } 4951 /* if AUTH skipped, see if it verified... 
*/ 4952 if (auth_skipped) { 4953 got_auth = 1; 4954 auth_skipped = 0; 4955 } 4956 if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) { 4957 /* 4958 * Restart the timer if we have 4959 * pending data 4960 */ 4961 struct sctp_tmit_chunk *chk; 4962 4963 chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 4964 if (chk) { 4965 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4966 stcb->sctp_ep, stcb, 4967 chk->whoTo); 4968 } 4969 } 4970 } 4971 break; 4972 case SCTP_COOKIE_ACK: 4973 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", stcb); 4974 if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) { 4975 if (locked_tcb) { 4976 SCTP_TCB_UNLOCK(locked_tcb); 4977 } 4978 return (NULL); 4979 } 4980 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4981 /* We are not interested anymore */ 4982 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 4983 ; 4984 } else if (stcb) { 4985 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4986 so = SCTP_INP_SO(inp); 4987 atomic_add_int(&stcb->asoc.refcnt, 1); 4988 SCTP_TCB_UNLOCK(stcb); 4989 SCTP_SOCKET_LOCK(so, 1); 4990 SCTP_TCB_LOCK(stcb); 4991 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4992 #endif 4993 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27); 4994 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4995 SCTP_SOCKET_UNLOCK(so, 1); 4996 #endif 4997 *offset = length; 4998 return (NULL); 4999 } 5000 } 5001 /* He's alive so give him credit */ 5002 if ((stcb) && netp && *netp) { 5003 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5004 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5005 stcb->asoc.overall_error_count, 5006 0, 5007 SCTP_FROM_SCTP_INPUT, 5008 __LINE__); 5009 } 5010 stcb->asoc.overall_error_count = 0; 5011 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp); 5012 } 5013 break; 5014 case SCTP_ECN_ECHO: 5015 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n"); 5016 /* He's alive so give him credit */ 5017 if ((stcb == NULL) || (chk_length != 
sizeof(struct sctp_ecne_chunk))) { 5018 /* Its not ours */ 5019 if (locked_tcb) { 5020 SCTP_TCB_UNLOCK(locked_tcb); 5021 } 5022 *offset = length; 5023 return (NULL); 5024 } 5025 if (stcb) { 5026 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5027 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5028 stcb->asoc.overall_error_count, 5029 0, 5030 SCTP_FROM_SCTP_INPUT, 5031 __LINE__); 5032 } 5033 stcb->asoc.overall_error_count = 0; 5034 sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch, 5035 stcb); 5036 } 5037 break; 5038 case SCTP_ECN_CWR: 5039 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n"); 5040 /* He's alive so give him credit */ 5041 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) { 5042 /* Its not ours */ 5043 if (locked_tcb) { 5044 SCTP_TCB_UNLOCK(locked_tcb); 5045 } 5046 *offset = length; 5047 return (NULL); 5048 } 5049 if (stcb) { 5050 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5051 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5052 stcb->asoc.overall_error_count, 5053 0, 5054 SCTP_FROM_SCTP_INPUT, 5055 __LINE__); 5056 } 5057 stcb->asoc.overall_error_count = 0; 5058 sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb); 5059 } 5060 break; 5061 case SCTP_SHUTDOWN_COMPLETE: 5062 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", stcb); 5063 /* must be first and only chunk */ 5064 if ((num_chunks > 1) || 5065 (length - *offset > (int)SCTP_SIZE32(chk_length))) { 5066 *offset = length; 5067 if (locked_tcb) { 5068 SCTP_TCB_UNLOCK(locked_tcb); 5069 } 5070 return (NULL); 5071 } 5072 if ((stcb) && netp && *netp) { 5073 sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch, 5074 stcb, *netp); 5075 } 5076 *offset = length; 5077 return (NULL); 5078 break; 5079 case SCTP_ASCONF: 5080 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n"); 5081 /* He's alive so give him credit */ 5082 if (stcb) { 5083 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5084 
sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5085 stcb->asoc.overall_error_count, 5086 0, 5087 SCTP_FROM_SCTP_INPUT, 5088 __LINE__); 5089 } 5090 stcb->asoc.overall_error_count = 0; 5091 sctp_handle_asconf(m, *offset, 5092 (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0); 5093 asconf_cnt++; 5094 } 5095 break; 5096 case SCTP_ASCONF_ACK: 5097 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n"); 5098 if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) { 5099 /* Its not ours */ 5100 if (locked_tcb) { 5101 SCTP_TCB_UNLOCK(locked_tcb); 5102 } 5103 *offset = length; 5104 return (NULL); 5105 } 5106 if ((stcb) && netp && *netp) { 5107 /* He's alive so give him credit */ 5108 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5109 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5110 stcb->asoc.overall_error_count, 5111 0, 5112 SCTP_FROM_SCTP_INPUT, 5113 __LINE__); 5114 } 5115 stcb->asoc.overall_error_count = 0; 5116 sctp_handle_asconf_ack(m, *offset, 5117 (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock); 5118 if (abort_no_unlock) 5119 return (NULL); 5120 } 5121 break; 5122 case SCTP_FORWARD_CUM_TSN: 5123 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n"); 5124 if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) { 5125 /* Its not ours */ 5126 if (locked_tcb) { 5127 SCTP_TCB_UNLOCK(locked_tcb); 5128 } 5129 *offset = length; 5130 return (NULL); 5131 } 5132 /* He's alive so give him credit */ 5133 if (stcb) { 5134 int abort_flag = 0; 5135 5136 stcb->asoc.overall_error_count = 0; 5137 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5138 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5139 stcb->asoc.overall_error_count, 5140 0, 5141 SCTP_FROM_SCTP_INPUT, 5142 __LINE__); 5143 } 5144 *fwd_tsn_seen = 1; 5145 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5146 /* We are not interested anymore */ 5147 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5148 so = SCTP_INP_SO(inp); 5149 atomic_add_int(&stcb->asoc.refcnt, 1); 5150 
SCTP_TCB_UNLOCK(stcb); 5151 SCTP_SOCKET_LOCK(so, 1); 5152 SCTP_TCB_LOCK(stcb); 5153 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5154 #endif 5155 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29); 5156 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5157 SCTP_SOCKET_UNLOCK(so, 1); 5158 #endif 5159 *offset = length; 5160 return (NULL); 5161 } 5162 sctp_handle_forward_tsn(stcb, 5163 (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset); 5164 if (abort_flag) { 5165 *offset = length; 5166 return (NULL); 5167 } else { 5168 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5169 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5170 stcb->asoc.overall_error_count, 5171 0, 5172 SCTP_FROM_SCTP_INPUT, 5173 __LINE__); 5174 } 5175 stcb->asoc.overall_error_count = 0; 5176 } 5177 5178 } 5179 break; 5180 case SCTP_STREAM_RESET: 5181 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n"); 5182 if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) { 5183 /* Its not ours */ 5184 if (locked_tcb) { 5185 SCTP_TCB_UNLOCK(locked_tcb); 5186 } 5187 *offset = length; 5188 return (NULL); 5189 } 5190 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5191 /* We are not interested anymore */ 5192 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5193 so = SCTP_INP_SO(inp); 5194 atomic_add_int(&stcb->asoc.refcnt, 1); 5195 SCTP_TCB_UNLOCK(stcb); 5196 SCTP_SOCKET_LOCK(so, 1); 5197 SCTP_TCB_LOCK(stcb); 5198 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5199 #endif 5200 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_30); 5201 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5202 SCTP_SOCKET_UNLOCK(so, 1); 5203 #endif 5204 *offset = length; 5205 return (NULL); 5206 } 5207 if (stcb->asoc.peer_supports_strreset == 0) { 5208 /* 5209 * hmm, peer should have announced this, but 5210 * we will turn it on since he is sending us 5211 * a stream reset. 
5212 */ 5213 stcb->asoc.peer_supports_strreset = 1; 5214 } 5215 if (sctp_handle_stream_reset(stcb, m, *offset, (struct sctp_stream_reset_out_req *)ch)) { 5216 /* stop processing */ 5217 *offset = length; 5218 return (NULL); 5219 } 5220 break; 5221 case SCTP_PACKET_DROPPED: 5222 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n"); 5223 /* re-get it all please */ 5224 if (chk_length < sizeof(struct sctp_pktdrop_chunk)) { 5225 /* Its not ours */ 5226 if (locked_tcb) { 5227 SCTP_TCB_UNLOCK(locked_tcb); 5228 } 5229 *offset = length; 5230 return (NULL); 5231 } 5232 if (ch && (stcb) && netp && (*netp)) { 5233 sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch, 5234 stcb, *netp, 5235 min(chk_length, (sizeof(chunk_buf) - 4))); 5236 5237 } 5238 break; 5239 5240 case SCTP_AUTHENTICATION: 5241 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n"); 5242 if (SCTP_BASE_SYSCTL(sctp_auth_disable)) 5243 goto unknown_chunk; 5244 5245 if (stcb == NULL) { 5246 /* save the first AUTH for later processing */ 5247 if (auth_skipped == 0) { 5248 auth_offset = *offset; 5249 auth_len = chk_length; 5250 auth_skipped = 1; 5251 } 5252 /* skip this chunk (temporarily) */ 5253 goto next_chunk; 5254 } 5255 if ((chk_length < (sizeof(struct sctp_auth_chunk))) || 5256 (chk_length > (sizeof(struct sctp_auth_chunk) + 5257 SCTP_AUTH_DIGEST_LEN_MAX))) { 5258 /* Its not ours */ 5259 if (locked_tcb) { 5260 SCTP_TCB_UNLOCK(locked_tcb); 5261 } 5262 *offset = length; 5263 return (NULL); 5264 } 5265 if (got_auth == 1) { 5266 /* skip this chunk... it's already auth'd */ 5267 goto next_chunk; 5268 } 5269 got_auth = 1; 5270 if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch, 5271 m, *offset)) { 5272 /* auth HMAC failed so dump the packet */ 5273 *offset = length; 5274 return (stcb); 5275 } else { 5276 /* remaining chunks are HMAC checked */ 5277 stcb->asoc.authenticated = 1; 5278 } 5279 break; 5280 5281 default: 5282 unknown_chunk: 5283 /* it's an unknown chunk! 
*/ 5284 if ((ch->chunk_type & 0x40) && (stcb != NULL)) { 5285 struct mbuf *mm; 5286 struct sctp_paramhdr *phd; 5287 5288 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 5289 0, M_DONTWAIT, 1, MT_DATA); 5290 if (mm) { 5291 phd = mtod(mm, struct sctp_paramhdr *); 5292 /* 5293 * We cheat and use param type since 5294 * we did not bother to define a 5295 * error cause struct. They are the 5296 * same basic format with different 5297 * names. 5298 */ 5299 phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK); 5300 phd->param_length = htons(chk_length + sizeof(*phd)); 5301 SCTP_BUF_LEN(mm) = sizeof(*phd); 5302 SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, SCTP_SIZE32(chk_length), 5303 M_DONTWAIT); 5304 if (SCTP_BUF_NEXT(mm)) { 5305 #ifdef SCTP_MBUF_LOGGING 5306 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 5307 struct mbuf *mat; 5308 5309 mat = SCTP_BUF_NEXT(mm); 5310 while (mat) { 5311 if (SCTP_BUF_IS_EXTENDED(mat)) { 5312 sctp_log_mb(mat, SCTP_MBUF_ICOPY); 5313 } 5314 mat = SCTP_BUF_NEXT(mat); 5315 } 5316 } 5317 #endif 5318 sctp_queue_op_err(stcb, mm); 5319 } else { 5320 sctp_m_freem(mm); 5321 } 5322 } 5323 } 5324 if ((ch->chunk_type & 0x80) == 0) { 5325 /* discard this packet */ 5326 *offset = length; 5327 return (stcb); 5328 } /* else skip this bad chunk and continue... 
*/
			break;
		}		/* switch (ch->chunk_type) */


next_chunk:
		/* get the next chunk */
		*offset += SCTP_SIZE32(chk_length);
		if (*offset >= length) {
			/* no more data left in the mbuf chain */
			break;
		}
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
		    sizeof(struct sctp_chunkhdr), chunk_buf);
		if (ch == NULL) {
			/* could not pull the next chunk header; drop the rest */
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			*offset = length;
			return (NULL);
		}
	}			/* while */

	/* Acknowledge a batch of ASCONFs once, after the chunk loop. */
	if (asconf_cnt > 0 && stcb != NULL) {
		sctp_send_asconf_ack(stcb);
	}
	return (stcb);
}


/*
 * Process the ECN bits we have something set so we must look to see if it is
 * ECN(0) or ECN(1) or CE
 */
static void
sctp_process_ecn_marked_a(struct sctp_tcb *stcb, struct sctp_nets *net,
    uint8_t ecn_bits)
{
	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
		/* CE is handled in part 2 (sctp_process_ecn_marked_b) */
		;
	} else if ((ecn_bits & SCTP_ECT1_BIT) == SCTP_ECT1_BIT) {
		/*
		 * we only add to the nonce sum for ECT1, ECT0 does not
		 * change the NS bit (that we have yet to find a way to send
		 * it yet).
		 */

		/* ECN Nonce stuff */
		stcb->asoc.receiver_nonce_sum++;
		stcb->asoc.receiver_nonce_sum &= SCTP_SACK_NONCE_SUM;

		/*
		 * Drag up the last_echo point if cumack is larger since we
		 * don't want the point falling way behind by more than
		 * 2^^31 and then having it be incorrect.
		 */
		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
		}
	} else if ((ecn_bits & SCTP_ECT0_BIT) == SCTP_ECT0_BIT) {
		/*
		 * Drag up the last_echo point if cumack is larger since we
		 * don't want the point falling way behind by more than
		 * 2^^31 and then having it be incorrect.
		 */
		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
		}
	}
}

/*
 * ECN part 2: if the packet carried a CE (Congestion Experienced) mark,
 * queue an ECN-Echo for high_tsn so the peer reduces its congestion
 * window; the peer's CWR will remove the queued ECNE.
 */
static void
sctp_process_ecn_marked_b(struct sctp_tcb *stcb, struct sctp_nets *net,
    uint32_t high_tsn, uint8_t ecn_bits)
{
	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
		/*
		 * we possibly must notify the sender that a congestion
		 * window reduction is in order. We do this by adding a ECNE
		 * chunk to the output chunk queue. The incoming CWR will
		 * remove this chunk.
		 */
		if (compare_with_wrap(high_tsn, stcb->asoc.last_echo_tsn,
		    MAX_TSN)) {
			/* Yep, we need to add a ECNE */
			sctp_send_ecn_echo(stcb, net, high_tsn);
			stcb->asoc.last_echo_tsn = high_tsn;
		}
	}
}

#ifdef INVARIANTS
/*
 * Debug-only (INVARIANTS) check: panic if any lock belonging to this
 * endpoint -- a TCB lock, the asoc-create lock, or the inp lock -- is
 * still owned when input processing returns.
 */
#ifdef __GNUC__
__attribute__((noinline))
#endif
void
sctp_validate_no_locks(struct sctp_inpcb *inp)
{
	struct sctp_tcb *lstcb;

	LIST_FOREACH(lstcb, &inp->sctp_asoc_list, sctp_tcblist) {
		if (mtx_owned(&lstcb->tcb_mtx)) {
			panic("Own lock on stcb at return from input");
		}
	}
	if (mtx_owned(&inp->inp_create_mtx)) {
		panic("Own create lock on inp");
	}
	if (mtx_owned(&inp->inp_mtx)) {
		panic("Own inp lock on inp");
	}
}

#endif

/*
 * common input chunk processing (v4 and v6)
 *
 * Processes the control portion of the packet (if any), then the DATA
 * chunks, and finally triggers output of anything that became sendable.
 *
 * NOTE(review): the caller appears to pass stcb, when non-NULL, locked;
 * every exit path below either unlocks it or the association has been
 * destroyed during processing -- confirm against sctp_input_with_port().
 */
void
sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
    int length, struct sctphdr *sh, struct sctp_chunkhdr *ch,
    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
    uint8_t ecn_bits, uint32_t vrf_id, uint16_t port)
{
	/*
	 * Control chunk processing
	 */
	uint32_t high_tsn;
	int fwd_tsn_seen = 0, data_processed = 0;
	struct mbuf *m = *mm;
	int abort_flag = 0;
	int un_sent;

	SCTP_STAT_INCR(sctps_recvdatagrams);
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 1);
	sctp_auditing(0, inp, stcb, net);
#endif

	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n",
	    m, iphlen, offset, length, stcb);
	if (stcb) {
		/* always clear this before beginning a packet */
		stcb->asoc.authenticated = 0;
		stcb->asoc.seen_a_sack_this_pkt = 0;
		SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
		    stcb, stcb->asoc.state);

		if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
			/*-
			 * If we hit here, we had a ref count
			 * up when the assoc was aborted and the
			 * timer is clearing out the assoc, we should
			 * NOT respond to any packet.. its OOTB.
			 */
			SCTP_TCB_UNLOCK(stcb);
			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
			    vrf_id, port);
			goto out_now;
		}
	}
	if (IS_SCTP_CONTROL(ch)) {
		/* process the control portion of the SCTP packet */
		/* sa_ignore NO_NULL_CHK */
		stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch,
		    inp, stcb, &net, &fwd_tsn_seen, vrf_id, port);
		if (stcb) {
			/*
			 * This covers us if the cookie-echo was there and
			 * it changes our INP.
			 */
			inp = stcb->sctp_ep;
			if ((net) && (port)) {
				/*
				 * First UDP-encapsulated packet on this
				 * path: shrink the MTU by the UDP header.
				 */
				if (net->port == 0) {
					sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
				}
				net->port = port;
			}
		}
	} else {
		/*
		 * no control chunks, so pre-process DATA chunks (these
		 * checks are taken care of by control processing)
		 */

		/*
		 * if DATA only packet, and auth is required, then punt...
		 * can't have authenticated without any AUTH (control)
		 * chunks
		 */
		if ((stcb != NULL) &&
		    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
		    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) {
			/* "silently" ignore */
			SCTP_STAT_INCR(sctps_recvauthmissing);
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
		}
		if (stcb == NULL) {
			/* out of the blue DATA chunk */
			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
			    vrf_id, port);
			goto out_now;
		}
		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
			/* v_tag mismatch! */
			SCTP_STAT_INCR(sctps_badvtag);
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
		}
	}

	if (stcb == NULL) {
		/*
		 * no valid TCB for this packet, or we found it's a bad
		 * packet while processing control, or we're done with this
		 * packet (done or skip rest of data), so we drop it...
		 */
		goto out_now;
	}
	/*
	 * DATA chunk processing
	 */
	/* plow through the data chunks while length > offset */

	/*
	 * Rest should be DATA only. Check authentication state if AUTH for
	 * DATA is required.
	 */
	if ((length > offset) &&
	    (stcb != NULL) &&
	    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
	    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) &&
	    !stcb->asoc.authenticated) {
		/* "silently" ignore */
		SCTP_STAT_INCR(sctps_recvauthmissing);
		SCTPDBG(SCTP_DEBUG_AUTH1,
		    "Data chunk requires AUTH, skipped\n");
		goto trigger_send;
	}
	if (length > offset) {
		int retval;

		/*
		 * First check to make sure our state is correct. We would
		 * not get here unless we really did have a tag, so we don't
		 * abort if this happens, just dump the chunk silently.
		 */
		switch (SCTP_GET_STATE(&stcb->asoc)) {
		case SCTP_STATE_COOKIE_ECHOED:
			/*
			 * we consider data with valid tags in this state
			 * shows us the cookie-ack was lost. Imply it was
			 * there.
			 */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
				    stcb->asoc.overall_error_count,
				    0,
				    SCTP_FROM_SCTP_INPUT,
				    __LINE__);
			}
			stcb->asoc.overall_error_count = 0;
			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
			break;
		case SCTP_STATE_COOKIE_WAIT:
			/*
			 * We consider OOTB any data sent during asoc setup.
			 */
			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
			    vrf_id, port);
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
			/* sa_ignore NOTREACHED */
			break;
		case SCTP_STATE_EMPTY:	/* should not happen */
		case SCTP_STATE_INUSE:	/* should not happen */
		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
		default:
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
			/* sa_ignore NOTREACHED */
			break;
		case SCTP_STATE_OPEN:
		case SCTP_STATE_SHUTDOWN_SENT:
			break;
		}
		/* take care of ECN, part 1. */
		if (stcb->asoc.ecn_allowed &&
		    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
			sctp_process_ecn_marked_a(stcb, net, ecn_bits);
		}
		/* plow through the data chunks while length > offset */
		retval = sctp_process_data(mm, iphlen, &offset, length, sh,
		    inp, stcb, net, &high_tsn);
		if (retval == 2) {
			/*
			 * The association aborted, NO UNLOCK needed since
			 * the association is destroyed.
			 */
			goto out_now;
		}
		data_processed = 1;
		if (retval == 0) {
			/* take care of ecn part 2.
			 */
			if (stcb->asoc.ecn_allowed &&
			    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
				sctp_process_ecn_marked_b(stcb, net, high_tsn,
				    ecn_bits);
			}
		}
		/*
		 * Anything important needs to have been m_copy'ed in
		 * process_data
		 */
	}
	if ((data_processed == 0) && (fwd_tsn_seen)) {
		int was_a_gap = 0;

		if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
		    stcb->asoc.cumulative_tsn, MAX_TSN)) {
			/* there was a gap before this data was processed */
			was_a_gap = 1;
		}
		stcb->asoc.send_sack = 1;
		sctp_sack_check(stcb, was_a_gap, &abort_flag);
		if (abort_flag) {
			/* Again, we aborted so NO UNLOCK needed */
			goto out_now;
		}
	} else if (fwd_tsn_seen) {
		stcb->asoc.send_sack = 1;
	}
	/* trigger send of any chunks in queue... */
trigger_send:
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 2);
	sctp_auditing(1, inp, stcb, net);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT1,
	    "Check for chunk output prw:%d tqe:%d tf=%d\n",
	    stcb->asoc.peers_rwnd,
	    TAILQ_EMPTY(&stcb->asoc.control_send_queue),
	    stcb->asoc.total_flight);
	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);

	/*
	 * Send if control chunks are queued, or if there is unsent data and
	 * either the peer advertises window, or its window is closed but
	 * nothing is in flight (zero-window probe).
	 */
	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue) ||
	    ((un_sent) &&
	    (stcb->asoc.peers_rwnd > 0 ||
	    (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 3);
	sctp_auditing(2, inp, stcb, net);
#endif
	SCTP_TCB_UNLOCK(stcb);
out_now:
#ifdef INVARIANTS
	sctp_validate_no_locks(inp);
#endif
	return;
}

#if 0
static void
sctp_print_mbuf_chain(struct mbuf *m)
{
	for (; m; m =
SCTP_BUF_NEXT(m)) { 5703 printf("%p: m_len = %ld\n", m, SCTP_BUF_LEN(m)); 5704 if (SCTP_BUF_IS_EXTENDED(m)) 5705 printf("%p: extend_size = %d\n", m, SCTP_BUF_EXTEND_SIZE(m)); 5706 } 5707 } 5708 5709 #endif 5710 5711 void 5712 sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port) 5713 { 5714 #ifdef SCTP_MBUF_LOGGING 5715 struct mbuf *mat; 5716 5717 #endif 5718 struct mbuf *m; 5719 int iphlen; 5720 uint32_t vrf_id = 0; 5721 uint8_t ecn_bits; 5722 struct ip *ip; 5723 struct sctphdr *sh; 5724 struct sctp_inpcb *inp = NULL; 5725 struct sctp_nets *net; 5726 struct sctp_tcb *stcb = NULL; 5727 struct sctp_chunkhdr *ch; 5728 int refcount_up = 0; 5729 int length, mlen, offset; 5730 5731 #if !defined(SCTP_WITH_NO_CSUM) 5732 uint32_t check, calc_check; 5733 5734 #endif 5735 5736 if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) { 5737 SCTP_RELEASE_PKT(i_pak); 5738 return; 5739 } 5740 mlen = SCTP_HEADER_LEN(i_pak); 5741 iphlen = off; 5742 m = SCTP_HEADER_TO_CHAIN(i_pak); 5743 5744 net = NULL; 5745 SCTP_STAT_INCR(sctps_recvpackets); 5746 SCTP_STAT_INCR_COUNTER64(sctps_inpackets); 5747 5748 5749 #ifdef SCTP_MBUF_LOGGING 5750 /* Log in any input mbufs */ 5751 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 5752 mat = m; 5753 while (mat) { 5754 if (SCTP_BUF_IS_EXTENDED(mat)) { 5755 sctp_log_mb(mat, SCTP_MBUF_INPUT); 5756 } 5757 mat = SCTP_BUF_NEXT(mat); 5758 } 5759 } 5760 #endif 5761 #ifdef SCTP_PACKET_LOGGING 5762 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) 5763 sctp_packet_log(m, mlen); 5764 #endif 5765 /* 5766 * Must take out the iphlen, since mlen expects this (only effect lb 5767 * case) 5768 */ 5769 mlen -= iphlen; 5770 5771 /* 5772 * Get IP, SCTP, and first chunk header together in first mbuf. 
5773 */ 5774 ip = mtod(m, struct ip *); 5775 offset = iphlen + sizeof(*sh) + sizeof(*ch); 5776 if (SCTP_BUF_LEN(m) < offset) { 5777 if ((m = m_pullup(m, offset)) == 0) { 5778 SCTP_STAT_INCR(sctps_hdrops); 5779 return; 5780 } 5781 ip = mtod(m, struct ip *); 5782 } 5783 /* validate mbuf chain length with IP payload length */ 5784 if (mlen < (SCTP_GET_IPV4_LENGTH(ip) - iphlen)) { 5785 SCTP_STAT_INCR(sctps_hdrops); 5786 goto bad; 5787 } 5788 sh = (struct sctphdr *)((caddr_t)ip + iphlen); 5789 ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh)); 5790 SCTPDBG(SCTP_DEBUG_INPUT1, 5791 "sctp_input() length:%d iphlen:%d\n", mlen, iphlen); 5792 5793 /* SCTP does not allow broadcasts or multicasts */ 5794 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) { 5795 goto bad; 5796 } 5797 if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) { 5798 /* 5799 * We only look at broadcast if its a front state, All 5800 * others we will not have a tcb for anyway. 5801 */ 5802 goto bad; 5803 } 5804 /* validate SCTP checksum */ 5805 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 5806 "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n", 5807 m->m_pkthdr.len, 5808 if_name(m->m_pkthdr.rcvif), 5809 m->m_pkthdr.csum_flags); 5810 #if defined(SCTP_WITH_NO_CSUM) 5811 SCTP_STAT_INCR(sctps_recvnocrc); 5812 #else 5813 if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) { 5814 SCTP_STAT_INCR(sctps_recvhwcrc); 5815 goto sctp_skip_csum_4; 5816 } 5817 check = sh->checksum; /* save incoming checksum */ 5818 sh->checksum = 0; /* prepare for calc */ 5819 calc_check = sctp_calculate_cksum(m, iphlen); 5820 sh->checksum = check; 5821 SCTP_STAT_INCR(sctps_recvswcrc); 5822 if (calc_check != check) { 5823 SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n", 5824 calc_check, check, m, mlen, iphlen); 5825 5826 stcb = sctp_findassociation_addr(m, iphlen, 5827 offset - sizeof(*ch), 5828 sh, ch, &inp, &net, 5829 vrf_id); 5830 if ((net) && (port)) { 5831 if (net->port == 0) { 5832 
sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr)); 5833 } 5834 net->port = port; 5835 } 5836 if ((inp) && (stcb)) { 5837 sctp_send_packet_dropped(stcb, net, m, iphlen, 1); 5838 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED); 5839 } else if ((inp != NULL) && (stcb == NULL)) { 5840 refcount_up = 1; 5841 } 5842 SCTP_STAT_INCR(sctps_badsum); 5843 SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors); 5844 goto bad; 5845 } 5846 sctp_skip_csum_4: 5847 #endif 5848 /* destination port of 0 is illegal, based on RFC2960. */ 5849 if (sh->dest_port == 0) { 5850 SCTP_STAT_INCR(sctps_hdrops); 5851 goto bad; 5852 } 5853 /* 5854 * Locate pcb and tcb for datagram sctp_findassociation_addr() wants 5855 * IP/SCTP/first chunk header... 5856 */ 5857 stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch), 5858 sh, ch, &inp, &net, vrf_id); 5859 if ((net) && (port)) { 5860 if (net->port == 0) { 5861 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr)); 5862 } 5863 net->port = port; 5864 } 5865 /* inp's ref-count increased && stcb locked */ 5866 if (inp == NULL) { 5867 struct sctp_init_chunk *init_chk, chunk_buf; 5868 5869 SCTP_STAT_INCR(sctps_noport); 5870 #ifdef ICMP_BANDLIM 5871 /* 5872 * we use the bandwidth limiting to protect against sending 5873 * too many ABORTS all at once. In this case these count the 5874 * same as an ICMP message. 5875 */ 5876 if (badport_bandlim(0) < 0) 5877 goto bad; 5878 #endif /* ICMP_BANDLIM */ 5879 SCTPDBG(SCTP_DEBUG_INPUT1, 5880 "Sending a ABORT from packet entry!\n"); 5881 if (ch->chunk_type == SCTP_INITIATION) { 5882 /* 5883 * we do a trick here to get the INIT tag, dig in 5884 * and get the tag from the INIT and put it in the 5885 * common header. 
5886 */ 5887 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 5888 iphlen + sizeof(*sh), sizeof(*init_chk), 5889 (uint8_t *) & chunk_buf); 5890 if (init_chk != NULL) 5891 sh->v_tag = init_chk->init.initiate_tag; 5892 } 5893 if (ch->chunk_type == SCTP_SHUTDOWN_ACK) { 5894 sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port); 5895 goto bad; 5896 } 5897 if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) { 5898 goto bad; 5899 } 5900 if (ch->chunk_type != SCTP_ABORT_ASSOCIATION) 5901 sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id, port); 5902 goto bad; 5903 } else if (stcb == NULL) { 5904 refcount_up = 1; 5905 } 5906 #ifdef IPSEC 5907 /* 5908 * I very much doubt any of the IPSEC stuff will work but I have no 5909 * idea, so I will leave it in place. 5910 */ 5911 if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) { 5912 MODULE_GLOBAL(ipsec4stat).in_polvio++; 5913 SCTP_STAT_INCR(sctps_hdrops); 5914 goto bad; 5915 } 5916 #endif /* IPSEC */ 5917 5918 /* 5919 * common chunk processing 5920 */ 5921 length = ip->ip_len + iphlen; 5922 offset -= sizeof(struct sctp_chunkhdr); 5923 5924 ecn_bits = ip->ip_tos; 5925 5926 /* sa_ignore NO_NULL_CHK */ 5927 sctp_common_input_processing(&m, iphlen, offset, length, sh, ch, 5928 inp, stcb, net, ecn_bits, vrf_id, port); 5929 /* inp's ref-count reduced && stcb unlocked */ 5930 if (m) { 5931 sctp_m_freem(m); 5932 } 5933 if ((inp) && (refcount_up)) { 5934 /* reduce ref-count */ 5935 SCTP_INP_DECR_REF(inp); 5936 } 5937 return; 5938 bad: 5939 if (stcb) { 5940 SCTP_TCB_UNLOCK(stcb); 5941 } 5942 if ((inp) && (refcount_up)) { 5943 /* reduce ref-count */ 5944 SCTP_INP_DECR_REF(inp); 5945 } 5946 if (m) { 5947 sctp_m_freem(m); 5948 } 5949 return; 5950 } 5951 void 5952 sctp_input(i_pak, off) 5953 struct mbuf *i_pak; 5954 int off; 5955 { 5956 sctp_input_with_port(i_pak, off, 0); 5957 } 5958