1 /*- 2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * a) Redistributions of source code must retain the above copyright notice, 8 * this list of conditions and the following disclaimer. 9 * 10 * b) Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the distribution. 13 * 14 * c) Neither the name of Cisco Systems, Inc. nor the names of its 15 * contributors may be used to endorse or promote products derived 16 * from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 
 */

/* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_timer.h>



/*
 * Stop the COOKIE or INIT retransmission timer on every destination net
 * of the association.  The caller must hold the TCB lock (asserted).
 */
static void
sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
{
	struct sctp_nets *net;

	/*
	 * This now not only stops all cookie timers it also stops any INIT
	 * timers as well. This will make sure that the timers are stopped
	 * in all collision cases.
	 */
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
			sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
			    stcb->sctp_ep,
			    stcb,
			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
		} else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
			    stcb->sctp_ep,
			    stcb,
			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
		}
	}
}

/*
 * INIT handler: validate the incoming INIT chunk and, when acceptable,
 * answer with an INIT-ACK carrying a state cookie.  Any validation
 * failure (bad length, zero initiate tag, rwnd below SCTP_MIN_RWND,
 * zero stream counts, bad AUTH parameters) aborts the association
 * instead; *abort_no_unlock is set in that case when an stcb exists so
 * the caller knows not to unlock it.  When stcb is NULL the endpoint is
 * read-locked for the duration of this routine (released at outnow).
 */
static void
sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
{
	struct sctp_init *init;
	struct mbuf *op_err;
	uint32_t init_limit;

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
	    stcb);
	if (stcb == NULL) {
		SCTP_INP_RLOCK(inp);
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			goto outnow;
		}
	}
	op_err = NULL;
	init = &cp->init;
	/* First are we accepting? (listen queue limit of 0 means no) */
	if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) {
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_init: Abort, so_qlimit:%d\n",
		    inp->sctp_socket->so_qlimit);
		/*
		 * FIX ME ?? What about TCP model and we have a
		 * match/restart case?
		 */
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
		/* Invalid length */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* validate parameters */
	if (init->initiate_tag == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
		/* invalid parameter... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (init->num_inbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (init->num_outbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	init_limit = offset + ntohs(cp->ch.chunk_length);
	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
	    init_limit)) {
		/* auth parameter(s) error... send abort */
		sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* send an INIT-ACK w/cookie */
	SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
	sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id,
	    ((stcb == NULL) ? SCTP_HOLDS_LOCK : SCTP_NOT_LOCKED));
outnow:
	if (stcb == NULL) {
		SCTP_INP_RUNLOCK(inp);
	}
}

/*
 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error
 */

int
sctp_is_there_unsent_data(struct sctp_tcb *stcb)
{
	int unsent_data = 0;
	struct sctp_stream_queue_pending *sp;
	struct sctp_stream_out *strq;
	struct sctp_association *asoc;

	/*
	 * This function returns the number of streams that have true unsent
	 * data on them. Note that as it looks through it will clean up any
	 * places that have old data that has been sent but left at top of
	 * stream queue.  Runs under the TCB send lock.
	 */
	asoc = &stcb->asoc;
	SCTP_TCB_SEND_LOCK(stcb);
	if (!TAILQ_EMPTY(&asoc->out_wheel)) {
		/* Check to see if some data queued */
		TAILQ_FOREACH(strq, &asoc->out_wheel, next_spoke) {
	is_there_another:
			/* sa_ignore FREED_MEMORY */
			sp = TAILQ_FIRST(&strq->outqueue);
			if (sp == NULL) {
				continue;
			}
			if ((sp->msg_is_complete) &&
			    (sp->length == 0) &&
			    (sp->sender_all_done)) {
				/*
				 * We are doing deferred cleanup. Last time
				 * through when we took all the data the
				 * sender_all_done was not set.
				 */
				if (sp->put_last_out == 0) {
					SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
					SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
					    sp->sender_all_done,
					    sp->length,
					    sp->msg_is_complete,
					    sp->put_last_out);
				}
				atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
				TAILQ_REMOVE(&strq->outqueue, sp, next);
				sctp_free_remote_addr(sp->net);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
				sctp_free_a_strmoq(stcb, sp);
				/* re-examine the (new) head of this queue */
				goto is_there_another;
			} else {
				unsent_data++;
				continue;
			}
		}
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	return (unsent_data);
}

/*
 * Absorb the parameters of a peer's INIT or INIT-ACK into the
 * association: record the peer verification tag and rwnd, seed each
 * destination's ssthresh from the peer rwnd, trim outbound streams the
 * peer cannot accept (failing any data already queued on them), reset
 * the inbound TSN tracking values from the peer's initial TSN, and
 * (re)allocate the inbound stream array.  Returns 0 on success or -1
 * if the inbound stream array cannot be allocated.
 */
static int
sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_init *init;
	struct sctp_association *asoc;
	struct sctp_nets *lnet;
	unsigned int i;

	init = &cp->init;
	asoc = &stcb->asoc;
	/* save off parameters */
	asoc->peer_vtag = ntohl(init->initiate_tag);
	asoc->peers_rwnd = ntohl(init->a_rwnd);
	if (TAILQ_FIRST(&asoc->nets)) {
		/* update any ssthresh's that may have a default */
		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
			lnet->ssthresh = asoc->peers_rwnd;

			if (sctp_logging_level & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
			}
		}
	}
	SCTP_TCB_SEND_LOCK(stcb);
	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
		unsigned int newcnt;
		struct sctp_stream_out *outs;
		struct sctp_stream_queue_pending *sp;

		/* cut back on number of streams */
		newcnt = ntohs(init->num_inbound_streams);
		/* This if is probably not needed but I am cautious */
		if (asoc->strmout) {
			/* First make sure no data chunks are trapped */
			for (i = newcnt; i < asoc->pre_open_streams; i++) {
				outs = &asoc->strmout[i];
				sp = TAILQ_FIRST(&outs->outqueue);
				while (sp) {
					TAILQ_REMOVE(&outs->outqueue, sp,
					    next);
					asoc->stream_queue_cnt--;
					/* tell the ULP its data was unsent */
					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
					    stcb, SCTP_NOTIFY_DATAGRAM_UNSENT,
					    sp, SCTP_SO_NOT_LOCKED);
					if (sp->data) {
						sctp_m_freem(sp->data);
						sp->data = NULL;
					}
					sctp_free_remote_addr(sp->net);
					sp->net = NULL;
					/* Free the chunk */
					SCTP_PRINTF("sp:%p tcb:%p weird free case\n",
					    sp, stcb);

					sctp_free_a_strmoq(stcb, sp);
					/* sa_ignore FREED_MEMORY */
					sp = TAILQ_FIRST(&outs->outqueue);
				}
			}
		}
		/* cut back the count and abandon the upper streams */
		asoc->pre_open_streams = newcnt;
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	asoc->streamoutcnt = asoc->pre_open_streams;
	/* init tsn's */
	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;
	if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
	}
	/* This is the next one we expect */
	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;

	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
	asoc->cumulative_tsn = asoc->asconf_seq_in;
	asoc->last_echo_tsn = asoc->asconf_seq_in;
	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	/* open the requested streams */

	if (asoc->strmin != NULL) {
		/* Free the old ones (drain each inbound queue first) */
		struct sctp_queued_to_read *ctl;

		for (i = 0; i < asoc->streamincnt; i++) {
			ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
			while (ctl) {
				TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next);
				sctp_free_remote_addr(ctl->whoFrom);
				ctl->whoFrom = NULL;
				sctp_m_freem(ctl->data);
				ctl->data = NULL;
				sctp_free_a_readq(stcb, ctl);
				ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
			}
		}
		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
	}
	/* inbound stream count = peer's outbound count, clamped */
	asoc->streamincnt = ntohs(init->num_outbound_streams);
	if (asoc->streamincnt > MAX_SCTP_STREAMS) {
		asoc->streamincnt = MAX_SCTP_STREAMS;
	}
	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
	    sizeof(struct sctp_stream_in), SCTP_M_STRMI);
	if (asoc->strmin == NULL) {
		/* we didn't get memory for the streams! */
		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
		return (-1);
	}
	for (i = 0; i < asoc->streamincnt; i++) {
		asoc->strmin[i].stream_no = i;
		asoc->strmin[i].last_sequence_delivered = 0xffff;
		/*
		 * U-stream ranges will be set when the cookie is unpacked.
		 * Or for the INIT sender they are un set (if pr-sctp not
		 * supported) when the INIT-ACK arrives.
		 */
		TAILQ_INIT(&asoc->strmin[i].inqueue);
		asoc->strmin[i].delivery_started = 0;
	}
	/*
	 * load_address_from_init will put the addresses into the
	 * association when the COOKIE is processed or the INIT-ACK is
	 * processed. Both types of COOKIE's existing and new call this
	 * routine. It will remove addresses that are no longer in the
	 * association (for the restarting case where addresses are
	 * removed). Up front when the INIT arrives we will discard it if it
	 * is a restart and new addresses have been added.
	 */
	/* sa_ignore MEMLEAK */
	return (0);
}

/*
 * INIT-ACK message processing/consumption returns value < 0 on error
 */
static int
sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
{
	struct sctp_association *asoc;
	struct mbuf *op_err;
	int retval, abort_flag;
	uint32_t initack_limit;

	/* First verify that we have no illegal param's */
	abort_flag = 0;
	op_err = NULL;

	op_err = sctp_arethere_unrecognized_parameters(m,
	    (offset + sizeof(struct sctp_init_chunk)),
	    &abort_flag, (struct sctp_chunkhdr *)cp);
	if (abort_flag) {
		/* Send an abort and notify peer */
		sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_CAUSE_PROTOCOL_VIOLATION, op_err, SCTP_SO_NOT_LOCKED);
		*abort_no_unlock = 1;
		return (-1);
	}
	asoc = &stcb->asoc;
	/* process the peer's parameters in the INIT-ACK */
	retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net);
	if (retval < 0) {
		return (retval);
	}
	initack_limit = offset + ntohs(cp->ch.chunk_length);
	/* load all addresses */
	if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen,
	    (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh,
	    NULL))) {
		/* Huh, we should abort */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "Load addresses from INIT causes an abort %d\n",
		    retval);
		/*
		 * NOTE(review): vrf_id is in scope here but the literal 0
		 * is passed to sctp_abort_association -- confirm intended.
		 */
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    NULL, 0);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* if the peer doesn't support asconf, flush the asconf queue */
	if (asoc->peer_supports_asconf == 0) {
		struct sctp_asconf_addr *aparam;

		while (!TAILQ_EMPTY(&asoc->asconf_queue)) {
			/* sa_ignore FREED_MEMORY */
			aparam = TAILQ_FIRST(&asoc->asconf_queue);
			TAILQ_REMOVE(&asoc->asconf_queue, aparam, next);
			SCTP_FREE(aparam, SCTP_M_ASC_ADDR);
		}
	}
	/* settle on a common HMAC algorithm for AUTH */
	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
	    stcb->asoc.local_hmacs);
	if (op_err) {
		sctp_queue_op_err(stcb, op_err);
		/* queuing will steal away the mbuf chain to the out queue */
		op_err = NULL;
	}
	/* extract the cookie and queue it to "echo" it back... */
	if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
		    stcb->asoc.overall_error_count,
		    0,
		    SCTP_FROM_SCTP_INPUT,
		    __LINE__);
	}
	stcb->asoc.overall_error_count = 0;
	net->error_count = 0;

	/*
	 * Cancel the INIT timer, We do this first before queueing the
	 * cookie. We always cancel at the primary to assure that we are
	 * canceling the timer started by the INIT which always goes to the
	 * primary.
	 */
	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
	    asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);

	/* calculate the RTO */
	net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy);

	retval = sctp_send_cookie_echo(m, offset, stcb, net);
	if (retval < 0) {
		/*
		 * No cookie, we probably should send a op error. But in any
		 * case if there is no cookie in the INIT-ACK, we can
		 * abandon the peer, its broke.
		 */
		if (retval == -3) {
			/* We abort with an error of missing mandatory param */
			op_err =
			    sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM);
			if (op_err) {
				/*
				 * Expand beyond to include the mandatory
				 * param cookie
				 */
				struct sctp_inv_mandatory_param *mp;

				SCTP_BUF_LEN(op_err) =
				    sizeof(struct sctp_inv_mandatory_param);
				mp = mtod(op_err,
				    struct sctp_inv_mandatory_param *);
				/* Subtract the reserved param */
				mp->length =
				    htons(sizeof(struct sctp_inv_mandatory_param) - 2);
				mp->num_param = htonl(1);
				mp->param = htons(SCTP_STATE_COOKIE);
				mp->resv = 0;
			}
			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
			    sh, op_err, 0);
			*abort_no_unlock = 1;
		}
		return (retval);
	}
	return (0);
}

/*
 * HEARTBEAT-ACK handler: locate the destination net the heartbeat was
 * sent to, confirm it when the echoed random values match, clear its
 * error count, restore reachability (and primary status if it was the
 * primary), take the destination out of CMT PF state, and feed the
 * echoed timestamp into the RTO calculation.
 */
static void
sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sockaddr_storage store;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;
	struct sctp_nets *r_net;
	struct timeval tv;
	int req_prim = 0;

	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
		/* Invalid length */
		return;
	}
	/* sin and sin6 alias the same storage; only one is filled in */
	sin = (struct sockaddr_in *)&store;
	sin6 = (struct sockaddr_in6 *)&store;

	memset(&store, 0, sizeof(store));
	if (cp->heartbeat.hb_info.addr_family == AF_INET &&
	    cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
		sin->sin_family = cp->heartbeat.hb_info.addr_family;
		sin->sin_len = cp->heartbeat.hb_info.addr_len;
		sin->sin_port = stcb->rport;
		memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address,
		    sizeof(sin->sin_addr));
	} else if (cp->heartbeat.hb_info.addr_family == AF_INET6 &&
	    cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
		sin6->sin6_family = cp->heartbeat.hb_info.addr_family;
		sin6->sin6_len = cp->heartbeat.hb_info.addr_len;
		sin6->sin6_port = stcb->rport;
		memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address,
		    sizeof(sin6->sin6_addr));
	} else {
		/* unknown address family in the echoed HB info */
		return;
	}
	r_net = sctp_findnet(stcb, (struct sockaddr *)sin);
	if (r_net == NULL) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
		return;
	}
	if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
	    (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
	    (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
		/*
		 * If the its a HB and it's random value is correct when can
		 * confirm the destination.
		 */
		r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
		if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
			stcb->asoc.primary_destination = r_net;
			r_net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
			r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
			r_net = TAILQ_FIRST(&stcb->asoc.nets);
			if (r_net != stcb->asoc.primary_destination) {
				/*
				 * first one on the list is NOT the primary
				 * sctp_cmpaddr() is much more efficent if
				 * the primary is the first on the list,
				 * make it so.
				 */
				TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
				TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
			}
			req_prim = 1;
		}
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
		    stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
	}
	r_net->error_count = 0;
	r_net->hb_responded = 1;
	/* the peer echoes back the timestamp we sent in the HB */
	tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
	tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
	if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
		r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
		r_net->dest_state |= SCTP_ADDR_REACHABLE;
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
		    SCTP_HEARTBEAT_SUCCESS, (void *)r_net, SCTP_SO_NOT_LOCKED);
		/* now was it the primary? if so restore */
		if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
			(void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net);
		}
	}
	/*
	 * JRS 5/14/07 - If CMT PF is on and the destination is in PF state,
	 * set the destination to active state and set the cwnd to one or
	 * two MTU's based on whether PF1 or PF2 is being used. If a T3
	 * timer is running, for the destination, stop the timer because a
	 * PF-heartbeat was received.
	 *
	 * NOTE(review): this block tests and updates 'net' (the net the
	 * chunk arrived on) rather than 'r_net' (the net the HB was sent
	 * to) -- confirm that is intended.
	 */
	if (sctp_cmt_on_off && sctp_cmt_pf && (net->dest_state & SCTP_ADDR_PF) ==
	    SCTP_ADDR_PF) {
		if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
			    stcb, net,
			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
		}
		net->dest_state &= ~SCTP_ADDR_PF;
		net->cwnd = net->mtu * sctp_cmt_pf;
		SCTPDBG(SCTP_DEBUG_INPUT1, "Destination %p moved from PF to reachable with cwnd %d.\n",
		    net, net->cwnd);
	}
	/* Now lets do a RTO with this */
	r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy);
	/* Mobility adaptation */
	if (req_prim) {
		if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_BASE) ||
		    sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_FASTHANDOFF)) &&
		    sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_PRIM_DELETED)) {

			sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_TIMER + SCTP_LOC_7);
			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
			    SCTP_MOBILITY_FASTHANDOFF)) {
				sctp_assoc_immediate_retrans(stcb,
				    stcb->asoc.primary_destination);
			}
			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
			    SCTP_MOBILITY_BASE)) {
				sctp_move_chunks_from_deleted_prim(stcb,
				    stcb->asoc.primary_destination);
			}
			sctp_delete_prim_timer(stcb->sctp_ep, stcb,
			    stcb->asoc.deleted_primary);
		}
	}
}

/*
 * ABORT handler: stop the receive timer, notify the ULP of the abort
 * and free the association.
 */
static void
sctp_handle_abort(struct sctp_abort_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
	if (stcb == NULL)
		return;

	/* stop any receive timers */
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
	/* notify user of the abort and clean up... */
	sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
	/* free the tcb */
#if defined(SCTP_PANIC_ON_ABORT)
	printf("stcb:%p state:%d rport:%d net:%p\n",
	    stcb, stcb->asoc.state, stcb->rport, net);
	if (!(stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		panic("Received an ABORT");
	} else {
		printf("No panic its in state %x closed\n", stcb->asoc.state);
	}
#endif
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock-order dance: take a refcount so the tcb survives while we
	 * drop its lock to acquire the socket lock first.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	/*
	 * NOTE(review): SCTP_LOC_6 is also used for the timer stop at the
	 * top of this function -- confirm the duplicate location code.
	 */
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
}

/*
 * SHUTDOWN handler: process the cumulative ack it carries, terminate
 * any partial-delivery in progress, move to SHUTDOWN-RECEIVED, and if
 * no data remains queued, answer with SHUTDOWN-ACK and move to
 * SHUTDOWN-ACK-SENT.
 */
static void
sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
{
	struct sctp_association *asoc;
	int some_on_streamwheel;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown: handling SHUTDOWN\n");
	if (stcb == NULL)
		return;
	asoc = &stcb->asoc;
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
		return;
	}
	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
		/* Shutdown NOT the expected size */
		return;
	} else {
		sctp_update_acked(stcb, cp, net, abort_flag);
	}
	if (asoc->control_pdapi) {
		/*
		 * With a normal shutdown we assume the end of last record.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	/* goto SHUTDOWN_RECEIVED state to block new requests */
	if (stcb->sctp_socket) {
		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED);
			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
			/*
			 * notify upper layer that peer has initiated a
			 * shutdown
			 */
			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);

			/* reset time */
			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		}
	}
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * stop the shutdown timer, since we WILL move to
		 * SHUTDOWN-ACK-SENT.
		 */
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
	}
	/* Now is there unsent data on a stream somewhere? */
	some_on_streamwheel = sctp_is_there_unsent_data(stcb);

	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    some_on_streamwheel) {
		/* By returning we will push more data out */
		return;
	} else {
		/* no outstanding data to send, so move on... */
		/* send SHUTDOWN-ACK */
		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
		/* move to SHUTDOWN-ACK-SENT state */
		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
		SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
		/* start SHUTDOWN timer */
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
		    stcb, net);
	}
}

/*
 * SHUTDOWN-ACK handler: terminate any partial delivery, fail any data
 * still queued, send SHUTDOWN-COMPLETE, notify the ULP and free the
 * association.
 */
static void
sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sctp_association *asoc;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

	so = SCTP_INP_SO(stcb->sctp_ep);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;
	/* process according to association state */
	if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
	    (SCTP_GET_STATE(asoc) !=
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* unexpected SHUTDOWN-ACK... so ignore... */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if (asoc->control_pdapi) {
		/*
		 * With a normal shutdown we assume the end of last record.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	/* are the queues empty? */
	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    !TAILQ_EMPTY(&asoc->out_wheel)) {
		sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
	}
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
	/* send SHUTDOWN-COMPLETE */
	sctp_send_shutdown_complete(stcb, net);
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
			/* Set the connected flag to disconnected */
			stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0;
		}
	}
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB but first save off the ep */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
}

/*
 * Skip past the param header and then we will find the chunk that caused the
 * problem. There are two possibilities ASCONF or FWD-TSN other than that and
 * our peer must be broken.
 */
static void
sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
    struct sctp_nets *net)
{
	struct sctp_chunkhdr *chk;

	/* the offending chunk header immediately follows the error cause */
	chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
	switch (chk->chunk_type) {
	case SCTP_ASCONF_ACK:
	case SCTP_ASCONF:
		sctp_asconf_cleanup(stcb, net);
		break;
	case SCTP_FORWARD_CUM_TSN:
		stcb->asoc.peer_supports_prsctp = 0;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "Peer does not support chunk type %d(%x)??\n",
		    chk->chunk_type, (uint32_t) chk->chunk_type);
		break;
	}
}

/*
 * Skip past the param header and then we will find the param that caused the
 * problem. There are a number of param's in a ASCONF OR the prsctp param
 * these will turn off specific features.
 */
static void
sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
{
	struct sctp_paramhdr *pbad;

	/* the offending parameter immediately follows the error cause */
	pbad = phdr + 1;
	switch (ntohs(pbad->param_type)) {
		/* pr-sctp draft */
	case SCTP_PRSCTP_SUPPORTED:
		stcb->asoc.peer_supports_prsctp = 0;
		break;
	case SCTP_SUPPORTED_CHUNK_EXT:
		break;
		/* draft-ietf-tsvwg-addip-sctp */
	case SCTP_ECN_NONCE_SUPPORTED:
		stcb->asoc.peer_supports_ecn_nonce = 0;
		stcb->asoc.ecn_nonce_allowed = 0;
		stcb->asoc.ecn_allowed = 0;
		break;
	case SCTP_ADD_IP_ADDRESS:
	case SCTP_DEL_IP_ADDRESS:
	case SCTP_SET_PRIM_ADDR:
		stcb->asoc.peer_supports_asconf = 0;
		break;
	case SCTP_SUCCESS_REPORT:
	case SCTP_ERROR_CAUSE_IND:
		SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "Turning off ASCONF to this strange peer\n");
		stcb->asoc.peer_supports_asconf = 0;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "Peer does not support param type %d(%x)??\n",
		    pbad->param_type, (uint32_t) pbad->param_type);
		break;
	}
}

/*
 * Walk the error causes of an OPERATION-ERROR chunk and act on each.
 * Returns -1 when the association is freed (too many stale-cookie
 * errors), 0 on a bogus cause length.
 */
static int
sctp_handle_error(struct sctp_chunkhdr *ch,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	int chklen;
	struct sctp_paramhdr *phdr;
	uint16_t error_type;
	uint16_t error_len;
	struct sctp_association *asoc;
	int adjust;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/* parse through all of the errors and process */
	asoc = &stcb->asoc;
	phdr = (struct sctp_paramhdr *)((caddr_t)ch +
	    sizeof(struct sctp_chunkhdr));
	chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
	while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
		/* Process an Error Cause */
		error_type = ntohs(phdr->param_type);
		error_len = ntohs(phdr->param_length);
		if ((error_len > chklen) || (error_len == 0)) {
			/* invalid param length for this param */
			SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
			    chklen, error_len);
			return (0);
		}
		switch (error_type) {
		case SCTP_CAUSE_INVALID_STREAM:
		case SCTP_CAUSE_MISSING_PARAM:
		case SCTP_CAUSE_INVALID_PARAM:
		case SCTP_CAUSE_NO_USER_DATA:
			SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
			    error_type);
			break;
		case SCTP_CAUSE_STALE_COOKIE:
			/*
			 * We only act if we have echoed a cookie and are
			 * waiting.
			 */
			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
				int *p;

				p = (int *)((caddr_t)phdr + sizeof(*phdr));
				/* Save the time doubled */
				asoc->cookie_preserve_req = ntohl(*p) << 1;
				asoc->stale_cookie_count++;
				if (asoc->stale_cookie_count >
				    asoc->max_init_times) {
					sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
					/* now free the asoc */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					so = SCTP_INP_SO(stcb->sctp_ep);
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
					SCTP_SOCKET_LOCK(so, 1);
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
					(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
					    SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					SCTP_SOCKET_UNLOCK(so, 1);
#endif
					return (-1);
				}
				/* blast back to INIT state */
				asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
				asoc->state |= SCTP_STATE_COOKIE_WAIT;

				sctp_stop_all_cookie_timers(stcb);
				sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
			}
			break;
		case SCTP_CAUSE_UNRESOLVABLE_ADDR:
			/*
			 * Nothing we can do here, we don't do hostname
			 * addresses so if the peer does not like my IPv6
			 * (or IPv4 for that matter) it does not matter. If
			 * they don't support that type of address, they can
			 * NOT possibly get that packet type... i.e. with no
			 * IPv6 you can't recieve a IPv6 packet. so we can
			 * safely ignore this one. If we ever added support
			 * for HOSTNAME Addresses, then we would need to do
			 * something here.
			 */
			break;
		case SCTP_CAUSE_UNRECOG_CHUNK:
			sctp_process_unrecog_chunk(stcb, phdr, net);
			break;
		case SCTP_CAUSE_UNRECOG_PARAM:
			sctp_process_unrecog_param(stcb, phdr);
			break;
		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
			/*
			 * We ignore this since the timer will drive out a
			 * new cookie anyway and there timer will drive us
			 * to send a SHUTDOWN_COMPLETE. We can't send one
			 * here since we don't have their tag.
			 */
			break;
		case SCTP_CAUSE_DELETING_LAST_ADDR:
		case SCTP_CAUSE_RESOURCE_SHORTAGE:
		case SCTP_CAUSE_DELETING_SRC_ADDR:
			/*
			 * We should NOT get these here, but in a
			 * ASCONF-ACK.
			 */
			SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n",
			    error_type);
			break;
		case SCTP_CAUSE_OUT_OF_RESC:
			/*
			 * And what, pray tell do we do with the fact that
			 * the peer is out of resources? Not really sure we
			 * could do anything but abort. I suspect this
			 * should have came WITH an abort instead of in a
			 * OP-ERROR.
/*
 * INIT-ACK handler.  Validates the chunk's mandatory fields (initiate tag,
 * a_rwnd, stream counts) and, when the association is in COOKIE-WAIT,
 * processes the INIT-ACK parameters and advances to COOKIE-ECHOED.  In any
 * other state the chunk is a duplicate/stale and is discarded.
 *
 * Returns 0 on success, -1 on discard or abort.  When *abort_no_unlock is
 * set to 1 the association was aborted and the caller must not unlock it.
 */
static int
sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
{
	struct sctp_init_ack *init_ack;
	struct mbuf *op_err;

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_init_ack: handling INIT-ACK\n");

	if (stcb == NULL) {
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_init_ack: TCB is null\n");
		return (-1);
	}
	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
		/* Invalid length */
		/*
		 * NOTE(review): these abort calls pass 0 for the vrf_id
		 * argument rather than the vrf_id parameter — confirm
		 * intended.
		 */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    op_err, 0);
		*abort_no_unlock = 1;
		return (-1);
	}
	init_ack = &cp->init;
	/* validate parameters */
	if (init_ack->initiate_tag == 0) {
		/* protocol error... send an abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    op_err, 0);
		*abort_no_unlock = 1;
		return (-1);
	}
	if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
		/* protocol error... send an abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    op_err, 0);
		*abort_no_unlock = 1;
		return (-1);
	}
	if (init_ack->num_inbound_streams == 0) {
		/* protocol error... send an abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    op_err, 0);
		*abort_no_unlock = 1;
		return (-1);
	}
	if (init_ack->num_outbound_streams == 0) {
		/* protocol error... send an abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    op_err, 0);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* process according to association state... */
	switch (stcb->asoc.state & SCTP_STATE_MASK) {
	case SCTP_STATE_COOKIE_WAIT:
		/* this is the expected state for this chunk */
		/* process the INIT-ACK parameters */
		if (stcb->asoc.primary_destination->dest_state &
		    SCTP_ADDR_UNCONFIRMED) {
			/*
			 * The primary is where we sent the INIT, we can
			 * always consider it confirmed when the INIT-ACK is
			 * returned. Do this before we load addresses
			 * though.
			 */
			stcb->asoc.primary_destination->dest_state &=
			    ~SCTP_ADDR_UNCONFIRMED;
			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
			    stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
		}
		if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb,
		    net, abort_no_unlock, vrf_id) < 0) {
			/* error in parsing parameters */
			return (-1);
		}
		/* update our state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
		SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED);

		/* reset the RTO calc */
		if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
			    stcb->asoc.overall_error_count,
			    0,
			    SCTP_FROM_SCTP_INPUT,
			    __LINE__);
		}
		stcb->asoc.overall_error_count = 0;
		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
		/*
		 * collapse the init timer back in case of a exponential
		 * backoff
		 */
		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
		    stcb, net);
		/*
		 * the send at the end of the inbound data processing will
		 * cause the cookie to be sent
		 */
		break;
	case SCTP_STATE_SHUTDOWN_SENT:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_COOKIE_ECHOED:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_OPEN:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_EMPTY:
	case SCTP_STATE_INUSE:
	default:
		/* incorrect state... discard */
		return (-1);
		break;		/* NOTREACHED */
	}
	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
	return (0);
}
/*
 * handle a state cookie for an existing association m: input packet mbuf
 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a
 * "split" mbuf and the cookie signature does not exist offset: offset into
 * mbuf to the cookie-echo chunk
 *
 * This implements the association-collision table of RFC 4960 Section
 * 5.2.4 (cases A-D) by comparing the tags embedded in the cookie against
 * the association's current vtags and tie-tags.  Returns the stcb on
 * success or NULL when the cookie must be silently discarded / the assoc
 * could not be updated.  The cookie_how[] array records which case was
 * taken, for debugging.
 */
static struct sctp_tcb *
sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
    struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id,
    uint32_t vrf_id)
{
	struct sctp_association *asoc;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	int chk_length;
	int init_offset, initack_offset, i;
	int retval;
	int spec_flag = 0;
	uint32_t how_indx;

	/* I know that the TCB is non-NULL from the caller */
	asoc = &stcb->asoc;
	/* Find the first free slot in the cookie_how debug trace. */
	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
		if (asoc->cookie_how[how_indx] == 0)
			break;
	}
	if (how_indx < sizeof(asoc->cookie_how)) {
		asoc->cookie_how[how_indx] = 1;
	}
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
		/* SHUTDOWN came in after sending INIT-ACK */
		struct mbuf *op_err;
		struct sctp_paramhdr *ph;

		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (op_err == NULL) {
			/* FOOBAR */
			return (NULL);
		}
		/* pre-reserve some space */
#ifdef INET6
		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
#else
		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
#endif
		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
		/* Set the len */
		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
		ph = mtod(op_err, struct sctp_paramhdr *);
		ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN);
		ph->param_length = htons(sizeof(struct sctp_paramhdr));
		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
		    vrf_id);
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 2;
		return (NULL);
	}
	/*
	 * find and validate the INIT chunk in the cookie (peer's info) the
	 * INIT should start after the cookie-echo header struct (chunk
	 * header, state cookie header struct)
	 */
	/* NOTE: this also advances the caller-supplied 'offset' in place. */
	init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);

	init_cp = (struct sctp_init_chunk *)
	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
	    (uint8_t *) & init_buf);
	if (init_cp == NULL) {
		/* could not pull a INIT chunk in cookie */
		return (NULL);
	}
	chk_length = ntohs(init_cp->ch.chunk_length);
	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
		return (NULL);
	}
	/*
	 * find and validate the INIT-ACK chunk in the cookie (my info) the
	 * INIT-ACK follows the INIT chunk
	 */
	initack_offset = init_offset + SCTP_SIZE32(chk_length);
	initack_cp = (struct sctp_init_ack_chunk *)
	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
	    (uint8_t *) & initack_buf);
	if (initack_cp == NULL) {
		/* could not pull INIT-ACK chunk in cookie */
		return (NULL);
	}
	chk_length = ntohs(initack_cp->ch.chunk_length);
	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
		return (NULL);
	}
	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
	    (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
		/*
		 * case D in Section 5.2.4 Table 2: MMAA process accordingly
		 * to get into the OPEN state
		 */
		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
			/*-
			 * Opps, this means that we somehow generated two vtag's
			 * the same. I.e. we did:
			 *  Us               Peer
			 *   <---INIT(tag=a)------
			 *   ----INIT-ACK(tag=t)-->
			 *   ----INIT(tag=t)------> *1
			 *   <---INIT-ACK(tag=a)---
			 *   <----CE(tag=t)------------- *2
			 *
			 * At point *1 we should be generating a different
			 * tag t'. Which means we would throw away the CE and send
			 * ours instead. Basically this is case C (throw away side).
			 */
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 17;
			return (NULL);

		}
		switch SCTP_GET_STATE
			(asoc) {
		case SCTP_STATE_COOKIE_WAIT:
		case SCTP_STATE_COOKIE_ECHOED:
			/*
			 * INIT was sent but got a COOKIE_ECHO with the
			 * correct tags... just accept it...but we must
			 * process the init so that we can make sure we have
			 * the right seq no's.
			 */
			/* First we must process the INIT !! */
			retval = sctp_process_init(init_cp, stcb, net);
			if (retval < 0) {
				if (how_indx < sizeof(asoc->cookie_how))
					asoc->cookie_how[how_indx] = 3;
				return (NULL);
			}
			/* we have already processed the INIT so no problem */
			sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb,
			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
			/* update current state */
			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
			else
				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);

			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
			if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
				    stcb->sctp_ep, stcb, asoc->primary_destination);
			}
			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
			sctp_stop_all_cookie_timers(stcb);
			/* Only a one-to-one style socket becomes "connected". */
			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
			    (inp->sctp_socket->so_qlimit == 0)
			    ) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				struct socket *so;

#endif
				/*
				 * Here is where collision would go if we
				 * did a connect() and instead got a
				 * init/init-ack/cookie done before the
				 * init-ack came back..
				 */
				stcb->sctp_ep->sctp_flags |=
				    SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				/*
				 * Lock-order dance: socket lock before TCB
				 * lock, holding a refcount so the assoc
				 * cannot vanish while unlocked.
				 */
				so = SCTP_INP_SO(stcb->sctp_ep);
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_add_int(&stcb->asoc.refcnt, -1);
				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return (NULL);
				}
#endif
				soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				SCTP_SOCKET_UNLOCK(so, 1);
#endif
			}
			/* notify upper layer */
			*notification = SCTP_NOTIFY_ASSOC_UP;
			/*
			 * since we did not send a HB make sure we don't
			 * double things
			 */
			net->hb_responded = 1;
			net->RTO = sctp_calculate_rto(stcb, asoc, net,
			    &cookie->time_entered, sctp_align_unsafe_makecopy);

			if (stcb->asoc.sctp_autoclose_ticks &&
			    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
				sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
				    inp, stcb, NULL);
			}
			break;
		default:
			/*
			 * we're in the OPEN state (or beyond), so peer must
			 * have simply lost the COOKIE-ACK
			 */
			break;
		}		/* end switch */
		sctp_stop_all_cookie_timers(stcb);
		/*
		 * We ignore the return code here.. not sure if we should
		 * somehow abort.. but we do have an existing asoc. This
		 * really should not fail.
		 */
		if (sctp_load_addresses_from_init(stcb, m, iphlen,
		    init_offset + sizeof(struct sctp_init_chunk),
		    initack_offset, sh, init_src)) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 4;
			return (NULL);
		}
		/* respond with a COOKIE-ACK */
		sctp_toss_old_cookies(stcb, asoc);
		sctp_send_cookie_ack(stcb);
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 5;
		return (stcb);
	}
	if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
	    ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
	    cookie->tie_tag_my_vtag == 0 &&
	    cookie->tie_tag_peer_vtag == 0) {
		/*
		 * case C in Section 5.2.4 Table 2: XMOO silently discard
		 */
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 6;
		return (NULL);
	}
	if (ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag &&
	    (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag ||
	    init_cp->init.initiate_tag == 0)) {
		/*
		 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
		 * should be ok, re-accept peer info
		 */
		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
			/*
			 * Extension of case C. If we hit this, then the
			 * random number generator returned the same vtag
			 * when we first sent our INIT-ACK and when we later
			 * sent our INIT. The side with the seq numbers that
			 * are different will be the one that normnally
			 * would have hit case C. This in effect "extends"
			 * our vtags in this collision case to be 64 bits.
			 * The same collision could occur aka you get both
			 * vtag and seq number the same twice in a row.. but
			 * is much less likely. If it did happen then we
			 * would proceed through and bring up the assoc.. we
			 * may end up with the wrong stream setup however..
			 * which would be bad.. but there is no way to
			 * tell.. until we send on a stream that does not
			 * exist :-)
			 */
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 7;

			return (NULL);
		}
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 8;
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
		sctp_stop_all_cookie_timers(stcb);
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;
		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
			    NULL);
		}
		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
		asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);

		/* Note last_cwr_tsn? where is this used? */
		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
		if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
			/*
			 * Ok the peer probably discarded our data (if we
			 * echoed a cookie+data). So anything on the
			 * sent_queue should be marked for retransmit, we
			 * may not get something to kick us so it COULD
			 * still take a timeout to move these.. but it can't
			 * hurt to mark them.
			 */
			struct sctp_tmit_chunk *chk;

			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if (chk->sent < SCTP_DATAGRAM_RESEND) {
					chk->sent = SCTP_DATAGRAM_RESEND;
					sctp_flight_size_decrease(chk);
					sctp_total_flight_decrease(stcb, chk);
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
					spec_flag++;
				}
			}

		}
		/* process the INIT info (peer's info) */
		retval = sctp_process_init(init_cp, stcb, net);
		if (retval < 0) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 9;
			return (NULL);
		}
		if (sctp_load_addresses_from_init(stcb, m, iphlen,
		    init_offset + sizeof(struct sctp_init_chunk),
		    initack_offset, sh, init_src)) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 10;
			return (NULL);
		}
		if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
		    (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
			*notification = SCTP_NOTIFY_ASSOC_UP;

			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
			    (inp->sctp_socket->so_qlimit == 0)) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				struct socket *so;

#endif
				stcb->sctp_ep->sctp_flags |=
				    SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				/* Same socket-before-TCB lock-order dance as above. */
				so = SCTP_INP_SO(stcb->sctp_ep);
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_add_int(&stcb->asoc.refcnt, -1);
				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return (NULL);
				}
#endif
				soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				SCTP_SOCKET_UNLOCK(so, 1);
#endif
			}
			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
			else
				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		} else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
			SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
		} else {
			SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
		}
		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);
		}
		sctp_stop_all_cookie_timers(stcb);
		sctp_toss_old_cookies(stcb, asoc);
		sctp_send_cookie_ack(stcb);
		if (spec_flag) {
			/*
			 * only if we have retrans set do we do this. What
			 * this call does is get only the COOKIE-ACK out and
			 * then when we return the normal call to
			 * sctp_chunk_output will get the retrans out behind
			 * this.
			 */
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
		}
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 11;

		return (stcb);
	}
	if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
	    ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
	    cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
	    cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
	    cookie->tie_tag_peer_vtag != 0) {
		struct sctpasochead *head;

		/*
		 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
		 */
		/* temp code */
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 12;
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);

		*sac_assoc_id = sctp_get_associd(stcb);
		/* notify upper layer */
		*notification = SCTP_NOTIFY_ASSOC_RESTART;
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		}
		if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
			SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
		} else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
			SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
		}
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);

		} else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
			/* move to OPEN state, if not in SHUTDOWN_SENT */
			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
		}
		/* Re-seed all sequence state from the cookie's INIT-ACK. */
		asoc->pre_open_streams =
		    ntohs(initack_cp->init.num_outbound_streams);
		asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
		asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;

		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
		asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;

		asoc->str_reset_seq_in = asoc->init_seq_number;

		asoc->advanced_peer_ack_point = asoc->last_acked_seq;
		if (asoc->mapping_array) {
			memset(asoc->mapping_array, 0,
			    asoc->mapping_array_size);
		}
		/*
		 * Need the INFO write lock to re-hash; drop the TCB lock
		 * (with a refcount held) to respect lock ordering.
		 */
		SCTP_TCB_UNLOCK(stcb);
		SCTP_INP_INFO_WLOCK();
		SCTP_INP_WLOCK(stcb->sctp_ep);
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		/* send up all the data */
		SCTP_TCB_SEND_LOCK(stcb);

		sctp_report_all_outbound(stcb, 1, SCTP_SO_NOT_LOCKED);
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			stcb->asoc.strmout[i].stream_no = i;
			stcb->asoc.strmout[i].next_sequence_sent = 0;
			stcb->asoc.strmout[i].last_msg_incomplete = 0;
		}
		/* process the INIT-ACK info (my info) */
		asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);

		/* pull from vtag hash */
		LIST_REMOVE(stcb, sctp_asocs);
		/* re-insert to new vtag position */
		head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
		    sctppcbinfo.hashasocmark)];
		/*
		 * put it in the bucket in the vtag hash of assoc's for the
		 * system
		 */
		LIST_INSERT_HEAD(head, stcb, sctp_asocs);

		/* Is this the first restart? */
		if (stcb->asoc.in_restart_hash == 0) {
			/* Ok add it to assoc_id vtag hash */
			head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id,
			    sctppcbinfo.hashrestartmark)];
			LIST_INSERT_HEAD(head, stcb, sctp_tcbrestarhash);
			stcb->asoc.in_restart_hash = 1;
		}
		/* process the INIT info (peer's info) */
		SCTP_TCB_SEND_UNLOCK(stcb);
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
		SCTP_INP_INFO_WUNLOCK();

		retval = sctp_process_init(init_cp, stcb, net);
		if (retval < 0) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 13;

			return (NULL);
		}
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (sctp_load_addresses_from_init(stcb, m, iphlen,
		    init_offset + sizeof(struct sctp_init_chunk),
		    initack_offset, sh, init_src)) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 14;

			return (NULL);
		}
		/* respond with a COOKIE-ACK */
		sctp_stop_all_cookie_timers(stcb);
		sctp_toss_old_cookies(stcb, asoc);
		sctp_send_cookie_ack(stcb);
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 15;

		return (stcb);
	}
	if (how_indx < sizeof(asoc->cookie_how))
		asoc->cookie_how[how_indx] = 16;
	/* all other cases... */
	return (NULL);
}
*/ 1727 return (NULL); 1728 } 1729 1730 1731 /* 1732 * handle a state cookie for a new association m: input packet mbuf chain-- 1733 * assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a "split" mbuf 1734 * and the cookie signature does not exist offset: offset into mbuf to the 1735 * cookie-echo chunk length: length of the cookie chunk to: where the init 1736 * was from returns a new TCB 1737 */ 1738 static struct sctp_tcb * 1739 sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, 1740 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len, 1741 struct sctp_inpcb *inp, struct sctp_nets **netp, 1742 struct sockaddr *init_src, int *notification, 1743 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 1744 uint32_t vrf_id) 1745 { 1746 struct sctp_tcb *stcb; 1747 struct sctp_init_chunk *init_cp, init_buf; 1748 struct sctp_init_ack_chunk *initack_cp, initack_buf; 1749 struct sockaddr_storage sa_store; 1750 struct sockaddr *initack_src = (struct sockaddr *)&sa_store; 1751 struct sockaddr_in *sin; 1752 struct sockaddr_in6 *sin6; 1753 struct sctp_association *asoc; 1754 int chk_length; 1755 int init_offset, initack_offset, initack_limit; 1756 int retval; 1757 int error = 0; 1758 uint32_t old_tag; 1759 uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE]; 1760 1761 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1762 struct socket *so; 1763 1764 so = SCTP_INP_SO(inp); 1765 #endif 1766 1767 /* 1768 * find and validate the INIT chunk in the cookie (peer's info) the 1769 * INIT should start after the cookie-echo header struct (chunk 1770 * header, state cookie header struct) 1771 */ 1772 init_offset = offset + sizeof(struct sctp_cookie_echo_chunk); 1773 init_cp = (struct sctp_init_chunk *) 1774 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk), 1775 (uint8_t *) & init_buf); 1776 if (init_cp == NULL) { 1777 /* could not pull a INIT chunk in cookie */ 1778 SCTPDBG(SCTP_DEBUG_INPUT1, 1779 "process_cookie_new: could not 
pull INIT chunk hdr\n"); 1780 return (NULL); 1781 } 1782 chk_length = ntohs(init_cp->ch.chunk_length); 1783 if (init_cp->ch.chunk_type != SCTP_INITIATION) { 1784 SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n"); 1785 return (NULL); 1786 } 1787 initack_offset = init_offset + SCTP_SIZE32(chk_length); 1788 /* 1789 * find and validate the INIT-ACK chunk in the cookie (my info) the 1790 * INIT-ACK follows the INIT chunk 1791 */ 1792 initack_cp = (struct sctp_init_ack_chunk *) 1793 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk), 1794 (uint8_t *) & initack_buf); 1795 if (initack_cp == NULL) { 1796 /* could not pull INIT-ACK chunk in cookie */ 1797 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n"); 1798 return (NULL); 1799 } 1800 chk_length = ntohs(initack_cp->ch.chunk_length); 1801 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) { 1802 return (NULL); 1803 } 1804 /* 1805 * NOTE: We can't use the INIT_ACK's chk_length to determine the 1806 * "initack_limit" value. This is because the chk_length field 1807 * includes the length of the cookie, but the cookie is omitted when 1808 * the INIT and INIT_ACK are tacked onto the cookie... 1809 */ 1810 initack_limit = offset + cookie_len; 1811 1812 /* 1813 * now that we know the INIT/INIT-ACK are in place, create a new TCB 1814 * and popluate 1815 */ 1816 1817 /* 1818 * Here we do a trick, we set in NULL for the proc/thread argument. 1819 * We do this since in effect we only use the p argument when the 1820 * socket is unbound and we must do an implicit bind. Since we are 1821 * getting a cookie, we cannot be unbound. 1822 */ 1823 stcb = sctp_aloc_assoc(inp, init_src, 0, &error, 1824 ntohl(initack_cp->init.initiate_tag), vrf_id, 1825 (struct thread *)NULL 1826 ); 1827 if (stcb == NULL) { 1828 struct mbuf *op_err; 1829 1830 /* memory problem? 
*/ 1831 SCTPDBG(SCTP_DEBUG_INPUT1, 1832 "process_cookie_new: no room for another TCB!\n"); 1833 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 1834 1835 sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen, 1836 sh, op_err, vrf_id); 1837 return (NULL); 1838 } 1839 /* get the correct sctp_nets */ 1840 if (netp) 1841 *netp = sctp_findnet(stcb, init_src); 1842 1843 asoc = &stcb->asoc; 1844 /* get scope variables out of cookie */ 1845 asoc->ipv4_local_scope = cookie->ipv4_scope; 1846 asoc->site_scope = cookie->site_scope; 1847 asoc->local_scope = cookie->local_scope; 1848 asoc->loopback_scope = cookie->loopback_scope; 1849 1850 if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) || 1851 (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) { 1852 struct mbuf *op_err; 1853 1854 /* 1855 * Houston we have a problem. The EP changed while the 1856 * cookie was in flight. Only recourse is to abort the 1857 * association. 1858 */ 1859 atomic_add_int(&stcb->asoc.refcnt, 1); 1860 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 1861 sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen, 1862 sh, op_err, vrf_id); 1863 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1864 SCTP_TCB_UNLOCK(stcb); 1865 SCTP_SOCKET_LOCK(so, 1); 1866 SCTP_TCB_LOCK(stcb); 1867 #endif 1868 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 1869 SCTP_FROM_SCTP_INPUT + SCTP_LOC_16); 1870 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1871 SCTP_SOCKET_UNLOCK(so, 1); 1872 #endif 1873 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1874 return (NULL); 1875 } 1876 /* process the INIT-ACK info (my info) */ 1877 old_tag = asoc->my_vtag; 1878 asoc->assoc_id = asoc->my_vtag = ntohl(initack_cp->init.initiate_tag); 1879 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 1880 asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams); 1881 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn); 1882 asoc->sending_seq = asoc->asconf_seq_out = 
asoc->str_reset_seq_out = asoc->init_seq_number; 1883 asoc->last_cwr_tsn = asoc->init_seq_number - 1; 1884 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1; 1885 asoc->str_reset_seq_in = asoc->init_seq_number; 1886 1887 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 1888 1889 /* process the INIT info (peer's info) */ 1890 if (netp) 1891 retval = sctp_process_init(init_cp, stcb, *netp); 1892 else 1893 retval = 0; 1894 if (retval < 0) { 1895 atomic_add_int(&stcb->asoc.refcnt, 1); 1896 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1897 SCTP_TCB_UNLOCK(stcb); 1898 SCTP_SOCKET_LOCK(so, 1); 1899 SCTP_TCB_LOCK(stcb); 1900 #endif 1901 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16); 1902 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1903 SCTP_SOCKET_UNLOCK(so, 1); 1904 #endif 1905 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1906 return (NULL); 1907 } 1908 /* load all addresses */ 1909 if (sctp_load_addresses_from_init(stcb, m, iphlen, 1910 init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh, 1911 init_src)) { 1912 atomic_add_int(&stcb->asoc.refcnt, 1); 1913 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1914 SCTP_TCB_UNLOCK(stcb); 1915 SCTP_SOCKET_LOCK(so, 1); 1916 SCTP_TCB_LOCK(stcb); 1917 #endif 1918 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17); 1919 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1920 SCTP_SOCKET_UNLOCK(so, 1); 1921 #endif 1922 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1923 return (NULL); 1924 } 1925 /* 1926 * verify any preceding AUTH chunk that was skipped 1927 */ 1928 /* pull the local authentication parameters from the cookie/init-ack */ 1929 sctp_auth_get_cookie_params(stcb, m, 1930 initack_offset + sizeof(struct sctp_init_ack_chunk), 1931 initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk))); 1932 if (auth_skipped) { 1933 struct sctp_auth_chunk *auth; 1934 1935 auth = 
(struct sctp_auth_chunk *) 1936 sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf); 1937 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) { 1938 /* auth HMAC failed, dump the assoc and packet */ 1939 SCTPDBG(SCTP_DEBUG_AUTH1, 1940 "COOKIE-ECHO: AUTH failed\n"); 1941 atomic_add_int(&stcb->asoc.refcnt, 1); 1942 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1943 SCTP_TCB_UNLOCK(stcb); 1944 SCTP_SOCKET_LOCK(so, 1); 1945 SCTP_TCB_LOCK(stcb); 1946 #endif 1947 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18); 1948 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1949 SCTP_SOCKET_UNLOCK(so, 1); 1950 #endif 1951 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1952 return (NULL); 1953 } else { 1954 /* remaining chunks checked... good to go */ 1955 stcb->asoc.authenticated = 1; 1956 } 1957 } 1958 /* update current state */ 1959 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n"); 1960 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1961 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1962 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1963 stcb->sctp_ep, stcb, asoc->primary_destination); 1964 } 1965 sctp_stop_all_cookie_timers(stcb); 1966 SCTP_STAT_INCR_COUNTER32(sctps_passiveestab); 1967 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1968 1969 /* 1970 * if we're doing ASCONFs, check to see if we have any new local 1971 * addresses that need to get added to the peer (eg. addresses 1972 * changed while cookie echo in flight). This needs to be done 1973 * after we go to the OPEN state to do the correct asconf 1974 * processing. else, make sure we have the correct addresses in our 1975 * lists 1976 */ 1977 1978 /* warning, we re-use sin, sin6, sa_store here! 
*/ 1979 /* pull in local_address (our "from" address) */ 1980 if (cookie->laddr_type == SCTP_IPV4_ADDRESS) { 1981 /* source addr is IPv4 */ 1982 sin = (struct sockaddr_in *)initack_src; 1983 memset(sin, 0, sizeof(*sin)); 1984 sin->sin_family = AF_INET; 1985 sin->sin_len = sizeof(struct sockaddr_in); 1986 sin->sin_addr.s_addr = cookie->laddress[0]; 1987 } else if (cookie->laddr_type == SCTP_IPV6_ADDRESS) { 1988 /* source addr is IPv6 */ 1989 sin6 = (struct sockaddr_in6 *)initack_src; 1990 memset(sin6, 0, sizeof(*sin6)); 1991 sin6->sin6_family = AF_INET6; 1992 sin6->sin6_len = sizeof(struct sockaddr_in6); 1993 sin6->sin6_scope_id = cookie->scope_id; 1994 memcpy(&sin6->sin6_addr, cookie->laddress, 1995 sizeof(sin6->sin6_addr)); 1996 } else { 1997 atomic_add_int(&stcb->asoc.refcnt, 1); 1998 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1999 SCTP_TCB_UNLOCK(stcb); 2000 SCTP_SOCKET_LOCK(so, 1); 2001 SCTP_TCB_LOCK(stcb); 2002 #endif 2003 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19); 2004 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2005 SCTP_SOCKET_UNLOCK(so, 1); 2006 #endif 2007 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2008 return (NULL); 2009 } 2010 2011 /* set up to notify upper layer */ 2012 *notification = SCTP_NOTIFY_ASSOC_UP; 2013 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2014 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 2015 (inp->sctp_socket->so_qlimit == 0)) { 2016 /* 2017 * This is an endpoint that called connect() how it got a 2018 * cookie that is NEW is a bit of a mystery. It must be that 2019 * the INIT was sent, but before it got there.. a complete 2020 * INIT/INIT-ACK/COOKIE arrived. But of course then it 2021 * should have went to the other code.. not here.. oh well.. 2022 * a bit of protection is worth having.. 
2023 */ 2024 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 2025 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2026 atomic_add_int(&stcb->asoc.refcnt, 1); 2027 SCTP_TCB_UNLOCK(stcb); 2028 SCTP_SOCKET_LOCK(so, 1); 2029 SCTP_TCB_LOCK(stcb); 2030 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2031 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 2032 SCTP_SOCKET_UNLOCK(so, 1); 2033 return (NULL); 2034 } 2035 #endif 2036 soisconnected(stcb->sctp_socket); 2037 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2038 SCTP_SOCKET_UNLOCK(so, 1); 2039 #endif 2040 } else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 2041 (inp->sctp_socket->so_qlimit)) { 2042 /* 2043 * We don't want to do anything with this one. Since it is 2044 * the listening guy. The timer will get started for 2045 * accepted connections in the caller. 2046 */ 2047 ; 2048 } 2049 /* since we did not send a HB make sure we don't double things */ 2050 if ((netp) && (*netp)) 2051 (*netp)->hb_responded = 1; 2052 2053 if (stcb->asoc.sctp_autoclose_ticks && 2054 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { 2055 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL); 2056 } 2057 /* calculate the RTT */ 2058 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 2059 if ((netp) && (*netp)) { 2060 (*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp, 2061 &cookie->time_entered, sctp_align_unsafe_makecopy); 2062 } 2063 /* respond with a COOKIE-ACK */ 2064 sctp_send_cookie_ack(stcb); 2065 2066 /* 2067 * check the address lists for any ASCONFs that need to be sent 2068 * AFTER the cookie-ack is sent 2069 */ 2070 sctp_check_address_list(stcb, m, 2071 initack_offset + sizeof(struct sctp_init_ack_chunk), 2072 initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)), 2073 initack_src, cookie->local_scope, cookie->site_scope, 2074 cookie->ipv4_scope, cookie->loopback_scope); 2075 2076 2077 return (stcb); 2078 } 2079 2080 2081 /* 2082 * handles a COOKIE-ECHO 
message stcb: modified to either a new or left as 2083 * existing (non-NULL) TCB 2084 */ 2085 static struct mbuf * 2086 sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, 2087 struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp, 2088 struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp, 2089 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 2090 struct sctp_tcb **locked_tcb, uint32_t vrf_id) 2091 { 2092 struct sctp_state_cookie *cookie; 2093 struct sockaddr_in6 sin6; 2094 struct sockaddr_in sin; 2095 struct sctp_tcb *l_stcb = *stcb; 2096 struct sctp_inpcb *l_inp; 2097 struct sockaddr *to; 2098 sctp_assoc_t sac_restart_id; 2099 struct sctp_pcb *ep; 2100 struct mbuf *m_sig; 2101 uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE]; 2102 uint8_t *sig; 2103 uint8_t cookie_ok = 0; 2104 unsigned int size_of_pkt, sig_offset, cookie_offset; 2105 unsigned int cookie_len; 2106 struct timeval now; 2107 struct timeval time_expires; 2108 struct sockaddr_storage dest_store; 2109 struct sockaddr *localep_sa = (struct sockaddr *)&dest_store; 2110 struct ip *iph; 2111 int notification = 0; 2112 struct sctp_nets *netl; 2113 int had_a_existing_tcb = 0; 2114 2115 SCTPDBG(SCTP_DEBUG_INPUT2, 2116 "sctp_handle_cookie: handling COOKIE-ECHO\n"); 2117 2118 if (inp_p == NULL) { 2119 return (NULL); 2120 } 2121 /* First get the destination address setup too. 
*/ 2122 iph = mtod(m, struct ip *); 2123 switch (iph->ip_v) { 2124 case IPVERSION: 2125 { 2126 /* its IPv4 */ 2127 struct sockaddr_in *lsin; 2128 2129 lsin = (struct sockaddr_in *)(localep_sa); 2130 memset(lsin, 0, sizeof(*lsin)); 2131 lsin->sin_family = AF_INET; 2132 lsin->sin_len = sizeof(*lsin); 2133 lsin->sin_port = sh->dest_port; 2134 lsin->sin_addr.s_addr = iph->ip_dst.s_addr; 2135 size_of_pkt = SCTP_GET_IPV4_LENGTH(iph); 2136 break; 2137 } 2138 #ifdef INET6 2139 case IPV6_VERSION >> 4: 2140 { 2141 /* its IPv6 */ 2142 struct ip6_hdr *ip6; 2143 struct sockaddr_in6 *lsin6; 2144 2145 lsin6 = (struct sockaddr_in6 *)(localep_sa); 2146 memset(lsin6, 0, sizeof(*lsin6)); 2147 lsin6->sin6_family = AF_INET6; 2148 lsin6->sin6_len = sizeof(struct sockaddr_in6); 2149 ip6 = mtod(m, struct ip6_hdr *); 2150 lsin6->sin6_port = sh->dest_port; 2151 lsin6->sin6_addr = ip6->ip6_dst; 2152 size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen; 2153 break; 2154 } 2155 #endif 2156 default: 2157 return (NULL); 2158 } 2159 2160 cookie = &cp->cookie; 2161 cookie_offset = offset + sizeof(struct sctp_chunkhdr); 2162 cookie_len = ntohs(cp->ch.chunk_length); 2163 2164 if ((cookie->peerport != sh->src_port) && 2165 (cookie->myport != sh->dest_port) && 2166 (cookie->my_vtag != sh->v_tag)) { 2167 /* 2168 * invalid ports or bad tag. Note that we always leave the 2169 * v_tag in the header in network order and when we stored 2170 * it in the my_vtag slot we also left it in network order. 2171 * This maintains the match even though it may be in the 2172 * opposite byte order of the machine :-> 2173 */ 2174 return (NULL); 2175 } 2176 if (cookie_len > size_of_pkt || 2177 cookie_len < sizeof(struct sctp_cookie_echo_chunk) + 2178 sizeof(struct sctp_init_chunk) + 2179 sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) { 2180 /* cookie too long! 
or too small */ 2181 return (NULL); 2182 } 2183 /* 2184 * split off the signature into its own mbuf (since it should not be 2185 * calculated in the sctp_hmac_m() call). 2186 */ 2187 sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE; 2188 if (sig_offset > size_of_pkt) { 2189 /* packet not correct size! */ 2190 /* XXX this may already be accounted for earlier... */ 2191 return (NULL); 2192 } 2193 m_sig = m_split(m, sig_offset, M_DONTWAIT); 2194 if (m_sig == NULL) { 2195 /* out of memory or ?? */ 2196 return (NULL); 2197 } 2198 #ifdef SCTP_MBUF_LOGGING 2199 if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) { 2200 struct mbuf *mat; 2201 2202 mat = m_sig; 2203 while (mat) { 2204 if (SCTP_BUF_IS_EXTENDED(mat)) { 2205 sctp_log_mb(mat, SCTP_MBUF_SPLIT); 2206 } 2207 mat = SCTP_BUF_NEXT(mat); 2208 } 2209 } 2210 #endif 2211 2212 /* 2213 * compute the signature/digest for the cookie 2214 */ 2215 ep = &(*inp_p)->sctp_ep; 2216 l_inp = *inp_p; 2217 if (l_stcb) { 2218 SCTP_TCB_UNLOCK(l_stcb); 2219 } 2220 SCTP_INP_RLOCK(l_inp); 2221 if (l_stcb) { 2222 SCTP_TCB_LOCK(l_stcb); 2223 } 2224 /* which cookie is it? 
*/ 2225 if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) && 2226 (ep->current_secret_number != ep->last_secret_number)) { 2227 /* it's the old cookie */ 2228 (void)sctp_hmac_m(SCTP_HMAC, 2229 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 2230 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2231 } else { 2232 /* it's the current cookie */ 2233 (void)sctp_hmac_m(SCTP_HMAC, 2234 (uint8_t *) ep->secret_key[(int)ep->current_secret_number], 2235 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2236 } 2237 /* get the signature */ 2238 SCTP_INP_RUNLOCK(l_inp); 2239 sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig); 2240 if (sig == NULL) { 2241 /* couldn't find signature */ 2242 sctp_m_freem(m_sig); 2243 return (NULL); 2244 } 2245 /* compare the received digest with the computed digest */ 2246 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) { 2247 /* try the old cookie? */ 2248 if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) && 2249 (ep->current_secret_number != ep->last_secret_number)) { 2250 /* compute digest with old */ 2251 (void)sctp_hmac_m(SCTP_HMAC, 2252 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 2253 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2254 /* compare */ 2255 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0) 2256 cookie_ok = 1; 2257 } 2258 } else { 2259 cookie_ok = 1; 2260 } 2261 2262 /* 2263 * Now before we continue we must reconstruct our mbuf so that 2264 * normal processing of any other chunks will work. 
2265 */ 2266 { 2267 struct mbuf *m_at; 2268 2269 m_at = m; 2270 while (SCTP_BUF_NEXT(m_at) != NULL) { 2271 m_at = SCTP_BUF_NEXT(m_at); 2272 } 2273 SCTP_BUF_NEXT(m_at) = m_sig; 2274 } 2275 2276 if (cookie_ok == 0) { 2277 SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n"); 2278 SCTPDBG(SCTP_DEBUG_INPUT2, 2279 "offset = %u, cookie_offset = %u, sig_offset = %u\n", 2280 (uint32_t) offset, cookie_offset, sig_offset); 2281 return (NULL); 2282 } 2283 /* 2284 * check the cookie timestamps to be sure it's not stale 2285 */ 2286 (void)SCTP_GETTIME_TIMEVAL(&now); 2287 /* Expire time is in Ticks, so we convert to seconds */ 2288 time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life); 2289 time_expires.tv_usec = cookie->time_entered.tv_usec; 2290 if (timevalcmp(&now, &time_expires, >)) { 2291 /* cookie is stale! */ 2292 struct mbuf *op_err; 2293 struct sctp_stale_cookie_msg *scm; 2294 uint32_t tim; 2295 2296 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg), 2297 0, M_DONTWAIT, 1, MT_DATA); 2298 if (op_err == NULL) { 2299 /* FOOBAR */ 2300 return (NULL); 2301 } 2302 /* pre-reserve some space */ 2303 #ifdef INET6 2304 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 2305 #else 2306 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip)); 2307 #endif 2308 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 2309 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 2310 2311 /* Set the len */ 2312 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg); 2313 scm = mtod(op_err, struct sctp_stale_cookie_msg *); 2314 scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE); 2315 scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) + 2316 (sizeof(uint32_t)))); 2317 /* seconds to usec */ 2318 tim = (now.tv_sec - time_expires.tv_sec) * 1000000; 2319 /* add in usec */ 2320 if (tim == 0) 2321 tim = now.tv_usec - cookie->time_entered.tv_usec; 2322 scm->time_usec = htonl(tim); 2323 sctp_send_operr_to(m, iphlen, 
op_err, cookie->peers_vtag, 2324 vrf_id); 2325 return (NULL); 2326 } 2327 /* 2328 * Now we must see with the lookup address if we have an existing 2329 * asoc. This will only happen if we were in the COOKIE-WAIT state 2330 * and a INIT collided with us and somewhere the peer sent the 2331 * cookie on another address besides the single address our assoc 2332 * had for him. In this case we will have one of the tie-tags set at 2333 * least AND the address field in the cookie can be used to look it 2334 * up. 2335 */ 2336 to = NULL; 2337 if (cookie->addr_type == SCTP_IPV6_ADDRESS) { 2338 memset(&sin6, 0, sizeof(sin6)); 2339 sin6.sin6_family = AF_INET6; 2340 sin6.sin6_len = sizeof(sin6); 2341 sin6.sin6_port = sh->src_port; 2342 sin6.sin6_scope_id = cookie->scope_id; 2343 memcpy(&sin6.sin6_addr.s6_addr, cookie->address, 2344 sizeof(sin6.sin6_addr.s6_addr)); 2345 to = (struct sockaddr *)&sin6; 2346 } else if (cookie->addr_type == SCTP_IPV4_ADDRESS) { 2347 memset(&sin, 0, sizeof(sin)); 2348 sin.sin_family = AF_INET; 2349 sin.sin_len = sizeof(sin); 2350 sin.sin_port = sh->src_port; 2351 sin.sin_addr.s_addr = cookie->address[0]; 2352 to = (struct sockaddr *)&sin; 2353 } else { 2354 /* This should not happen */ 2355 return (NULL); 2356 } 2357 if ((*stcb == NULL) && to) { 2358 /* Yep, lets check */ 2359 *stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL); 2360 if (*stcb == NULL) { 2361 /* 2362 * We should have only got back the same inp. If we 2363 * got back a different ep we have a problem. The 2364 * original findep got back l_inp and now 2365 */ 2366 if (l_inp != *inp_p) { 2367 SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n"); 2368 } 2369 } else { 2370 if (*locked_tcb == NULL) { 2371 /* 2372 * In this case we found the assoc only 2373 * after we locked the create lock. 
This 2374 * means we are in a colliding case and we 2375 * must make sure that we unlock the tcb if 2376 * its one of the cases where we throw away 2377 * the incoming packets. 2378 */ 2379 *locked_tcb = *stcb; 2380 2381 /* 2382 * We must also increment the inp ref count 2383 * since the ref_count flags was set when we 2384 * did not find the TCB, now we found it 2385 * which reduces the refcount.. we must 2386 * raise it back out to balance it all :-) 2387 */ 2388 SCTP_INP_INCR_REF((*stcb)->sctp_ep); 2389 if ((*stcb)->sctp_ep != l_inp) { 2390 SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n", 2391 (*stcb)->sctp_ep, l_inp); 2392 } 2393 } 2394 } 2395 } 2396 if (to == NULL) 2397 return (NULL); 2398 2399 cookie_len -= SCTP_SIGNATURE_SIZE; 2400 if (*stcb == NULL) { 2401 /* this is the "normal" case... get a new TCB */ 2402 *stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie, 2403 cookie_len, *inp_p, netp, to, ¬ification, 2404 auth_skipped, auth_offset, auth_len, vrf_id); 2405 } else { 2406 /* this is abnormal... cookie-echo on existing TCB */ 2407 had_a_existing_tcb = 1; 2408 *stcb = sctp_process_cookie_existing(m, iphlen, offset, sh, 2409 cookie, cookie_len, *inp_p, *stcb, *netp, to, 2410 ¬ification, &sac_restart_id, vrf_id); 2411 } 2412 2413 if (*stcb == NULL) { 2414 /* still no TCB... must be bad cookie-echo */ 2415 return (NULL); 2416 } 2417 /* 2418 * Ok, we built an association so confirm the address we sent the 2419 * INIT-ACK to. 2420 */ 2421 netl = sctp_findnet(*stcb, to); 2422 /* 2423 * This code should in theory NOT run but 2424 */ 2425 if (netl == NULL) { 2426 /* TSNH! Huh, why do I need to add this address here? 
*/ 2427 int ret; 2428 2429 ret = sctp_add_remote_addr(*stcb, to, SCTP_DONOT_SETSCOPE, 2430 SCTP_IN_COOKIE_PROC); 2431 netl = sctp_findnet(*stcb, to); 2432 } 2433 if (netl) { 2434 if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) { 2435 netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 2436 (void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL, 2437 netl); 2438 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 2439 (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED); 2440 } 2441 } 2442 if (*stcb) { 2443 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p, 2444 *stcb, NULL); 2445 } 2446 if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 2447 if (!had_a_existing_tcb || 2448 (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { 2449 /* 2450 * If we have a NEW cookie or the connect never 2451 * reached the connected state during collision we 2452 * must do the TCP accept thing. 2453 */ 2454 struct socket *so, *oso; 2455 struct sctp_inpcb *inp; 2456 2457 if (notification == SCTP_NOTIFY_ASSOC_RESTART) { 2458 /* 2459 * For a restart we will keep the same 2460 * socket, no need to do anything. I THINK!! 
2461 */ 2462 sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id, SCTP_SO_NOT_LOCKED); 2463 return (m); 2464 } 2465 oso = (*inp_p)->sctp_socket; 2466 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2467 SCTP_TCB_UNLOCK((*stcb)); 2468 so = sonewconn(oso, 0 2469 ); 2470 SCTP_TCB_LOCK((*stcb)); 2471 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2472 2473 if (so == NULL) { 2474 struct mbuf *op_err; 2475 2476 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2477 struct socket *pcb_so; 2478 2479 #endif 2480 /* Too many sockets */ 2481 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n"); 2482 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 2483 sctp_abort_association(*inp_p, NULL, m, iphlen, 2484 sh, op_err, vrf_id); 2485 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2486 pcb_so = SCTP_INP_SO(*inp_p); 2487 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2488 SCTP_TCB_UNLOCK((*stcb)); 2489 SCTP_SOCKET_LOCK(pcb_so, 1); 2490 SCTP_TCB_LOCK((*stcb)); 2491 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2492 #endif 2493 (void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20); 2494 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2495 SCTP_SOCKET_UNLOCK(pcb_so, 1); 2496 #endif 2497 return (NULL); 2498 } 2499 inp = (struct sctp_inpcb *)so->so_pcb; 2500 SCTP_INP_INCR_REF(inp); 2501 /* 2502 * We add the unbound flag here so that if we get an 2503 * soabort() before we get the move_pcb done, we 2504 * will properly cleanup. 
2505 */ 2506 inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE | 2507 SCTP_PCB_FLAGS_CONNECTED | 2508 SCTP_PCB_FLAGS_IN_TCPPOOL | 2509 SCTP_PCB_FLAGS_UNBOUND | 2510 (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) | 2511 SCTP_PCB_FLAGS_DONT_WAKE); 2512 inp->sctp_features = (*inp_p)->sctp_features; 2513 inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features; 2514 inp->sctp_socket = so; 2515 inp->sctp_frag_point = (*inp_p)->sctp_frag_point; 2516 inp->partial_delivery_point = (*inp_p)->partial_delivery_point; 2517 inp->sctp_context = (*inp_p)->sctp_context; 2518 inp->inp_starting_point_for_iterator = NULL; 2519 /* 2520 * copy in the authentication parameters from the 2521 * original endpoint 2522 */ 2523 if (inp->sctp_ep.local_hmacs) 2524 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 2525 inp->sctp_ep.local_hmacs = 2526 sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs); 2527 if (inp->sctp_ep.local_auth_chunks) 2528 sctp_free_chunklist(inp->sctp_ep.local_auth_chunks); 2529 inp->sctp_ep.local_auth_chunks = 2530 sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks); 2531 (void)sctp_copy_skeylist(&(*inp_p)->sctp_ep.shared_keys, 2532 &inp->sctp_ep.shared_keys); 2533 2534 /* 2535 * Now we must move it from one hash table to 2536 * another and get the tcb in the right place. 2537 */ 2538 sctp_move_pcb_and_assoc(*inp_p, inp, *stcb); 2539 2540 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2541 SCTP_TCB_UNLOCK((*stcb)); 2542 2543 sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, 2544 0); 2545 SCTP_TCB_LOCK((*stcb)); 2546 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2547 2548 2549 /* 2550 * now we must check to see if we were aborted while 2551 * the move was going on and the lock/unlock 2552 * happened. 2553 */ 2554 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 2555 /* 2556 * yep it was, we leave the assoc attached 2557 * to the socket since the sctp_inpcb_free() 2558 * call will send an abort for us. 
2559 */ 2560 SCTP_INP_DECR_REF(inp); 2561 return (NULL); 2562 } 2563 SCTP_INP_DECR_REF(inp); 2564 /* Switch over to the new guy */ 2565 *inp_p = inp; 2566 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2567 2568 /* 2569 * Pull it from the incomplete queue and wake the 2570 * guy 2571 */ 2572 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2573 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2574 SCTP_TCB_UNLOCK((*stcb)); 2575 SCTP_SOCKET_LOCK(so, 1); 2576 #endif 2577 soisconnected(so); 2578 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2579 SCTP_TCB_LOCK((*stcb)); 2580 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2581 SCTP_SOCKET_UNLOCK(so, 1); 2582 #endif 2583 return (m); 2584 } 2585 } 2586 if ((notification) && ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) { 2587 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2588 } 2589 return (m); 2590 } 2591 2592 static void 2593 sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp, 2594 struct sctp_tcb *stcb, struct sctp_nets *net) 2595 { 2596 /* cp must not be used, others call this without a c-ack :-) */ 2597 struct sctp_association *asoc; 2598 2599 SCTPDBG(SCTP_DEBUG_INPUT2, 2600 "sctp_handle_cookie_ack: handling COOKIE-ACK\n"); 2601 if (stcb == NULL) 2602 return; 2603 2604 asoc = &stcb->asoc; 2605 2606 sctp_stop_all_cookie_timers(stcb); 2607 /* process according to association state */ 2608 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) { 2609 /* state change only needed when I am in right state */ 2610 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n"); 2611 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 2612 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 2613 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 2614 stcb->sctp_ep, stcb, asoc->primary_destination); 2615 2616 } 2617 /* update RTO */ 2618 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 2619 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 2620 if (asoc->overall_error_count == 0) { 2621 net->RTO = 
sctp_calculate_rto(stcb, asoc, net, 2622 &asoc->time_entered, sctp_align_safe_nocopy); 2623 } 2624 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered); 2625 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2626 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2627 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 2628 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2629 struct socket *so; 2630 2631 #endif 2632 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 2633 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2634 so = SCTP_INP_SO(stcb->sctp_ep); 2635 atomic_add_int(&stcb->asoc.refcnt, 1); 2636 SCTP_TCB_UNLOCK(stcb); 2637 SCTP_SOCKET_LOCK(so, 1); 2638 SCTP_TCB_LOCK(stcb); 2639 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2640 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 2641 SCTP_SOCKET_UNLOCK(so, 1); 2642 return; 2643 } 2644 #endif 2645 soisconnected(stcb->sctp_socket); 2646 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2647 SCTP_SOCKET_UNLOCK(so, 1); 2648 #endif 2649 } 2650 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, 2651 stcb, net); 2652 /* 2653 * since we did not send a HB make sure we don't double 2654 * things 2655 */ 2656 net->hb_responded = 1; 2657 2658 if (stcb->asoc.sctp_autoclose_ticks && 2659 sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) { 2660 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, 2661 stcb->sctp_ep, stcb, NULL); 2662 } 2663 /* 2664 * send ASCONF if parameters are pending and ASCONFs are 2665 * allowed (eg. 
addresses changed when init/cookie echo were 2666 * in flight) 2667 */ 2668 if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) && 2669 (stcb->asoc.peer_supports_asconf) && 2670 (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) { 2671 #ifdef SCTP_TIMER_BASED_ASCONF 2672 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, 2673 stcb->sctp_ep, stcb, 2674 stcb->asoc.primary_destination); 2675 #else 2676 sctp_send_asconf(stcb, stcb->asoc.primary_destination, 2677 SCTP_ADDR_NOT_LOCKED); 2678 #endif 2679 } 2680 } 2681 /* Toss the cookie if I can */ 2682 sctp_toss_old_cookies(stcb, asoc); 2683 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 2684 /* Restart the timer if we have pending data */ 2685 struct sctp_tmit_chunk *chk; 2686 2687 chk = TAILQ_FIRST(&asoc->sent_queue); 2688 if (chk) { 2689 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 2690 stcb, chk->whoTo); 2691 } 2692 } 2693 } 2694 2695 static void 2696 sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp, 2697 struct sctp_tcb *stcb) 2698 { 2699 struct sctp_nets *net; 2700 struct sctp_tmit_chunk *lchk; 2701 uint32_t tsn; 2702 2703 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_ecne_chunk)) { 2704 return; 2705 } 2706 SCTP_STAT_INCR(sctps_recvecne); 2707 tsn = ntohl(cp->tsn); 2708 /* ECN Nonce stuff: need a resync and disable the nonce sum check */ 2709 /* Also we make sure we disable the nonce_wait */ 2710 lchk = TAILQ_FIRST(&stcb->asoc.send_queue); 2711 if (lchk == NULL) { 2712 stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq; 2713 } else { 2714 stcb->asoc.nonce_resync_tsn = lchk->rec.data.TSN_seq; 2715 } 2716 stcb->asoc.nonce_wait_for_ecne = 0; 2717 stcb->asoc.nonce_sum_check = 0; 2718 2719 /* Find where it was sent, if possible */ 2720 net = NULL; 2721 lchk = TAILQ_FIRST(&stcb->asoc.sent_queue); 2722 while (lchk) { 2723 if (lchk->rec.data.TSN_seq == tsn) { 2724 net = lchk->whoTo; 2725 break; 2726 } 2727 if (compare_with_wrap(lchk->rec.data.TSN_seq, tsn, MAX_SEQ)) 2728 break; 2729 lchk = TAILQ_NEXT(lchk, sctp_next); 
2730 } 2731 if (net == NULL) 2732 /* default is we use the primary */ 2733 net = stcb->asoc.primary_destination; 2734 2735 if (compare_with_wrap(tsn, stcb->asoc.last_cwr_tsn, MAX_TSN)) { 2736 /* 2737 * JRS - Use the congestion control given in the pluggable 2738 * CC module 2739 */ 2740 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net); 2741 /* 2742 * we reduce once every RTT. So we will only lower cwnd at 2743 * the next sending seq i.e. the resync_tsn. 2744 */ 2745 stcb->asoc.last_cwr_tsn = stcb->asoc.nonce_resync_tsn; 2746 } 2747 /* 2748 * We always send a CWR this way if our previous one was lost our 2749 * peer will get an update, or if it is not time again to reduce we 2750 * still get the cwr to the peer. 2751 */ 2752 sctp_send_cwr(stcb, net, tsn); 2753 } 2754 2755 static void 2756 sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb) 2757 { 2758 /* 2759 * Here we get a CWR from the peer. We must look in the outqueue and 2760 * make sure that we have a covered ECNE in teh control chunk part. 2761 * If so remove it. 2762 */ 2763 struct sctp_tmit_chunk *chk; 2764 struct sctp_ecne_chunk *ecne; 2765 2766 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 2767 if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) { 2768 continue; 2769 } 2770 /* 2771 * Look for and remove if it is the right TSN. Since there 2772 * is only ONE ECNE on the control queue at any one time we 2773 * don't need to worry about more than one! 
2774 */ 2775 ecne = mtod(chk->data, struct sctp_ecne_chunk *); 2776 if (compare_with_wrap(ntohl(cp->tsn), ntohl(ecne->tsn), 2777 MAX_TSN) || (cp->tsn == ecne->tsn)) { 2778 /* this covers this ECNE, we can remove it */ 2779 stcb->asoc.ecn_echo_cnt_onq--; 2780 TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk, 2781 sctp_next); 2782 if (chk->data) { 2783 sctp_m_freem(chk->data); 2784 chk->data = NULL; 2785 } 2786 stcb->asoc.ctrl_queue_cnt--; 2787 sctp_free_a_chunk(stcb, chk); 2788 break; 2789 } 2790 } 2791 } 2792 2793 static void 2794 sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp, 2795 struct sctp_tcb *stcb, struct sctp_nets *net) 2796 { 2797 struct sctp_association *asoc; 2798 2799 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2800 struct socket *so; 2801 2802 #endif 2803 2804 SCTPDBG(SCTP_DEBUG_INPUT2, 2805 "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n"); 2806 if (stcb == NULL) 2807 return; 2808 2809 asoc = &stcb->asoc; 2810 /* process according to association state */ 2811 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) { 2812 /* unexpected SHUTDOWN-COMPLETE... so ignore... */ 2813 SCTPDBG(SCTP_DEBUG_INPUT2, 2814 "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n"); 2815 SCTP_TCB_UNLOCK(stcb); 2816 return; 2817 } 2818 /* notify upper layer protocol */ 2819 if (stcb->sctp_socket) { 2820 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2821 /* are the queues empty? 
	   they should be */
		if (!TAILQ_EMPTY(&asoc->send_queue) ||
		    !TAILQ_EMPTY(&asoc->sent_queue) ||
		    !TAILQ_EMPTY(&asoc->out_wheel)) {
			/* queues were not empty: report everything unsent/unacked */
			sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
		}
	}
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB */
	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: calls free-asoc\n");
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Apple/lock-testing builds require the socket lock before
	 * sctp_free_assoc(); hold a refcount across the TCB unlock/relock
	 * so the TCB cannot vanish while we acquire the socket lock.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	return;
}

/*
 * Process one chunk reported dropped in a PACKET-DROPPED report and
 * arrange for its retransmission.  'flg' carries the report's chunk
 * flags (SCTP_FROM_MIDDLE_BOX distinguishes router vs. peer reports).
 * Returns 0 on success, -1 if the reported DATA bytes do not match our
 * queued copy (corrupt report).
 */
static int
process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
    struct sctp_nets *net, uint8_t flg)
{
	switch (desc->chunk_type) {
	case SCTP_DATA:
		/* find the tsn to resend (possibly */
		{
			uint32_t tsn;
			struct sctp_tmit_chunk *tp1;

			tsn = ntohl(desc->tsn_ifany);
			/*
			 * First pass: walk the sent queue in TSN order and
			 * stop early once we pass the target TSN.
			 */
			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
			while (tp1) {
				if (tp1->rec.data.TSN_seq == tsn) {
					/* found it */
					break;
				}
				if (compare_with_wrap(tp1->rec.data.TSN_seq, tsn,
				    MAX_TSN)) {
					/* not found */
					tp1 = NULL;
					break;
				}
				tp1 = TAILQ_NEXT(tp1, sctp_next);
			}
			if (tp1 == NULL) {
				/*
				 * Do it the other way , aka without paying
				 * attention to queue seq order.
				 */
				SCTP_STAT_INCR(sctps_pdrpdnfnd);
				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
				while (tp1) {
					if (tp1->rec.data.TSN_seq == tsn) {
						/* found it */
						break;
					}
					tp1 = TAILQ_NEXT(tp1, sctp_next);
				}
			}
			if (tp1 == NULL) {
				SCTP_STAT_INCR(sctps_pdrptsnnf);
			}
			if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
				uint8_t *ddp;

				/*
				 * With a zero peer rwnd we only honor
				 * middle-box reports; a zero-window peer
				 * report means the peer cannot take the
				 * retransmission anyway.
				 */
				if ((stcb->asoc.peers_rwnd == 0) &&
				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
					SCTP_STAT_INCR(sctps_pdrpdiwnp);
					return (0);
				}
				if (stcb->asoc.peers_rwnd == 0 &&
				    (flg & SCTP_FROM_MIDDLE_BOX)) {
					SCTP_STAT_INCR(sctps_pdrpdizrw);
					return (0);
				}
				/*
				 * Verify the reported payload bytes match
				 * our queued copy before trusting the
				 * report.
				 */
				ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+
				    sizeof(struct sctp_data_chunk));
				{
					unsigned int iii;

					for (iii = 0; iii < sizeof(desc->data_bytes);
					    iii++) {
						if (ddp[iii] != desc->data_bytes[iii]) {
							SCTP_STAT_INCR(sctps_pdrpbadd);
							return (-1);
						}
					}
				}
				/*
				 * We zero out the nonce so resync not
				 * needed
				 */
				tp1->rec.data.ect_nonce = 0;

				if (tp1->do_rtt) {
					/*
					 * this guy had a RTO calculation
					 * pending on it, cancel it
					 */
					tp1->do_rtt = 0;
				}
				SCTP_STAT_INCR(sctps_pdrpmark);
				if (tp1->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				tp1->sent = SCTP_DATAGRAM_RESEND;
				/*
				 * mark it as if we were doing a FR, since
				 * we will be getting gap ack reports behind
				 * the info from the router.
				 */
				tp1->rec.data.doing_fast_retransmit = 1;
				/*
				 * mark the tsn with what sequences can
				 * cause a new FR.
				 */
				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
					tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
				} else {
					tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
				}

				/* restart the timer */
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo);

				/* fix counts and things */
				if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
					    tp1->whoTo->flight_size,
					    tp1->book_size,
					    (uintptr_t) stcb,
					    tp1->rec.data.TSN_seq);
				}
				sctp_flight_size_decrease(tp1);
				sctp_total_flight_decrease(stcb, tp1);
			} {
				/* audit code */
				unsigned int audit;

				/*
				 * Cross-check sent_queue_retran_cnt against
				 * an actual walk of both queues; repair it
				 * (unless auditing is compiled in, which
				 * wants to see the discrepancy).
				 */
				audit = 0;
				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
				    sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				if (audit != stcb->asoc.sent_queue_retran_cnt) {
					SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
					    audit, stcb->asoc.sent_queue_retran_cnt);
#ifndef SCTP_AUDITING_ENABLED
					stcb->asoc.sent_queue_retran_cnt = audit;
#endif
				}
			}
		}
		break;
	case SCTP_ASCONF:
		{
			struct sctp_tmit_chunk *asconf;

			/* mark the queued ASCONF (if any) for retransmission */
			TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
					break;
				}
			}
			if (asconf) {
				if (asconf->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				asconf->sent = SCTP_DATAGRAM_RESEND;
				asconf->snd_count--;
			}
		}
		break;
	case SCTP_INITIATION:
		/* resend the INIT */
		stcb->asoc.dropped_special_cnt++;
		if
		    (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
			/*
			 * If we can get it in, in a few attempts we do
			 * this, otherwise we let the timer fire.
			 */
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
			    stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
			sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_SELECTIVE_ACK:
		/* resend the sack */
		sctp_send_sack(stcb);
		break;
	case SCTP_HEARTBEAT_REQUEST:
		/* resend a demand HB */
		if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
			/*
			 * Only retransmit if we KNOW we wont destroy the
			 * tcb
			 */
			(void)sctp_send_hb(stcb, 1, net);
		}
		break;
	case SCTP_SHUTDOWN:
		sctp_send_shutdown(stcb, net);
		break;
	case SCTP_SHUTDOWN_ACK:
		sctp_send_shutdown_ack(stcb, net);
		break;
	case SCTP_COOKIE_ECHO:
		{
			struct sctp_tmit_chunk *cookie;

			/* mark the queued COOKIE-ECHO (if any) for resend */
			cookie = NULL;
			TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
					break;
				}
			}
			if (cookie) {
				if (cookie->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				cookie->sent = SCTP_DATAGRAM_RESEND;
				sctp_stop_all_cookie_timers(stcb);
			}
		}
		break;
	case SCTP_COOKIE_ACK:
		sctp_send_cookie_ack(stcb);
		break;
	case SCTP_ASCONF_ACK:
		/* resend last asconf ack */
		sctp_send_asconf_ack(stcb);
		break;
	case SCTP_FORWARD_CUM_TSN:
		send_forward_tsn(stcb, &stcb->asoc);
		break;
		/* can't do anything with these */
	case SCTP_PACKET_DROPPED:
	case SCTP_INITIATION_ACK:	/* this should not happen */
	case SCTP_HEARTBEAT_ACK:
	case SCTP_ABORT_ASSOCIATION:
	case SCTP_OPERATION_ERROR:
	case SCTP_SHUTDOWN_COMPLETE:
	case SCTP_ECN_ECHO:
	case SCTP_ECN_CWR:
	default:
		break;
	}
	return (0);
}

/*
 * Reset the inbound stream state.  With a non-empty list, only the
 * listed (network-order) stream ids are reset; with number_entries == 0
 * every inbound stream is reset.  last_sequence_delivered is set to
 * 0xffff so the peer's next SSN of 0 is accepted as in order.  The ULP
 * is notified in both cases.
 */
void
sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
{
	int i;
	uint16_t temp;

	/*
	 * We set things to 0xffff since this is the last delivered sequence
	 * and we will be sending in 0 after the reset.
	 */

	if (number_entries) {
		for (i = 0; i < number_entries; i++) {
			temp = ntohs(list[i]);
			if (temp >= stcb->asoc.streamincnt) {
				/* out-of-range stream id: ignore */
				continue;
			}
			stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff;
		}
	} else {
		list = NULL;
		for (i = 0; i < stcb->asoc.streamincnt; i++) {
			stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
		}
	}
	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
}

/*
 * Reset the outbound stream state: zero next_sequence_sent for the
 * listed streams (or all streams when number_entries == 0) and notify
 * the ULP.
 */
static void
sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
{
	int i;

	if (number_entries == 0) {
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			stcb->asoc.strmout[i].next_sequence_sent = 0;
		}
	} else if (number_entries) {
		for (i = 0; i < number_entries; i++) {
			uint16_t temp;

			temp = ntohs(list[i]);
			if (temp >= stcb->asoc.streamoutcnt) {
				/* no such stream */
				continue;
			}
			stcb->asoc.strmout[temp].next_sequence_sent = 0;
		}
	}
	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
}


/*
 * Locate the outstanding stream-reset OUT request with sequence number
 * 'seq' inside the queued str_reset chunk.  A queued chunk holds at most
 * two requests back to back.  If 'bchk' is non-NULL the owning tmit
 * chunk is returned through it.  Returns NULL when no matching request
 * is outstanding (and clears stream_reset_outstanding when the control
 * queue / str_reset slot is empty).
 */
struct sctp_stream_reset_out_request *
sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
{
	struct sctp_association *asoc;
	struct sctp_stream_reset_out_req *req;
	struct sctp_stream_reset_out_request *r;
	struct sctp_tmit_chunk *chk;
	int len, clen;

	asoc = &stcb->asoc;
	if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
		asoc->stream_reset_outstanding = 0;
		return (NULL);
	}
	if
	    (stcb->asoc.str_reset == NULL) {
		asoc->stream_reset_outstanding = 0;
		return (NULL);
	}
	chk = stcb->asoc.str_reset;
	if (chk->data == NULL) {
		return (NULL);
	}
	if (bchk) {
		/* he wants a copy of the chk pointer */
		*bchk = chk;
	}
	clen = chk->send_size;
	req = mtod(chk->data, struct sctp_stream_reset_out_req *);
	r = &req->sr_req;
	if (ntohl(r->request_seq) == seq) {
		/* found it */
		return (r);
	}
	len = SCTP_SIZE32(ntohs(r->ph.param_length));
	if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
		/* move to the next one, there can only be a max of two */
		r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len);
		if (ntohl(r->request_seq) == seq) {
			return (r);
		}
	}
	/* that seq is not here */
	return (NULL);
}

/*
 * Tear down the pending stream-reset request chunk: stop the STRRESET
 * timer, unlink the chunk from the control send queue, free its data
 * and clear asoc.str_reset.  No-op if nothing is pending.
 */
static void
sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk = stcb->asoc.str_reset;

	if (stcb->asoc.str_reset == NULL) {
		return;
	}
	asoc = &stcb->asoc;

	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
	TAILQ_REMOVE(&asoc->control_send_queue,
	    chk,
	    sctp_next);
	if (chk->data) {
		sctp_m_freem(chk->data);
		chk->data = NULL;
	}
	asoc->ctrl_queue_cnt--;
	sctp_free_a_chunk(stcb, chk);
	/* sa_ignore NO_NULL_CHK */
	stcb->asoc.str_reset = NULL;
}


/*
 * Process a stream-reset RESPONSE (or an implicit ack, respin == NULL)
 * from the peer for our outstanding request 'seq' with result 'action'.
 * Returns 1 if handling a TSN-reset response aborted the association,
 * 0 otherwise.
 */
static int
sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
    uint32_t seq, uint32_t action,
    struct sctp_stream_reset_response *respin)
{
	uint16_t type;
	int lparm_len;
	struct sctp_association *asoc = &stcb->asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_stream_reset_out_request *srparam;
	int number_entries;

	if (asoc->stream_reset_outstanding == 0) {
		/* duplicate */
		return (0);
	}
	if (seq == stcb->asoc.str_reset_seq_out) {
		srparam = sctp_find_stream_reset(stcb, seq, &chk);
		if (srparam) {
			stcb->asoc.str_reset_seq_out++;
			type = ntohs(srparam->ph.param_type);
			lparm_len = ntohs(srparam->ph.param_length);
			if (type == SCTP_STR_RESET_OUT_REQUEST) {
				/* peer answered our outgoing-stream reset */
				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
				asoc->stream_reset_out_is_outstanding = 0;
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_PERFORMED) {
					/* do it */
					sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams);
				} else {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
				}
			} else if (type == SCTP_STR_RESET_IN_REQUEST) {
				/* Answered my request */
				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action != SCTP_STREAM_RESET_PERFORMED) {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
				}
			} else if (type == SCTP_STR_RESET_TSN_REQUEST) {
				/**
				 * a) Adopt the new in tsn.
				 * b) reset the map
				 * c) Adopt the new out-tsn
				 */
				struct sctp_stream_reset_response_tsn *resp;
				struct sctp_forward_tsn_chunk fwdtsn;
				int abort_flag = 0;

				if (respin == NULL) {
					/*
					 * huh? A TSN response needs the
					 * response body; an implicit ack
					 * cannot carry one.
					 */
					return (0);
				}
				if (action == SCTP_STREAM_RESET_PERFORMED) {
					resp = (struct sctp_stream_reset_response_tsn *)respin;
					asoc->stream_reset_outstanding--;
					/*
					 * Run a synthetic FWD-TSN up to one
					 * before the peer's new sending TSN
					 * to flush old reassembly state.
					 */
					fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
					fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
					fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
					sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
					if (abort_flag) {
						return (1);
					}
					/* re-base the inbound TSN map on the new TSN */
					stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
					stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
					stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
					memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
					stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
					stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;

					sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
					sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);

				}
			}
			/* get rid of the request and get the request flags */
			if (asoc->stream_reset_outstanding == 0) {
				sctp_clean_up_stream_reset(stcb);
			}
		}
	}
	return (0);
}

/*
 * Handle a peer's IN-request: the peer asks us to reset (some of) OUR
 * outgoing streams.  We respond by queueing an OUT-request of our own
 * (or a result code when we must deny/defer).  'trunc' is set when the
 * request did not fit our buffer.
 */
static void
sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_in_request *req, int trunc)
{
	uint32_t seq;
	int len, i;
	int number_entries;
	uint16_t temp;

	/*
	 * peer wants me to send a str-reset to him for my outgoing seq's if
	 * seq_in is right.
	 */
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		if (trunc) {
			/* Can't do it, since they exceeded our buffer size */
			asoc->last_reset_action[1] = asoc->last_reset_action[0];
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		} else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
			len = ntohs(req->ph.param_length);
			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
			/*
			 * convert stream ids to host order in place before
			 * re-emitting them in our OUT-request
			 */
			for (i = 0; i < number_entries; i++) {
				temp = ntohs(req->list_of_streams[i]);
				req->list_of_streams[i] = temp;
			}
			/* move the reset action back one */
			asoc->last_reset_action[1] = asoc->last_reset_action[0];
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
			sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams,
			    asoc->str_reset_seq_out,
			    seq, (asoc->sending_seq - 1));
			asoc->stream_reset_out_is_outstanding = 1;
			asoc->str_reset = chk;
			sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
			stcb->asoc.stream_reset_outstanding++;
		} else {
			/* Can't do it, since we have sent one out */
			asoc->last_reset_action[1] = asoc->last_reset_action[0];
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER;
			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		}
		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* retransmitted request: echo the previous result */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
	}
}

/*
 * Handle a peer's TSN-reset request: reset every stream in both
 * directions and re-base both TSN spaces.  Returns 1 if the synthetic
 * FWD-TSN processing aborted the association, else 0.
 */
static int
sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_tsn_request *req)
{
	/* reset all in and out and update the tsn */
	/*
	 * A) reset my str-seq's on in and out. B) Select a receive next,
	 * and set cum-ack to it. Also process this selected number as a
	 * fwd-tsn as well. C) set in the response my next sending seq.
	 */
	struct sctp_forward_tsn_chunk fwdtsn;
	struct sctp_association *asoc = &stcb->asoc;
	int abort_flag = 0;
	uint32_t seq;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		/* flush pending reassembly state with a synthetic FWD-TSN */
		fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
		fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
		fwdtsn.ch.chunk_flags = 0;
		fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
		sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
		if (abort_flag) {
			return (1);
		}
		/* jump the inbound TSN space forward and clear the map */
		stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
		stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
		stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
		atomic_add_int(&stcb->asoc.sending_seq, 1);
		/* save off historical data for retrans */
		stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0];
		stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq;
		stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0];
		stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn;

		sctp_add_stream_reset_result_tsn(chk,
		    ntohl(req->request_seq),
		    SCTP_STREAM_RESET_PERFORMED,
		    stcb->asoc.sending_seq,
		    stcb->asoc.mapping_array_base_tsn);
		sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
		sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
		stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;

		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* retransmitted request: replay saved result and history */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    stcb->asoc.last_sending_seq[0],
		    stcb->asoc.last_base_tsnsent[0]
		    );
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
		    stcb->asoc.last_sending_seq[1],
		    stcb->asoc.last_base_tsnsent[1]
		    );
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
	}
	return (0);
}

/*
 * Handle a peer's OUT-request: the peer is resetting its outgoing
 * streams, i.e. OUR incoming streams.  If the reset TSN has already
 * arrived we reset immediately; otherwise the request is queued until
 * data up to that TSN has been delivered.
 */
static void
sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_out_request *req, int trunc)
{
	uint32_t seq, tsn;
	int number_entries, len;
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);

	/* now if its not a duplicate we process it */
	if (asoc->str_reset_seq_in == seq) {
		len = ntohs(req->ph.param_length);
		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
		/*
		 * the sender is resetting, handle the list issue.. we must
		 * a) verify if we can do the reset, if so no problem b) If
		 * we can't do the reset we must copy the request. c) queue
		 * it, and setup the data in processor to trigger it off
		 * when needed and dequeue all the queued data.
		 */
		tsn = ntohl(req->send_reset_at_tsn);

		/* move the reset action back one */
		asoc->last_reset_action[1] = asoc->last_reset_action[0];
		if (trunc) {
			/* request was too large for our buffer: deny */
			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
		} else if ((tsn == asoc->cumulative_tsn) ||
		    (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN))) {
			/* we can do it now */
			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
		} else {
			/*
			 * we must queue it up and thus wait for the TSN's
			 * to arrive that are at or before tsn
			 */
			struct sctp_stream_reset_list *liste;
			int siz;

			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
			    siz, SCTP_M_STRESET);
			if (liste == NULL) {
				/* gak out of memory */
				sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
				asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
				return;
			}
			liste->tsn = tsn;
			liste->number_entries = number_entries;
			memcpy(&liste->req, req,
			    (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t))));
			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
		}
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
	}
}

/*
 * Top-level STREAM-RESET chunk handler.  Walks every parameter in the
 * chunk, dispatching to the request/response handlers above, and builds
 * a response chunk that is queued on the control send queue.  Returns
 * non-zero when processing aborted the association.
 */
#ifdef __GNUC__
__attribute__((noinline))
#endif
static int
sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
    struct sctp_stream_reset_out_req *sr_req)
{
	int chk_length, param_len, ptype;
	struct sctp_paramhdr pstore;
	uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];

	uint32_t seq;
	int num_req = 0;
	int trunc = 0;
	struct sctp_tmit_chunk *chk;
	struct sctp_chunkhdr *ch;
	struct sctp_paramhdr *ph;
	int ret_code = 0;
	int num_param = 0;

	/* now it may be a reset or a reset-response */
	chk_length = ntohs(sr_req->ch.chunk_length);

	/* setup for adding the response */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return (ret_code);
	}
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = &stcb->asoc;
	chk->no_fr_allowed = 0;
	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
	chk->book_size_scale = 0;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
		/* shared failure exit: free the response chunk and bail */
strres_nochunk:
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk);
		return (ret_code);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);

	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = stcb->asoc.primary_destination;
	atomic_add_int(&chk->whoTo->ref_count, 1);

	ch =
	    mtod(chk->data, struct sctp_chunkhdr *);
	ch->chunk_type = SCTP_STREAM_RESET;
	ch->chunk_flags = 0;
	ch->chunk_length = htons(chk->send_size);
	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
	offset += sizeof(struct sctp_chunkhdr);
	/* iterate over every parameter remaining in the chunk */
	while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) {
		/* peek at the param header first to learn its length/type */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *) & pstore);
		if (ph == NULL)
			break;
		param_len = ntohs(ph->param_length);
		if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) {
			/* bad param */
			break;
		}
		/* pull (up to sizeof(cstore) bytes of) the full parameter */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, (int)sizeof(cstore)),
		    (uint8_t *) & cstore);
		ptype = ntohs(ph->param_type);
		num_param++;
		if (param_len > (int)sizeof(cstore)) {
			/* parameter larger than our buffer: flag truncation */
			trunc = 1;
		} else {
			trunc = 0;
		}

		if (num_param > SCTP_MAX_RESET_PARAMS) {
			/* hit the max of parameters already sorry..
			 */
			break;
		}
		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
			struct sctp_stream_reset_out_request *req_out;

			req_out = (struct sctp_stream_reset_out_request *)ph;
			num_req++;
			if (stcb->asoc.stream_reset_outstanding) {
				seq = ntohl(req_out->response_seq);
				if (seq == stcb->asoc.str_reset_seq_out) {
					/* implicit ack */
					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL);
				}
			}
			sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
			struct sctp_stream_reset_in_request *req_in;

			num_req++;

			req_in = (struct sctp_stream_reset_in_request *)ph;

			sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
			struct sctp_stream_reset_tsn_request *req_tsn;

			num_req++;
			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;

			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
				/* association aborted during TSN reset */
				ret_code = 1;
				goto strres_nochunk;
			}
			/* no more */
			break;
		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
			struct sctp_stream_reset_response *resp;
			uint32_t result;

			resp = (struct sctp_stream_reset_response *)ph;
			seq = ntohl(resp->response_seq);
			result = ntohl(resp->result);
			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
				ret_code = 1;
				goto strres_nochunk;
			}
		} else {
			/* unknown parameter type: stop processing */
			break;
		}
		offset += SCTP_SIZE32(param_len);
		chk_length -= SCTP_SIZE32(param_len);
	}
	if (num_req == 0) {
		/* we have no response free the stuff */
		goto strres_nochunk;
	}
	/* ok we have a chunk to link in */
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
	    chk,
	    sctp_next);
	stcb->asoc.ctrl_queue_cnt++;
	return (ret_code);
}

/*
 * Handle a router or endpoints report of a packet loss,
 there are two ways
 * to handle this, either we get the whole packet and must dissect it
 * ourselves (possibly with truncation and or corruption) or it is a summary
 * from a middle box that did the dissecting for us.
 */
static void
sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
{
	uint32_t bottle_bw, on_queue;
	uint16_t trunc_len;
	unsigned int chlen;
	unsigned int at;
	struct sctp_chunk_desc desc;
	struct sctp_chunkhdr *ch;

	chlen = ntohs(cp->ch.chunk_length);
	chlen -= sizeof(struct sctp_pktdrop_chunk);
	/* XXX possible chlen underflow */
	if (chlen == 0) {
		/* bandwidth-only report, no embedded packet to dissect */
		ch = NULL;
		if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
			SCTP_STAT_INCR(sctps_pdrpbwrpt);
	} else {
		/* the dropped packet's chunks follow its SCTP header */
		ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
		chlen -= sizeof(struct sctphdr);
		/* XXX possible chlen underflow */
		memset(&desc, 0, sizeof(desc));
	}
	trunc_len = (uint16_t) ntohs(cp->trunc_len);
	if (trunc_len > limit) {
		trunc_len = limit;
	}
	/* now the chunks themselves */
	while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
		desc.chunk_type = ch->chunk_type;
		/* get amount we need to move */
		at = ntohs(ch->chunk_length);
		if (at < sizeof(struct sctp_chunkhdr)) {
			/* corrupt chunk, maybe at the end? */
			SCTP_STAT_INCR(sctps_pdrpcrupt);
			break;
		}
		if (trunc_len == 0) {
			/* we are supposed to have all of it */
			if (at > chlen) {
				/* corrupt skip it */
				SCTP_STAT_INCR(sctps_pdrpcrupt);
				break;
			}
		} else {
			/*
			 * is there enough of it left (report was
			 * truncated)?
			 */
			if (desc.chunk_type == SCTP_DATA) {
				if (chlen < (sizeof(struct sctp_data_chunk) +
				    sizeof(desc.data_bytes))) {
					break;
				}
			} else {
				if (chlen < sizeof(struct sctp_chunkhdr)) {
					break;
				}
			}
		}
		if (desc.chunk_type == SCTP_DATA) {
			/* can we get out the tsn? */
			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
				SCTP_STAT_INCR(sctps_pdrpmbda);

			if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
				/* yep */
				struct sctp_data_chunk *dcp;
				uint8_t *ddp;
				unsigned int iii;

				/*
				 * copy the first payload bytes so
				 * process_chunk_drop() can verify the
				 * report against our queued copy
				 */
				dcp = (struct sctp_data_chunk *)ch;
				ddp = (uint8_t *) (dcp + 1);
				for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
					desc.data_bytes[iii] = ddp[iii];
				}
				desc.tsn_ifany = dcp->dp.tsn;
			} else {
				/* nope we are done. */
				SCTP_STAT_INCR(sctps_pdrpnedat);
				break;
			}
		} else {
			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
				SCTP_STAT_INCR(sctps_pdrpmbct);
		}

		if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
			SCTP_STAT_INCR(sctps_pdrppdbrk);
			break;
		}
		if (SCTP_SIZE32(at) > chlen) {
			break;
		}
		chlen -= SCTP_SIZE32(at);
		if (chlen < sizeof(struct sctp_chunkhdr)) {
			/* done, none left */
			break;
		}
		ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
	}
	/* Now update any rwnd --- possibly */
	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
		/* From a peer, we get a rwnd report */
		uint32_t a_rwnd;

		SCTP_STAT_INCR(sctps_pdrpfehos);

		bottle_bw = ntohl(cp->bottle_bw);
		on_queue = ntohl(cp->current_onq);
		if (bottle_bw && on_queue) {
			/* a rwnd report is in here */
			if (bottle_bw > on_queue)
				a_rwnd = bottle_bw - on_queue;
			else
				a_rwnd = 0;

			if (a_rwnd == 0)
				stcb->asoc.peers_rwnd = 0;
			else {
				/* discount bytes we already have in flight */
				if (a_rwnd > stcb->asoc.total_flight) {
					stcb->asoc.peers_rwnd =
					    a_rwnd - stcb->asoc.total_flight;
				} else {
					stcb->asoc.peers_rwnd = 0;
				}
				if (stcb->asoc.peers_rwnd <
				    stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
					/* SWS sender side engages */
					stcb->asoc.peers_rwnd = 0;
				}
			}
		}
	} else {
		SCTP_STAT_INCR(sctps_pdrpfmbox);
	}

	/* now middle boxes in sat networks get a cwnd bump */
	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
	    (stcb->asoc.sat_t3_loss_recovery == 0) &&
	    (stcb->asoc.sat_network)) {
		/*
		 * This is debateable but for sat networks it makes sense
		 * Note if a T3 timer has went off, we will prohibit any
		 * changes to cwnd until we exit the t3 loss recovery.
		 */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb,
		    net, cp, &bottle_bw, &on_queue);
	}
}

/*
 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to
 * still contain IP/SCTP header - stcb: is the tcb found for this packet -
 * offset: offset into the mbuf chain to first chunkhdr - length: is the
 * length of the complete packet outputs: - length: modified to remaining
 * length after control processing - netp: modified to new sctp_nets after
 * cookie-echo processing - return NULL to discard the packet (ie. no asoc,
 * bad packet,...)
 * otherwise return the tcb for this packet
 */
/*
 * NOTE(review): locking contract (inferred from the unlock sites below —
 * confirm against callers): the caller holds the TCB lock on *stcb when it
 * is non-NULL; every NULL-returning path unlocks locked_tcb, while a
 * non-NULL return hands the (possibly different, cookie-echo-swapped) TCB
 * back to the caller still locked.
 */
#ifdef __GNUC__
__attribute__((noinline))
#endif
static struct sctp_tcb *
sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
    struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
    struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
    uint32_t vrf_id)
{
	struct sctp_association *asoc;
	uint32_t vtag_in;
	int num_chunks = 0;	/* number of control chunks processed */
	uint32_t chk_length;
	int ret;
	int abort_no_unlock = 0;

	/*
	 * How big should this be, and should it be alloc'd? Lets try the
	 * d-mtu-ceiling for now (2k) and that should hopefully work ...
	 * until we get into jumbo grams and such..
	 */
	uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
	struct sctp_tcb *locked_tcb = stcb;
	int got_auth = 0;
	uint32_t auth_offset = 0, auth_len = 0;
	int auth_skipped = 0;
	int asconf_cnt = 0;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
	    iphlen, *offset, length, stcb);

	/* validate chunk header length... */
	if (ntohs(ch->chunk_length) < sizeof(*ch)) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
		    ntohs(ch->chunk_length));
		if (locked_tcb) {
			SCTP_TCB_UNLOCK(locked_tcb);
		}
		return (NULL);
	}
	/*
	 * validate the verification tag
	 */
	vtag_in = ntohl(sh->v_tag);

	if (locked_tcb) {
		SCTP_TCB_LOCK_ASSERT(locked_tcb);
	}
	if (ch->chunk_type == SCTP_INITIATION) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
		    ntohs(ch->chunk_length), vtag_in);
		if (vtag_in != 0) {
			/* protocol error- silently discard... */
			/* RFC 4960: an INIT must carry verification tag 0 */
			SCTP_STAT_INCR(sctps_badvtag);
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
		}
	} else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
		/*
		 * If there is no stcb, skip the AUTH chunk and process
		 * later after a stcb is found (to validate the lookup was
		 * valid.
		 */
		if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
		    (stcb == NULL) && !sctp_auth_disable) {
			/* save this chunk for later processing */
			auth_skipped = 1;
			auth_offset = *offset;
			auth_len = ntohs(ch->chunk_length);

			/* (temporarily) move past this chunk */
			*offset += SCTP_SIZE32(auth_len);
			if (*offset >= length) {
				/* no more data left in the mbuf chain */
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
			    sizeof(struct sctp_chunkhdr), chunk_buf);
		}
		if (ch == NULL) {
			/* Help */
			*offset = length;
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
		}
		if (ch->chunk_type == SCTP_COOKIE_ECHO) {
			goto process_control_chunks;
		}
		/*
		 * first check if it's an ASCONF with an unknown src addr we
		 * need to look inside to find the association
		 */
		if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
			struct sctp_chunkhdr *asconf_ch = ch;
			uint32_t asconf_offset = 0, asconf_len = 0;

			/* inp's refcount may be reduced */
			SCTP_INP_INCR_REF(inp);

			asconf_offset = *offset;
			do {
				asconf_len = ntohs(asconf_ch->chunk_length);
				if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
					break;
				stcb = sctp_findassociation_ep_asconf(m, iphlen,
				    *offset, sh, &inp, netp);
				if (stcb != NULL)
					break;
				asconf_offset += SCTP_SIZE32(asconf_len);
				asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
				    sizeof(struct sctp_chunkhdr), chunk_buf);
			} while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
			if (stcb == NULL) {
				/*
				 * reduce inp's refcount if not reduced in
				 * sctp_findassociation_ep_asconf().
				 */
				SCTP_INP_DECR_REF(inp);
			} else {
				/* the lookup returns the tcb locked */
				locked_tcb = stcb;
			}

			/* now go back and verify any auth chunk to be sure */
			if (auth_skipped && (stcb != NULL)) {
				struct sctp_auth_chunk *auth;

				auth = (struct sctp_auth_chunk *)
				    sctp_m_getptr(m, auth_offset,
				    auth_len, chunk_buf);
				got_auth = 1;
				auth_skipped = 0;
				if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
				    auth_offset)) {
					/* auth HMAC failed so dump it */
					*offset = length;
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					return (NULL);
				} else {
					/* remaining chunks are HMAC checked */
					stcb->asoc.authenticated = 1;
				}
			}
		}
		if (stcb == NULL) {
			/* no association, so it's out of the blue... */
			sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL,
			    vrf_id);
			*offset = length;
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
		}
		asoc = &stcb->asoc;
		/* ABORT and SHUTDOWN can use either v_tag... */
		if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
		    (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
		    (ch->chunk_type == SCTP_PACKET_DROPPED)) {
			if ((vtag_in == asoc->my_vtag) ||
			    ((ch->chunk_flags & SCTP_HAD_NO_TCB) &&
			    (vtag_in == asoc->peer_vtag))) {
				/* this is valid */
			} else {
				/* drop this packet... */
				SCTP_STAT_INCR(sctps_badvtag);
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
		} else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
			if (vtag_in != asoc->my_vtag) {
				/*
				 * this could be a stale SHUTDOWN-ACK or the
				 * peer never got the SHUTDOWN-COMPLETE and
				 * is still hung; we have started a new asoc
				 * but it won't complete until the shutdown
				 * is completed
				 */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				sctp_handle_ootb(m, iphlen, *offset, sh, inp,
				    NULL, vrf_id);
				return (NULL);
			}
		} else {
			/* for all other chunks, vtag must match */
			if (vtag_in != asoc->my_vtag) {
				/* invalid vtag... */
				SCTPDBG(SCTP_DEBUG_INPUT3,
				    "invalid vtag: %xh, expect %xh\n",
				    vtag_in, asoc->my_vtag);
				SCTP_STAT_INCR(sctps_badvtag);
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
		}
	}			/* end if !SCTP_COOKIE_ECHO */
	/*
	 * process all control chunks...
	 */
	if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
	    (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
		/* implied cookie-ack.. we must have lost the ack */
		if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
			    stcb->asoc.overall_error_count,
			    0,
			    SCTP_FROM_SCTP_INPUT,
			    __LINE__);
		}
		stcb->asoc.overall_error_count = 0;
		sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
		    *netp);
	}
process_control_chunks:
	while (IS_SCTP_CONTROL(ch)) {
		/* validate chunk length */
		chk_length = ntohs(ch->chunk_length);
		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
		    ch->chunk_type, chk_length);
		SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
		if (chk_length < sizeof(*ch) ||
		    (*offset + (int)chk_length) > length) {
			*offset = length;
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
		}
		SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
		/*
		 * INIT-ACK only gets the init ack "header" portion only
		 * because we don't have to process the peer's COOKIE. All
		 * others get a complete chunk.
		 */
		if ((ch->chunk_type == SCTP_INITIATION_ACK) ||
		    (ch->chunk_type == SCTP_INITIATION)) {
			/* get an init-ack chunk */
			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
			    sizeof(struct sctp_init_ack_chunk), chunk_buf);
			if (ch == NULL) {
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
		} else {
			/* For cookies and all other chunks. */
			if (chk_length > sizeof(chunk_buf)) {
				/*
				 * use just the size of the chunk buffer so
				 * the front part of our chunks fit in
				 * contiguous space up to the chunk buffer
				 * size (508 bytes). For chunks that need to
				 * get more than that they must use the
				 * sctp_m_getptr() function or other means
				 * (e.g. know how to parse mbuf chains).
				 * Cookies do this already.
				 */
				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
				    (sizeof(chunk_buf) - 4),
				    chunk_buf);
				if (ch == NULL) {
					*offset = length;
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					return (NULL);
				}
			} else {
				/* We can fit it all */
				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
				    chk_length, chunk_buf);
				if (ch == NULL) {
					SCTP_PRINTF("sctp_process_control: Can't get the all data....\n");
					*offset = length;
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					return (NULL);
				}
			}
		}
		num_chunks++;
		/* Save off the last place we got a control from */
		if (stcb != NULL) {
			if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
				/*
				 * allow last_control to be NULL if
				 * ASCONF... ASCONF processing will find the
				 * right net later
				 */
				if ((netp != NULL) && (*netp != NULL))
					stcb->asoc.last_control_chunk_from = *netp;
			}
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_audit_log(0xB0, ch->chunk_type);
#endif

		/* check to see if this chunk required auth, but isn't */
		if ((stcb != NULL) && !sctp_auth_disable &&
		    sctp_auth_is_required_chunk(ch->chunk_type,
		    stcb->asoc.local_auth_chunks) &&
		    !stcb->asoc.authenticated) {
			/* "silently" ignore */
			SCTP_STAT_INCR(sctps_recvauthmissing);
			goto next_chunk;
		}
		switch (ch->chunk_type) {
		case SCTP_INITIATION:
			/* must be first and only chunk */
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* We are not interested anymore? */
				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
					/*
					 * collision case where we are
					 * sending to them too
					 */
					;
				} else {
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					*offset = length;
					return (NULL);
				}
			}
			if ((chk_length > SCTP_LARGEST_INIT_ACCEPTED) ||
			    (num_chunks > 1) ||
			    (sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if ((stcb != NULL) &&
			    (SCTP_GET_STATE(&stcb->asoc) ==
			    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
				/* INIT collision while shutting down: re-send SHUTDOWN-ACK */
				sctp_send_shutdown_ack(stcb,
				    stcb->asoc.primary_destination);
				*offset = length;
				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if (netp) {
				sctp_handle_init(m, iphlen, *offset, sh,
				    (struct sctp_init_chunk *)ch, inp,
				    stcb, *netp, &abort_no_unlock, vrf_id);
			}
			if (abort_no_unlock)
				return (NULL);

			*offset = length;
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
			break;
		case SCTP_PAD_CHUNK:
			break;
		case SCTP_INITIATION_ACK:
			/* must be first and only chunk */
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n");
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* We are not interested anymore */
				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
					;
				} else {
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					*offset = length;
					if (stcb) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
						so = SCTP_INP_SO(inp);
						atomic_add_int(&stcb->asoc.refcnt, 1);
						SCTP_TCB_UNLOCK(stcb);
						SCTP_SOCKET_LOCK(so, 1);
						SCTP_TCB_LOCK(stcb);
						atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
						(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
						SCTP_SOCKET_UNLOCK(so, 1);
#endif
					}
					return (NULL);
				}
			}
			if ((num_chunks > 1) ||
			    (sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if ((netp) && (*netp)) {
				ret = sctp_handle_init_ack(m, iphlen, *offset, sh,
				    (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, vrf_id);
			} else {
				ret = -1;
			}
			/*
			 * Special case, I must call the output routine to
			 * get the cookie echoed
			 */
			if (abort_no_unlock)
				return (NULL);

			if ((stcb) && ret == 0)
				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
			*offset = length;
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
			break;
		case SCTP_SELECTIVE_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n");
			SCTP_STAT_INCR(sctps_recvsacks);
			{
				struct sctp_sack_chunk *sack;
				int abort_now = 0;
				uint32_t a_rwnd, cum_ack;
				uint16_t num_seg;
				int nonce_sum_flag;

				if ((stcb == NULL) || (chk_length < sizeof(struct sctp_sack_chunk))) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on sack chunk, too small\n");
			ignore_sack:
					*offset = length;
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					return (NULL);
				}
				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
					/*-
					 * If we have sent a shutdown-ack, we will pay no
					 * attention to a sack sent in to us since
					 * we don't care anymore.
					 */
					goto ignore_sack;
				}
				sack = (struct sctp_sack_chunk *)ch;
				nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM;
				cum_ack = ntohl(sack->sack.cum_tsn_ack);
				num_seg = ntohs(sack->sack.num_gap_ack_blks);
				a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
				    cum_ack,
				    num_seg,
				    a_rwnd
				    );
				stcb->asoc.seen_a_sack_this_pkt = 1;
				if ((stcb->asoc.pr_sctp_cnt == 0) &&
				    (num_seg == 0) &&
				    ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
				    (cum_ack == stcb->asoc.last_acked_seq)) &&
				    (stcb->asoc.saw_sack_with_frags == 0) &&
				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
				    ) {
					/*
					 * We have a SIMPLE sack having no
					 * prior segments and data on sent
					 * queue to be acked.. Use the
					 * faster path sack processing. We
					 * also allow window update sacks
					 * with no missing segments to go
					 * this way too.
					 */
					sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
					    &abort_now);
				} else {
					if (netp && *netp)
						sctp_handle_sack(m, *offset,
						    sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
				}
				if (abort_now) {
					/* ABORT signal from sack processing */
					*offset = length;
					return (NULL);
				}
			}
			break;
		case SCTP_HEARTBEAT_REQUEST:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
			if ((stcb) && netp && *netp) {
				SCTP_STAT_INCR(sctps_recvheartbeat);
				sctp_send_heartbeat_ack(stcb, m, *offset,
				    chk_length, *netp);

				/* He's alive so give him credit */
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
			}
			break;
		case SCTP_HEARTBEAT_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
				/* Its not ours */
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			/* He's alive so give him credit */
			if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
				    stcb->asoc.overall_error_count,
				    0,
				    SCTP_FROM_SCTP_INPUT,
				    __LINE__);
			}
			stcb->asoc.overall_error_count = 0;
			SCTP_STAT_INCR(sctps_recvheartbeatack);
			if (netp && *netp)
				sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
				    stcb, *netp);
			break;
		case SCTP_ABORT_ASSOCIATION:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
			    stcb);
			if ((stcb) && netp && *netp)
				sctp_handle_abort((struct sctp_abort_chunk *)ch,
				    stcb, *netp);
			*offset = length;
			return (NULL);
			break;
		case SCTP_SHUTDOWN:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
			    stcb);
			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if (netp && *netp) {
				int abort_flag = 0;

				sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
				    stcb, *netp, &abort_flag);
				if (abort_flag) {
					*offset = length;
					return (NULL);
				}
			}
			break;
		case SCTP_SHUTDOWN_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", stcb);
			if ((stcb) && (netp) && (*netp))
				sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
			*offset = length;
			return (NULL);
			break;

		case SCTP_OPERATION_ERROR:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
			if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {

				*offset = length;
				return (NULL);
			}
			break;
		case SCTP_COOKIE_ECHO:
			SCTPDBG(SCTP_DEBUG_INPUT3,
			    "SCTP_COOKIE-ECHO, stcb %p\n", stcb);
			if ((stcb) && (stcb->asoc.total_output_queue_size)) {
				;
			} else {
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					/* We are not interested anymore */
					*offset = length;
					return (NULL);
				}
			}
			/*
			 * First are we accepting? We do this again here
			 * sincen it is possible that a previous endpoint
			 * WAS listening responded to a INIT-ACK and then
			 * closed. We opened and bound.. and are now no
			 * longer listening.
			 */

			if ((stcb == NULL) && (inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit)) {
				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
				    (sctp_abort_if_one_2_one_hits_limit)) {
					struct mbuf *oper;
					struct sctp_paramhdr *phdr;

					oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr);
						phdr = mtod(oper,
						    struct sctp_paramhdr *);
						phdr->param_type =
						    htons(SCTP_CAUSE_OUT_OF_RESC);
						phdr->param_length =
						    htons(sizeof(struct sctp_paramhdr));
					}
					/* if the mbuf allocation failed, abort without a cause */
					sctp_abort_association(inp, stcb, m,
					    iphlen, sh, oper, vrf_id);
				}
				*offset = length;
				return (NULL);
			} else {
				struct mbuf *ret_buf;
				struct sctp_inpcb *linp;

				if (stcb) {
					linp = NULL;
				} else {
					linp = inp;
				}

				if (linp) {
					SCTP_ASOC_CREATE_LOCK(linp);
				}
				if (netp) {
					ret_buf =
					    sctp_handle_cookie_echo(m, iphlen,
					    *offset, sh,
					    (struct sctp_cookie_echo_chunk *)ch,
					    &inp, &stcb, netp,
					    auth_skipped,
					    auth_offset,
					    auth_len,
					    &locked_tcb,
					    vrf_id);
				} else {
					ret_buf = NULL;
				}
				if (linp) {
					SCTP_ASOC_CREATE_UNLOCK(linp);
				}
				if (ret_buf == NULL) {
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					SCTPDBG(SCTP_DEBUG_INPUT3,
					    "GAK, null buffer\n");
					auth_skipped = 0;
					*offset = length;
					return (NULL);
				}
				/* if AUTH skipped, see if it verified... */
				if (auth_skipped) {
					got_auth = 1;
					auth_skipped = 0;
				}
				if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
					/*
					 * Restart the timer if we have
					 * pending data
					 */
					struct sctp_tmit_chunk *chk;

					chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
					if (chk) {
						sctp_timer_start(SCTP_TIMER_TYPE_SEND,
						    stcb->sctp_ep, stcb,
						    chk->whoTo);
					}
				}
			}
			break;
		case SCTP_COOKIE_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", stcb);
			if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* We are not interested anymore */
				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
					;
				} else if (stcb) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					so = SCTP_INP_SO(inp);
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
					SCTP_SOCKET_LOCK(so, 1);
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					SCTP_SOCKET_UNLOCK(so, 1);
#endif
					*offset = length;
					return (NULL);
				}
			}
			/* He's alive so give him credit */
			if ((stcb) && netp && *netp) {
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
				sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
			}
			break;
		case SCTP_ECN_ECHO:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n");
			/* He's alive so give him credit */
			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if (stcb) {
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
				sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
				    stcb);
			}
			break;
		case SCTP_ECN_CWR:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n");
			/* He's alive so give him credit */
			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if (stcb) {
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
				sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb);
			}
			break;
		case SCTP_SHUTDOWN_COMPLETE:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", stcb);
			/* must be first and only chunk */
			if ((num_chunks > 1) ||
			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if ((stcb) && netp && *netp) {
				sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
				    stcb, *netp);
			}
			*offset = length;
			return (NULL);
			break;
		case SCTP_ASCONF:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
			/* He's alive so give him credit */
			if (stcb) {
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
				sctp_handle_asconf(m, *offset,
				    (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
				asconf_cnt++;
			}
			break;
		case SCTP_ASCONF_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n");
			if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if ((stcb) && netp && *netp) {
				/* He's alive so give him credit */
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
				sctp_handle_asconf_ack(m, *offset,
				    (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock);
				if (abort_no_unlock)
					return (NULL);
			}
			break;
		case SCTP_FORWARD_CUM_TSN:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n");
			if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			/* He's alive so give him credit */
			if (stcb) {
				int abort_flag = 0;

				stcb->asoc.overall_error_count = 0;
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				*fwd_tsn_seen = 1;
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					/* We are not interested anymore */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					so = SCTP_INP_SO(inp);
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
					SCTP_SOCKET_LOCK(so, 1);
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					SCTP_SOCKET_UNLOCK(so, 1);
#endif
					*offset = length;
					return (NULL);
				}
				sctp_handle_forward_tsn(stcb,
				    (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
				if (abort_flag) {
					*offset = length;
					return (NULL);
				} else {
					if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
						sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
						    stcb->asoc.overall_error_count,
						    0,
						    SCTP_FROM_SCTP_INPUT,
						    __LINE__);
					}
					stcb->asoc.overall_error_count = 0;
				}

			}
			break;
		case SCTP_STREAM_RESET:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
			if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* We are not interested anymore */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				so = SCTP_INP_SO(inp);
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_30);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				SCTP_SOCKET_UNLOCK(so, 1);
#endif
				*offset = length;
				return (NULL);
			}
			if (stcb->asoc.peer_supports_strreset == 0) {
				/*
				 * hmm, peer should have announced this, but
				 * we will turn it on since he is sending us
				 * a stream reset.
				 */
				stcb->asoc.peer_supports_strreset = 1;
			}
			if (sctp_handle_stream_reset(stcb, m, *offset, (struct sctp_stream_reset_out_req *)ch)) {
				/* stop processing */
				*offset = length;
				return (NULL);
			}
			break;
		case SCTP_PACKET_DROPPED:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
			/* re-get it all please */
			if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if (ch && (stcb) && netp && (*netp)) {
				sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
				    stcb, *netp,
				    min(chk_length, (sizeof(chunk_buf) - 4)));

			}
			break;

		case SCTP_AUTHENTICATION:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
			if (sctp_auth_disable)
				goto unknown_chunk;

			if (stcb == NULL) {
				/* save the first AUTH for later processing */
				if (auth_skipped == 0) {
					auth_offset = *offset;
					auth_len = chk_length;
					auth_skipped = 1;
				}
				/* skip this chunk (temporarily) */
				goto next_chunk;
			}
			if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
			    (chk_length > (sizeof(struct sctp_auth_chunk) +
			    SCTP_AUTH_DIGEST_LEN_MAX))) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if (got_auth == 1) {
				/* skip this chunk... it's already auth'd */
				goto next_chunk;
			}
			got_auth = 1;
			if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
			    m, *offset)) {
				/* auth HMAC failed so dump the packet */
				*offset = length;
				return (stcb);
			} else {
				/* remaining chunks are HMAC checked */
				stcb->asoc.authenticated = 1;
			}
			break;

		default:
	unknown_chunk:
			/* it's an unknown chunk! */
			/* upper two type bits (RFC 4960): 0x40 = report, 0x80 = skip-and-continue */
			if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
				struct mbuf *mm;
				struct sctp_paramhdr *phd;

				mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (mm) {
					phd = mtod(mm, struct sctp_paramhdr *);
					/*
					 * We cheat and use param type since
					 * we did not bother to define a
					 * error cause struct. They are the
					 * same basic format with different
					 * names.
					 */
					phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK);
					phd->param_length = htons(chk_length + sizeof(*phd));
					SCTP_BUF_LEN(mm) = sizeof(*phd);
					SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, SCTP_SIZE32(chk_length),
					    M_DONTWAIT);
					if (SCTP_BUF_NEXT(mm)) {
#ifdef SCTP_MBUF_LOGGING
						if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
							struct mbuf *mat;

							mat = SCTP_BUF_NEXT(mm);
							while (mat) {
								if (SCTP_BUF_IS_EXTENDED(mat)) {
									sctp_log_mb(mat, SCTP_MBUF_ICOPY);
								}
								mat = SCTP_BUF_NEXT(mat);
							}
						}
#endif
						sctp_queue_op_err(stcb, mm);
					} else {
						sctp_m_freem(mm);
					}
				}
			}
			if ((ch->chunk_type & 0x80) == 0) {
				/* discard this packet */
				*offset = length;
				return (stcb);
			}	/* else skip this bad chunk and continue... */
			break;
		}		/* switch (ch->chunk_type) */


next_chunk:
		/* get the next chunk */
		*offset += SCTP_SIZE32(chk_length);
		if (*offset >= length) {
			/* no more data left in the mbuf chain */
			break;
		}
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
		    sizeof(struct sctp_chunkhdr), chunk_buf);
		if (ch == NULL) {
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			*offset = length;
			return (NULL);
		}
	}			/* while */

	if (asconf_cnt > 0 && stcb != NULL) {
		sctp_send_asconf_ack(stcb);
	}
	return (stcb);
}


/*
 * Process the ECN bits we have something set so we must look to see if it is
 * ECN(0) or ECN(1) or CE
 */
static void
sctp_process_ecn_marked_a(struct sctp_tcb *stcb, struct sctp_nets *net,
    uint8_t ecn_bits)
{
	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
		;
	} else if ((ecn_bits & SCTP_ECT1_BIT) == SCTP_ECT1_BIT) {
		/*
		 * we only add to the nonce sum for ECT1, ECT0 does not
		 * change the NS bit (that we have yet to find a way to send
		 * it yet).
		 */

		/* ECN Nonce stuff */
		stcb->asoc.receiver_nonce_sum++;
		stcb->asoc.receiver_nonce_sum &= SCTP_SACK_NONCE_SUM;

		/*
		 * Drag up the last_echo point if cumack is larger since we
		 * don't want the point falling way behind by more than
		 * 2^^31 and then having it be incorrect.
		 */
		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
		}
	} else if ((ecn_bits & SCTP_ECT0_BIT) == SCTP_ECT0_BIT) {
		/*
		 * Drag up the last_echo point if cumack is larger since we
		 * don't want the point falling way behind by more than
		 * 2^^31 and then having it be incorrect.
4929 */ 4930 if (compare_with_wrap(stcb->asoc.cumulative_tsn, 4931 stcb->asoc.last_echo_tsn, MAX_TSN)) { 4932 stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn; 4933 } 4934 } 4935 } 4936 4937 static void 4938 sctp_process_ecn_marked_b(struct sctp_tcb *stcb, struct sctp_nets *net, 4939 uint32_t high_tsn, uint8_t ecn_bits) 4940 { 4941 if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) { 4942 /* 4943 * we possibly must notify the sender that a congestion 4944 * window reduction is in order. We do this by adding a ECNE 4945 * chunk to the output chunk queue. The incoming CWR will 4946 * remove this chunk. 4947 */ 4948 if (compare_with_wrap(high_tsn, stcb->asoc.last_echo_tsn, 4949 MAX_TSN)) { 4950 /* Yep, we need to add a ECNE */ 4951 sctp_send_ecn_echo(stcb, net, high_tsn); 4952 stcb->asoc.last_echo_tsn = high_tsn; 4953 } 4954 } 4955 } 4956 4957 #ifdef INVARIANTS 4958 static void 4959 sctp_validate_no_locks(struct sctp_inpcb *inp) 4960 { 4961 struct sctp_tcb *stcb; 4962 4963 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { 4964 if (mtx_owned(&stcb->tcb_mtx)) { 4965 panic("Own lock on stcb at return from input"); 4966 } 4967 } 4968 } 4969 4970 #endif 4971 4972 /* 4973 * common input chunk processing (v4 and v6) 4974 */ 4975 void 4976 sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, 4977 int length, struct sctphdr *sh, struct sctp_chunkhdr *ch, 4978 struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net, 4979 uint8_t ecn_bits, uint32_t vrf_id) 4980 { 4981 /* 4982 * Control chunk processing 4983 */ 4984 uint32_t high_tsn; 4985 int fwd_tsn_seen = 0, data_processed = 0; 4986 struct mbuf *m = *mm; 4987 int abort_flag = 0; 4988 int un_sent; 4989 4990 SCTP_STAT_INCR(sctps_recvdatagrams); 4991 #ifdef SCTP_AUDITING_ENABLED 4992 sctp_audit_log(0xE0, 1); 4993 sctp_auditing(0, inp, stcb, net); 4994 #endif 4995 4996 SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d stcb:%p\n", 4997 m, iphlen, offset, 
	    stcb);
	if (stcb) {
		/* always clear this before beginning a packet */
		stcb->asoc.authenticated = 0;
		stcb->asoc.seen_a_sack_this_pkt = 0;
		SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
		    stcb, stcb->asoc.state);

		if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
			/*-
			 * If we hit here, we had a ref count
			 * up when the assoc was aborted and the
			 * timer is clearing out the assoc, we should
			 * NOT respond to any packet.. its OOTB.
			 */
			SCTP_TCB_UNLOCK(stcb);
			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
			    vrf_id);
			goto out_now;
		}
	}
	if (IS_SCTP_CONTROL(ch)) {
		/* process the control portion of the SCTP packet */
		/* sa_ignore NO_NULL_CHK */
		stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch,
		    inp, stcb, &net, &fwd_tsn_seen, vrf_id);
		if (stcb) {
			/*
			 * This covers us if the cookie-echo was there and
			 * it changes our INP.
			 */
			inp = stcb->sctp_ep;
		}
	} else {
		/*
		 * no control chunks, so pre-process DATA chunks (these
		 * checks are taken care of by control processing)
		 */

		/*
		 * if DATA only packet, and auth is required, then punt...
		 * can't have authenticated without any AUTH (control)
		 * chunks
		 */
		if ((stcb != NULL) && !sctp_auth_disable &&
		    sctp_auth_is_required_chunk(SCTP_DATA,
		    stcb->asoc.local_auth_chunks)) {
			/* "silently" ignore */
			SCTP_STAT_INCR(sctps_recvauthmissing);
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
		}
		if (stcb == NULL) {
			/* out of the blue DATA chunk */
			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
			    vrf_id);
			goto out_now;
		}
		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
			/* v_tag mismatch! drop and count the bad tag */
			SCTP_STAT_INCR(sctps_badvtag);
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
		}
	}

	if (stcb == NULL) {
		/*
		 * no valid TCB for this packet, or we found it's a bad
		 * packet while processing control, or we're done with this
		 * packet (done or skip rest of data), so we drop it...
		 */
		goto out_now;
	}
	/*
	 * DATA chunk processing
	 */
	/* plow through the data chunks while length > offset */

	/*
	 * Rest should be DATA only. Check authentication state if AUTH for
	 * DATA is required.
	 */
	if ((length > offset) && (stcb != NULL) && !sctp_auth_disable &&
	    sctp_auth_is_required_chunk(SCTP_DATA,
	    stcb->asoc.local_auth_chunks) &&
	    !stcb->asoc.authenticated) {
		/* "silently" ignore, but still see if output is needed */
		SCTP_STAT_INCR(sctps_recvauthmissing);
		SCTPDBG(SCTP_DEBUG_AUTH1,
		    "Data chunk requires AUTH, skipped\n");
		goto trigger_send;
	}
	if (length > offset) {
		int retval;

		/*
		 * First check to make sure our state is correct. We would
		 * not get here unless we really did have a tag, so we don't
		 * abort if this happens, just dump the chunk silently.
		 */
		switch (SCTP_GET_STATE(&stcb->asoc)) {
		case SCTP_STATE_COOKIE_ECHOED:
			/*
			 * we consider data with valid tags in this state
			 * shows us the cookie-ack was lost. Imply it was
			 * there.
			 */
			if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
				    stcb->asoc.overall_error_count,
				    0,
				    SCTP_FROM_SCTP_INPUT,
				    __LINE__);
			}
			stcb->asoc.overall_error_count = 0;
			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
			break;
		case SCTP_STATE_COOKIE_WAIT:
			/*
			 * We consider OOTB any data sent during asoc setup.
			 */
			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
			    vrf_id);
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
			/* sa_ignore NOTREACHED */
			break;
		case SCTP_STATE_EMPTY:	/* should not happen */
		case SCTP_STATE_INUSE:	/* should not happen */
		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
		default:
			/* DATA is not acceptable in these states: drop it */
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
			/* sa_ignore NOTREACHED */
			break;
		case SCTP_STATE_OPEN:
		case SCTP_STATE_SHUTDOWN_SENT:
			break;
		}
		/* take care of ECN, part 1. */
		if (stcb->asoc.ecn_allowed &&
		    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
			sctp_process_ecn_marked_a(stcb, net, ecn_bits);
		}
		/* plow through the data chunks while length > offset */
		retval = sctp_process_data(mm, iphlen, &offset, length, sh,
		    inp, stcb, net, &high_tsn);
		if (retval == 2) {
			/*
			 * The association aborted, NO UNLOCK needed since
			 * the association is destroyed.
			 */
			goto out_now;
		}
		data_processed = 1;
		if (retval == 0) {
			/* take care of ecn part 2. */
			if (stcb->asoc.ecn_allowed &&
			    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
				sctp_process_ecn_marked_b(stcb, net, high_tsn,
				    ecn_bits);
			}
		}
		/*
		 * Anything important needs to have been m_copy'ed in
		 * process_data
		 */
	}
	/*
	 * A FORWARD-TSN without processed DATA still requires a SACK
	 * check, since the cumulative TSN may have moved.
	 */
	if ((data_processed == 0) && (fwd_tsn_seen)) {
		int was_a_gap = 0;

		if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
		    stcb->asoc.cumulative_tsn, MAX_TSN)) {
			/* there was a gap before this data was processed */
			was_a_gap = 1;
		}
		sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
		if (abort_flag) {
			/* Again, we aborted so NO UNLOCK needed */
			goto out_now;
		}
	}
	/* trigger send of any chunks in queue...
	 */
trigger_send:
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 2);
	sctp_auditing(1, inp, stcb, net);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT1,
	    "Check for chunk output prw:%d tqe:%d tf=%d\n",
	    stcb->asoc.peers_rwnd,
	    TAILQ_EMPTY(&stcb->asoc.control_send_queue),
	    stcb->asoc.total_flight);
	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);

	/*
	 * Kick the output path if there is queued control traffic, or
	 * unsent data and either window to send it in or a zero window
	 * with nothing in flight (window probe).
	 */
	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue) ||
	    ((un_sent) &&
	    (stcb->asoc.peers_rwnd > 0 ||
	    (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 3);
	sctp_auditing(2, inp, stcb, net);
#endif
	SCTP_TCB_UNLOCK(stcb);
out_now:
#ifdef INVARIANTS
	sctp_validate_no_locks(inp);
#endif
	return;
}



/*
 * IPv4 input entry point for SCTP. 'i_pak' is the received packet and
 * 'off' the IP header length (offset to the SCTP common header).
 * Validates the checksum and header lengths, locates the PCB/TCB pair
 * (which bumps inp's ref-count and locks the stcb), sends ABORT or
 * SHUTDOWN-COMPLETE replies for out-of-the-blue packets, and hands the
 * rest to sctp_common_input_processing(). The mbuf is always freed here.
 */
void
sctp_input(i_pak, off)
	struct mbuf *i_pak;
	int off;

{
#ifdef SCTP_MBUF_LOGGING
	struct mbuf *mat;

#endif
	struct mbuf *m;
	int iphlen;
	uint32_t vrf_id = 0;
	uint8_t ecn_bits;
	struct ip *ip;
	struct sctphdr *sh;
	struct sctp_inpcb *inp = NULL;

	uint32_t check, calc_check;
	struct sctp_nets *net;
	struct sctp_tcb *stcb = NULL;
	struct sctp_chunkhdr *ch;
	int refcount_up = 0;	/* set when inp ref must be dropped on exit */
	int length, mlen, offset;


	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
		SCTP_RELEASE_PKT(i_pak);
		return;
	}
	mlen = SCTP_HEADER_LEN(i_pak);
	iphlen = off;
	m = SCTP_HEADER_TO_CHAIN(i_pak);

	net = NULL;
	SCTP_STAT_INCR(sctps_recvpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);


#ifdef SCTP_MBUF_LOGGING
	/* Log in any input mbufs */
	if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
		mat = m;
		while (mat) {
			if (SCTP_BUF_IS_EXTENDED(mat)) {
				sctp_log_mb(mat, SCTP_MBUF_INPUT);
			}
			mat = SCTP_BUF_NEXT(mat);
		}
	}
#endif
#ifdef SCTP_PACKET_LOGGING
	if (sctp_logging_level & SCTP_LAST_PACKET_TRACING)
		sctp_packet_log(m, mlen);
#endif
	/*
	 * Must take out the iphlen, since mlen expects this (only effect lb
	 * case)
	 */
	mlen -= iphlen;

	/*
	 * Get IP, SCTP, and first chunk header together in first mbuf.
	 */
	ip = mtod(m, struct ip *);
	offset = iphlen + sizeof(*sh) + sizeof(*ch);
	if (SCTP_BUF_LEN(m) < offset) {
		if ((m = m_pullup(m, offset)) == 0) {
			/* m_pullup freed the chain on failure */
			SCTP_STAT_INCR(sctps_hdrops);
			return;
		}
		ip = mtod(m, struct ip *);
	}
	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh));
	SCTPDBG(SCTP_DEBUG_INPUT1,
	    "sctp_input() length:%d iphlen:%d\n", mlen, iphlen);

	/* SCTP does not allow broadcasts or multicasts */
	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
		goto bad;
	}
	if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) {
		/*
		 * We only look at broadcast if its a front state, All
		 * others we will not have a tcb for anyway.
		 */
		goto bad;
	}
	/* validate SCTP checksum */
	check = sh->checksum;	/* save incoming checksum */
	/* a zero checksum on loopback may be accepted as-is (sysctl) */
	if ((check == 0) && (sctp_no_csum_on_loopback) &&
	    ((ip->ip_src.s_addr == ip->ip_dst.s_addr) ||
	    (SCTP_IS_IT_LOOPBACK(m)))
	    ) {
		goto sctp_skip_csum_4;
	}
	sh->checksum = 0;	/* prepare for calc */
	calc_check = sctp_calculate_sum(m, &mlen, iphlen);
	if (calc_check != check) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n",
		    calc_check, check, m, mlen, iphlen);

		/*
		 * Bad checksum: if we can still identify the association,
		 * report a packet-drop to the peer before discarding.
		 */
		stcb = sctp_findassociation_addr(m, iphlen,
		    offset - sizeof(*ch),
		    sh, ch, &inp, &net,
		    vrf_id);
		if ((inp) && (stcb)) {
			sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED);
		} else if ((inp != NULL) && (stcb == NULL)) {
			refcount_up = 1;
		}
		SCTP_STAT_INCR(sctps_badsum);
		SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
		goto bad;
	}
	sh->checksum = calc_check;
sctp_skip_csum_4:
	/* destination port of 0 is illegal, based on RFC2960. */
	if (sh->dest_port == 0) {
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
	/*
	 * validate mbuf chain length with IP payload length
	 *
	 * NOTE(review): this assumes ip_len here excludes the IP header
	 * (i.e. has been adjusted earlier in the input path) -- confirm
	 * against ip_input() for this branch.
	 */
	if (mlen < (ip->ip_len - iphlen)) {
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
	/*
	 * Locate pcb and tcb for datagram sctp_findassociation_addr() wants
	 * IP/SCTP/first chunk header...
	 */
	stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
	    sh, ch, &inp, &net, vrf_id);
	/* inp's ref-count increased && stcb locked */
	if (inp == NULL) {
		struct sctp_init_chunk *init_chk, chunk_buf;

		SCTP_STAT_INCR(sctps_noport);
#ifdef ICMP_BANDLIM
		/*
		 * we use the bandwidth limiting to protect against sending
		 * too many ABORTS all at once. In this case these count the
		 * same as an ICMP message.
		 */
		if (badport_bandlim(0) < 0)
			goto bad;
#endif				/* ICMP_BANDLIM */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "Sending a ABORT from packet entry!\n");
		if (ch->chunk_type == SCTP_INITIATION) {
			/*
			 * we do a trick here to get the INIT tag, dig in
			 * and get the tag from the INIT and put it in the
			 * common header.
			 */
			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
			    iphlen + sizeof(*sh), sizeof(*init_chk),
			    (uint8_t *) & chunk_buf);
			if (init_chk != NULL)
				sh->v_tag = init_chk->init.initiate_tag;
		}
		if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
			/* OOTB SHUTDOWN-ACK is answered with SHUTDOWN-COMPLETE */
			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id);
			goto bad;
		}
		if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
			goto bad;
		}
		/* never ABORT an ABORT (avoids reply loops) */
		if (ch->chunk_type != SCTP_ABORT_ASSOCIATION)
			sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id);
		goto bad;
	} else if (stcb == NULL) {
		refcount_up = 1;
	}
#ifdef IPSEC
	/*
	 * I very much doubt any of the IPSEC stuff will work but I have no
	 * idea, so I will leave it in place.
	 */
	if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) {
		ipsec4stat.in_polvio++;
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
#endif				/* IPSEC */

	/*
	 * common chunk processing
	 */
	length = ip->ip_len + iphlen;
	offset -= sizeof(struct sctp_chunkhdr);

	ecn_bits = ip->ip_tos;

	/* sa_ignore NO_NULL_CHK */
	sctp_common_input_processing(&m, iphlen, offset, length, sh, ch,
	    inp, stcb, net, ecn_bits, vrf_id);
	/* inp's ref-count reduced && stcb unlocked */
	if (m) {
		sctp_m_freem(m);
	}
	if ((inp) && (refcount_up)) {
		/* reduce ref-count */
		SCTP_INP_DECR_REF(inp);
	}
	return;
bad:
	/* error exit: release any lock/ref we still hold and free the mbuf */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
	if ((inp) && (refcount_up)) {
		/* reduce ref-count */
		SCTP_INP_DECR_REF(inp);
	}
	if (m) {
		sctp_m_freem(m);
	}
	return;
}